Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/Kconfig20
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c3
-rw-r--r--drivers/acpi/acpi_ipmi.c525
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h23
-rw-r--r--drivers/acpi/acpica/acglobal.h11
-rw-r--r--drivers/acpi/acpica/achware.h4
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h20
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h18
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h10
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c64
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c14
-rw-r--r--drivers/acpi/acpica/evgpe.c278
-rw-r--r--drivers/acpi/acpica/evgpeblk.c35
-rw-r--r--drivers/acpi/acpica/evgpeinit.c27
-rw-r--r--drivers/acpi/acpica/evgpeutil.c41
-rw-r--r--drivers/acpi/acpica/evmisc.c94
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c79
-rw-r--r--drivers/acpi/acpica/evxfevnt.c602
-rw-r--r--drivers/acpi/acpica/evxfgpe.c696
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c10
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c4
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c34
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c8
-rw-r--r--drivers/acpi/acpica/nsalloc.c15
-rw-r--r--drivers/acpi/acpica/nsdump.c17
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c4
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c7
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c4
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psparse.c27
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c9
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c5
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c3
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/apei/apei-internal.h2
-rw-r--r--drivers/acpi/apei/cper.c311
-rw-r--r--drivers/acpi/apei/einj.c2
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c26
-rw-r--r--drivers/acpi/battery.c15
-rw-r--r--drivers/acpi/bus.c153
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/debugfs.c20
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec.c5
-rw-r--r--drivers/acpi/fan.c27
-rw-r--r--drivers/acpi/glue.c5
-rw-r--r--drivers/acpi/internal.h13
-rw-r--r--drivers/acpi/numa.c8
-rw-r--r--drivers/acpi/nvs.c145
-rw-r--r--drivers/acpi/osl.c48
-rw-r--r--drivers/acpi/pci_root.c35
-rw-r--r--drivers/acpi/power.c128
-rw-r--r--drivers/acpi/proc.c41
-rw-r--r--drivers/acpi/processor_core.c4
-rw-r--r--drivers/acpi/processor_driver.c80
-rw-r--r--drivers/acpi/processor_idle.c28
-rw-r--r--drivers/acpi/processor_throttling.c190
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/scan.c70
-rw-r--r--drivers/acpi/sleep.c23
-rw-r--r--drivers/acpi/sysfs.c19
-rw-r--r--drivers/acpi/thermal.c5
-rw-r--r--drivers/acpi/video.c106
-rw-r--r--drivers/acpi/video_detect.c52
-rw-r--r--drivers/acpi/wakeup.c26
-rw-r--r--drivers/ata/Kconfig30
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/acard-ahci.c528
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/ahci.h3
-rw-r--r--drivers/ata/libahci.c11
-rw-r--r--drivers/ata/libata-core.c5
-rw-r--r--drivers/ata/libata-scsi.c84
-rw-r--r--drivers/ata/pata_hpt366.c55
-rw-r--r--drivers/ata/pata_hpt37x.c258
-rw-r--r--drivers/ata/pata_hpt3x2n.c154
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/ata/sata_vsc.c2
-rw-r--r--drivers/atm/ambassador.c19
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/atm/idt77252.h2
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/solos-pci.c5
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/base.h62
-rw-r--r--drivers/base/bus.c15
-rw-r--r--drivers/base/class.c42
-rw-r--r--drivers/base/core.c65
-rw-r--r--drivers/base/node.c21
-rw-r--r--drivers/base/power/generic_ops.c6
-rw-r--r--drivers/base/power/main.c176
-rw-r--r--drivers/base/power/runtime.c56
-rw-r--r--drivers/base/power/wakeup.c20
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/aoe/Makefile2
-rw-r--r--drivers/block/cciss.c25
-rw-r--r--drivers/block/cciss.h4
-rw-r--r--drivers/block/cciss_cmd.h2
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_main.c7
-rw-r--r--drivers/block/drbd/drbd_nl.c103
-rw-r--r--drivers/block/floppy.c17
-rw-r--r--drivers/block/loop.c14
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/pktcdvd.c22
-rw-r--r--drivers/block/rbd.c19
-rw-r--r--drivers/bluetooth/ath3k.c80
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/cdrom/cdrom.c59
-rw-r--r--drivers/char/Kconfig21
-rw-r--r--drivers/char/Makefile13
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/agp/amd-k7-agp.c19
-rw-r--r--drivers/char/agp/amd64-agp.c9
-rw-r--r--drivers/char/agp/compat_ioctl.c1
-rw-r--r--drivers/char/agp/compat_ioctl.h1
-rw-r--r--drivers/char/agp/frontend.c8
-rw-r--r--drivers/char/agp/generic.c27
-rw-r--r--drivers/char/agp/intel-agp.c36
-rw-r--r--drivers/char/agp/intel-agp.h17
-rw-r--r--drivers/char/agp/intel-gtt.c864
-rw-r--r--drivers/char/bfin_jtag_comm.c8
-rw-r--r--drivers/char/hw_random/via-rng.c10
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c27
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c45
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c52
-rw-r--r--drivers/char/ramoops.c13
-rw-r--r--drivers/char/raw.c14
-rw-r--r--drivers/char/snsc.h1
-rw-r--r--drivers/char/tpm/tpm_tis.c6
-rw-r--r--drivers/char/virtio_console.c28
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/cpuidle/cpuidle.c92
-rw-r--r--drivers/crypto/mv_cesa.c2
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/omap-aes.c260
-rw-r--r--drivers/crypto/omap-sham.c374
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/padlock.h23
-rw-r--r--drivers/dca/dca-core.c2
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/amba-pl08x.c1215
-rw-r--r--drivers/dma/at_hdmac.c19
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/imx-dma.c26
-rw-r--r--drivers/dma/imx-sdma.c88
-rw-r--r--drivers/dma/intel_mid_dma.c39
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/ipu/ipu_idmac.c50
-rw-r--r--drivers/dma/mpc512x_dma.c187
-rw-r--r--drivers/dma/pch_dma.c19
-rw-r--r--drivers/dma/ste_dma40.c191
-rw-r--r--drivers/dma/ste_dma40_ll.c246
-rw-r--r--drivers/dma/ste_dma40_ll.h36
-rw-r--r--drivers/edac/amd64_edac.c28
-rw-r--r--drivers/edac/amd8131_edac.h2
-rw-r--r--drivers/edac/cell_edac.c4
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/i7core_edac.c6
-rw-r--r--drivers/edac/ppc4xx_edac.c6
-rw-r--r--drivers/firewire/Kconfig8
-rw-r--r--drivers/firewire/core-card.c11
-rw-r--r--drivers/firewire/core-cdev.c7
-rw-r--r--drivers/firewire/core-transaction.c58
-rw-r--r--drivers/firewire/core.h4
-rw-r--r--drivers/firewire/net.c56
-rw-r--r--drivers/firewire/nosy.c3
-rw-r--r--drivers/firewire/ohci.c672
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/dmi_scan.c11
-rw-r--r--drivers/gpio/Kconfig14
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/adp5588-gpio.c39
-rw-r--r--drivers/gpio/cs5535-gpio.c145
-rw-r--r--drivers/gpio/langwell_gpio.c25
-rw-r--r--drivers/gpio/max732x.c38
-rw-r--r--drivers/gpio/ml_ioh_gpio.c352
-rw-r--r--drivers/gpio/pca953x.c66
-rw-r--r--drivers/gpio/pl061.c28
-rw-r--r--drivers/gpio/stmpe-gpio.c36
-rw-r--r--drivers/gpio/sx150x.c46
-rw-r--r--drivers/gpio/tc3589x-gpio.c36
-rw-r--r--drivers/gpio/timbgpio.c26
-rw-r--r--drivers/gpio/vr41xx_giu.c48
-rw-r--r--drivers/gpio/wm8994-gpio.c24
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c6
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c51
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c24
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_info.c9
-rw-r--r--drivers/gpu/drm/drm_irq.c576
-rw-r--r--drivers/gpu/drm/drm_mm.c40
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c548
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c824
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c108
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h626
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3830
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c132
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c1377
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c155
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c882
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h308
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c104
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h91
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c17
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c40
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1360
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c89
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h24
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c51
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c39
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c77
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c11
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c116
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c108
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1216
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h154
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c152
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c41
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig4
-rw-r--r--drivers/gpu/drm/nouveau/Makefile17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c335
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c383
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c207
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c66
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h440
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c188
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c117
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c171
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1210
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c420
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c171
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c754
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c300
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.c69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c102
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c240
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c645
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c124
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c244
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c236
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c27
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c422
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c344
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c114
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c198
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c680
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c378
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c175
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c190
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c269
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c365
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c725
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h64
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2874
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c317
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h3
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h48
-rw-r--r--drivers/gpu/drm/radeon/atom.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios.h999
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c174
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c874
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h56
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c5
-rw-r--r--drivers/gpu/drm/radeon/ni.c316
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h86
-rw-r--r--drivers/gpu/drm/radeon/nid.h41
-rw-r--r--drivers/gpu/drm/radeon/r100.c166
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h13
-rw-r--r--drivers/gpu/drm/radeon/r100d.h2
-rw-r--r--drivers/gpu/drm/radeon/r200.c18
-rw-r--r--drivers/gpu/drm/radeon/r300.c83
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r300d.h1
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c413
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c29
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c46
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h57
-rw-r--r--drivers/gpu/drm/radeon/radeon.h161
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c155
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h66
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1327
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c531
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c229
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h40
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c99
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r3006
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r4207
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs6006
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv51523
-rw-r--r--drivers/gpu/drm/radeon/rs400.c15
-rw-r--r--drivers/gpu/drm/radeon/rs600.c134
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv515.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770.c230
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h55
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c156
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c138
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c169
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/gpu/stub/Kconfig3
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c78
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/Kconfig98
-rw-r--r--drivers/hid/Makefile15
-rw-r--r--drivers/hid/hid-3m-pct.c2
-rw-r--r--drivers/hid/hid-a4tech.c6
-rw-r--r--drivers/hid/hid-apple.c63
-rw-r--r--drivers/hid/hid-axff.c14
-rw-r--r--drivers/hid/hid-belkin.c4
-rw-r--r--drivers/hid/hid-cando.c4
-rw-r--r--drivers/hid/hid-cherry.c3
-rw-r--r--drivers/hid/hid-core.c126
-rw-r--r--drivers/hid/hid-cypress.c4
-rw-r--r--drivers/hid/hid-debug.c4
-rw-r--r--drivers/hid/hid-drff.c14
-rw-r--r--drivers/hid/hid-egalax.c2
-rw-r--r--drivers/hid/hid-elecom.c3
-rw-r--r--drivers/hid/hid-emsff.c161
-rw-r--r--drivers/hid/hid-gaff.c13
-rw-r--r--drivers/hid/hid-ids.h18
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/hid/hid-kye.c4
-rw-r--r--drivers/hid/hid-lg.c15
-rw-r--r--drivers/hid/hid-lg2ff.c9
-rw-r--r--drivers/hid/hid-lg3ff.c9
-rw-r--r--drivers/hid/hid-lg4ff.c9
-rw-r--r--drivers/hid/hid-lgff.c8
-rw-r--r--drivers/hid/hid-magicmouse.c20
-rw-r--r--drivers/hid/hid-microsoft.c7
-rw-r--r--drivers/hid/hid-monterey.c3
-rw-r--r--drivers/hid/hid-mosart.c23
-rw-r--r--drivers/hid/hid-multitouch.c516
-rw-r--r--drivers/hid/hid-ntrig.c9
-rw-r--r--drivers/hid/hid-ortek.c3
-rw-r--r--drivers/hid/hid-petalynx.c7
-rw-r--r--drivers/hid/hid-picolcd.c58
-rw-r--r--drivers/hid/hid-pl.c16
-rw-r--r--drivers/hid/hid-prodikeys.c27
-rw-r--r--drivers/hid/hid-quanta.c2
-rw-r--r--drivers/hid/hid-roccat-kone.c404
-rw-r--r--drivers/hid/hid-roccat-kone.h19
-rw-r--r--drivers/hid/hid-roccat-koneplus.c837
-rw-r--r--drivers/hid/hid-roccat-koneplus.h224
-rw-r--r--drivers/hid/hid-roccat-pyra.c411
-rw-r--r--drivers/hid/hid-roccat-pyra.h23
-rw-r--r--drivers/hid/hid-roccat.c53
-rw-r--r--drivers/hid/hid-roccat.h5
-rw-r--r--drivers/hid/hid-samsung.c8
-rw-r--r--drivers/hid/hid-sjoy.c16
-rw-r--r--drivers/hid/hid-sony.c11
-rw-r--r--drivers/hid/hid-stantum.c2
-rw-r--r--drivers/hid/hid-sunplus.c3
-rw-r--r--drivers/hid/hid-tmff.c27
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/hid-wacom.c28
-rw-r--r--drivers/hid/hid-zpff.c11
-rw-r--r--drivers/hid/hid-zydacron.c11
-rw-r--r--drivers/hid/hidraw.c36
-rw-r--r--drivers/hid/usbhid/Kconfig2
-rw-r--r--drivers/hid/usbhid/Makefile6
-rw-r--r--drivers/hid/usbhid/hid-core.c105
-rw-r--r--drivers/hid/usbhid/hid-pidff.c164
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/usbhid/hiddev.c241
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/usbhid/usbkbd.c24
-rw-r--r--drivers/hwmon/Kconfig47
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/abituguru.c36
-rw-r--r--drivers/hwmon/abituguru3.c45
-rw-r--r--drivers/hwmon/ad7414.c1
-rw-r--r--drivers/hwmon/adm9240.c32
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/adt7411.c1
-rw-r--r--drivers/hwmon/adt7470.c4
-rw-r--r--drivers/hwmon/applesmc.c1633
-rw-r--r--drivers/hwmon/asb100.c9
-rw-r--r--drivers/hwmon/asus_atk0110.c30
-rw-r--r--drivers/hwmon/coretemp.c11
-rw-r--r--drivers/hwmon/dme1737.c201
-rw-r--r--drivers/hwmon/ds620.c337
-rw-r--r--drivers/hwmon/emc1403.c18
-rw-r--r--drivers/hwmon/f71805f.c29
-rw-r--r--drivers/hwmon/f71882fg.c23
-rw-r--r--drivers/hwmon/fschmd.c5
-rw-r--r--drivers/hwmon/hp_accel.c13
-rw-r--r--drivers/hwmon/hwmon-vid.c11
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/ibmaem.c4
-rw-r--r--drivers/hwmon/it87.c30
-rw-r--r--drivers/hwmon/jc42.c35
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/lis3lv02d.c23
-rw-r--r--drivers/hwmon/lm63.c59
-rw-r--r--drivers/hwmon/lm70.c5
-rw-r--r--drivers/hwmon/lm78.c14
-rw-r--r--drivers/hwmon/lm85.c23
-rw-r--r--drivers/hwmon/lm93.c21
-rw-r--r--drivers/hwmon/lm95241.c495
-rw-r--r--drivers/hwmon/pc87360.c53
-rw-r--r--drivers/hwmon/pc87427.c26
-rw-r--r--drivers/hwmon/pcf8591.c5
-rw-r--r--drivers/hwmon/pkgtemp.c7
-rw-r--r--drivers/hwmon/sht21.c307
-rw-r--r--drivers/hwmon/sis5595.c10
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/smsc47m1.c31
-rw-r--r--drivers/hwmon/via-cputemp.c35
-rw-r--r--drivers/hwmon/via686a.c24
-rw-r--r--drivers/hwmon/vt1211.c30
-rw-r--r--drivers/hwmon/vt8231.c10
-rw-r--r--drivers/hwmon/w83627ehf.c23
-rw-r--r--drivers/hwmon/w83627hf.c17
-rw-r--r--drivers/hwmon/w83781d.c29
-rw-r--r--drivers/hwmon/w83792d.c44
-rw-r--r--drivers/hwmon/w83793.c38
-rw-r--r--drivers/hwmon/w83795.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c31
-rw-r--r--drivers/i2c/busses/Kconfig8
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c24
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c900
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c6
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.h2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c45
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c10
-rw-r--r--drivers/i2c/busses/i2c-ocores.c145
-rw-r--r--drivers/i2c/busses/i2c-omap.c45
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c200
-rw-r--r--drivers/i2c/i2c-core.c119
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/gpio-i2cmux.c184
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/idle/intel_idle.c89
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/device.c11
-rw-r--r--drivers/infiniband/core/sa_query.c4
-rw-r--r--drivers/infiniband/core/ucma.c22
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c56
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c36
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c10
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c12
-rw-r--r--drivers/infiniband/hw/mthca/Kconfig2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c35
-rw-r--r--drivers/infiniband/hw/nes/nes.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c103
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h10
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c82
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c37
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c155
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c379
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c39
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c80
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c45
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c29
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c57
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h12
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c51
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c65
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c397
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h46
-rw-r--r--drivers/input/Kconfig6
-rw-r--r--drivers/input/gameport/gameport.c2
-rw-r--r--drivers/input/input.c37
-rw-r--r--drivers/input/joystick/Kconfig10
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/as5011.c367
-rw-r--r--drivers/input/keyboard/Kconfig26
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/aaed2000_kbd.c186
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/tegra-kbc.c783
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c5
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/misc/rotary_encoder.c4
-rw-r--r--drivers/input/mouse/Kconfig10
-rw-r--r--drivers/input/mouse/bcm5974.c40
-rw-r--r--drivers/input/mouse/synaptics.c32
-rw-r--r--drivers/input/mouse/synaptics.h23
-rw-r--r--drivers/input/serio/Kconfig8
-rw-r--r--drivers/input/serio/ct82c710.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h21
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/serio.c13
-rw-r--r--drivers/input/serio/serport.c24
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/wacom_sys.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c27
-rw-r--r--drivers/input/touchscreen/Kconfig30
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c17
-rw-r--r--drivers/input/touchscreen/ads7846.c38
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c39
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c15
-rw-r--r--drivers/input/touchscreen/eeti_ts.c16
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c17
-rw-r--r--drivers/input/touchscreen/migor_ts.c12
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c5
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c191
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c4
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c4
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c4
-rw-r--r--drivers/isdn/hardware/eicon/istream.c2
-rw-r--r--drivers/isdn/hardware/mISDN/ipac.h4
-rw-r--r--drivers/isdn/hardware/mISDN/isar.h2
-rw-r--r--drivers/isdn/hisax/isdnl2.c28
-rw-r--r--drivers/isdn/hysdn/hysdn_defs.h2
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c26
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c3
-rw-r--r--drivers/isdn/icn/icn.c3
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c2
-rw-r--r--drivers/leds/leds-lp5521.c57
-rw-r--r--drivers/leds/leds-lp5523.c59
-rw-r--r--drivers/leds/leds-pca9532.c66
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/leds/ledtrig-backlight.c61
-rw-r--r--drivers/leds/ledtrig-gpio.c15
-rw-r--r--drivers/lguest/page_tables.c2
-rw-r--r--drivers/lguest/x86/core.c4
-rw-r--r--drivers/macintosh/macio_asic.c7
-rw-r--r--drivers/macintosh/therm_pm72.c34
-rw-r--r--drivers/macintosh/via-pmu-backlight.c4
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/md/Kconfig24
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bitmap.c12
-rw-r--r--drivers/md/dm-crypt.c618
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-ioctl.c111
-rw-r--r--drivers/md/dm-kcopyd.c57
-rw-r--r--drivers/md/dm-log-userspace-base.c139
-rw-r--r--drivers/md/dm-log-userspace-transfer.c1
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c67
-rw-r--r--drivers/md/dm-raid.c697
-rw-r--r--drivers/md/dm-raid1.c19
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-snap.c62
-rw-r--r--drivers/md/dm-stripe.c27
-rw-r--r--drivers/md/dm-table.c40
-rw-r--r--drivers/md/dm.c29
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c292
-rw-r--r--drivers/md/md.h17
-rw-r--r--drivers/md/multipath.c1
-rw-r--r--drivers/md/raid0.c42
-rw-r--r--drivers/md/raid1.c39
-rw-r--r--drivers/md/raid10.c30
-rw-r--r--drivers/md/raid5.c63
-rw-r--r--drivers/media/common/saa7146_core.c2
-rw-r--r--drivers/media/common/saa7146_fops.c8
-rw-r--r--drivers/media/common/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146_video.c20
-rw-r--r--drivers/media/common/tuners/Kconfig2
-rw-r--r--drivers/media/common/tuners/tda8290.c130
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c6
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c9
-rw-r--r--drivers/media/dvb/frontends/Kconfig2
-rw-r--r--drivers/media/dvb/frontends/af9013.c4
-rw-r--r--drivers/media/dvb/frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb/frontends/mb86a20s.c36
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c2
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c1
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c478
-rw-r--r--drivers/media/radio/radio-maxiradio.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c9
-rw-r--r--drivers/media/rc/ene_ir.c23
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/imon.c60
-rw-r--r--drivers/media/rc/ir-lirc-codec.c6
-rw-r--r--drivers/media/rc/ir-raw.c2
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c52
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c6
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c6
-rw-r--r--drivers/media/rc/rc-main.c28
-rw-r--r--drivers/media/rc/streamzap.c14
-rw-r--r--drivers/media/video/Kconfig11
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/adv7175.c11
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c39
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/cafe_ccic.c15
-rw-r--r--drivers/media/video/cpia2/cpia2.h2
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c65
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c104
-rw-r--r--drivers/media/video/cx18/cx18-driver.c24
-rw-r--r--drivers/media/video/cx18/cx18-driver.h3
-rw-r--r--drivers/media/video/cx18/cx18-streams.h3
-rw-r--r--drivers/media/video/cx18/cx23418.h2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dvb.c5
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c22
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c2
-rw-r--r--drivers/media/video/davinci/vpif.c177
-rw-r--r--drivers/media/video/davinci/vpif.h20
-rw-r--r--drivers/media/video/davinci/vpif_capture.c451
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.c474
-rw-r--r--drivers/media/video/davinci/vpif_display.h2
-rw-r--r--drivers/media/video/davinci/vpss.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c19
-rw-r--r--drivers/media/video/et61x251/et61x251.h24
-rw-r--r--drivers/media/video/gspca/benq.c2
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/cpia1.c2
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/finepix.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/video/gspca/gspca.c210
-rw-r--r--drivers/media/video/gspca/gspca.h2
-rw-r--r--drivers/media/video/gspca/jeilinj.c2
-rw-r--r--drivers/media/video/gspca/jpeg.h4
-rw-r--r--drivers/media/video/gspca/konica.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c2
-rw-r--r--drivers/media/video/gspca/mars.c2
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c8
-rw-r--r--drivers/media/video/gspca/ov534.c29
-rw-r--r--drivers/media/video/gspca/ov534_9.c2
-rw-r--r--drivers/media/video/gspca/pac207.c2
-rw-r--r--drivers/media/video/gspca/pac7302.c4
-rw-r--r--drivers/media/video/gspca/pac7311.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2
-rw-r--r--drivers/media/video/gspca/sonixb.c270
-rw-r--r--drivers/media/video/gspca/sonixj.c155
-rw-r--r--drivers/media/video/gspca/spca1528.c2
-rw-r--r--drivers/media/video/gspca/spca500.c2
-rw-r--r--drivers/media/video/gspca/spca501.c2
-rw-r--r--drivers/media/video/gspca/spca505.c2
-rw-r--r--drivers/media/video/gspca/spca508.c2
-rw-r--r--drivers/media/video/gspca/spca561.c2
-rw-r--r--drivers/media/video/gspca/sq905.c2
-rw-r--r--drivers/media/video/gspca/sq905c.c2
-rw-r--r--drivers/media/video/gspca/sq930x.c2
-rw-r--r--drivers/media/video/gspca/stk014.c2
-rw-r--r--drivers/media/video/gspca/stv0680.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/video/gspca/sunplus.c2
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/gspca/tv8532.c2
-rw-r--r--drivers/media/video/gspca/vc032x.c2
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/video/gspca/zc3xx.c33
-rw-r--r--drivers/media/video/hdpvr/Makefile4
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c32
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c149
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c7
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h8
-rw-r--r--drivers/media/video/ir-kbd-i2c.c25
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c9
-rw-r--r--drivers/media/video/mt9v011.c54
-rw-r--r--drivers/media/video/mt9v011.h36
-rw-r--r--drivers/media/video/omap/omap_vout.c2
-rw-r--r--drivers/media/video/ov7670.c74
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c61
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c51
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c80
-rw-r--r--drivers/media/video/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h74
-rw-r--r--drivers/media/video/sn9c102/sn9c102_sensor.h2
-rw-r--r--drivers/media/video/sr030pc30.c10
-rw-r--r--drivers/media/video/tda9875.c411
-rw-r--r--drivers/media/video/tlg2300/pd-video.c13
-rw-r--r--drivers/media/video/tvp7002.c2
-rw-r--r--drivers/media/video/v4l2-common.c19
-rw-r--r--drivers/media/video/v4l2-ctrls.c34
-rw-r--r--drivers/media/video/v4l2-dev.c9
-rw-r--r--drivers/media/video/v4l2-device.c16
-rw-r--r--drivers/media/video/v4l2-ioctl.c20
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/media/video/w9966.c1
-rw-r--r--drivers/media/video/zoran/zoran_card.c2
-rw-r--r--drivers/memstick/core/memstick.c21
-rw-r--r--drivers/memstick/core/mspro_block.c151
-rw-r--r--drivers/memstick/host/jmb38x_ms.c120
-rw-r--r--drivers/message/fusion/lsi/mpi_log_sas.h2
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--drivers/message/fusion/mptctl.c8
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/fusion/mptscsih.c7
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/mfd/88pm860x-core.c36
-rw-r--r--drivers/mfd/Kconfig16
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab3550-core.c28
-rw-r--r--drivers/mfd/ab8500-core.c306
-rw-r--r--drivers/mfd/ab8500-debugfs.c1016
-rw-r--r--drivers/mfd/ab8500-spi.c143
-rw-r--r--drivers/mfd/asic3.c66
-rw-r--r--drivers/mfd/cs5535-mfd.c151
-rw-r--r--drivers/mfd/davinci_voicecodec.c4
-rw-r--r--drivers/mfd/ezx-pcap.c25
-rw-r--r--drivers/mfd/htc-egpio.c27
-rw-r--r--drivers/mfd/htc-i2cpld.c40
-rw-r--r--drivers/mfd/jz4740-adc.c25
-rw-r--r--drivers/mfd/max8925-core.c30
-rw-r--r--drivers/mfd/max8998-irq.c37
-rw-r--r--drivers/mfd/max8998.c134
-rw-r--r--drivers/mfd/mc13xxx-core.c2
-rw-r--r--drivers/mfd/mfd-core.c4
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c6
-rw-r--r--drivers/mfd/sm501.c9
-rw-r--r--drivers/mfd/stmpe.c28
-rw-r--r--drivers/mfd/t7l66xb.c20
-rw-r--r--drivers/mfd/tc6393xb.c22
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6586x.c46
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-irq.c28
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/mfd/ucb1x00-ts.c12
-rw-r--r--drivers/mfd/vx855.c2
-rw-r--r--drivers/mfd/wm831x-core.c17
-rw-r--r--drivers/mfd/wm831x-i2c.c14
-rw-r--r--drivers/mfd/wm831x-irq.c53
-rw-r--r--drivers/mfd/wm831x-spi.c18
-rw-r--r--drivers/mfd/wm8350-irq.c32
-rw-r--r--drivers/mfd/wm8994-core.c157
-rw-r--r--drivers/mfd/wm8994-irq.c32
-rw-r--r--drivers/misc/Kconfig4
-rw-r--r--drivers/misc/arm-charlcd.c2
-rw-r--r--drivers/misc/bmp085.c1
-rw-r--r--drivers/misc/cs5535-mfgpt.c73
-rw-r--r--drivers/misc/eeprom/at24.c43
-rw-r--r--drivers/misc/tifm_core.c2
-rw-r--r--drivers/misc/vmw_balloon.c15
-rw-r--r--drivers/mmc/card/Kconfig1
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/core/Kconfig11
-rw-r--r--drivers/mmc/core/bus.c8
-rw-r--r--drivers/mmc/core/core.c206
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c206
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c91
-rw-r--r--drivers/mmc/core/mmc_ops.c101
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c16
-rw-r--r--drivers/mmc/core/sdio.c36
-rw-r--r--drivers/mmc/core/sdio_bus.c32
-rw-r--r--drivers/mmc/host/Kconfig43
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/davinci_mmc.c80
-rw-r--r--drivers/mmc/host/dw_mmc.c1796
-rw-r--r--drivers/mmc/host/dw_mmc.h168
-rw-r--r--drivers/mmc/host/jz4740_mmc.c5
-rw-r--r--drivers/mmc/host/mmci.c109
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/msm_sdcc.c52
-rw-r--r--drivers/mmc/host/mxcmmc.c53
-rw-r--r--drivers/mmc/host/sdhci-dove.c70
-rw-r--r--drivers/mmc/host/sdhci-of-core.c13
-rw-r--r--drivers/mmc/host/sdhci-pci.c161
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c102
-rw-r--r--drivers/mmc/host/sdhci-tegra.c257
-rw-r--r--drivers/mmc/host/sdhci.c45
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdricoh_cs.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c561
-rw-r--r--drivers/mmc/host/tmio_mmc.h228
-rw-r--r--drivers/mmc/host/ushc.c1
-rw-r--r--drivers/mtd/Kconfig19
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c55
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c116
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/chips/cfi_util.c2
-rw-r--r--drivers/mtd/chips/fwh_lock.h2
-rw-r--r--drivers/mtd/devices/block2mtd.c10
-rw-r--r--drivers/mtd/devices/m25p80.c39
-rw-r--r--drivers/mtd/devices/sst25l.c4
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c5
-rw-r--r--drivers/mtd/maps/ck804xrom.c7
-rw-r--r--drivers/mtd/maps/esb2rom.c9
-rw-r--r--drivers/mtd/maps/ichxrom.c9
-rw-r--r--drivers/mtd/maps/physmap_of.c4
-rw-r--r--drivers/mtd/maps/scx200_docflash.c5
-rw-r--r--drivers/mtd/maps/tqm8xxl.c2
-rw-r--r--drivers/mtd/mtdchar.c14
-rw-r--r--drivers/mtd/mtdconcat.c1
-rw-r--r--drivers/mtd/mtdoops.c8
-rw-r--r--drivers/mtd/mtdpart.c30
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/ams-delta.c80
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsmc_nand.c89
-rw-r--r--drivers/mtd/nand/jz4740_nand.c57
-rw-r--r--drivers/mtd/nand/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c27
-rw-r--r--drivers/mtd/nand/nand_bbt.c3
-rw-r--r--drivers/mtd/nand/nandsim.c39
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c5
-rw-r--r--drivers/mtd/onenand/omap2.c80
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c10
-rw-r--r--drivers/mtd/onenand/samsung.c7
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/arm/ks8695net.c290
-rw-r--r--drivers/net/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/benet/be_cmds.c5
-rw-r--r--drivers/net/benet/be_main.c4
-rw-r--r--drivers/net/bfin_mac.c83
-rw-r--r--drivers/net/bfin_mac.h11
-rw-r--r--drivers/net/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/bnx2.c21
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x/bnx2x.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c65
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h20
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h988
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c47
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h220
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c234
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c149
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h80
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c9
-rw-r--r--drivers/net/bonding/bond_3ad.c10
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c138
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/pch_can.c5
-rw-r--r--drivers/net/can/softing/Kconfig30
-rw-r--r--drivers/net/can/softing/Makefile6
-rw-r--r--drivers/net/can/softing/softing.h167
-rw-r--r--drivers/net/can/softing/softing_cs.c360
-rw-r--r--drivers/net/can/softing/softing_fw.c691
-rw-r--r--drivers/net/can/softing/softing_main.c894
-rw-r--r--drivers/net/can/softing/softing_platform.h40
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cnic.c45
-rw-r--r--drivers/net/cxgb3/mc5.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c95
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c13
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/depca.c6
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/dm9000.c9
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e1000/e1000_hw.c330
-rw-r--r--drivers/net/e1000/e1000_hw.h62
-rw-r--r--drivers/net/e1000/e1000_main.c47
-rw-r--r--drivers/net/e1000/e1000_osdep.h20
-rw-r--r--drivers/net/e1000e/82571.c83
-rw-r--r--drivers/net/e1000e/Makefile2
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/es2lan.c6
-rw-r--r--drivers/net/e1000e/ethtool.c56
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c81
-rw-r--r--drivers/net/e1000e/lib.c23
-rw-r--r--drivers/net/e1000e/netdev.c330
-rw-r--r--drivers/net/e1000e/param.c6
-rw-r--r--drivers/net/e1000e/phy.c46
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/fec.c249
-rw-r--r--drivers/net/fec.h5
-rw-r--r--drivers/net/forcedeth.c36
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/gianfar.c13
-rw-r--r--drivers/net/gianfar.h10
-rw-r--r--drivers/net/greth.c221
-rw-r--r--drivers/net/greth.h2
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/irda/bfin_sir.h2
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/irda/sh_irda.c14
-rw-r--r--drivers/net/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c753
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c142
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c53
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c214
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h91
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/mlx4/alloc.c3
-rw-r--r--drivers/net/mlx4/catas.c6
-rw-r--r--drivers/net/mlx4/en_main.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/mlx4/main.c17
-rw-r--r--drivers/net/mlx4/mcg.c23
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/niu.c61
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c118
-rw-r--r--drivers/net/pcmcia/axnet_cs.c6
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_deflate.c9
-rw-r--r--drivers/net/ppp_generic.c9
-rw-r--r--drivers/net/ppp_mppe.c7
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/qlcnic/qlcnic.h24
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c63
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/r8169.c277
-rw-r--r--drivers/net/sfc/efx.c18
-rw-r--r--drivers/net/sfc/ethtool.c22
-rw-r--r--drivers/net/sfc/falcon.c25
-rw-r--r--drivers/net/sfc/net_driver.h10
-rw-r--r--drivers/net/sis900.c3
-rw-r--r--drivers/net/skge.c3
-rw-r--r--drivers/net/sky2.c143
-rw-r--r--drivers/net/sky2.h6
-rw-r--r--drivers/net/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/tehuti.c6
-rw-r--r--drivers/net/tg3.c103
-rw-r--r--drivers/net/tg3.h3
-rw-r--r--drivers/net/tile/tilepro.c10
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/ucc_geth.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c246
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/virtio_net.c27
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c93
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c274
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h7
-rw-r--r--drivers/net/vxge/vxge-config.c2
-rw-r--r--drivers/net/vxge/vxge-main.c1
-rw-r--r--drivers/net/vxge/vxge-traffic.h2
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c15
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c14
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c40
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c11
-rw-r--r--drivers/net/wireless/wl1251/acx.h4
-rw-r--r--drivers/net/wireless/wl1251/main.c3
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/acx.h4
-rw-r--r--drivers/net/wireless/wl12xx/spi.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/xen-netfront.c98
-rw-r--r--drivers/net/xilinx_emaclite.c1
-rw-r--r--drivers/nfc/Kconfig30
-rw-r--r--drivers/nfc/Makefile5
-rw-r--r--drivers/nfc/pn544.c893
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c54
-rw-r--r--drivers/of/fdt.c424
-rw-r--r--drivers/of/of_mdio.c26
-rw-r--r--drivers/of/of_net.c48
-rw-r--r--drivers/of/pdt.c112
-rw-r--r--drivers/of/platform.c22
-rw-r--r--drivers/parport/share.c4
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c1
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/msi.h6
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-stub.c7
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c25
-rw-r--r--drivers/pci/pci.h14
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h3
-rw-r--r--drivers/pci/pcie/aspm.c21
-rw-r--r--drivers/pci/pcie/pme.c31
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c23
-rw-r--r--drivers/pci/pcie/portdrv_core.c25
-rw-r--r--drivers/pci/pcie/portdrv_pci.c37
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/m32r_cfc.h2
-rw-r--r--drivers/pcmcia/m32r_pcc.h2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.h1
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c1
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c435
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus_acpi.c10
-rw-r--r--drivers/platform/x86/classmate-laptop.c19
-rw-r--r--drivers/platform/x86/compal-laptop.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c26
-rw-r--r--drivers/platform/x86/eeepc-laptop.c13
-rw-r--r--drivers/platform/x86/eeepc-wmi.c609
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/ideapad-laptop.c259
-rw-r--r--drivers/platform/x86/intel_ips.c2
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c112
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c12
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c133
-rw-r--r--drivers/platform/x86/sony-laptop.c14
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c15
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/platform/x86/wmi.c133
-rw-r--r--drivers/pnp/Makefile6
-rw-r--r--drivers/pnp/core.c7
-rw-r--r--drivers/pnp/driver.c7
-rw-r--r--drivers/pnp/isapnp/Makefile6
-rw-r--r--drivers/pnp/pnpacpi/Makefile3
-rw-r--r--drivers/pnp/pnpacpi/core.c93
-rw-r--r--drivers/pnp/pnpbios/Makefile5
-rw-r--r--drivers/power/Kconfig20
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/collie_battery.c13
-rw-r--r--drivers/power/ds2760_battery.c2
-rw-r--r--drivers/power/gpio-charger.c188
-rw-r--r--drivers/power/intel_mid_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c201
-rw-r--r--drivers/power/jz4740-battery.c13
-rw-r--r--drivers/power/max17042_battery.c239
-rw-r--r--drivers/power/olpc_battery.c114
-rw-r--r--drivers/power/power_supply_core.c6
-rw-r--r--drivers/power/s3c_adc_battery.c16
-rw-r--r--drivers/power/tosa_battery.c13
-rw-r--r--drivers/power/wm97xx_battery.c4
-rw-r--r--drivers/power/z2_battery.c6
-rw-r--r--drivers/pps/Kconfig11
-rw-r--r--drivers/pps/Makefile3
-rw-r--r--drivers/pps/clients/Kconfig7
-rw-r--r--drivers/pps/clients/Makefile1
-rw-r--r--drivers/pps/clients/pps-ktimer.c44
-rw-r--r--drivers/pps/clients/pps-ldisc.c59
-rw-r--r--drivers/pps/clients/pps_parport.c258
-rw-r--r--drivers/pps/generators/Kconfig13
-rw-r--r--drivers/pps/generators/Makefile9
-rw-r--r--drivers/pps/generators/pps_gen_parport.c282
-rw-r--r--drivers/pps/kapi.c210
-rw-r--r--drivers/pps/kc.c122
-rw-r--r--drivers/pps/kc.h46
-rw-r--r--drivers/pps/pps.c156
-rw-r--r--drivers/ps3/Makefile2
-rw-r--r--drivers/rapidio/rio-scan.c162
-rw-r--r--drivers/rapidio/rio-sysfs.c16
-rw-r--r--drivers/rapidio/rio.c76
-rw-r--r--drivers/rapidio/switches/idt_gen2.c95
-rw-r--r--drivers/rapidio/switches/idtcps.c6
-rw-r--r--drivers/rapidio/switches/tsi568.c13
-rw-r--r--drivers/rapidio/switches/tsi57x.c56
-rw-r--r--drivers/regulator/88pm8607.c3
-rw-r--r--drivers/regulator/Kconfig22
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c453
-rw-r--r--drivers/regulator/core.c462
-rw-r--r--drivers/regulator/da903x.c17
-rw-r--r--drivers/regulator/isl6271a-regulator.c8
-rw-r--r--drivers/regulator/lp3971.c10
-rw-r--r--drivers/regulator/lp3972.c10
-rw-r--r--drivers/regulator/max1586.c30
-rw-r--r--drivers/regulator/max8649.c3
-rw-r--r--drivers/regulator/max8660.c14
-rw-r--r--drivers/regulator/max8925-regulator.c3
-rw-r--r--drivers/regulator/max8952.c3
-rw-r--r--drivers/regulator/max8998.c102
-rw-r--r--drivers/regulator/mc13783-regulator.c385
-rw-r--r--drivers/regulator/mc13892-regulator.c635
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c241
-rw-r--r--drivers/regulator/mc13xxx.h101
-rw-r--r--drivers/regulator/pcap-regulator.c7
-rw-r--r--drivers/regulator/pcf50633-regulator.c5
-rw-r--r--drivers/regulator/tps65023-regulator.c9
-rw-r--r--drivers/regulator/tps6507x-regulator.c10
-rw-r--r--drivers/regulator/tps6524x-regulator.c693
-rw-r--r--drivers/regulator/tps6586x-regulator.c15
-rw-r--r--drivers/regulator/twl-regulator.c11
-rw-r--r--drivers/regulator/wm831x-dcdc.c32
-rw-r--r--drivers/regulator/wm831x-ldo.c59
-rw-r--r--drivers/regulator/wm8350-regulator.c24
-rw-r--r--drivers/regulator/wm8400-regulator.c8
-rw-r--r--drivers/regulator/wm8994-regulator.c45
-rw-r--r--drivers/rtc/class.c14
-rw-r--r--drivers/rtc/interface.c557
-rw-r--r--drivers/rtc/rtc-at32ap700x.c19
-rw-r--r--drivers/rtc/rtc-at91rm9200.c20
-rw-r--r--drivers/rtc/rtc-at91sam9.c20
-rw-r--r--drivers/rtc/rtc-bfin.c21
-rw-r--r--drivers/rtc/rtc-cmos.c19
-rw-r--r--drivers/rtc/rtc-dev.c25
-rw-r--r--drivers/rtc/rtc-ds1286.c41
-rw-r--r--drivers/rtc/rtc-ds1305.c43
-rw-r--r--drivers/rtc/rtc-ds1307.c61
-rw-r--r--drivers/rtc/rtc-ds1374.c37
-rw-r--r--drivers/rtc/rtc-ds3232.c14
-rw-r--r--drivers/rtc/rtc-lib.c28
-rw-r--r--drivers/rtc/rtc-m41t80.c30
-rw-r--r--drivers/rtc/rtc-m48t59.c21
-rw-r--r--drivers/rtc/rtc-max6902.c3
-rw-r--r--drivers/rtc/rtc-max8998.c54
-rw-r--r--drivers/rtc/rtc-mrst.c31
-rw-r--r--drivers/rtc/rtc-msm6242.c2
-rw-r--r--drivers/rtc/rtc-mv.c20
-rw-r--r--drivers/rtc/rtc-omap.c34
-rw-r--r--drivers/rtc/rtc-proc.c6
-rw-r--r--drivers/rtc/rtc-rp5c01.c2
-rw-r--r--drivers/rtc/rtc-rs5c372.c48
-rw-r--r--drivers/rtc/rtc-s3c.c12
-rw-r--r--drivers/rtc/rtc-sa1100.c22
-rw-r--r--drivers/rtc/rtc-sh.c11
-rw-r--r--drivers/rtc/rtc-test.c21
-rw-r--r--drivers/rtc/rtc-vr41xx.c38
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/xpram.c4
-rw-r--r--drivers/s390/char/keyboard.c3
-rw-r--r--drivers/s390/char/tape.h8
-rw-r--r--drivers/s390/char/tape_34xx.c59
-rw-r--r--drivers/s390/char/tape_3590.c83
-rw-r--r--drivers/s390/char/tape_class.h1
-rw-r--r--drivers/s390/cio/device.c1
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c149
-rw-r--r--drivers/s390/net/qeth_l2_main.c22
-rw-r--r--drivers/s390/net/qeth_l3_main.c22
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c2
-rw-r--r--drivers/sbus/char/jsflash.c2
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aic7xxx_old/aic7xxx.seq2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg_def.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h11
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c114
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c2
-rw-r--r--drivers/scsi/dc395x.c8
-rw-r--r--drivers/scsi/ipr.c10
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c2
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c19
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c64
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pmcraid.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_lib.c15
-rw-r--r--drivers/scsi/scsi_netlink.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/sd.c111
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sr.c174
-rw-r--r--drivers/scsi/sr.h3
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/sfi/sfi_core.c2
-rw-r--r--drivers/sh/clk/core.c1
-rw-r--r--drivers/sh/intc/chip.c6
-rw-r--r--drivers/spi/Kconfig16
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/amba-pl022.c2
-rw-r--r--drivers/spi/ath79_spi.c292
-rw-r--r--drivers/spi/atmel_spi.c4
-rw-r--r--drivers/spi/dw_spi_mmio.c5
-rw-r--r--drivers/spi/pxa2xx_spi_pci.c61
-rw-r--r--drivers/spi/spi.c92
-rw-r--r--drivers/spi/spi_imx.c6
-rw-r--r--drivers/spi/spi_sh_msiof.c8
-rw-r--r--drivers/spi/spi_tegra.c2
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/ssb/pcmcia.c2
-rw-r--r--drivers/ssb/scan.c10
-rw-r--r--drivers/staging/Kconfig12
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/adis16255/Kconfig11
-rw-r--r--drivers/staging/adis16255/Makefile1
-rw-r--r--drivers/staging/adis16255/adis16255.c468
-rw-r--r--drivers/staging/adis16255/adis16255.h12
-rw-r--r--drivers/staging/asus_oled/asus_oled.c2
-rw-r--r--drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c4
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c4
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c2
-rw-r--r--drivers/staging/autofs/dirhash.c92
-rw-r--r--drivers/staging/batman-adv/Kconfig26
-rw-r--r--drivers/staging/batman-adv/Makefile22
-rw-r--r--drivers/staging/batman-adv/README240
-rw-r--r--drivers/staging/batman-adv/TODO14
-rw-r--r--drivers/staging/batman-adv/aggregation.c275
-rw-r--r--drivers/staging/batman-adv/aggregation.h43
-rw-r--r--drivers/staging/batman-adv/bat_debugfs.c343
-rw-r--r--drivers/staging/batman-adv/bat_debugfs.h33
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c558
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.h42
-rw-r--r--drivers/staging/batman-adv/bitarray.c201
-rw-r--r--drivers/staging/batman-adv/bitarray.h47
-rw-r--r--drivers/staging/batman-adv/hard-interface.c647
-rw-r--r--drivers/staging/batman-adv/hard-interface.h58
-rw-r--r--drivers/staging/batman-adv/hash.c306
-rw-r--r--drivers/staging/batman-adv/hash.h100
-rw-r--r--drivers/staging/batman-adv/icmp_socket.c359
-rw-r--r--drivers/staging/batman-adv/icmp_socket.h34
-rw-r--r--drivers/staging/batman-adv/main.c217
-rw-r--r--drivers/staging/batman-adv/main.h183
-rw-r--r--drivers/staging/batman-adv/originator.c533
-rw-r--r--drivers/staging/batman-adv/originator.h36
-rw-r--r--drivers/staging/batman-adv/packet.h134
-rw-r--r--drivers/staging/batman-adv/ring_buffer.c52
-rw-r--r--drivers/staging/batman-adv/ring_buffer.h28
-rw-r--r--drivers/staging/batman-adv/routing.c1389
-rw-r--r--drivers/staging/batman-adv/routing.h46
-rw-r--r--drivers/staging/batman-adv/send.c580
-rw-r--r--drivers/staging/batman-adv/send.h41
-rw-r--r--drivers/staging/batman-adv/soft-interface.c398
-rw-r--r--drivers/staging/batman-adv/soft-interface.h32
-rw-r--r--drivers/staging/batman-adv/sysfs-class-net-batman-adv14
-rw-r--r--drivers/staging/batman-adv/sysfs-class-net-mesh41
-rw-r--r--drivers/staging/batman-adv/translation-table.c518
-rw-r--r--drivers/staging/batman-adv/translation-table.h45
-rw-r--r--drivers/staging/batman-adv/types.h241
-rw-r--r--drivers/staging/batman-adv/unicast.c269
-rw-r--r--drivers/staging/batman-adv/unicast.h39
-rw-r--r--drivers/staging/batman-adv/vis.c895
-rw-r--r--drivers/staging/batman-adv/vis.h37
-rw-r--r--drivers/staging/bcm/Adapter.h134
-rw-r--r--drivers/staging/bcm/Arp.c94
-rw-r--r--drivers/staging/bcm/Bcmchar.c967
-rw-r--r--drivers/staging/bcm/Bcmnet.c404
-rw-r--r--drivers/staging/bcm/CmHost.c139
-rw-r--r--drivers/staging/bcm/CmHost.h3
-rw-r--r--drivers/staging/bcm/DDRInit.c17
-rw-r--r--drivers/staging/bcm/Debug.c41
-rw-r--r--drivers/staging/bcm/Debug.h88
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c39
-rw-r--r--drivers/staging/bcm/HostMibs.h7
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c10
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h3
-rw-r--r--drivers/staging/bcm/InterfaceDld.c136
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c20
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.h2
-rw-r--r--drivers/staging/bcm/InterfaceInit.c854
-rw-r--r--drivers/staging/bcm/InterfaceInit.h28
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c44
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c29
-rw-r--r--drivers/staging/bcm/InterfaceMisc.h3
-rw-r--r--drivers/staging/bcm/InterfaceRx.c40
-rw-r--r--drivers/staging/bcm/InterfaceTx.c74
-rw-r--r--drivers/staging/bcm/InterfaceTx.h6
-rw-r--r--drivers/staging/bcm/Interfacemain.h10
-rw-r--r--drivers/staging/bcm/LeakyBucket.c72
-rw-r--r--drivers/staging/bcm/Macros.h40
-rw-r--r--drivers/staging/bcm/Makefile4
-rw-r--r--drivers/staging/bcm/Misc.c536
-rw-r--r--drivers/staging/bcm/Osal_Misc.c27
-rw-r--r--drivers/staging/bcm/PHSModule.c234
-rw-r--r--drivers/staging/bcm/PHSModule.h45
-rw-r--r--drivers/staging/bcm/Protocol.h6
-rw-r--r--drivers/staging/bcm/Prototypes.h122
-rw-r--r--drivers/staging/bcm/Qos.c103
-rw-r--r--drivers/staging/bcm/TODO25
-rw-r--r--drivers/staging/bcm/Transmit.c393
-rw-r--r--drivers/staging/bcm/cntrl_SignalingInterface.h254
-rw-r--r--drivers/staging/bcm/headers.h40
-rw-r--r--drivers/staging/bcm/hostmibs.c35
-rw-r--r--drivers/staging/bcm/led_control.c135
-rw-r--r--drivers/staging/bcm/nvm.c642
-rw-r--r--drivers/staging/bcm/nvm.h80
-rw-r--r--drivers/staging/bcm/osal_misc.h49
-rw-r--r--drivers/staging/brcm80211/README10
-rw-r--r--drivers/staging/brcm80211/brcmfmac/README5
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh.c12
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c270
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c59
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c4
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd.h19
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_bus.h6
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_cdc.c40
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_common.c18
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c36
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux.c148
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c1
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_proto.h7
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_sdio.c348
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c586
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h48
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c578
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.h2
-rw-r--r--drivers/staging/brcm80211/include/bcm_rpc.h79
-rw-r--r--drivers/staging/brcm80211/include/bcm_rpc_tp.h137
-rw-r--r--drivers/staging/brcm80211/include/bcm_xdr.h60
-rw-r--r--drivers/staging/brcm80211/include/bcmdefs.h63
-rw-r--r--drivers/staging/brcm80211/include/bcmsdbus.h6
-rw-r--r--drivers/staging/brcm80211/include/bcmsdh.h12
-rw-r--r--drivers/staging/brcm80211/include/bcmsdh_sdmmc.h6
-rw-r--r--drivers/staging/brcm80211/include/bcmsrom.h8
-rw-r--r--drivers/staging/brcm80211/include/bcmutils.h60
-rw-r--r--drivers/staging/brcm80211/include/d11.h7
-rw-r--r--drivers/staging/brcm80211/include/dbus.h353
-rw-r--r--drivers/staging/brcm80211/include/epivers.h44
-rw-r--r--drivers/staging/brcm80211/include/hnddma.h91
-rw-r--r--drivers/staging/brcm80211/include/hndpmu.h44
-rw-r--r--drivers/staging/brcm80211/include/linux_osl.h407
-rw-r--r--drivers/staging/brcm80211/include/linuxver.h38
-rw-r--r--drivers/staging/brcm80211/include/nicpci.h12
-rw-r--r--drivers/staging/brcm80211/include/osl.h209
-rw-r--r--drivers/staging/brcm80211/include/proto/ethernet.h54
-rw-r--r--drivers/staging/brcm80211/include/proto/wpa.h94
-rw-r--r--drivers/staging/brcm80211/include/rpc_osl.h2
-rw-r--r--drivers/staging/brcm80211/include/siutils.h28
-rw-r--r--drivers/staging/brcm80211/include/wlioctl.h342
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_cmn.c63
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_hal.h6
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_int.h4
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_lcn.c13
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phy_n.c59
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c3
-rw-r--r--drivers/staging/brcm80211/phy/wlc_phytbl_n.c3
-rw-r--r--drivers/staging/brcm80211/sys/wl_dbg.h72
-rw-r--r--drivers/staging/brcm80211/sys/wl_export.h8
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c1083
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.h50
-rw-r--r--drivers/staging/brcm80211/sys/wl_ucode.h12
-rw-r--r--drivers/staging/brcm80211/sys/wl_ucode_loader.c13
-rw-r--r--drivers/staging/brcm80211/sys/wlc_alloc.c70
-rw-r--r--drivers/staging/brcm80211/sys/wlc_alloc.h12
-rw-r--r--drivers/staging/brcm80211/sys/wlc_ampdu.c316
-rw-r--r--drivers/staging/brcm80211/sys/wlc_ampdu.h30
-rw-r--r--drivers/staging/brcm80211/sys/wlc_antsel.c49
-rw-r--r--drivers/staging/brcm80211/sys/wlc_antsel.h16
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bmac.c806
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bmac.h182
-rw-r--r--drivers/staging/brcm80211/sys/wlc_bsscfg.h3
-rw-r--r--drivers/staging/brcm80211/sys/wlc_cfg.h24
-rw-r--r--drivers/staging/brcm80211/sys/wlc_channel.c102
-rw-r--r--drivers/staging/brcm80211/sys/wlc_event.c16
-rw-r--r--drivers/staging/brcm80211/sys/wlc_event.h5
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c1309
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.h312
-rw-r--r--drivers/staging/brcm80211/sys/wlc_phy_shim.c14
-rw-r--r--drivers/staging/brcm80211/sys/wlc_pub.h50
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rate.c6
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rpc.h527
-rw-r--r--drivers/staging/brcm80211/sys/wlc_rpctx.h71
-rw-r--r--drivers/staging/brcm80211/sys/wlc_scb.h2
-rw-r--r--drivers/staging/brcm80211/sys/wlc_stf.c72
-rw-r--r--drivers/staging/brcm80211/sys/wlc_stf.h31
-rw-r--r--drivers/staging/brcm80211/sys/wlc_types.h31
-rw-r--r--drivers/staging/brcm80211/util/aiutils.c25
-rw-r--r--drivers/staging/brcm80211/util/bcmotp.c22
-rw-r--r--drivers/staging/brcm80211/util/bcmsrom.c95
-rw-r--r--drivers/staging/brcm80211/util/bcmutils.c112
-rw-r--r--drivers/staging/brcm80211/util/bcmwifi.c4
-rw-r--r--drivers/staging/brcm80211/util/hnddma.c136
-rw-r--r--drivers/staging/brcm80211/util/hndpmu.c174
-rw-r--r--drivers/staging/brcm80211/util/linux_osl.c247
-rw-r--r--drivers/staging/brcm80211/util/nicpci.c125
-rw-r--r--drivers/staging/brcm80211/util/nvram/nvram_ro.c4
-rw-r--r--drivers/staging/brcm80211/util/sbutils.c13
-rw-r--r--drivers/staging/brcm80211/util/siutils.c206
-rw-r--r--drivers/staging/comedi/Kconfig5
-rw-r--r--drivers/staging/comedi/drivers.c30
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c2
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c66
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c7
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c71
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c2
-rw-r--r--drivers/staging/comedi/drivers/s526.c139
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2
-rw-r--r--drivers/staging/cptm1217/Kconfig12
-rw-r--r--drivers/staging/cptm1217/Makefile2
-rw-r--r--drivers/staging/cptm1217/TODO5
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c675
-rw-r--r--drivers/staging/cptm1217/cp_tm1217.h9
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c2
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c6
-rw-r--r--drivers/staging/cs5535_gpio/Kconfig11
-rw-r--r--drivers/staging/cs5535_gpio/Makefile1
-rw-r--r--drivers/staging/cs5535_gpio/TODO6
-rw-r--r--drivers/staging/cs5535_gpio/cs5535_gpio.c (renamed from drivers/char/cs5535_gpio.c)0
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/staging/cxt1e1/comet.c8
-rw-r--r--drivers/staging/cxt1e1/functions.c8
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c4
-rw-r--r--drivers/staging/cxt1e1/linux.c18
-rw-r--r--drivers/staging/cxt1e1/musycc.c58
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c38
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h4
-rw-r--r--drivers/staging/easycap/Kconfig1
-rw-r--r--drivers/staging/easycap/Makefile1
-rw-r--r--drivers/staging/easycap/README93
-rw-r--r--drivers/staging/easycap/easycap.h186
-rw-r--r--drivers/staging/easycap/easycap_debug.h2
-rw-r--r--drivers/staging/easycap/easycap_ioctl.c2286
-rw-r--r--drivers/staging/easycap/easycap_low.c895
-rw-r--r--drivers/staging/easycap/easycap_main.c2774
-rw-r--r--drivers/staging/easycap/easycap_settings.c272
-rw-r--r--drivers/staging/easycap/easycap_sound.c603
-rw-r--r--drivers/staging/easycap/easycap_testcard.c372
-rw-r--r--drivers/staging/et131x/et131x_initpci.c2
-rw-r--r--drivers/staging/frontier/alphatrack.c4
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c54
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c8
-rw-r--r--drivers/staging/ft1000/ft1000-usb/Makefile2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c (renamed from drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c)435
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c386
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c433
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.h4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c373
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c34
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h62
-rw-r--r--drivers/staging/go7007/go7007-driver.c2
-rw-r--r--drivers/staging/hv/Makefile2
-rw-r--r--drivers/staging/hv/blkvsc.c52
-rw-r--r--drivers/staging/hv/blkvsc_drv.c118
-rw-r--r--drivers/staging/hv/channel.c338
-rw-r--r--drivers/staging/hv/channel_mgmt.c244
-rw-r--r--drivers/staging/hv/channel_mgmt.h245
-rw-r--r--drivers/staging/hv/connection.c56
-rw-r--r--drivers/staging/hv/hv.c346
-rw-r--r--drivers/staging/hv/hv.h36
-rw-r--r--drivers/staging/hv/hv_api.h278
-rw-r--r--drivers/staging/hv/hv_utils.c99
-rw-r--r--drivers/staging/hv/netvsc.c939
-rw-r--r--drivers/staging/hv/netvsc.h164
-rw-r--r--drivers/staging/hv/netvsc_api.h64
-rw-r--r--drivers/staging/hv/netvsc_drv.c85
-rw-r--r--drivers/staging/hv/osd.c108
-rw-r--r--drivers/staging/hv/osd.h22
-rw-r--r--drivers/staging/hv/ring_buffer.c404
-rw-r--r--drivers/staging/hv/ring_buffer.h56
-rw-r--r--drivers/staging/hv/rndis.h353
-rw-r--r--drivers/staging/hv/rndis_filter.c640
-rw-r--r--drivers/staging/hv/rndis_filter.h2
-rw-r--r--drivers/staging/hv/storvsc.c584
-rw-r--r--drivers/staging/hv/storvsc_api.h54
-rw-r--r--drivers/staging/hv/storvsc_drv.c95
-rw-r--r--drivers/staging/hv/vmbus.c274
-rw-r--r--drivers/staging/hv/vmbus_api.h24
-rw-r--r--drivers/staging/hv/vmbus_drv.c351
-rw-r--r--drivers/staging/hv/vmbus_private.h7
-rw-r--r--drivers/staging/hv/vstorage.h106
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio627
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-dds93
-rw-r--r--drivers/staging/iio/Kconfig6
-rw-r--r--drivers/staging/iio/Makefile7
-rw-r--r--drivers/staging/iio/TODO4
-rw-r--r--drivers/staging/iio/accel/Kconfig27
-rw-r--r--drivers/staging/iio/accel/Makefile12
-rw-r--r--drivers/staging/iio/accel/accel.h20
-rw-r--r--drivers/staging/iio/accel/adis16201.h150
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c659
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c218
-rw-r--r--drivers/staging/iio/accel/adis16201_trigger.c122
-rw-r--r--drivers/staging/iio/accel/adis16203.h143
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c568
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c211
-rw-r--r--drivers/staging/iio/accel/adis16203_trigger.c122
-rw-r--r--drivers/staging/iio/accel/adis16204.h151
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c613
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c206
-rw-r--r--drivers/staging/iio/accel/adis16204_trigger.c122
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c2
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c2
-rw-r--r--drivers/staging/iio/adc/Kconfig91
-rw-r--r--drivers/staging/iio/adc/Makefile15
-rw-r--r--drivers/staging/iio/adc/ad7150.c877
-rw-r--r--drivers/staging/iio/adc/ad7152.c610
-rw-r--r--drivers/staging/iio/adc/ad7291.c1039
-rw-r--r--drivers/staging/iio/adc/ad7298.c501
-rw-r--r--drivers/staging/iio/adc/ad7314.c308
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c2
-rw-r--r--drivers/staging/iio/adc/ad7745.c734
-rw-r--r--drivers/staging/iio/adc/ad7816.c535
-rw-r--r--drivers/staging/iio/adc/ad7887.h105
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c305
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c266
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c2
-rw-r--r--drivers/staging/iio/adc/adt7310.c952
-rw-r--r--drivers/staging/iio/adc/adt7410.c915
-rw-r--r--drivers/staging/iio/adc/adt75.c732
-rw-r--r--drivers/staging/iio/addac/Kconfig25
-rw-r--r--drivers/staging/iio/addac/Makefile7
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c170
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c180
-rw-r--r--drivers/staging/iio/addac/adt7316.c2402
-rw-r--r--drivers/staging/iio/addac/adt7316.h33
-rw-r--r--drivers/staging/iio/dac/Kconfig21
-rw-r--r--drivers/staging/iio/dac/Makefile6
-rw-r--r--drivers/staging/iio/dac/ad5446.c323
-rw-r--r--drivers/staging/iio/dac/ad5446.h96
-rw-r--r--drivers/staging/iio/dac/ad5624r.h21
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c300
-rw-r--r--drivers/staging/iio/dac/dac.h6
-rw-r--r--drivers/staging/iio/dds/Kconfig56
-rw-r--r--drivers/staging/iio/dds/Makefile11
-rw-r--r--drivers/staging/iio/dds/ad5930.c170
-rw-r--r--drivers/staging/iio/dds/ad9832.c264
-rw-r--r--drivers/staging/iio/dds/ad9834.c477
-rw-r--r--drivers/staging/iio/dds/ad9834.h112
-rw-r--r--drivers/staging/iio/dds/ad9850.c156
-rw-r--r--drivers/staging/iio/dds/ad9852.c305
-rw-r--r--drivers/staging/iio/dds/ad9910.c440
-rw-r--r--drivers/staging/iio/dds/ad9951.c249
-rw-r--r--drivers/staging/iio/dds/dds.h110
-rw-r--r--drivers/staging/iio/gyro/Kconfig38
-rw-r--r--drivers/staging/iio/gyro/Makefile12
-rw-r--r--drivers/staging/iio/gyro/adis16060.h101
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c319
-rw-r--r--drivers/staging/iio/gyro/adis16080.h102
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c271
-rw-r--r--drivers/staging/iio/gyro/adis16130.h108
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c313
-rw-r--r--drivers/staging/iio/gyro/adis16251.h185
-rw-r--r--drivers/staging/iio/gyro/adis16251_core.c777
-rw-r--r--drivers/staging/iio/gyro/adis16260.h3
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c142
-rw-r--r--drivers/staging/iio/gyro/adis16260_platform_data.h19
-rw-r--r--drivers/staging/iio/gyro/gyro.h9
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c1
-rw-r--r--drivers/staging/iio/meter/Kconfig61
-rw-r--r--drivers/staging/iio/meter/Makefile15
-rw-r--r--drivers/staging/iio/meter/ade7753.c730
-rw-r--r--drivers/staging/iio/meter/ade7753.h140
-rw-r--r--drivers/staging/iio/meter/ade7754.c756
-rw-r--r--drivers/staging/iio/meter/ade7754.h161
-rw-r--r--drivers/staging/iio/meter/ade7758.h171
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c866
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c212
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c125
-rw-r--r--drivers/staging/iio/meter/ade7759.c670
-rw-r--r--drivers/staging/iio/meter/ade7759.h122
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c272
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c360
-rw-r--r--drivers/staging/iio/meter/ade7854.c680
-rw-r--r--drivers/staging/iio/meter/ade7854.h245
-rw-r--r--drivers/staging/iio/meter/meter.h396
-rw-r--r--drivers/staging/iio/resolver/Kconfig54
-rw-r--r--drivers/staging/iio/resolver/Makefile7
-rw-r--r--drivers/staging/iio/resolver/ad2s120x.c310
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c872
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c159
-rw-r--r--drivers/staging/iio/sysfs.h6
-rw-r--r--drivers/staging/intel_sst/Kconfig1
-rw-r--r--drivers/staging/intel_sst/intel_sst.c116
-rw-r--r--drivers/staging/intel_sst/intel_sst.h8
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c314
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h10
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c209
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c70
-rw-r--r--drivers/staging/intel_sst/intel_sst_fw_ipc.h113
-rw-r--r--drivers/staging/intel_sst/intel_sst_ioctl.h15
-rw-r--r--drivers/staging/intel_sst/intel_sst_ipc.c194
-rw-r--r--drivers/staging/intel_sst/intel_sst_pvt.c36
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream.c20
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c214
-rw-r--r--drivers/staging/intel_sst/intelmid.c192
-rw-r--r--drivers/staging/intel_sst/intelmid.h5
-rw-r--r--drivers/staging/intel_sst/intelmid_ctrl.c28
-rw-r--r--drivers/staging/intel_sst/intelmid_msic_control.c24
-rw-r--r--drivers/staging/intel_sst/intelmid_pvt.c54
-rw-r--r--drivers/staging/intel_sst/intelmid_v0_control.c32
-rw-r--r--drivers/staging/intel_sst/intelmid_v1_control.c70
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c101
-rw-r--r--drivers/staging/keucr/init.c420
-rw-r--r--drivers/staging/keucr/init.h3
-rw-r--r--drivers/staging/keucr/ms.c10
-rw-r--r--drivers/staging/keucr/smilmain.c4
-rw-r--r--drivers/staging/keucr/smilsub.c4
-rw-r--r--drivers/staging/line6/capture.c2
-rw-r--r--drivers/staging/line6/midi.c4
-rw-r--r--drivers/staging/line6/playback.c2
-rw-r--r--drivers/staging/lirc/TODO.lirc_zilog36
-rw-r--r--drivers/staging/lirc/lirc_imon.c1
-rw-r--r--drivers/staging/lirc/lirc_it87.c1
-rw-r--r--drivers/staging/lirc/lirc_parallel.c19
-rw-r--r--drivers/staging/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/lirc/lirc_serial.c3
-rw-r--r--drivers/staging/lirc/lirc_sir.c1
-rw-r--r--drivers/staging/lirc/lirc_zilog.c678
-rw-r--r--drivers/staging/memrar/memrar.h19
-rw-r--r--drivers/staging/msm/Makefile4
-rw-r--r--drivers/staging/msm/lcdc_toshiba_wvga_pt.c2
-rw-r--r--drivers/staging/msm/msm_fb.c8
-rw-r--r--drivers/staging/msm/msm_fb_bl.c2
-rw-r--r--drivers/staging/msm/tvenc.c5
-rw-r--r--drivers/staging/olpc_dcon/TODO1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c15
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h20
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c168
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c4
-rw-r--r--drivers/staging/phison/phison.c2
-rw-r--r--drivers/staging/pohmelfs/crypto.c6
-rw-r--r--drivers/staging/pohmelfs/net.c2
-rw-r--r--drivers/staging/rt2860/chip/mac_pci.h7
-rw-r--r--drivers/staging/rt2860/chip/mac_usb.h5
-rw-r--r--drivers/staging/rt2860/chip/rtmp_mac.h19
-rw-r--r--drivers/staging/rt2860/chip/rtmp_phy.h14
-rw-r--r--drivers/staging/rt2860/chips/rt3090.c8
-rw-r--r--drivers/staging/rt2860/chips/rt30xx.c13
-rw-r--r--drivers/staging/rt2860/common/ba_action.c18
-rw-r--r--drivers/staging/rt2860/common/cmm_data.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_data_pci.c4
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_pci.c10
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_usb.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c22
-rw-r--r--drivers/staging/rt2860/common/ee_efuse.c15
-rw-r--r--drivers/staging/rt2860/common/mlme.c13
-rw-r--r--drivers/staging/rt2860/common/rt_rf.c3
-rw-r--r--drivers/staging/rt2860/common/rtmp_init.c24
-rw-r--r--drivers/staging/rt2860/common/rtmp_mcu.c4
-rw-r--r--drivers/staging/rt2860/common/spectrum.c2
-rw-r--r--drivers/staging/rt2860/mlme.h41
-rw-r--r--drivers/staging/rt2860/oid.h15
-rw-r--r--drivers/staging/rt2860/pci_main_dev.c22
-rw-r--r--drivers/staging/rt2860/rt_linux.c35
-rw-r--r--drivers/staging/rt2860/rt_linux.h11
-rw-r--r--drivers/staging/rt2860/rt_main_dev.c20
-rw-r--r--drivers/staging/rt2860/rt_pci_rbus.c7
-rw-r--r--drivers/staging/rt2860/rt_usb.c45
-rw-r--r--drivers/staging/rt2860/rtmp.h71
-rw-r--r--drivers/staging/rt2860/rtmp_def.h37
-rw-r--r--drivers/staging/rt2860/rtmp_timer.h13
-rw-r--r--drivers/staging/rt2860/spectrum.h12
-rw-r--r--drivers/staging/rt2860/sta/assoc.c15
-rw-r--r--drivers/staging/rt2860/sta/auth.c10
-rw-r--r--drivers/staging/rt2860/sta/connect.c38
-rw-r--r--drivers/staging/rt2860/sta/rtmp_data.c35
-rw-r--r--drivers/staging/rt2860/sta/sanity.c5
-rw-r--r--drivers/staging/rt2860/sta/sync.c25
-rw-r--r--drivers/staging/rt2860/sta/wpa.c5
-rw-r--r--drivers/staging/rt2860/sta_ioctl.c25
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c21
-rw-r--r--drivers/staging/rt2860/wpa.h5
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c6
-rw-r--r--drivers/staging/rtl8192e/r819xE_phy.c2
-rw-r--r--drivers/staging/rtl8192u/Makefile1
-rw-r--r--drivers/staging/rtl8192u/dot11d.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c47
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c27
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c6
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c6
-rw-r--r--drivers/staging/rtl8712/TODO1
-rw-r--r--drivers/staging/rtl8712/hal_init.c11
-rw-r--r--drivers/staging/rtl8712/osdep_service.h25
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c147
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c2
-rw-r--r--drivers/staging/sep/Kconfig10
-rw-r--r--drivers/staging/sep/Makefile2
-rw-r--r--drivers/staging/sep/TODO5
-rw-r--r--drivers/staging/sep/sep_dev.h156
-rw-r--r--drivers/staging/sep/sep_driver.c3577
-rw-r--r--drivers/staging/sep/sep_driver_api.h297
-rw-r--r--drivers/staging/sep/sep_driver_config.h239
-rw-r--r--drivers/staging/sep/sep_driver_hw_defs.h233
-rw-r--r--drivers/staging/sm7xx/smtcfb.c12
-rw-r--r--drivers/staging/smbfs/dir.c17
-rw-r--r--drivers/staging/smbfs/inode.c4
-rw-r--r--drivers/staging/smbfs/proto.h2
-rw-r--r--drivers/staging/solo6x10/Kconfig4
-rw-r--r--drivers/staging/solo6x10/TODO4
-rw-r--r--drivers/staging/solo6x10/solo6010-core.c42
-rw-r--r--drivers/staging/solo6x10/solo6010-disp.c6
-rw-r--r--drivers/staging/solo6x10/solo6010-enc.c6
-rw-r--r--drivers/staging/solo6x10/solo6010-g723.c22
-rw-r--r--drivers/staging/solo6x10/solo6010-gpio.c4
-rw-r--r--drivers/staging/solo6x10/solo6010-i2c.c13
-rw-r--r--drivers/staging/solo6x10/solo6010-osd-font.h60
-rw-r--r--drivers/staging/solo6x10/solo6010-p2m.c156
-rw-r--r--drivers/staging/solo6x10/solo6010-tw28.c118
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2-enc.c370
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2.c197
-rw-r--r--drivers/staging/solo6x10/solo6010.h39
-rw-r--r--drivers/staging/speakup/kobjects.c11
-rw-r--r--drivers/staging/speakup/main.c33
-rw-r--r--drivers/staging/speakup/spk_types.h2
-rw-r--r--drivers/staging/spectra/ffsport.c61
-rw-r--r--drivers/staging/spectra/ffsport.h1
-rw-r--r--drivers/staging/spectra/flash.c4
-rw-r--r--drivers/staging/spectra/lld_emu.c6
-rw-r--r--drivers/staging/spectra/lld_nand.c177
-rw-r--r--drivers/staging/ste_rmi4/Kconfig9
-rw-r--r--drivers/staging/ste_rmi4/Makefile4
-rw-r--r--drivers/staging/ste_rmi4/TODO7
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c1178
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h50
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c8
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c19
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io_sm.h21
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c2
-rw-r--r--drivers/staging/tm6000/tm6000-video.c46
-rw-r--r--drivers/staging/usbip/stub.h1
-rw-r--r--drivers/staging/usbip/stub_dev.c18
-rw-r--r--drivers/staging/usbip/stub_rx.c4
-rw-r--r--drivers/staging/usbip/vhci.h6
-rw-r--r--drivers/staging/usbip/vhci_hcd.c68
-rw-r--r--drivers/staging/usbip/vhci_rx.c50
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c235
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.h2
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c264
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.h2
-rw-r--r--drivers/staging/vme/devices/vme_user.c79
-rw-r--r--drivers/staging/vme/vme.c116
-rw-r--r--drivers/staging/vme/vme_bridge.h2
-rw-r--r--drivers/staging/vt6655/card.c2
-rw-r--r--drivers/staging/vt6655/iwctl.c2
-rw-r--r--drivers/staging/vt6655/wpa2.c4
-rw-r--r--drivers/staging/vt6656/baseband.c2
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/vt6656/dpc.c4
-rw-r--r--drivers/staging/vt6656/iwctl.c2
-rw-r--r--drivers/staging/vt6656/power.c2
-rw-r--r--drivers/staging/vt6656/rxtx.c3
-rw-r--r--drivers/staging/vt6656/tkip.c37
-rw-r--r--drivers/staging/vt6656/wpa2.c4
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c10
-rw-r--r--drivers/staging/winbond/Makefile1
-rw-r--r--drivers/staging/winbond/core.h2
-rw-r--r--drivers/staging/winbond/mac_structures.h545
-rw-r--r--drivers/staging/winbond/mds.c70
-rw-r--r--drivers/staging/winbond/mds_f.h3
-rw-r--r--drivers/staging/winbond/mds_s.h4
-rw-r--r--drivers/staging/winbond/mlmetxrx.c62
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h23
-rw-r--r--drivers/staging/winbond/mto.c5
-rw-r--r--drivers/staging/winbond/phy_calibration.c5
-rw-r--r--drivers/staging/winbond/phy_calibration.h2
-rw-r--r--drivers/staging/winbond/reg.c36
-rw-r--r--drivers/staging/winbond/sysdef.h31
-rw-r--r--drivers/staging/winbond/wb35reg.c33
-rw-r--r--drivers/staging/winbond/wb35reg_f.h2
-rw-r--r--drivers/staging/winbond/wb35reg_s.h77
-rw-r--r--drivers/staging/winbond/wb35rx.c30
-rw-r--r--drivers/staging/winbond/wb35rx_f.h2
-rw-r--r--drivers/staging/winbond/wb35tx.c31
-rw-r--r--drivers/staging/winbond/wb35tx_f.h1
-rw-r--r--drivers/staging/winbond/wbhal.h (renamed from drivers/staging/winbond/wbhal_s.h)14
-rw-r--r--drivers/staging/winbond/wbhal_f.h81
-rw-r--r--drivers/staging/winbond/wblinux_f.h17
-rw-r--r--drivers/staging/winbond/wbusb.c50
-rw-r--r--drivers/staging/winbond/wbusb_s.h18
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c4
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c45
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h6
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c6
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h10
-rw-r--r--drivers/staging/wlan-ng/p80211types.h4
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c26
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c9
-rw-r--r--drivers/staging/xgifb/vb_setmode.c6
-rw-r--r--drivers/staging/zram/xvmalloc.c6
-rw-r--r--drivers/staging/zram/zram_drv.c7
-rw-r--r--drivers/target/Kconfig32
-rw-r--r--drivers/target/Makefile23
-rw-r--r--drivers/target/target_core_alua.c1991
-rw-r--r--drivers/target/target_core_alua.h126
-rw-r--r--drivers/target/target_core_cdb.c1131
-rw-r--r--drivers/target/target_core_configfs.c3240
-rw-r--r--drivers/target/target_core_device.c1693
-rw-r--r--drivers/target/target_core_fabric_configfs.c1034
-rw-r--r--drivers/target/target_core_fabric_lib.c451
-rw-r--r--drivers/target/target_core_file.c688
-rw-r--r--drivers/target/target_core_file.h50
-rw-r--r--drivers/target/target_core_hba.c185
-rw-r--r--drivers/target/target_core_hba.h7
-rw-r--r--drivers/target/target_core_iblock.c810
-rw-r--r--drivers/target/target_core_iblock.h40
-rw-r--r--drivers/target/target_core_pr.c4252
-rw-r--r--drivers/target/target_core_pr.h67
-rw-r--r--drivers/target/target_core_pscsi.c1470
-rw-r--r--drivers/target/target_core_pscsi.h65
-rw-r--r--drivers/target/target_core_rd.c1091
-rw-r--r--drivers/target/target_core_rd.h73
-rw-r--r--drivers/target/target_core_scdb.c105
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_tmr.c404
-rw-r--r--drivers/target/target_core_tpg.c839
-rw-r--r--drivers/target/target_core_transport.c6164
-rw-r--r--drivers/target/target_core_ua.c332
-rw-r--r--drivers/target/target_core_ua.h36
-rw-r--r--drivers/telephony/ixj.c6
-rw-r--r--drivers/thermal/thermal_sys.c122
-rw-r--r--drivers/tty/Makefile2
-rw-r--r--drivers/tty/hvc/Makefile12
-rw-r--r--drivers/tty/hvc/hvc_beat.c (renamed from drivers/char/hvc_beat.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.c (renamed from drivers/char/hvc_console.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.h (renamed from drivers/char/hvc_console.h)0
-rw-r--r--drivers/tty/hvc/hvc_dcc.c (renamed from drivers/char/hvc_dcc.c)0
-rw-r--r--drivers/tty/hvc/hvc_irq.c (renamed from drivers/char/hvc_irq.c)0
-rw-r--r--drivers/tty/hvc/hvc_iseries.c (renamed from drivers/char/hvc_iseries.c)0
-rw-r--r--drivers/tty/hvc/hvc_iucv.c (renamed from drivers/char/hvc_iucv.c)0
-rw-r--r--drivers/tty/hvc/hvc_rtas.c (renamed from drivers/char/hvc_rtas.c)0
-rw-r--r--drivers/tty/hvc/hvc_tile.c (renamed from drivers/char/hvc_tile.c)0
-rw-r--r--drivers/tty/hvc/hvc_udbg.c (renamed from drivers/char/hvc_udbg.c)0
-rw-r--r--drivers/tty/hvc/hvc_vio.c (renamed from drivers/char/hvc_vio.c)2
-rw-r--r--drivers/tty/hvc/hvc_xen.c (renamed from drivers/char/hvc_xen.c)0
-rw-r--r--drivers/tty/hvc/hvcs.c (renamed from drivers/char/hvcs.c)0
-rw-r--r--drivers/tty/hvc/hvsi.c (renamed from drivers/char/hvsi.c)0
-rw-r--r--drivers/tty/n_gsm.c1
-rw-r--r--drivers/tty/n_hdlc.c90
-rw-r--r--drivers/tty/serial/21285.c (renamed from drivers/serial/21285.c)0
-rw-r--r--drivers/tty/serial/68328serial.c (renamed from drivers/serial/68328serial.c)29
-rw-r--r--drivers/tty/serial/68328serial.h (renamed from drivers/serial/68328serial.h)0
-rw-r--r--drivers/tty/serial/68360serial.c (renamed from drivers/serial/68360serial.c)1
-rw-r--r--drivers/tty/serial/8250.c (renamed from drivers/serial/8250.c)3
-rw-r--r--drivers/tty/serial/8250.h (renamed from drivers/serial/8250.h)0
-rw-r--r--drivers/tty/serial/8250_accent.c (renamed from drivers/serial/8250_accent.c)0
-rw-r--r--drivers/tty/serial/8250_acorn.c (renamed from drivers/serial/8250_acorn.c)0
-rw-r--r--drivers/tty/serial/8250_boca.c (renamed from drivers/serial/8250_boca.c)0
-rw-r--r--drivers/tty/serial/8250_early.c (renamed from drivers/serial/8250_early.c)0
-rw-r--r--drivers/tty/serial/8250_exar_st16c554.c (renamed from drivers/serial/8250_exar_st16c554.c)0
-rw-r--r--drivers/tty/serial/8250_fourport.c (renamed from drivers/serial/8250_fourport.c)0
-rw-r--r--drivers/tty/serial/8250_gsc.c (renamed from drivers/serial/8250_gsc.c)0
-rw-r--r--drivers/tty/serial/8250_hp300.c (renamed from drivers/serial/8250_hp300.c)0
-rw-r--r--drivers/tty/serial/8250_hub6.c (renamed from drivers/serial/8250_hub6.c)0
-rw-r--r--drivers/tty/serial/8250_mca.c (renamed from drivers/serial/8250_mca.c)0
-rw-r--r--drivers/tty/serial/8250_pci.c (renamed from drivers/serial/8250_pci.c)0
-rw-r--r--drivers/tty/serial/8250_pnp.c (renamed from drivers/serial/8250_pnp.c)0
-rw-r--r--drivers/tty/serial/Kconfig (renamed from drivers/serial/Kconfig)77
-rw-r--r--drivers/tty/serial/Makefile (renamed from drivers/serial/Makefile)0
-rw-r--r--drivers/tty/serial/altera_jtaguart.c (renamed from drivers/serial/altera_jtaguart.c)0
-rw-r--r--drivers/tty/serial/altera_uart.c (renamed from drivers/serial/altera_uart.c)0
-rw-r--r--drivers/tty/serial/amba-pl010.c (renamed from drivers/serial/amba-pl010.c)0
-rw-r--r--drivers/tty/serial/amba-pl011.c (renamed from drivers/serial/amba-pl011.c)0
-rw-r--r--drivers/tty/serial/apbuart.c (renamed from drivers/serial/apbuart.c)0
-rw-r--r--drivers/tty/serial/apbuart.h (renamed from drivers/serial/apbuart.h)0
-rw-r--r--drivers/tty/serial/atmel_serial.c (renamed from drivers/serial/atmel_serial.c)5
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c (renamed from drivers/serial/bcm63xx_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_5xx.c (renamed from drivers/serial/bfin_5xx.c)653
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c (renamed from drivers/serial/bfin_sport_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h (renamed from drivers/serial/bfin_sport_uart.h)0
-rw-r--r--drivers/tty/serial/clps711x.c (renamed from drivers/serial/clps711x.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile (renamed from drivers/serial/cpm_uart/Makefile)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h (renamed from drivers/serial/cpm_uart/cpm_uart.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c (renamed from drivers/serial/cpm_uart/cpm_uart_core.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.h)0
-rw-r--r--drivers/tty/serial/crisv10.c (renamed from drivers/serial/crisv10.c)0
-rw-r--r--drivers/tty/serial/crisv10.h (renamed from drivers/serial/crisv10.h)0
-rw-r--r--drivers/tty/serial/dz.c (renamed from drivers/serial/dz.c)0
-rw-r--r--drivers/tty/serial/dz.h (renamed from drivers/serial/dz.h)0
-rw-r--r--drivers/tty/serial/icom.c (renamed from drivers/serial/icom.c)0
-rw-r--r--drivers/tty/serial/icom.h (renamed from drivers/serial/icom.h)0
-rw-r--r--drivers/tty/serial/ifx6x60.c (renamed from drivers/serial/ifx6x60.c)0
-rw-r--r--drivers/tty/serial/ifx6x60.h (renamed from drivers/serial/ifx6x60.h)0
-rw-r--r--drivers/tty/serial/imx.c (renamed from drivers/serial/imx.c)0
-rw-r--r--drivers/tty/serial/ioc3_serial.c (renamed from drivers/serial/ioc3_serial.c)0
-rw-r--r--drivers/tty/serial/ioc4_serial.c (renamed from drivers/serial/ioc4_serial.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.c (renamed from drivers/serial/ip22zilog.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.h (renamed from drivers/serial/ip22zilog.h)0
-rw-r--r--drivers/tty/serial/jsm/Makefile (renamed from drivers/serial/jsm/Makefile)0
-rw-r--r--drivers/tty/serial/jsm/jsm.h (renamed from drivers/serial/jsm/jsm.h)0
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c (renamed from drivers/serial/jsm/jsm_driver.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c (renamed from drivers/serial/jsm/jsm_neo.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c (renamed from drivers/serial/jsm/jsm_tty.c)0
-rw-r--r--drivers/tty/serial/kgdboc.c (renamed from drivers/serial/kgdboc.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.c (renamed from drivers/serial/m32r_sio.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.h (renamed from drivers/serial/m32r_sio.h)0
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h (renamed from drivers/serial/m32r_sio_reg.h)0
-rw-r--r--drivers/tty/serial/max3100.c (renamed from drivers/serial/max3100.c)2
-rw-r--r--drivers/tty/serial/max3107-aava.c (renamed from drivers/serial/max3107-aava.c)0
-rw-r--r--drivers/tty/serial/max3107.c (renamed from drivers/serial/max3107.c)2
-rw-r--r--drivers/tty/serial/max3107.h (renamed from drivers/serial/max3107.h)0
-rw-r--r--drivers/tty/serial/mcf.c (renamed from drivers/serial/mcf.c)0
-rw-r--r--drivers/tty/serial/mfd.c (renamed from drivers/serial/mfd.c)0
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c (renamed from drivers/serial/mpc52xx_uart.c)0
-rw-r--r--drivers/tty/serial/mpsc.c (renamed from drivers/serial/mpsc.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.c (renamed from drivers/serial/mrst_max3110.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.h (renamed from drivers/serial/mrst_max3110.h)0
-rw-r--r--drivers/tty/serial/msm_serial.c (renamed from drivers/serial/msm_serial.c)0
-rw-r--r--drivers/tty/serial/msm_serial.h (renamed from drivers/serial/msm_serial.h)0
-rw-r--r--drivers/tty/serial/mux.c (renamed from drivers/serial/mux.c)0
-rw-r--r--drivers/tty/serial/netx-serial.c (renamed from drivers/serial/netx-serial.c)0
-rw-r--r--drivers/tty/serial/nwpserial.c (renamed from drivers/serial/nwpserial.c)0
-rw-r--r--drivers/tty/serial/of_serial.c (renamed from drivers/serial/of_serial.c)1
-rw-r--r--drivers/tty/serial/omap-serial.c (renamed from drivers/serial/omap-serial.c)0
-rw-r--r--drivers/tty/serial/pch_uart.c (renamed from drivers/serial/pch_uart.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.c (renamed from drivers/serial/pmac_zilog.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.h (renamed from drivers/serial/pmac_zilog.h)0
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c (renamed from drivers/serial/pnx8xxx_uart.c)0
-rw-r--r--drivers/tty/serial/pxa.c (renamed from drivers/serial/pxa.c)0
-rw-r--r--drivers/tty/serial/s3c2400.c (renamed from drivers/serial/s3c2400.c)0
-rw-r--r--drivers/tty/serial/s3c2410.c (renamed from drivers/serial/s3c2410.c)0
-rw-r--r--drivers/tty/serial/s3c2412.c (renamed from drivers/serial/s3c2412.c)0
-rw-r--r--drivers/tty/serial/s3c2440.c (renamed from drivers/serial/s3c2440.c)0
-rw-r--r--drivers/tty/serial/s3c24a0.c (renamed from drivers/serial/s3c24a0.c)0
-rw-r--r--drivers/tty/serial/s3c6400.c (renamed from drivers/serial/s3c6400.c)0
-rw-r--r--drivers/tty/serial/s5pv210.c (renamed from drivers/serial/s5pv210.c)0
-rw-r--r--drivers/tty/serial/sa1100.c (renamed from drivers/serial/sa1100.c)0
-rw-r--r--drivers/tty/serial/samsung.c (renamed from drivers/serial/samsung.c)4
-rw-r--r--drivers/tty/serial/samsung.h (renamed from drivers/serial/samsung.h)0
-rw-r--r--drivers/tty/serial/sb1250-duart.c (renamed from drivers/serial/sb1250-duart.c)2
-rw-r--r--drivers/tty/serial/sc26xx.c (renamed from drivers/serial/sc26xx.c)0
-rw-r--r--drivers/tty/serial/serial_core.c (renamed from drivers/serial/serial_core.c)0
-rw-r--r--drivers/tty/serial/serial_cs.c (renamed from drivers/serial/serial_cs.c)1
-rw-r--r--drivers/tty/serial/serial_ks8695.c (renamed from drivers/serial/serial_ks8695.c)0
-rw-r--r--drivers/tty/serial/serial_lh7a40x.c (renamed from drivers/serial/serial_lh7a40x.c)0
-rw-r--r--drivers/tty/serial/serial_txx9.c (renamed from drivers/serial/serial_txx9.c)0
-rw-r--r--drivers/tty/serial/sh-sci.c (renamed from drivers/serial/sh-sci.c)102
-rw-r--r--drivers/tty/serial/sh-sci.h (renamed from drivers/serial/sh-sci.h)153
-rw-r--r--drivers/tty/serial/sn_console.c (renamed from drivers/serial/sn_console.c)0
-rw-r--r--drivers/tty/serial/suncore.c (renamed from drivers/serial/suncore.c)0
-rw-r--r--drivers/tty/serial/suncore.h (renamed from drivers/serial/suncore.h)0
-rw-r--r--drivers/tty/serial/sunhv.c (renamed from drivers/serial/sunhv.c)0
-rw-r--r--drivers/tty/serial/sunsab.c (renamed from drivers/serial/sunsab.c)0
-rw-r--r--drivers/tty/serial/sunsab.h (renamed from drivers/serial/sunsab.h)0
-rw-r--r--drivers/tty/serial/sunsu.c (renamed from drivers/serial/sunsu.c)0
-rw-r--r--drivers/tty/serial/sunzilog.c (renamed from drivers/serial/sunzilog.c)0
-rw-r--r--drivers/tty/serial/sunzilog.h (renamed from drivers/serial/sunzilog.h)0
-rw-r--r--drivers/tty/serial/timbuart.c (renamed from drivers/serial/timbuart.c)0
-rw-r--r--drivers/tty/serial/timbuart.h (renamed from drivers/serial/timbuart.h)0
-rw-r--r--drivers/tty/serial/uartlite.c (renamed from drivers/serial/uartlite.c)0
-rw-r--r--drivers/tty/serial/ucc_uart.c (renamed from drivers/serial/ucc_uart.c)0
-rw-r--r--drivers/tty/serial/vr41xx_siu.c (renamed from drivers/serial/vr41xx_siu.c)0
-rw-r--r--drivers/tty/serial/vt8500_serial.c (renamed from drivers/serial/vt8500_serial.c)0
-rw-r--r--drivers/tty/serial/zs.c (renamed from drivers/serial/zs.c)0
-rw-r--r--drivers/tty/serial/zs.h (renamed from drivers/serial/zs.h)0
-rw-r--r--drivers/tty/sysrq.c19
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/vt/selection.c4
-rw-r--r--drivers/tty/vt/vc_screen.c16
-rw-r--r--drivers/tty/vt/vt.c135
-rw-r--r--drivers/tty/vt/vt_ioctl.c60
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/driver.c7
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c44
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c268
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h9
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/f_phonet.c15
-rw-r--r--drivers/usb/gadget/imx_udc.c2
-rw-r--r--drivers/usb/gadget/langwell_udc.c4
-rw-r--r--drivers/usb/gadget/pch_udc.c127
-rw-r--r--drivers/usb/gadget/printer.c19
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/storage_common.c7
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c13
-rw-r--r--drivers/usb/host/ehci-fsl.h3
-rw-r--r--drivers/usb/host/ehci-hcd.c19
-rw-r--r--drivers/usb/host/ehci-hub.c7
-rw-r--r--drivers/usb/host/ehci-mxc.c25
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-pci.c35
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c1
-rw-r--r--drivers/usb/host/fhci-hcd.c4
-rw-r--r--drivers/usb/host/fhci-tds.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c11
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c9
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c131
-rw-r--r--drivers/usb/host/xhci.c74
-rw-r--r--drivers/usb/host/xhci.h18
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/ldusb.c2
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/uss720.c1
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.c11
-rw-r--r--drivers/usb/musb/musb_core.h13
-rw-r--r--drivers/usb/musb/musb_debugfs.c1
-rw-r--r--drivers/usb/musb/musb_dma.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h8
-rw-r--r--drivers/usb/musb/musb_host.c11
-rw-r--r--drivers/usb/musb/musbhsdma.h19
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c2
-rw-r--r--drivers/usb/otg/ulpi.c2
-rw-r--r--drivers/usb/serial/ch341.c10
-rw-r--r--drivers/usb/serial/cp210x.c16
-rw-r--r--drivers/usb/serial/digi_acceleport.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c39
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h32
-rw-r--r--drivers/usb/serial/generic.c20
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/io_tables.h1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.h4
-rw-r--r--drivers/usb/serial/keyspan_pda.c17
-rw-r--r--drivers/usb/serial/moto_modem.c1
-rw-r--r--drivers/usb/serial/option.c24
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/serial/pl2303.c12
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcaux.c3
-rw-r--r--drivers/usb/serial/siemens_mpi.c1
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/spcp8x5.c7
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/usb-serial.c8
-rw-r--r--drivers/usb/serial/usb_debug.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c15
-rw-r--r--drivers/usb/serial/visor.c12
-rw-r--r--drivers/usb/storage/unusual_cypress.h5
-rw-r--r--drivers/usb/storage/unusual_devs.h32
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/Kconfig20
-rw-r--r--drivers/video/arkfb.c12
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/aty128fb.c14
-rw-r--r--drivers/video/aty/atyfb_base.c12
-rw-r--r--drivers/video/aty/radeon_backlight.c2
-rw-r--r--drivers/video/aty/radeon_pm.c10
-rw-r--r--drivers/video/backlight/88pm860x_bl.c6
-rw-r--r--drivers/video/backlight/l4f00242t03.c106
-rw-r--r--drivers/video/backlight/ltv350qv.c9
-rw-r--r--drivers/video/backlight/max8925_bl.c2
-rw-r--r--drivers/video/bf537-lq035.c58
-rw-r--r--drivers/video/chipsfb.c8
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c42
-rw-r--r--drivers/video/console/vgacon.c9
-rw-r--r--drivers/video/da8xx-fb.c11
-rw-r--r--drivers/video/ep93xx-fb.c6
-rw-r--r--drivers/video/fbmem.c12
-rw-r--r--drivers/video/fbsysfs.c20
-rw-r--r--drivers/video/geode/gxfb_core.c8
-rw-r--r--drivers/video/geode/lxfb_core.c8
-rw-r--r--drivers/video/i810/i810_main.c8
-rw-r--r--drivers/video/imxfb.c2
-rw-r--r--drivers/video/jz4740_fb.c8
-rw-r--r--drivers/video/matrox/matroxfb_base.c70
-rw-r--r--drivers/video/modedb.c420
-rw-r--r--drivers/video/mx3fb.c8
-rw-r--r--drivers/video/nuc900fb.c11
-rw-r--r--drivers/video/nvidia/nv_backlight.c2
-rw-r--r--drivers/video/nvidia/nvidia.c8
-rw-r--r--drivers/video/omap2/displays/Kconfig27
-rw-r--r--drivers/video/omap2/displays/Makefile5
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c365
-rw-r--r--drivers/video/omap2/displays/panel-generic.c174
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c325
-rw-r--r--drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c165
-rw-r--r--drivers/video/omap2/displays/panel-taal.c2
-rw-r--r--drivers/video/omap2/displays/panel-toppoly-tdo35s.c164
-rw-r--r--drivers/video/omap2/dss/dispc.c636
-rw-r--r--drivers/video/omap2/dss/dpi.c40
-rw-r--r--drivers/video/omap2/dss/dsi.c27
-rw-r--r--drivers/video/omap2/dss/dss.h35
-rw-r--r--drivers/video/omap2/dss/dss_features.c66
-rw-r--r--drivers/video/omap2/dss/dss_features.h10
-rw-r--r--drivers/video/omap2/dss/manager.c80
-rw-r--r--drivers/video/omap2/dss/overlay.c55
-rw-r--r--drivers/video/omap2/dss/rfbi.c20
-rw-r--r--drivers/video/omap2/dss/sdi.c24
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c5
-rw-r--r--drivers/video/ps3fb.c16
-rw-r--r--drivers/video/pxa168fb.c6
-rw-r--r--drivers/video/pxa3xx-gcu.c4
-rw-r--r--drivers/video/riva/fbdev.c2
-rw-r--r--drivers/video/s3c2410fb.c5
-rw-r--r--drivers/video/s3fb.c16
-rw-r--r--drivers/video/savage/savagefb_driver.c8
-rw-r--r--drivers/video/sh_mobile_hdmi.c106
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/sm501fb.c8
-rw-r--r--drivers/video/sstfb.c2
-rw-r--r--drivers/video/tmiofb.c10
-rw-r--r--drivers/video/udlfb.c5
-rw-r--r--drivers/video/via/viafbdev.c8
-rw-r--r--drivers/video/vt8500lcdfb.c28
-rw-r--r--drivers/video/vt8623fb.c12
-rw-r--r--drivers/video/xen-fbfront.c23
-rw-r--r--drivers/virtio/virtio_pci.c20
-rw-r--r--drivers/w1/masters/omap_hdq.c28
-rw-r--r--drivers/w1/slaves/Kconfig11
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2423.c166
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/watchdog/Kconfig56
-rw-r--r--drivers/watchdog/Makefile6
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/ath79_wdt.c305
-rw-r--r--drivers/watchdog/booke_wdt.c35
-rw-r--r--drivers/watchdog/f71808e_wdt.c78
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c12
-rw-r--r--drivers/watchdog/ks8695_wdt.c2
-rw-r--r--drivers/watchdog/m54xx_wdt.c227
-rw-r--r--drivers/watchdog/nv_tco.c512
-rw-r--r--drivers/watchdog/nv_tco.h64
-rw-r--r--drivers/watchdog/sp5100_tco.c480
-rw-r--r--drivers/watchdog/sp5100_tco.h41
-rw-r--r--drivers/watchdog/w83627hf_wdt.c8
-rw-r--r--drivers/xen/Kconfig20
-rw-r--r--drivers/xen/Makefile5
-rw-r--r--drivers/xen/events.c21
-rw-r--r--drivers/xen/gntdev.c665
-rw-r--r--drivers/xen/grant-table.c46
-rw-r--r--drivers/xen/manage.c10
-rw-r--r--drivers/xen/platform-pci.c21
-rw-r--r--drivers/xen/xenbus/Makefile5
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c351
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h31
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c276
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c294
-rw-r--r--drivers/xen/xenfs/xenbus.c31
2493 files changed, 156019 insertions, 67699 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3d93b3a3d630..9bfb71ff3a6a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@ source "drivers/ata/Kconfig"
source "drivers/md/Kconfig"
+source "drivers/target/Kconfig"
+
source "drivers/message/fusion/Kconfig"
source "drivers/firewire/Kconfig"
@@ -88,6 +90,8 @@ source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
+source "drivers/nfc/Kconfig"
+
source "drivers/accessibility/Kconfig"
source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index bf15ce7493d2..b423bb16c3a8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
obj-$(CONFIG_REGULATOR) += regulator/
-# char/ comes before serial/ etc so that the VT console is the boot-time
+# tty/ comes before char/ so that the VT console is the boot-time
# default.
obj-y += tty/
obj-y += char/
@@ -38,14 +38,14 @@ obj-$(CONFIG_CONNECTOR) += connector/
obj-$(CONFIG_FB_I810) += video/i810/
obj-$(CONFIG_FB_INTEL) += video/intelfb/
-obj-y += serial/
obj-$(CONFIG_PARPORT) += parport/
-obj-y += base/ block/ misc/ mfd/
+obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
+obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-y += net/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3f3489c5ca8c..2aa042a5da6d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -51,12 +51,7 @@ config ACPI_PROCFS
For backwards compatibility, this option allows
deprecated /proc/acpi/ files to exist, even when
they have been replaced by functions in /sys.
- The deprecated files (and their replacements) include:
- /proc/acpi/processor/*/throttling (/sys/class/thermal/
- cooling_device*/*)
- /proc/acpi/video/*/brightness (/sys/class/backlight/)
- /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
This option has no effect on /proc/acpi/ files
and functions which do not yet exist in /sys.
@@ -74,6 +69,8 @@ config ACPI_PROCFS_POWER
/proc/acpi/ac_adapter/* (sys/class/power_supply/*)
This option has no effect on /proc/acpi/ directories
and functions, which do not yet exist in /sys
+ This option, together with the proc directories, will be
+ deleted in 2.6.39.
Say N to delete power /proc/acpi/ directories that have moved to /sys/
@@ -209,6 +206,17 @@ config ACPI_PROCESSOR
To compile this driver as a module, choose M here:
the module will be called processor.
+config ACPI_IPMI
+ tristate "IPMI"
+ depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+ default n
+ help
+	  This driver enables ACPI to access a BMC controller. It uses
+	  IPMI request/response messages to communicate with the BMC
+	  controller, which can be found on servers.
+
+ To compile this driver as a module, choose M here:
+	  the module will be called acpi_ipmi.
config ACPI_HOTPLUG_CPU
bool
@@ -310,7 +318,7 @@ config ACPI_PCI_SLOT
the module will be called pci_slot.
config X86_PM_TIMER
- bool "Power Management Timer Support" if EMBEDDED
+ bool "Power Management Timer Support" if EXPERT
depends on X86
default y
help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3d031d02e54b..d113fa5100b2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
#
@@ -69,5 +69,6 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o
obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 25d3aaebc10d..58c3f74bd84c 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -197,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
-
+ printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+ " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_ac_dir);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
new file mode 100644
index 000000000000..f40acef80269
--- /dev/null
+++ b/drivers/acpi/acpi_ipmi.c
@@ -0,0 +1,525 @@
+/*
+ * acpi_ipmi.c - ACPI IPMI opregion
+ *
+ * Copyright (C) 2010 Intel Corporation
+ * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/ipmi.h>
+#include <linux/device.h>
+#include <linux/pnp.h>
+
+MODULE_AUTHOR("Zhao Yakui");
+MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+MODULE_LICENSE("GPL");
+
+#define IPMI_FLAGS_HANDLER_INSTALL 0
+
+#define ACPI_IPMI_OK 0
+#define ACPI_IPMI_TIMEOUT 0x10
+#define ACPI_IPMI_UNKNOWN 0x07
+/* the IPMI timeout is 5s */
+#define IPMI_TIMEOUT (5 * HZ)
+
+struct acpi_ipmi_device {
+ /* the device list attached to driver_data.ipmi_devices */
+ struct list_head head;
+ /* the IPMI request message list */
+ struct list_head tx_msg_list;
+ struct mutex tx_msg_lock;
+ acpi_handle handle;
+ struct pnp_dev *pnp_dev;
+ ipmi_user_t user_interface;
+ int ipmi_ifnum; /* IPMI interface number */
+ long curr_msgid;
+ unsigned long flags;
+ struct ipmi_smi_info smi_data;
+};
+
+struct ipmi_driver_data {
+ struct list_head ipmi_devices;
+ struct ipmi_smi_watcher bmc_events;
+ struct ipmi_user_hndl ipmi_hndlrs;
+ struct mutex ipmi_lock;
+};
+
+struct acpi_ipmi_msg {
+ struct list_head head;
+	/*
+	 * Generally speaking, the addr type should be SI_ADDR_TYPE and
+	 * the addr channel should be BMC.
+	 * In fact it can also be an IPMB type, but then it would have to
+	 * be parsed out of the netfn/command buffer, which is complex
+	 * enough that it is skipped here.
+	 */
+ struct ipmi_addr addr;
+ long tx_msgid;
+	/* used to track whether the IPMI message has completed */
+ struct completion tx_complete;
+ struct kernel_ipmi_msg tx_message;
+ int msg_done;
+	/* tx data, copied from the ACPI object buffer */
+ u8 tx_data[64];
+ int tx_len;
+ u8 rx_data[64];
+ int rx_len;
+ struct acpi_ipmi_device *device;
+};
+
+/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
+struct acpi_ipmi_buffer {
+ u8 status;
+ u8 length;
+ u8 data[64];
+};
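As a hedged illustration (not part of the patch) of how the ACPI 4.0 status/length/data layout above is consumed on the response path, using the ACPI_IPMI_* status codes defined earlier; the helper name is hypothetical:

/* Illustrative sketch only: validate a response buffer filled in by the
 * IPMI opregion handler. */
static int acpi_ipmi_response_valid(const struct acpi_ipmi_buffer *buf)
{
	if (buf->status != ACPI_IPMI_OK)
		return 0;	/* ACPI_IPMI_TIMEOUT or ACPI_IPMI_UNKNOWN */
	if (buf->length > sizeof(buf->data))
		return 0;	/* malformed length field */
	return 1;		/* buf->data[0..length-1] holds the response */
}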
+
+static void ipmi_register_bmc(int iface, struct device *dev);
+static void ipmi_bmc_gone(int iface);
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+
+static struct ipmi_driver_data driver_data = {
+ .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
+ .bmc_events = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_register_bmc,
+ .smi_gone = ipmi_bmc_gone,
+ },
+ .ipmi_hndlrs = {
+ .ipmi_recv_hndl = ipmi_msg_handler,
+ },
+};
+
+static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+{
+ struct acpi_ipmi_msg *ipmi_msg;
+ struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+ ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
+ if (!ipmi_msg) {
+ dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+ return NULL;
+ }
+ init_completion(&ipmi_msg->tx_complete);
+ INIT_LIST_HEAD(&ipmi_msg->head);
+ ipmi_msg->device = ipmi;
+ return ipmi_msg;
+}
+
+#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
+static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+ acpi_physical_address address,
+ acpi_integer *value)
+{
+ struct kernel_ipmi_msg *msg;
+ struct acpi_ipmi_buffer *buffer;
+ struct acpi_ipmi_device *device;
+
+ msg = &tx_msg->tx_message;
+ /*
+ * IPMI network function and command are encoded in the address
+ * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
+ */
+ msg->netfn = IPMI_OP_RGN_NETFN(address);
+ msg->cmd = IPMI_OP_RGN_CMD(address);
+ msg->data = tx_msg->tx_data;
+ /*
+ * value is the parameter passed by the IPMI opregion space handler.
+ * It points to the IPMI request message buffer
+ */
+ buffer = (struct acpi_ipmi_buffer *)value;
+ /* copy the tx message data */
+ msg->data_len = buffer->length;
+ memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+	/*
+	 * The default addr type is SYSTEM_INTERFACE and the channel type is BMC.
+	 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, the addr type
+	 * should be changed to IPMB, and the IPMB address would then have to be
+	 * parsed out of the IPMI request message buffer.
+	 * If that case is hit, please fix me.
+	 */
+ tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ tx_msg->addr.channel = IPMI_BMC_CHANNEL;
+ tx_msg->addr.data[0] = 0;
+
+ /* Get the msgid */
+ device = tx_msg->device;
+ mutex_lock(&device->tx_msg_lock);
+ device->curr_msgid++;
+ tx_msg->tx_msgid = device->curr_msgid;
+ mutex_unlock(&device->tx_msg_lock);
+}
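As a worked example of the netfn/command encoding handled above, the IPMI_OP_RGN_* macros split an opregion offset into a network function (bits 15:8) and a command (bits 7:0). The offset value and function below are made up purely for illustration and are not part of the patch:

/* Illustration only */
static void acpi_ipmi_example_encoding(void)
{
	acpi_physical_address example_offset = 0x0630;	/* hypothetical opregion offset */
	u8 netfn = IPMI_OP_RGN_NETFN(example_offset);	/* (0x0630 >> 8) & 0xff == 0x06 */
	u8 cmd   = IPMI_OP_RGN_CMD(example_offset);	/* 0x0630 & 0xff == 0x30 */
}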
+
+static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+ acpi_integer *value, int rem_time)
+{
+ struct acpi_ipmi_buffer *buffer;
+
+ /*
+	 * value is also used as an output parameter. It represents the IPMI
+	 * response message returned by the IPMI command.
+ */
+ buffer = (struct acpi_ipmi_buffer *)value;
+ if (!rem_time && !msg->msg_done) {
+ buffer->status = ACPI_IPMI_TIMEOUT;
+ return;
+ }
+ /*
+	 * If the msg_done flag is not set or the received length is zero, the
+	 * IPMI command did not execute correctly.
+ * The status code will be ACPI_IPMI_UNKNOWN.
+ */
+ if (!msg->msg_done || !msg->rx_len) {
+ buffer->status = ACPI_IPMI_UNKNOWN;
+ return;
+ }
+ /*
+ * If the IPMI response message is obtained correctly, the status code
+	 * will be ACPI_IPMI_OK.
+ */
+ buffer->status = ACPI_IPMI_OK;
+ buffer->length = msg->rx_len;
+ memcpy(buffer->data, msg->rx_data, msg->rx_len);
+}
+
+static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
+{
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ int count = HZ / 10;
+ struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+ list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+		/* wake up any thread sleeping on this Tx msg */
+ complete(&tx_msg->tx_complete);
+ }
+
+ /* wait for about 100ms to flush the tx message list */
+ while (count--) {
+ if (list_empty(&ipmi->tx_msg_list))
+ break;
+ schedule_timeout(1);
+ }
+ if (!list_empty(&ipmi->tx_msg_list))
+ dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+}
+
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+{
+ struct acpi_ipmi_device *ipmi_device = user_msg_data;
+ int msg_found = 0;
+ struct acpi_ipmi_msg *tx_msg;
+ struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+
+ if (msg->user != ipmi_device->user_interface) {
+ dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+ "returned user %p, expected user %p\n",
+ msg->user, ipmi_device->user_interface);
+ ipmi_free_recv_msg(msg);
+ return;
+ }
+ mutex_lock(&ipmi_device->tx_msg_lock);
+ list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ if (msg->msgid == tx_msg->tx_msgid) {
+ msg_found = 1;
+ break;
+ }
+ }
+
+ mutex_unlock(&ipmi_device->tx_msg_lock);
+ if (!msg_found) {
+ dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+ "returned.\n", msg->msgid);
+ ipmi_free_recv_msg(msg);
+ return;
+ }
+
+ if (msg->msg.data_len) {
+ /* copy the response data to Rx_data buffer */
+ memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
+ tx_msg->rx_len = msg->msg.data_len;
+ tx_msg->msg_done = 1;
+ }
+ complete(&tx_msg->tx_complete);
+ ipmi_free_recv_msg(msg);
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev)
+{
+ struct acpi_ipmi_device *ipmi_device, *temp;
+ struct pnp_dev *pnp_dev;
+ ipmi_user_t user;
+ int err;
+ struct ipmi_smi_info smi_data;
+ acpi_handle handle;
+
+ err = ipmi_get_smi_info(iface, &smi_data);
+
+ if (err)
+ return;
+
+ if (smi_data.addr_src != SI_ACPI) {
+ put_device(smi_data.dev);
+ return;
+ }
+
+ handle = smi_data.addr_info.acpi_info.acpi_handle;
+
+ mutex_lock(&driver_data.ipmi_lock);
+ list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
+ /*
+ * if the corresponding ACPI handle is already added
+ * to the device list, don't add it again.
+ */
+ if (temp->handle == handle)
+ goto out;
+ }
+
+ ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+
+ if (!ipmi_device)
+ goto out;
+
+ pnp_dev = to_pnp_dev(smi_data.dev);
+ ipmi_device->handle = handle;
+ ipmi_device->pnp_dev = pnp_dev;
+
+ err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+ ipmi_device, &user);
+ if (err) {
+ dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
+ kfree(ipmi_device);
+ goto out;
+ }
+ acpi_add_ipmi_device(ipmi_device);
+ ipmi_device->user_interface = user;
+ ipmi_device->ipmi_ifnum = iface;
+ mutex_unlock(&driver_data.ipmi_lock);
+ memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+ return;
+
+out:
+ mutex_unlock(&driver_data.ipmi_lock);
+ put_device(smi_data.dev);
+ return;
+}
+
+static void ipmi_bmc_gone(int iface)
+{
+ struct acpi_ipmi_device *ipmi_device, *temp;
+
+ mutex_lock(&driver_data.ipmi_lock);
+ list_for_each_entry_safe(ipmi_device, temp,
+ &driver_data.ipmi_devices, head) {
+ if (ipmi_device->ipmi_ifnum != iface)
+ continue;
+
+ acpi_remove_ipmi_device(ipmi_device);
+ put_device(ipmi_device->smi_data.dev);
+ kfree(ipmi_device);
+ break;
+ }
+ mutex_unlock(&driver_data.ipmi_lock);
+}
+/* --------------------------------------------------------------------------
+ * Address Space Management
+ * -------------------------------------------------------------------------- */
+/*
+ * This is the IPMI opregion space handler.
+ * @function: indicates read or write. Since the IPMI exchange is driven
+ *	      by commands, only write is meaningful.
+ * @address: contains the netfn/command of the IPMI request message.
+ * @bits: not used.
+ * @value : it is an in/out parameter. It points to the IPMI message buffer.
+ * Before the IPMI message is sent, it represents the actual request
+ * IPMI message. After the IPMI message is finished, it represents
+ * the response IPMI message returned by IPMI command.
+ * @handler_context: IPMI device context.
+ */
+
+static acpi_status
+acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
+{
+ struct acpi_ipmi_msg *tx_msg;
+ struct acpi_ipmi_device *ipmi_device = handler_context;
+ int err, rem_time;
+ acpi_status status;
+ /*
+ * IPMI opregion message.
+	 * An IPMI message is first written to the BMC, and system software
+	 * then reads back the response. A plain read access of the IPMI
+	 * opregion is therefore meaningless.
+ */
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ return AE_TYPE;
+
+ if (!ipmi_device->user_interface)
+ return AE_NOT_EXIST;
+
+ tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
+ if (!tx_msg)
+ return AE_NO_MEMORY;
+
+ acpi_format_ipmi_msg(tx_msg, address, value);
+ mutex_lock(&ipmi_device->tx_msg_lock);
+ list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+ mutex_unlock(&ipmi_device->tx_msg_lock);
+ err = ipmi_request_settime(ipmi_device->user_interface,
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+ &tx_msg->tx_message,
+ NULL, 0, 0, 0);
+ if (err) {
+ status = AE_ERROR;
+ goto end_label;
+ }
+ rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
+ IPMI_TIMEOUT);
+ acpi_format_ipmi_response(tx_msg, value, rem_time);
+ status = AE_OK;
+
+end_label:
+ mutex_lock(&ipmi_device->tx_msg_lock);
+ list_del(&tx_msg->head);
+ mutex_unlock(&ipmi_device->tx_msg_lock);
+ kfree(tx_msg);
+ return status;
+}
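Seen from the AML side, the value argument above is simply an acpi_ipmi_buffer: on entry it carries the request payload, and on return the handler has overwritten it with the response. A minimal, purely illustrative request (the function name and payload bytes are made up and not part of the patch):

/* Illustration only */
static void acpi_ipmi_example_request(void)
{
	struct acpi_ipmi_buffer req = { 0 };

	req.length  = 2;	/* number of valid bytes in data[] */
	req.data[0] = 0x01;	/* first byte of the IPMI request body */
	req.data[1] = 0x02;	/* second byte of the IPMI request body */

	/*
	 * AML hands such a buffer (viewed as acpi_integer *) to the space
	 * handler; on return, req.status/req.length/req.data describe the
	 * response.
	 */
}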
+
+static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
+{
+ if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+ return;
+
+ acpi_remove_address_space_handler(ipmi->handle,
+ ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
+
+ clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+}
+
+static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+{
+ acpi_status status;
+
+ if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+ return 0;
+
+ status = acpi_install_address_space_handler(ipmi->handle,
+ ACPI_ADR_SPACE_IPMI,
+ &acpi_ipmi_space_handler,
+ NULL, ipmi);
+ if (ACPI_FAILURE(status)) {
+ struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+ dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
+ "handle\n");
+ return -EINVAL;
+ }
+ set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+ return 0;
+}
+
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+
+ INIT_LIST_HEAD(&ipmi_device->head);
+
+ mutex_init(&ipmi_device->tx_msg_lock);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ ipmi_install_space_handler(ipmi_device);
+
+ list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
+}
+
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+ /*
+ * If the IPMI user interface is created, it should be
+ * destroyed.
+ */
+ if (ipmi_device->user_interface) {
+ ipmi_destroy_user(ipmi_device->user_interface);
+ ipmi_device->user_interface = NULL;
+ }
+ /* flush the Tx_msg list */
+ if (!list_empty(&ipmi_device->tx_msg_list))
+ ipmi_flush_tx_msg(ipmi_device);
+
+ list_del(&ipmi_device->head);
+ ipmi_remove_space_handler(ipmi_device);
+}
+
+static int __init acpi_ipmi_init(void)
+{
+ int result = 0;
+
+ if (acpi_disabled)
+ return result;
+
+ mutex_init(&driver_data.ipmi_lock);
+
+ result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+
+ return result;
+}
+
+static void __exit acpi_ipmi_exit(void)
+{
+ struct acpi_ipmi_device *ipmi_device, *temp;
+
+ if (acpi_disabled)
+ return;
+
+ ipmi_smi_watcher_unregister(&driver_data.bmc_events);
+
+ /*
+	 * When an smi_watcher is unregistered, it is only deleted
+	 * from the smi_watcher list; the smi_gone callback function
+	 * is not called. So explicitly uninstall the ACPI IPMI opregion
+	 * handler and free each device.
+ */
+ mutex_lock(&driver_data.ipmi_lock);
+ list_for_each_entry_safe(ipmi_device, temp,
+ &driver_data.ipmi_devices, head) {
+ acpi_remove_ipmi_device(ipmi_device);
+ put_device(ipmi_device->smi_data.dev);
+ kfree(ipmi_device);
+ }
+ mutex_unlock(&driver_data.ipmi_lock);
+}
+
+module_init(acpi_ipmi_init);
+module_exit(acpi_ipmi_exit);
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a7e1d1aa4107..eec2eadd2431 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 3e50c74ed4a1..e0ba17f0a7c8 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index b17d8de9f6ff..ab87396c2c07 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 72e9d5eb083c..eb0b1f8dee6d 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 894a0ff2a946..666271b65418 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a6f99cc37a19..41d247daf461 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void);
acpi_status acpi_ev_install_xrupt_handlers(void);
-acpi_status acpi_ev_install_fadt_gpes(void);
-
u32 acpi_ev_fixed_event_detect(void);
/*
@@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
@@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
struct acpi_gpe_block_info
*gpe_block);
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
/*
* evgpeblk - Upper-level GPE block support
*/
@@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
- void *ignored);
+ void *context);
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+ struct acpi_gpe_event_info *gpe_event_info,
u32 gpe_number);
/*
@@ -126,10 +127,6 @@ acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
-acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *context, void **return_value);
-
/*
* evgpeutil - GPE utilities
*/
@@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block, void *context);
+
struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ad88fcae4eb9..82a1bd283db8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running;
extern u32 acpi_gbl_nesting_level;
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
/* Support for dynamic control method tracing mechanism */
ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -225,8 +228,10 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_present;
*/
ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
/*****************************************************************************
*
@@ -370,7 +375,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN u8 acpi_all_gpes_initialized;
+ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 167470ad2d21..e7213beaafc7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info);
acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
acpi_status
acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 049e203bd621..3731e1c34b83 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ceb0c05b2d7..edc25867ad9d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -408,17 +408,23 @@ struct acpi_predefined_data {
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
-struct acpi_handler_info {
- acpi_event_handler address; /* Address of handler, if any */
+struct acpi_gpe_handler_info {
+ acpi_gpe_handler address; /* Address of handler, if any */
void *context; /* Context to be passed to handler */
struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
- u8 orig_flags; /* Original misc info about this GPE */
- u8 orig_enabled; /* Set if the GPE was originally enabled */
+ u8 original_flags; /* Original (pre-handler) GPE info */
+ u8 originally_enabled; /* True if GPE was originally enabled */
+};
+
+struct acpi_gpe_notify_object {
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_notify_object *next;
};
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
- struct acpi_handler_info *handler;
+ struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
+ struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
};
/*
@@ -458,7 +464,7 @@ struct acpi_gpe_block_info {
u32 register_count; /* Number of register pairs in block */
u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
- u8 initialized; /* If set, the GPE block has been initialized */
+ u8 initialized; /* TRUE if this block is initialized */
};
/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 8d5c9e0a495f..b7491ee1fba6 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index d44d3bc5b847..79a598c67fe3 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index bdbfaf22bd14..1055769f2f01 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,12 +93,10 @@
#define AOPOBJ_AML_CONSTANT 0x01 /* Integer is an AML constant */
#define AOPOBJ_STATIC_POINTER 0x02 /* Data is part of an ACPI table, don't delete */
-#define AOPOBJ_DATA_VALID 0x04 /* Object is intialized and data is valid */
+#define AOPOBJ_DATA_VALID 0x04 /* Object is initialized and data is valid */
#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */
#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */
#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */
-#define AOPOBJ_MODULE_LEVEL 0x40 /* Method is actually module-level code */
-#define AOPOBJ_MODIFIED_NAMESPACE 0x80 /* Method modified the namespace */
/******************************************************************************
*
@@ -175,7 +173,7 @@ struct acpi_object_region {
};
struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+ ACPI_OBJECT_COMMON_HEADER u8 info_flags;
u8 param_count;
u8 sync_level;
union acpi_operand_object *mutex;
@@ -183,13 +181,21 @@ struct acpi_object_method {
union {
ACPI_INTERNAL_METHOD implementation;
union acpi_operand_object *handler;
- } extra;
+ } dispatch;
u32 aml_length;
u8 thread_count;
acpi_owner_id owner_id;
};
+/* Flags for info_flags field above */
+
+#define ACPI_METHOD_MODULE_LEVEL 0x01 /* Method is actually module-level code */
+#define ACPI_METHOD_INTERNAL_ONLY 0x02 /* Method is implemented internally (_OSI) */
+#define ACPI_METHOD_SERIALIZED 0x04 /* Method is serialized */
+#define ACPI_METHOD_SERIALIZED_PENDING 0x08 /* Method is to be marked serialized */
+#define ACPI_METHOD_MODIFIED_NAMESPACE 0x10 /* Method modified the namespace */
+
/******************************************************************************
*
* Objects that can be notified. All share a common notify_info area.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8c15ff43f42b..bb2ccfad7376 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index d0bb0fd3e57a..5ea1e06afa20 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 10998d369ad0..94e73c97cf85 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 528bcbaf4ce7..f08b55b7f3a0 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6e5dd97949fe..1623b245dde2 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 62a576e34361..967f08124eba 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 72e4183c1937..99c140d8e348 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1f484ba228fc..f4f0998d3967 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -480,16 +480,10 @@ typedef enum {
AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
} AML_ACCESS_ATTRIBUTE;
-/* Bit fields in method_flags byte */
+/* Bit fields in the AML method_flags byte */
#define AML_METHOD_ARG_COUNT 0x07
#define AML_METHOD_SERIALIZED 0x08
#define AML_METHOD_SYNC_LEVEL 0xF0
-/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
-
-#define AML_METHOD_INTERNAL_ONLY 0x01
-#define AML_METHOD_RESERVED1 0x02
-#define AML_METHOD_RESERVED2 0x04
-
#endif /* __AMLCODE_H__ */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 0e5798fcbb19..59122cde247c 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 347bee1726f1..34be60c0e448 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index cc4a38c57558..a7718bf2b9a1 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d94dd8974b55..5d797751e205 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
@@ -201,7 +200,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
/*
* If this method is serialized, we need to acquire the method mutex.
*/
- if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
/*
* Create a mutex for the method if it is defined to be Serialized
* and a mutex has not already been created. We defer the mutex creation
@@ -413,8 +412,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
/* Invoke an internal method if necessary */
- if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = obj_desc->method.extra.implementation(next_walk_state);
+ if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
+ status =
+ obj_desc->method.dispatch.implementation(next_walk_state);
if (status == AE_OK) {
status = AE_CTRL_TERMINATE;
}
@@ -579,11 +579,14 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
/*
* Delete any namespace objects created anywhere within the
- * namespace by the execution of this method. Unless this method
- * is a module-level executable code method, in which case we
- * want make the objects permanent.
+ * namespace by the execution of this method. Unless:
+ * 1) This method is a module-level executable code method, in which
+	 * case we want to make the objects permanent.
+ * 2) There are other threads executing the method, in which case we
+ * will wait until the last thread has completed.
*/
- if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
+ && (method_desc->method.thread_count == 1)) {
/* Delete any direct children of (created by) this method */
@@ -593,12 +596,17 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
/*
* Delete any objects that were created by this method
* elsewhere in the namespace (if any were created).
+ * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
+ * deletion such that we don't have to perform an entire
+ * namespace walk for every control method execution.
*/
if (method_desc->method.
- flags & AOPOBJ_MODIFIED_NAMESPACE) {
+ info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
acpi_ns_delete_namespace_by_owner(method_desc->
method.
owner_id);
+ method_desc->method.info_flags &=
+ ~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
}
@@ -629,19 +637,43 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* Serialized if it appears that the method is incorrectly written and
* does not support multiple thread execution. The best example of this
* is if such a method creates namespace objects and blocks. A second
- * thread will fail with an AE_ALREADY_EXISTS exception
+ * thread will fail with an AE_ALREADY_EXISTS exception.
*
* This code is here because we must wait until the last thread exits
- * before creating the synchronization semaphore.
+ * before marking the method as serialized.
*/
- if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
- && (!method_desc->method.mutex)) {
- (void)acpi_ds_create_method_mutex(method_desc);
+ if (method_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
+ if (walk_state) {
+ ACPI_INFO((AE_INFO,
+ "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+ walk_state->method_node->name.
+ ascii));
+ }
+
+ /*
+ * Method tried to create an object twice and was marked as
+ * "pending serialized". The probable cause is that the method
+ * cannot handle reentrancy.
+ *
+ * The method was created as not_serialized, but it tried to create
+ * a named object and then blocked, causing the second thread
+ * entrance to begin and then fail. Workaround this problem by
+ * marking the method permanently as Serialized when the last
+ * thread exits here.
+ */
+ method_desc->method.info_flags &=
+ ~ACPI_METHOD_SERIALIZED_PENDING;
+ method_desc->method.info_flags |=
+ ACPI_METHOD_SERIALIZED;
+ method_desc->method.sync_level = 0;
}
/* No more threads, we can free the owner_id */
- if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ if (!
+ (method_desc->method.
+ info_flags & ACPI_METHOD_MODULE_LEVEL)) {
acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
}
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8095306fcd8c..905ce29a92e1 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8e85f54a8e0e..f42e17e5c252 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 7c0e74227171..bbecf293aeeb 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 15135c25aa9b..2c477ce172fa 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b0b5d08d97a..fe40e4c6554f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 140a9d002959..52566ff5e903 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d1e701709dac..76a661fc1e09 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 83155dd8671e..a6c374ef9914 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c61c3039c31a..d458b041e651 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void)
status_bit_mask)
&& (fixed_enable & acpi_gbl_fixed_event_info[i].
enable_bit_mask)) {
+ /*
+ * Found an active (signalled) event. Invoke global event
+ * handler if present.
+ */
+ acpi_fixed_event_count[i]++;
+ if (acpi_gbl_global_event_handler) {
+ acpi_gbl_global_event_handler
+ (ACPI_EVENT_TYPE_FIXED, NULL, i,
+ acpi_gbl_global_event_handler_context);
+ }
- /* Found an active (signalled) event */
- acpi_os_fixed_event_count(i);
int_status |= acpi_ev_fixed_event_dispatch(i);
}
}
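The handler invocation added above passes the event type, a device handle (NULL for fixed events), the event number, and the registered context. A hedged sketch of a handler with a matching shape follows; the name and counting logic are made up, and the exact typedef should be taken from the ACPICA headers:

static u32 example_fixed_event_hits[ACPI_NUM_FIXED_EVENTS];

static void example_global_event_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
{
	/* Count fixed events observed by the global handler */
	if (event_type == ACPI_EVENT_TYPE_FIXED &&
	    event_number < ACPI_NUM_FIXED_EVENTS)
		example_fixed_event_hits[event_number]++;
}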
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f226eac314db..f4725212eb48 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ * DESCRIPTION: Clear a GPE of stale events and enable it.
*
******************************************************************************/
acpi_status
@@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
/*
- * We will only allow a GPE to be enabled if it has either an
- * associated method (_Lxx/_Exx) or a handler. Otherwise, the
- * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
- * first time it fires.
+ * We will only allow a GPE to be enabled if it has either an associated
+ * method (_Lxx/_Exx) or a handler, or is using the implicit notify
+ * feature. Otherwise, the GPE will be immediately disabled by
+ * acpi_ev_gpe_dispatch the first time it fires.
*/
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NONE) {
return_ACPI_STATUS(AE_NO_HANDLER);
}
@@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/*******************************************************************************
*
- * FUNCTION: acpi_raw_enable_gpe
+ * FUNCTION: acpi_ev_add_gpe_reference
*
- * PARAMETERS: gpe_event_info - GPE to enable
+ * PARAMETERS: gpe_event_info - Add a reference to this GPE
*
* RETURN: Status
*
@@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
+ ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
+
if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
return_ACPI_STATUS(AE_LIMIT);
}
gpe_event_info->runtime_count++;
if (gpe_event_info->runtime_count == 1) {
+
+ /* Enable on first reference */
+
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/*******************************************************************************
*
- * FUNCTION: acpi_raw_disable_gpe
+ * FUNCTION: acpi_ev_remove_gpe_reference
*
- * PARAMETERS: gpe_event_info - GPE to disable
+ * PARAMETERS: gpe_event_info - Remove a reference to this GPE
*
* RETURN: Status
*
@@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
+ ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
+
if (!gpe_event_info->runtime_count) {
return_ACPI_STATUS(AE_LIMIT);
}
gpe_event_info->runtime_count--;
if (!gpe_event_info->runtime_count) {
+
+ /* Disable on last reference */
+
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_hw_low_set_gpe(gpe_event_info,
@@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
}
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
- "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+ "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
gpe_register_info->base_gpe_number,
status_reg, enable_reg));
@@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* or method.
*/
int_status |=
- acpi_ev_gpe_dispatch(&gpe_block->
+ acpi_ev_gpe_dispatch(gpe_block->
+ node,
+ &gpe_block->
event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
}
}
@@ -435,19 +450,29 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* an interrupt handler.
*
******************************************************************************/
-static void acpi_ev_asynch_enable_gpe(void *context);
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
- struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+ struct acpi_gpe_event_info *gpe_event_info = context;
acpi_status status;
- struct acpi_gpe_event_info local_gpe_event_info;
+ struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
+ struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
+ /* Allocate a local GPE block */
+
+ local_gpe_event_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
+ if (!local_gpe_event_info) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
+ return_VOID;
+ }
+
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
@@ -455,6 +480,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
@@ -462,7 +488,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* Take a snapshot of the GPE info for this level - we copy the info to
* prevent a race condition with remove_handler/remove_block.
*/
- ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+ ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
sizeof(struct acpi_gpe_event_info));
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +496,34 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
- /*
- * Must check for control method type dispatch one more time to avoid a
- * race with ev_gpe_install_handler
- */
- if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_METHOD) {
+ /* Do the correct dispatch - normal method or implicit notify */
+
+ switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+ case ACPI_GPE_DISPATCH_NOTIFY:
+
+ /*
+ * Implicit notify.
+ * Dispatch a DEVICE_WAKE notify to the appropriate handler.
+ * NOTE: the request is queued for execution after this method
+ * completes. The notify handlers are NOT invoked synchronously
+ * from this thread -- because handlers may in turn run other
+ * control methods.
+ */
+ status = acpi_ev_queue_notify_request(
+ local_gpe_event_info->dispatch.device.node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+
+ notify_object = local_gpe_event_info->dispatch.device.next;
+ while (ACPI_SUCCESS(status) && notify_object) {
+ status = acpi_ev_queue_notify_request(
+ notify_object->node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+ notify_object = notify_object->next;
+ }
+
+ break;
+
+ case ACPI_GPE_DISPATCH_METHOD:
/* Allocate the evaluation information block */
@@ -488,7 +536,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* control method that corresponds to this GPE
*/
info->prefix_node =
- local_gpe_event_info.dispatch.method_node;
+ local_gpe_event_info->dispatch.method_node;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
@@ -499,46 +547,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
ACPI_EXCEPTION((AE_INFO, status,
"while evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
- (local_gpe_event_info.dispatch.
+ (local_gpe_event_info->dispatch.
method_node)));
}
+
+ break;
+
+ default:
+
+ ACPI_FREE(local_gpe_event_info);
+ return_VOID; /* Should never happen */
}
+
/* Defer enabling of GPE until all notify handlers are done */
- acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
- gpe_event_info);
+
+ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+ acpi_ev_asynch_enable_gpe,
+ local_gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
+ }
return_VOID;
}
-static void acpi_ev_asynch_enable_gpe(void *context)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_asynch_enable_gpe
+ *
+ * PARAMETERS: Context (gpe_event_info) - Info for this GPE
+ * Callback from acpi_os_execute
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
+ * complete (i.e., finish execution of Notify)
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = context;
+
+ (void)acpi_ev_finish_gpe(gpe_event_info);
+
+ ACPI_FREE(gpe_event_info);
+ return;
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_finish_gpe
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
+ * of a GPE method or a synchronous or asynchronous GPE handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
acpi_status status;
+
if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_LEVEL_TRIGGERED) {
/*
- * GPE is level-triggered, we clear the GPE status bit after handling
- * the event.
+ * GPE is level-triggered, we clear the GPE status bit after
+ * handling the event.
*/
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
- return_VOID;
+ return (status);
}
}
/*
- * Enable this GPE, conditionally. This means that the GPE will only be
- * physically enabled if the enable_for_run bit is set in the event_info
+ * Enable this GPE, conditionally. This means that the GPE will
+ * only be physically enabled if the enable_for_run bit is set
+ * in the event_info.
*/
- (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
-
- return_VOID;
+ (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
+ return (AE_OK);
}
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_dispatch
*
- * PARAMETERS: gpe_event_info - Info for this GPE
+ * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
+ * gpe_event_info - Info for this GPE
* gpe_number - Number relative to the parent GPE block
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +651,22 @@ static void acpi_ev_asynch_enable_gpe(void *context)
******************************************************************************/
u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+ struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
+ u32 return_value;
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
- acpi_os_gpe_count(gpe_number);
+ /* Invoke global event handler if present */
+
+ acpi_gpe_count++;
+ if (acpi_gbl_global_event_handler) {
+ acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
+ gpe_number,
+ acpi_gbl_global_event_handler_context);
+ }
/*
* If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +677,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[0x%2X]",
- gpe_number));
+ "Unable to clear GPE%02X", gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
/*
- * Dispatch the GPE to either an installed handler, or the control method
- * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
- * it and do not attempt to run the method. If there is neither a handler
- * nor a method, we disable this GPE to prevent further such pointless
- * events from firing.
+ * Always disable the GPE so that it does not keep firing before
+ * any asynchronous activity completes (either from the execution
+ * of a GPE method or an asynchronous GPE handler.)
+ *
+ * If there is no handler or method to run, just disable the
+ * GPE and leave it disabled permanently to prevent further such
+ * pointless events from firing.
+ */
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to disable GPE%02X", gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+
+ /*
+ * Dispatch the GPE to either an installed handler or the control
+ * method associated with this GPE (_Lxx or _Exx). If a handler
+ * exists, we invoke it and do not attempt to run the method.
+ * If there is neither a handler nor a method, leave the GPE
+ * disabled.
*/
switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
case ACPI_GPE_DISPATCH_HANDLER:
- /*
- * Invoke the installed handler (at interrupt level)
- * Ignore return status for now.
- * TBD: leave GPE disabled on error?
- */
- (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
- dispatch.
- handler->
- context);
+ /* Invoke the installed handler (at interrupt level) */
- /* It is now safe to clear level-triggered events. */
+ return_value =
+ gpe_event_info->dispatch.handler->address(gpe_device,
+ gpe_number,
+ gpe_event_info->
+ dispatch.handler->
+ context);
- if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
- ACPI_GPE_LEVEL_TRIGGERED) {
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[0x%2X]",
- gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
- }
+ /* If requested, clear (if level-triggered) and reenable the GPE */
+
+ if (return_value & ACPI_REENABLE_GPE) {
+ (void)acpi_ev_finish_gpe(gpe_event_info);
}
break;
case ACPI_GPE_DISPATCH_METHOD:
-
- /*
- * Disable the GPE, so it doesn't keep firing before the method has a
- * chance to run (it runs asynchronously with interrupts enabled).
- */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[0x%2X]",
- gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
- }
+ case ACPI_GPE_DISPATCH_NOTIFY:
/*
* Execute the method associated with the GPE
@@ -631,7 +736,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE[0x%2X] - event disabled",
+ "Unable to queue handler for GPE%2X - event disabled",
gpe_number));
}
break;
@@ -644,20 +749,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* a GPE to be enabled if it has no handler or method.
*/
ACPI_ERROR((AE_INFO,
- "No handler or method for GPE[0x%2X], disabling event",
+ "No handler or method for GPE%02X, disabling event",
gpe_number));
- /*
- * Disable the GPE. The GPE will remain disabled a handler
- * is installed or ACPICA is restarted.
- */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[0x%2X]",
- gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
- }
break;
}
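
For drivers, the practical effect of the reworked dispatch path above is the revised GPE handler signature plus the ACPI_REENABLE_GPE return flag. A minimal sketch (the context structure and handler name are hypothetical; such a handler would be registered via acpi_install_gpe_handler(), shown later in this patch):

#include <acpi/acpi.h>

struct my_device_ctx {
	u32 event_count;		/* Hypothetical per-device state */
};

static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
{
	struct my_device_ctx *ctx = context;

	ctx->event_count++;		/* Device-specific servicing of the event */

	/*
	 * Report the event as handled and ask acpi_ev_gpe_dispatch() to run
	 * acpi_ev_finish_gpe(): clear the status bit (if level-triggered)
	 * and conditionally re-enable the GPE.
	 */
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
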
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 020add3eee1c..ca2c41a53311 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_block->node = gpe_device;
gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
+ gpe_block->initialized = FALSE;
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
- gpe_block->initialized = FALSE;
ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
- acpi_all_gpes_initialized = FALSE;
+ acpi_gbl_all_gpes_initialized = FALSE;
/* Find all GPE methods (_Lxx or_Exx) for this block */
@@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
*
* FUNCTION: acpi_ev_initialize_gpe_block
*
- * PARAMETERS: gpe_device - Handle to the parent GPE block
- * gpe_block - Gpe Block info
+ * PARAMETERS: acpi_gpe_callback
*
* RETURN: Status
*
- * DESCRIPTION: Initialize and enable a GPE block. First find and run any
- * _PRT methods associated with the block, then enable the
- * appropriate GPEs.
+ * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
+ * associated methods.
* Note: Assumes namespace is locked.
*
******************************************************************************/
@@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
/*
- * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
- * GPE blocks that have been initialized already.
+ * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
+ * any GPE blocks that have been initialized already.
*/
if (!gpe_block || gpe_block->initialized) {
return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/*
* Enable all GPEs that have a corresponding method and have the
- * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must
- * be enabled via the acpi_enable_gpe() interface.
+ * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
+ * must be enabled via the acpi_enable_gpe() interface.
*/
gpe_enabled_count = 0;
@@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
- /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
-
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+ /*
+ * Ignore GPEs that have no corresponding _Lxx/_Exx method
+ * and GPEs that are used to wake the system
+ */
+ if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NONE)
+ || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+ == ACPI_GPE_DISPATCH_HANDLER)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
- status = acpi_raw_enable_gpe(gpe_event_info);
+ status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 4c8dea513b66..ce9aa9f9a972 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,11 +45,27 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
+/*
+ * Note: History of _PRW support in ACPICA
+ *
+ * Originally (2000 - 2010), the GPE initialization code performed a walk of
+ * the entire namespace to execute the _PRW methods and detect all GPEs
+ * capable of waking the system.
+ *
+ * As of 10/2010, the _PRW method execution has been removed since it is
+ * actually unnecessary. The host OS must in fact execute all _PRW methods
+ * in order to identify the device/power-resource dependencies. We now put
+ * the onus on the host OS to identify the wake GPEs as part of this process
+ * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake
+ * interface. This not only reduces the complexity of the ACPICA
+ * initialization code, but in some cases (on systems with very large
+ * namespaces) it should reduce the kernel boot time as well.
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_initialize
@@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
acpi_status status = AE_OK;
/*
- * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+ * Find any _Lxx/_Exx GPE methods that have just been loaded.
*
* Any GPEs that correspond to new _Lxx/_Exx methods are immediately
* enabled.
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
return;
}
+ walk_info.count = 0;
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
- walk_info.count = 0;
/* Walk the interrupt level descriptor list */
@@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
* xx - is the GPE number [in HEX]
*
* If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
- * with that owner.
+ * with that owner.
*
******************************************************************************/
@@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
* Add the GPE information from above to the gpe_event_info block for
* use during dispatch of this GPE.
*/
+ gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
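
As the history note above says, the host OS is now responsible for walking _PRW itself and reporting each wake GPE to ACPICA. A hedged sketch of that host-side step, where parse_prw_package() is a hypothetical helper that extracts the GPE reference from the _PRW return package:

static acpi_status host_register_wake_gpe(acpi_handle wake_device)
{
	acpi_handle gpe_device;	/* NULL for the FADT GPE0/GPE1 blocks */
	u32 gpe_number;
	acpi_status status;

	/* Hypothetical helper: evaluate _PRW and decode its GPE reference */

	status = parse_prw_package(wake_device, &gpe_device, &gpe_number);
	if (ACPI_FAILURE(status))
		return status;

	/* Mark the GPE wake-capable (and possibly set up implicit notify) */

	return acpi_setup_gpe_for_wake(wake_device, gpe_device, gpe_number);
}

Once every _PRW has been handled this way, a single call to acpi_update_all_gpes() (added below in evxfgpe.c) completes GPE initialization and enables the remaining runtime GPEs that have _Lxx/_Exx methods.
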
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 19a0e513ea48..80a81d0c4a80 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -154,6 +154,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
/*******************************************************************************
*
+ * FUNCTION: acpi_ev_get_gpe_device
+ *
+ * PARAMETERS: GPE_WALK_CALLBACK
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ * block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block, void *context)
+{
+ struct acpi_gpe_device_info *info = context;
+
+ /* Increment Index by the number of GPEs in this block */
+
+ info->next_block_base_index += gpe_block->gpe_count;
+
+ if (info->index < info->next_block_base_index) {
+ /*
+ * The GPE index is within this block, get the node. Leave the node
+ * NULL for the FADT-defined GPEs
+ */
+ if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+ info->gpe_device = gpe_block->node;
+ }
+
+ info->status = AE_OK;
+ return (AE_CTRL_END);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
* PARAMETERS: interrupt_number - Interrupt for a GPE block
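
The external counterpart of this walk callback, acpi_get_gpe_device(), maps a system-wide GPE index to its owning block device. A small Linux-side sketch, assuming only the interfaces already named in this patch (acpi_get_gpe_device() and the acpi_current_gpe_count global):

static void count_gpe_block_device_gpes(void)
{
	acpi_handle gpe_device;
	u32 index;
	u32 in_block_devices = 0;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		if (ACPI_FAILURE(acpi_get_gpe_device(index, &gpe_device)))
			continue;

		/* A NULL handle means the GPE lives in a FADT-defined block */

		if (gpe_device)
			in_block_devices++;
	}

	pr_info("%u of %u GPEs belong to GPE block devices\n",
		in_block_devices, acpi_current_gpe_count);
}
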
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index fcaed9fb44ff..7dc80946f7bd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -284,41 +284,39 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
* RETURN: ACPI_INTERRUPT_HANDLED
*
* DESCRIPTION: Invoked directly from the SCI handler when a global lock
- * release interrupt occurs. Attempt to acquire the global lock,
- * if successful, signal the thread waiting for the lock.
+ * release interrupt occurs. If there's a thread waiting for
+ * the global lock, signal it.
*
* NOTE: Assumes that the semaphore can be signaled from interrupt level. If
* this is not possible for some reason, a separate thread will have to be
* scheduled to do this.
*
******************************************************************************/
+static u8 acpi_ev_global_lock_pending;
static u32 acpi_ev_global_lock_handler(void *context)
{
- u8 acquired = FALSE;
+ acpi_status status;
+ acpi_cpu_flags flags;
- /*
- * Attempt to get the lock.
- *
- * If we don't get it now, it will be marked pending and we will
- * take another interrupt when it becomes free.
- */
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
+ flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
- /* Got the lock, now wake all threads waiting for it */
+ if (!acpi_ev_global_lock_pending) {
+ goto out;
+ }
- acpi_gbl_global_lock_acquired = TRUE;
- /* Send a unit to the semaphore */
+ /* Send a unit to the semaphore */
- if (ACPI_FAILURE
- (acpi_os_signal_semaphore
- (acpi_gbl_global_lock_semaphore, 1))) {
- ACPI_ERROR((AE_INFO,
- "Could not signal Global Lock semaphore"));
- }
+ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
}
+ acpi_ev_global_lock_pending = FALSE;
+
+ out:
+ acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
return (ACPI_INTERRUPT_HANDLED);
}
@@ -415,6 +413,7 @@ static int acpi_ev_global_lock_acquired;
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
+ acpi_cpu_flags flags;
acpi_status status = AE_OK;
u8 acquired = FALSE;
@@ -467,32 +466,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
return_ACPI_STATUS(AE_OK);
}
- /* Attempt to acquire the actual hardware lock */
+ flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
+
+ do {
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+ if (acquired) {
+ acpi_gbl_global_lock_acquired = TRUE;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+ break;
+ }
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
+ acpi_ev_global_lock_pending = TRUE;
- /* We got the lock */
+ acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+ /*
+ * Did not get the lock. The pending bit was set above, and we
+ * must wait until we get the global lock released interrupt.
+ */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Acquired hardware Global Lock\n"));
+ "Waiting for hardware Global Lock\n"));
- acpi_gbl_global_lock_acquired = TRUE;
- return_ACPI_STATUS(AE_OK);
- }
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status = acpi_ex_system_wait_semaphore(
+ acpi_gbl_global_lock_semaphore,
+ ACPI_WAIT_FOREVER);
- /*
- * Did not get the lock. The pending bit was set above, and we must now
- * wait until we get the global lock released interrupt.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+ flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
- /*
- * Wait for handshake with the global lock interrupt handler.
- * This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
- ACPI_WAIT_FOREVER);
+ } while (ACPI_SUCCESS(status));
+
+ acpi_ev_global_lock_pending = FALSE;
+
+ acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
return_ACPI_STATUS(status);
}
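
The reworked global-lock handshake can be pictured with ordinary primitives: a lock-protected pending flag plus a counting semaphore. The sketch below is a user-space analogy of the logic only, not ACPICA code; try_hw_lock() stands in for ACPI_ACQUIRE_GLOBAL_LOCK(), and the semaphore is assumed to be initialized to zero at startup.

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t lock_sem;			/* sem_init(&lock_sem, 0, 0) at startup */
static bool lock_pending;

static bool try_hw_lock(void)
{
	return true;			/* Stand-in for ACPI_ACQUIRE_GLOBAL_LOCK */
}

/* Interrupt side: signal only if a thread is actually waiting */
static void global_lock_release_irq(void)
{
	pthread_mutex_lock(&pending_lock);
	if (lock_pending) {
		sem_post(&lock_sem);
		lock_pending = false;
	}
	pthread_mutex_unlock(&pending_lock);
}

/* Acquire side: retry the hardware lock until it is actually obtained */
static void global_lock_acquire(void)
{
	pthread_mutex_lock(&pending_lock);
	while (!try_hw_lock()) {
		lock_pending = true;
		pthread_mutex_unlock(&pending_lock);
		sem_wait(&lock_sem);	/* Wait for the release interrupt */
		pthread_mutex_lock(&pending_lock);
	}
	lock_pending = false;
	pthread_mutex_unlock(&pending_lock);
}
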
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 98fd210e87b2..785a5ee64585 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47a6dc9290..9659cee6093e 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -590,9 +590,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
* See acpi_ns_exec_module_code
*/
if (obj_desc->method.
- flags & AOPOBJ_MODULE_LEVEL) {
+ info_flags & ACPI_METHOD_MODULE_LEVEL) {
handler_obj =
- obj_desc->method.extra.handler;
+ obj_desc->method.dispatch.handler;
}
break;
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8dfbaa96e422..2ebd40e1a3ef 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 36af222cac65..e1141402dbed 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_global_event_handler
+ *
+ * PARAMETERS: Handler - Pointer to the global event handler function
+ * Context - Value passed to the handler on each event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ * is invoked upon each incoming GPE and Fixed Event. It is
+ * invoked at interrupt level at the time of the event dispatch.
+ * Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+ /* Parameter validation */
+
+ if (!handler) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (acpi_gbl_global_event_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ acpi_gbl_global_event_handler = handler;
+ acpi_gbl_global_event_handler_context = context;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
+
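
A minimal client of the new interface, modeled on the dispatch call added to acpi_ev_gpe_dispatch() above. The counters and function names are hypothetical; ACPI_EVENT_TYPE_GPE is the type passed by the GPE dispatcher, and a corresponding fixed-event type is assumed for the other branch.

static u32 my_gpe_events;
static u32 my_fixed_events;

static void my_global_event_handler(u32 event_type, acpi_handle device,
				    u32 event_number, void *context)
{
	/* Runs at interrupt level - only bump counters here */

	if (event_type == ACPI_EVENT_TYPE_GPE)
		my_gpe_events++;
	else
		my_fixed_events++;
}

/* Typically installed once during host OS initialization */
static acpi_status install_event_counters(void)
{
	return acpi_install_global_event_handler(my_global_event_handler, NULL);
}
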
/*******************************************************************************
*
* FUNCTION: acpi_install_fixed_event_handler
@@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
- u32 type, acpi_event_handler address, void *context)
+ u32 type, acpi_gpe_handler address, void *context)
{
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_handler_info *handler;
+ struct acpi_gpe_handler_info *handler;
acpi_status status;
acpi_cpu_flags flags;
@@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Allocate memory for the handler object */
- handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
if (!handler) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
@@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->address = address;
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
- handler->orig_flags = gpe_event_info->flags &
+ handler->original_flags = gpe_event_info->flags &
(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
/*
@@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
* disabled now to avoid spurious execution of the handler.
*/
- if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
&& gpe_event_info->runtime_count) {
- handler->orig_enabled = 1;
- (void)acpi_raw_disable_gpe(gpe_event_info);
+ handler->originally_enabled = 1;
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
}
/* Install the handler */
@@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
******************************************************************************/
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number, acpi_event_handler address)
+ u32 gpe_number, acpi_gpe_handler address)
{
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_handler_info *handler;
+ struct acpi_gpe_handler_info *handler;
acpi_status status;
acpi_cpu_flags flags;
@@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
gpe_event_info->dispatch.method_node = handler->method_node;
gpe_event_info->flags &=
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
- gpe_event_info->flags |= handler->orig_flags;
+ gpe_event_info->flags |= handler->original_flags;
/*
* If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
* post-initialization configuration.
*/
- if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
- && handler->orig_enabled)
- (void)acpi_raw_enable_gpe(gpe_event_info);
+ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
+ && handler->originally_enabled)
+ (void)acpi_ev_add_gpe_reference(gpe_event_info);
/* Now we can free the handler object */
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index a1dabe3fd8ae..c57b5c707a77 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,18 +43,11 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
#include "actables.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxfevnt")
-/* Local prototypes */
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context);
-
/*******************************************************************************
*
* FUNCTION: acpi_enable
@@ -213,185 +206,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
- * FUNCTION: acpi_gpe_wakeup
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- * Action - Enable or Disable
- *
- * RETURN: Status
- *
- * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
- *
- ******************************************************************************/
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
-{
- acpi_status status = AE_OK;
- struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_register_info *gpe_register_info;
- acpi_cpu_flags flags;
- u32 register_bit;
-
- ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- gpe_register_info = gpe_event_info->register_info;
- if (!gpe_register_info) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
- }
-
- register_bit =
- acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
-
- /* Perform the action */
-
- switch (action) {
- case ACPI_GPE_ENABLE:
- ACPI_SET_BIT(gpe_register_info->enable_for_wake,
- (u8)register_bit);
- break;
-
- case ACPI_GPE_DISABLE:
- ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
- (u8)register_bit);
- break;
-
- default:
- ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
- status = AE_BAD_PARAMETER;
- break;
- }
-
-unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enable_gpe
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
- * hardware-enabled.
- *
- ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
- acpi_status status = AE_BAD_PARAMETER;
- struct acpi_gpe_event_info *gpe_event_info;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(acpi_enable_gpe);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (gpe_event_info) {
- status = acpi_raw_enable_gpe(gpe_event_info);
- }
-
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_disable_gpe
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove a reference to a GPE. When the last reference is
- * removed, only then is the GPE disabled (for runtime GPEs), or
- * the GPE mask bit disabled (for wake GPEs)
- *
- ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
- acpi_status status = AE_BAD_PARAMETER;
- struct acpi_gpe_event_info *gpe_event_info;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(acpi_disable_gpe);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (gpe_event_info) {
- status = acpi_raw_disable_gpe(gpe_event_info) ;
- }
-
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_gpe_can_wake
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE
- * has a corresponding method and is currently enabled, disable it
- * (GPEs with corresponding methods are enabled unconditionally
- * during initialization, but GPEs that can wake up are expected
- * to be initially disabled).
- *
- ******************************************************************************/
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
-{
- acpi_status status = AE_OK;
- struct acpi_gpe_event_info *gpe_event_info;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (gpe_event_info) {
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- } else {
- status = AE_BAD_PARAMETER;
- }
-
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
-
-/*******************************************************************************
- *
* FUNCTION: acpi_disable_event
*
* PARAMETERS: Event - The fixed event to be disabled
@@ -483,44 +297,6 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
/*******************************************************************************
*
- * FUNCTION: acpi_clear_gpe
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Clear an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
- acpi_status status = AE_OK;
- struct acpi_gpe_event_info *gpe_event_info;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(acpi_clear_gpe);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- status = acpi_hw_clear_gpe(gpe_event_info);
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-/*******************************************************************************
- *
* FUNCTION: acpi_get_event_status
*
* PARAMETERS: Event - The fixed event
@@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
}
ACPI_EXPORT_SYMBOL(acpi_get_event_status)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_get_gpe_status
- *
- * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
- * gpe_number - GPE level within the GPE block
- * event_status - Where the current status of the event will
- * be returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Get status of an event (general purpose)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, acpi_event_status *event_status)
-{
- acpi_status status = AE_OK;
- struct acpi_gpe_event_info *gpe_event_info;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Obtain status on the requested GPE number */
-
- status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
-
- if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
- *event_status |= ACPI_EVENT_FLAG_HANDLE;
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
-/*******************************************************************************
- *
- * FUNCTION: acpi_install_gpe_block
- *
- * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
- * gpe_block_address - Address and space_iD
- * register_count - Number of GPE register pairs in the block
- * interrupt_number - H/W interrupt for the block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
- struct acpi_generic_address *gpe_block_address,
- u32 register_count, u32 interrupt_number)
-{
- acpi_status status = AE_OK;
- union acpi_operand_object *obj_desc;
- struct acpi_namespace_node *node;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
-
- if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- node = acpi_ns_validate_handle(gpe_device);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /*
- * For user-installed GPE Block Devices, the gpe_block_base_number
- * is always zero
- */
- status =
- acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
- interrupt_number, &gpe_block);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
-
- /* Install block in the device_object attached to the node */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
-
- /*
- * No object, create a new one (Device nodes do not always have
- * an attached object)
- */
- obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
- if (!obj_desc) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
-
- status =
- acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
-
- /* Remove local reference to the object */
-
- acpi_ut_remove_reference(obj_desc);
-
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
- }
-
- /* Now install the GPE block in the device_object */
-
- obj_desc->device.gpe_block = gpe_block;
-
- unlock_and_exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_remove_gpe_block
- *
- * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove a previously installed block of GPE registers
- *
- ******************************************************************************/
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
-{
- union acpi_operand_object *obj_desc;
- acpi_status status;
- struct acpi_namespace_node *node;
-
- ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
-
- if (!gpe_device) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- node = acpi_ns_validate_handle(gpe_device);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- /* Get the device_object attached to the node */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc || !obj_desc->device.gpe_block) {
- return_ACPI_STATUS(AE_NULL_OBJECT);
- }
-
- /* Delete the GPE block (but not the device_object) */
-
- status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
- if (ACPI_SUCCESS(status)) {
- obj_desc->device.gpe_block = NULL;
- }
-
- unlock_and_exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_get_gpe_device
- *
- * PARAMETERS: Index - System GPE index (0-current_gpe_count)
- * gpe_device - Where the parent GPE Device is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
- * gpe device indicates that the gpe number is contained in one of
- * the FADT-defined gpe blocks. Otherwise, the GPE block device.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
-{
- struct acpi_gpe_device_info info;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
-
- if (!gpe_device) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- if (index >= acpi_current_gpe_count) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- /* Setup and walk the GPE list */
-
- info.index = index;
- info.status = AE_NOT_EXIST;
- info.gpe_device = NULL;
- info.next_block_base_index = 0;
-
- status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- *gpe_device = info.gpe_device;
- return_ACPI_STATUS(info.status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_get_gpe_device
- *
- * PARAMETERS: GPE_WALK_CALLBACK
- *
- * RETURN: Status
- *
- * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
- * block device. NULL if the GPE is one of the FADT-defined GPEs.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context)
-{
- struct acpi_gpe_device_info *info = context;
-
- /* Increment Index by the number of GPEs in this block */
-
- info->next_block_base_index += gpe_block->gpe_count;
-
- if (info->index < info->next_block_base_index) {
- /*
- * The GPE index is within this block, get the node. Leave the node
- * NULL for the FADT-defined GPEs
- */
- if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
- info->gpe_device = gpe_block->node;
- }
-
- info->status = AE_OK;
- return (AE_CTRL_END);
- }
-
- return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_disable_all_gpes
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_disable_all_gpes(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_hw_disable_all_gpes();
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_enable_all_runtime_gpes
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_enable_all_runtime_gpes(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_hw_enable_all_runtime_gpes();
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_update_gpes
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
- * are not pointed to by any device _PRW methods indicating that
- * these GPEs are generally intended for system or device wakeup
- * (such GPEs have to be enabled directly when the devices whose
- * _PRW methods point to them are set up for wakeup signaling).
- *
- ******************************************************************************/
-
-acpi_status acpi_update_gpes(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_update_gpes);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- } else if (acpi_all_gpes_initialized) {
- goto unlock;
- }
-
- status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
- if (ACPI_SUCCESS(status)) {
- acpi_all_gpes_initialized = TRUE;
- }
-
-unlock:
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
- return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644
index 000000000000..52aaff3df562
--- /dev/null
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -0,0 +1,696 @@
+/******************************************************************************
+ *
+ * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxfgpe")
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_update_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
+ * associated _Lxx or _Exx methods and are not pointed to by any
+ * device _PRW methods (this indicates that these GPEs are
+ * generally intended for system or device wakeup. Such GPEs
+ * have to be enabled directly when the devices whose _PRW
+ * methods point to them are set up for wakeup signaling.)
+ *
+ * NOTE: Should be called after any GPEs are added to the system. Primarily,
+ * after the system _PRW methods have been run, but also after a GPE Block
+ * Device has been added or if any new GPE methods have been added via a
+ * dynamic table load.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_all_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (acpi_gbl_all_gpes_initialized) {
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+ if (ACPI_SUCCESS(status)) {
+ acpi_gbl_all_gpes_initialized = TRUE;
+ }
+
+unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+ status = acpi_ev_add_gpe_reference(gpe_event_info);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ * removed, only then is the GPE disabled (for runtime GPEs), or
+ * the GPE mask bit disabled (for wake GPEs)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+ status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+
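
Because these interfaces are reference counted, only the first acpi_enable_gpe() and the last acpi_disable_gpe() touch the hardware. A short sketch, assuming a FADT GPE (NULL gpe_device) and a hypothetical gpe_number:

static void gpe_refcount_example(u32 gpe_number)
{
	(void)acpi_enable_gpe(NULL, gpe_number);	/* 0 -> 1: hardware-enabled */
	(void)acpi_enable_gpe(NULL, gpe_number);	/* 1 -> 2: no hardware change */

	(void)acpi_disable_gpe(NULL, gpe_number);	/* 2 -> 1: still enabled */
	(void)acpi_disable_gpe(NULL, gpe_number);	/* 1 -> 0: hardware-disabled */
}
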
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_setup_gpe_for_wake
+ *
+ * PARAMETERS: wake_device - Device associated with the GPE (via _PRW)
+ * gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
+ * interface is intended to be used as the host executes the
+ * _PRW methods (Power Resources for Wake) in the system tables.
+ * Each _PRW appears under a Device Object (The wake_device), and
+ * contains the info for the wake GPE associated with the
+ * wake_device.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle wake_device,
+ acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_namespace_node *device_node;
+ struct acpi_gpe_notify_object *notify_object;
+ acpi_cpu_flags flags;
+ u8 gpe_dispatch_mask;
+
+ ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
+
+ /* Parameter Validation */
+
+ if (!wake_device) {
+ /*
+ * By forcing wake_device to be valid, we automatically enable the
+ * implicit notify feature on all hosts.
+ */
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ goto unlock_and_exit;
+ }
+
+ if (wake_device == ACPI_ROOT_OBJECT) {
+ goto out;
+ }
+
+ /*
+ * If there is no method or handler for this GPE, then the
+ * wake_device will be notified whenever this GPE fires (aka
+ * "implicit notify") Note: The GPE is assumed to be
+ * level-triggered (for windows compatibility).
+ */
+ gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+ if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+ && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+ goto out;
+ }
+
+ /* Validate wake_device is of type Device */
+
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ if (device_node->type != ACPI_TYPE_DEVICE) {
+ goto unlock_and_exit;
+ }
+
+ if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
+ gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+ ACPI_GPE_LEVEL_TRIGGERED);
+ gpe_event_info->dispatch.device.node = device_node;
+ gpe_event_info->dispatch.device.next = NULL;
+ } else {
+ /* There are multiple devices to notify implicitly. */
+
+ notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+ if (!notify_object) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ notify_object->node = device_node;
+ notify_object->next = gpe_event_info->dispatch.device.next;
+ gpe_event_info->dispatch.device.next = notify_object;
+ }
+
+ out:
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ status = AE_OK;
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_gpe_wake_mask
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ * Action - Enable or Disable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
+ * already be marked as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_gpe_register_info *gpe_register_info;
+ acpi_cpu_flags flags;
+ u32 register_bit;
+
+ ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /*
+ * Ensure that we have a valid GPE number and that this GPE is in
+ * fact a wake GPE
+ */
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ register_bit =
+ acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
+ /* Perform the action */
+
+ switch (action) {
+ case ACPI_GPE_ENABLE:
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake,
+ (u8)register_bit);
+ break;
+
+ case ACPI_GPE_DISABLE:
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ (u8)register_bit);
+ break;
+
+ default:
+ ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
+ status = AE_BAD_PARAMETER;
+ break;
+ }
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
+
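As a usage sketch (assumed calling context, not from the patch): a suspend path would typically arm the wake mask bit only for GPEs previously registered with acpi_setup_gpe_for_wake(). The GPE number and helper name are invented for illustration.

#include <linux/acpi.h>

/* Hypothetical: arm or disarm GPE 0x1D in the enable_for_wake register mask */
static acpi_status example_arm_wake_gpe(u8 arm)
{
	/* A NULL gpe_device again refers to the FADT GPE blocks */
	return acpi_set_gpe_wake_mask(NULL, 0x1D,
				      arm ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE);
}

If the GPE was never marked with ACPI_GPE_CAN_WAKE, the call fails with AE_TYPE, per the check in the function body above.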
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_hw_clear_gpe(gpe_event_info);
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_gpe_status
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ * event_status - Where the current status of the event will
+ * be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+ u32 gpe_number, acpi_event_status *event_status)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Obtain status on the requested GPE number */
+
+ status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+ if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+ *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
+
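A sketch combining the two interfaces above (assumptions: FADT GPE block, hypothetical GPE number 0x10 and helper name): poll a GPE's status and acknowledge it if it is asserted.

#include <linux/acpi.h>

static acpi_status example_poll_and_clear_gpe(void)
{
	acpi_event_status event_status;
	acpi_status status;

	status = acpi_get_gpe_status(NULL, 0x10, &event_status);
	if (ACPI_FAILURE(status))
		return status;

	/* ACPI_EVENT_FLAG_SET means the status bit for this GPE is set */
	if (event_status & ACPI_EVENT_FLAG_SET)
		status = acpi_clear_gpe(NULL, 0x10);

	return status;
}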
+/******************************************************************************
+ *
+ * FUNCTION: acpi_disable_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_disable_all_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_enable_all_runtime_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
+
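The two block-wide calls are normally paired around a sleep transition; a hedged sketch of that sequence follows (the actual sleep entry/exit is elided, and the helper name is made up):

#include <linux/acpi.h>

static void example_sleep_gpe_sequence(void)
{
	/* Disable and clear every GPE before touching the sleep registers */
	if (ACPI_FAILURE(acpi_disable_all_gpes()))
		return;

	/* ... program wake GPEs, enter and leave the sleep state here ... */

	/* Back in the working state: restore only the runtime GPEs */
	(void)acpi_enable_all_runtime_gpes();
}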
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ * gpe_block_address - Address and space_id
+ * register_count - Number of GPE register pairs in the block
+ * interrupt_number - H/W interrupt for the block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
+ * enabled here.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count, u32 interrupt_number)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+ if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_validate_handle(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * For user-installed GPE Block Devices, the gpe_block_base_number
+ * is always zero
+ */
+ status =
+ acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+ interrupt_number, &gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Install block in the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /*
+ * No object, create a new one (Device nodes do not always have
+ * an attached object)
+ */
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ status =
+ acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Now install the GPE block in the device_object */
+
+ obj_desc->device.gpe_block = gpe_block;
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
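A sketch of installing a non-FADT GPE block for a hypothetical GPE block device; the I/O address 0x4020, the register count of 2 (16 GPEs), and interrupt 9 are invented for illustration:

#include <linux/acpi.h>

static acpi_status example_install_gpe_block(acpi_handle gpe_dev)
{
	/* Register block in system I/O space; unset GAS fields stay zero */
	struct acpi_generic_address block_address = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
		.address  = 0x4020,
	};

	/*
	 * GPE numbers within a user-installed block start at zero; the
	 * GPEs are created here but not enabled.
	 */
	return acpi_install_gpe_block(gpe_dev, &block_address, 2, 9);
}

A matching acpi_remove_gpe_block(gpe_dev) at teardown deletes the block while leaving the attached device object in place, as the next function shows.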
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+ if (!gpe_device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_validate_handle(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Get the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc || !obj_desc->device.gpe_block) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /* Delete the GPE block (but not the device_object) */
+
+ status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+ if (ACPI_SUCCESS(status)) {
+ obj_desc->device.gpe_block = NULL;
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_gpe_device
+ *
+ * PARAMETERS: Index - System GPE index (0-current_gpe_count)
+ * gpe_device - Where the parent GPE Device is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ * gpe_device indicates that the GPE number is contained in one of
+ * the FADT-defined GPE blocks. Otherwise, the handle of the
+ * parent GPE block device is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+ struct acpi_gpe_device_info info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+ if (!gpe_device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (index >= acpi_current_gpe_count) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Setup and walk the GPE list */
+
+ info.index = index;
+ info.status = AE_NOT_EXIST;
+ info.gpe_device = NULL;
+ info.next_block_base_index = 0;
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
+ return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
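A sketch of walking every GPE in the system with this index interface; only acpi_current_gpe_count and the returned handle are relied on, and the printing is purely illustrative.

#include <linux/acpi.h>

static void example_list_gpe_blocks(void)
{
	acpi_handle gpe_device;
	u32 index;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		if (ACPI_FAILURE(acpi_get_gpe_device(index, &gpe_device)))
			continue;

		/* A NULL handle means one of the FADT GPE0/GPE1 blocks */
		acpi_os_printf("GPE index %u: %s\n", index,
			       gpe_device ? "GPE block device" : "FADT block");
	}
}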
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index ce9314f79451..eb7386763712 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 18832205b631..745a42b401f5 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index b73bc50c5b76..74162a11817d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c61b48c73f5..e7b372d17667 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -482,13 +482,11 @@ acpi_ex_create_method(u8 * aml_start,
obj_desc->method.aml_length = aml_length;
/*
- * Disassemble the method flags. Split off the Arg Count
- * for efficiency
+ * Disassemble the method flags. Split off the arg_count, Serialized
+ * flag, and sync_level for efficiency.
*/
method_flags = (u8) operand[1]->integer.value;
- obj_desc->method.method_flags =
- (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
obj_desc->method.param_count =
(u8) (method_flags & AML_METHOD_ARG_COUNT);
@@ -497,6 +495,8 @@ acpi_ex_create_method(u8 * aml_start,
* created for this method when it is parsed.
*/
if (method_flags & AML_METHOD_SERIALIZED) {
+ obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
+
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index be8c98b480d7..c7a2f1edd282 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f067bbb0d961..61b8c0e8b74d 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
static struct acpi_exdump_info acpi_ex_dump_method[9] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
"Parameter Count"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index f17d2ff0031b..0bde2230c028 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 38293fd3e088..6c79c29f082d 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 95db4be0877b..703d88ed0b3d 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 6af14e43f839..be1c56ead653 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index d11e539ef763..49ec049c157e 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 84e4d185aa25..236ead14b7f7 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 10e104cf0fb9..2571b4a310f4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 7a08d23befcd..1b48d9d28c9a 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4b50730cf9a0..f4a2787e8e92 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 7aae29f73d3f..cc95e2000406 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index de17e10da0ed..f0d5e14f1f2c 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 1fa4289a687e..55997e46948b 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7ca35ea8acea..db502cd7d934 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 8c97cfd6a0fd..e3bb00ccdff5 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 1624436ba4c5..c0c8842dd344 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index d4af684620ca..a979017d56b8 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index e972b667b09b..dc665cc554de 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 675aaa91a770..df66e7b686be 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4093522eed45..8ad93146dd32 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index b44274a0b62c..fc380d3d45ab 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 14750db2a1b8..f610d88a66be 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* PARAMETERS: gpe_event_info - Info block for the GPE
* gpe_register_info - Info block for the GPE register
*
- * RETURN: Status
+ * RETURN: Register mask with a one in the GPE bit position
*
- * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given
- * GPE set.
+ * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
+ * correct position for the input GPE.
*
******************************************************************************/
@@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable a single GPE in its enable register.
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
*
******************************************************************************/
acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
@@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
return (status);
}
- /* Set ot clear just the bit that corresponds to this GPE */
+ /* Set or clear just the bit that corresponds to this GPE */
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
switch (action) {
- case ACPI_GPE_COND_ENABLE:
- if (!(register_bit & gpe_register_info->enable_for_run))
+ case ACPI_GPE_CONDITIONAL_ENABLE:
+
+ /* Only enable if the enable_for_run bit is set */
+
+ if (!(register_bit & gpe_register_info->enable_for_run)) {
return (AE_BAD_PARAMETER);
+ }
+
+ /*lint -fallthrough */
case ACPI_GPE_ENABLE:
ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid action\n"));
+ ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
return (AE_BAD_PARAMETER);
}
@@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
return (AE_NOT_EXIST);
}
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
-
/*
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
+ register_bit =
+ acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
status = acpi_hw_write(register_bit,
&gpe_register_info->status_address);
@@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
u32 in_byte;
u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
- acpi_status status;
acpi_event_status local_event_status = 0;
+ acpi_status status;
ACPI_FUNCTION_ENTRY();
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index ad21c7d8bf4f..050fd227951b 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5d1273b660ae..55accb7018bb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3796811276ac..2ac28bbe8827 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 1ef8e0bb250b..9c8eb71a12fb 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e1d9c777b213..5f1605874655 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 50cc3be77724..6f98d210e71c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 0cd925be5fc1..d93172fd15a8 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -163,9 +163,9 @@ acpi_status acpi_ns_root_initialize(void)
#else
/* Mark this as a very SPECIAL method */
- obj_desc->method.method_flags =
- AML_METHOD_INTERNAL_ONLY;
- obj_desc->method.extra.implementation =
+ obj_desc->method.info_flags =
+ ACPI_METHOD_INTERNAL_ONLY;
+ obj_desc->method.dispatch.implementation =
acpi_ut_osi_implementation;
#endif
break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1e5ff803d9ad..1d0ef15d158f 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,8 +234,8 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
* modified the namespace. This is used for cleanup when the
* method exits.
*/
- walk_state->method_desc->method.flags |=
- AOPOBJ_MODIFIED_NAMESPACE;
+ walk_state->method_desc->method.info_flags |=
+ ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
@@ -341,6 +341,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node = NULL;
u32 level = 1;
+ acpi_status status;
ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
@@ -348,6 +349,13 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
return_VOID;
}
+ /* Lock namespace for possible update */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
/*
* Traverse the tree of objects until we bubble back up
* to where we started.
@@ -397,6 +405,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
}
}
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index a54dc39e304b..b683cc2ff9d3 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -624,9 +624,22 @@ acpi_ns_dump_objects(acpi_object_type type,
acpi_owner_id owner_id, acpi_handle start_handle)
{
struct acpi_walk_info info;
+ acpi_status status;
ACPI_FUNCTION_ENTRY();
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes, since we are going to display
+ * them as well.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not acquire namespace mutex\n");
+ return;
+ }
+
info.debug_level = ACPI_LV_TABLES;
info.owner_id = owner_id;
info.display_type = display_type;
@@ -636,6 +649,8 @@ acpi_ns_dump_objects(acpi_object_type type,
ACPI_NS_WALK_TEMP_NODES,
acpi_ns_dump_one_object, NULL,
(void *)&info, NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index d2a97921e249..2ed294b7a4db 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f52829cc294b..c1bd02b1a058 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -389,7 +389,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
* acpi_gbl_root_node->Object is NULL at PASS1.
*/
if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
- method_obj->method.extra.handler =
+ method_obj->method.dispatch.handler =
parent_node->object->device.handler;
}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 0cac7ec0d2ec..fd7c6380e294 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index df18be94fefe..5f7dc691c183 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d3104af57e13..d5fa520c3de5 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 41a9213dd5af..3bb8bf105ea2 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 5808c89e9fac..b3234fa795b8 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7096bcda0c72..9fb03fa8ffde 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d1c136692667..1d76ac85b5e7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4ef9f43ea926..973883babee1 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 41102a84272f..28b0d7a62b99 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a7d6ad9c111b..cb1b104a69a2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 2cd5be8fe10f..345f0c3c6ad2 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index ebef8a7fd707..c53f0040e490 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b01e45a415e3..3fd4526f3dba 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -603,10 +603,9 @@ acpi_status acpi_install_method(u8 *buffer)
method_obj->method.param_count = (u8)
(method_flags & AML_METHOD_ARG_COUNT);
- method_obj->method.method_flags = (u8)
- (method_flags & ~AML_METHOD_ARG_COUNT);
-
if (method_flags & AML_METHOD_SERIALIZED) {
+ method_obj->method.info_flags = ACPI_METHOD_SERIALIZED;
+
method_obj->method.sync_level = (u8)
((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index a1f04e9b8030..db7660f8b869 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 7df1a4c95274..e1fad0ee0136 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 2f2e7760938c..01dd70d1de51 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -655,7 +655,7 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
method_obj->method.aml_start = aml_start;
method_obj->method.aml_length = aml_length;
method_obj->method.owner_id = owner_id;
- method_obj->method.flags |= AOPOBJ_MODULE_LEVEL;
+ method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
/*
* Save the parent node in next_object. This is cheating, but we
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 2b0c3be2b1b8..bed08de7528c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8d81542194d4..9bb0cbd37b5e 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,6 @@
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
-#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_PARSER
@@ -539,24 +538,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
/* Check for possible multi-thread reentrancy problem */
if ((status == AE_ALREADY_EXISTS) &&
- (!walk_state->method_desc->method.mutex)) {
- ACPI_INFO((AE_INFO,
- "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
- walk_state->method_node->name.
- ascii));
-
+ (!(walk_state->method_desc->method.
+ info_flags & ACPI_METHOD_SERIALIZED))) {
/*
- * Method tried to create an object twice. The probable cause is
- * that the method cannot handle reentrancy.
- *
- * The method is marked not_serialized, but it tried to create
- * a named object, causing the second thread entrance to fail.
- * Workaround this problem by marking the method permanently
- * as Serialized.
+ * Method is not serialized and tried to create an object
+ * twice. The probable cause is that the method cannot
+ * handle reentrancy. Mark as "pending serialized" now, and
+ * then mark "serialized" when the last thread exits.
*/
- walk_state->method_desc->method.method_flags |=
- AML_METHOD_SERIALIZED;
- walk_state->method_desc->method.sync_level = 0;
+ walk_state->method_desc->method.info_flags |=
+ ACPI_METHOD_SERIALIZED_PENDING;
}
}
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 40e2b279ea12..a5faa1323a02 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index d4b970c3630b..f1464c03aa42 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index fe29eee5adb1..7eda78503422 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8abb9629443d..3312d6368bf1 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c42f067cff9d..8086805d4494 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "actables.h"
-#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psxface")
@@ -285,15 +284,15 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
goto cleanup;
}
- if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
+ if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
}
/* Invoke an internal method if necessary */
- if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+ if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
status =
- info->obj_desc->method.extra.implementation(walk_state);
+ info->obj_desc->method.dispatch.implementation(walk_state);
info->return_object = walk_state->return_desc;
/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 226c806ae986..9e66f9078426 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index d6ebf7ec622d..3a8a89ec2ca4 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index c80a2eea3a01..4ce6e1147e80 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f859b0386fe4..33db7520c74b 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 1fd868b964fd..f9ea60872aa4 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 33bff17c0bbc..0c7efef008be 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 545da40d7fa7..50b8ad211167 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 7335f22aac20..1bfcef736c50 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 887b8ba8c432..7cc6d8625f1e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index f8cd9e87d987..410264b22a29 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 491191e6cf69..231811e56939 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 9f6a6e7e1c8e..2ff657a28f26 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index d2ff4325c427..428d44e2d162 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 989d5c867864..a55cb2bb5abb 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 83d7af8d0905..48db0944ce4a 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 34f9c2bc5e1f..0f2d395feaba 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4a8b9e6ea57a..4b7085dfc683 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index fd2c07d1d3ac..7eb6c6cc1edf 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8f0896281567..0a697351cf69 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 6fef83f04bcd..aded299a2fa8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index f21c486929a5..a9bcd816dc29 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index ed794cd033ea..31f5a7832ef1 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 22f59ef604e0..18f73c9d10bc 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e87bc6760be6..97dd9bbf055a 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
acpi_current_gpe_count = 0;
- acpi_all_gpes_initialized = FALSE;
+ acpi_gbl_all_gpes_initialized = FALSE;
/* Global handlers */
@@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_init_handler = NULL;
acpi_gbl_table_handler = NULL;
acpi_gbl_interface_handler = NULL;
+ acpi_gbl_global_event_handler = NULL;
/* Global Lock support */
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index d2906328535d..b679ea693545 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c1b1c803ea9b..191b6828cce9 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b081cd46a15f..f6bb75c6faf5 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49cf7b7fd816..ce481da9bb45 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c7d0e05ef5a4..c33a852d4f42 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa495b433..a946c689f03b 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,7 @@ acpi_status acpi_ut_mutex_initialize(void)
spin_lock_init(acpi_gbl_gpe_lock);
spin_lock_init(acpi_gbl_hardware_lock);
+ spin_lock_init(acpi_ev_global_lock_pending_lock);
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index fd1fa2749ea5..188340a017b4 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 18c59a85fdca..1fb10cb8f11d 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 7965919000b1..84e051844247 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index d35d109b8da2..30c21e1a9360 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 1f484c9a6888..98ad125e14ff 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6f12e314fbae..916ae097c43c 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 18df1e940276..ef0581f2094d 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -109,6 +109,8 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
return sizeof(*estatus) + estatus->data_length;
}
+void apei_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index f4cf2fc4c8c1..31464a006d76 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -46,6 +46,317 @@ u64 cper_next_record_id(void)
}
EXPORT_SYMBOL_GPL(cper_next_record_id);
+static const char *cper_severity_strs[] = {
+ "recoverable",
+ "fatal",
+ "corrected",
+ "info",
+};
+
+static const char *cper_severity_str(unsigned int severity)
+{
+ return severity < ARRAY_SIZE(cper_severity_strs) ?
+ cper_severity_strs[severity] : "unknown";
+}
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: number of entries in @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length exceeds 80 characters, multiple lines will be
+ * printed, with @pfx printed at the beginning of each line.
+ */
+static void cper_print_bits(const char *pfx, unsigned int bits,
+ const char *strs[], unsigned int strs_size)
+{
+ int i, len = 0;
+ const char *str;
+ char buf[84];
+
+ for (i = 0; i < strs_size; i++) {
+ if (!(bits & (1U << i)))
+ continue;
+ str = strs[i];
+ if (len && len + strlen(str) + 2 > 80) {
+ printk("%s\n", buf);
+ len = 0;
+ }
+ if (!len)
+ len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+ else
+ len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+ }
+ if (len)
+ printk("%s\n", buf);
+}
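A minimal usage sketch (illustration only, not part of this patch; example_flag_strs and example_decode are hypothetical names): a decoder passes a bit-name table plus a mask, and cper_print_bits() prints the names of the set bits, wrapping to a new @pfx-prefixed line once 80 characters are exceeded.

static const char *example_flag_strs[] = {
	"flag zero",
	"flag one",
	"flag two",
};

static void example_decode(const char *pfx)
{
	/* bits 0 and 2 set: prints "<pfx>flag zero, flag two" on one line */
	cper_print_bits(pfx, 0x5, example_flag_strs,
			ARRAY_SIZE(example_flag_strs));
}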
+
+static const char *cper_proc_type_strs[] = {
+ "IA32/X64",
+ "IA64",
+};
+
+static const char *cper_proc_isa_strs[] = {
+ "IA32",
+ "IA64",
+ "X64",
+};
+
+static const char *cper_proc_error_type_strs[] = {
+ "cache error",
+ "TLB error",
+ "bus error",
+ "micro-architectural error",
+};
+
+static const char *cper_proc_op_strs[] = {
+ "unknown or generic",
+ "data read",
+ "data write",
+ "instruction execution",
+};
+
+static const char *cper_proc_flag_strs[] = {
+ "restartable",
+ "precise IP",
+ "overflow",
+ "corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+ const struct cper_sec_proc_generic *proc)
+{
+ if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+ printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+ proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
+ cper_proc_type_strs[proc->proc_type] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ISA)
+ printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+ proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
+ cper_proc_isa_strs[proc->proc_isa] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+ printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+ cper_print_bits(pfx, proc->proc_error_type,
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+ printk("%s""operation: %d, %s\n", pfx, proc->operation,
+ proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
+ cper_proc_op_strs[proc->operation] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+ printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+ cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
+ ARRAY_SIZE(cper_proc_flag_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+ printk("%s""level: %d\n", pfx, proc->level);
+ if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+ printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+ if (proc->validation_bits & CPER_PROC_VALID_ID)
+ printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+ if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+ printk("%s""target_address: 0x%016llx\n",
+ pfx, proc->target_addr);
+ if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+ printk("%s""requestor_id: 0x%016llx\n",
+ pfx, proc->requestor_id);
+ if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+ printk("%s""responder_id: 0x%016llx\n",
+ pfx, proc->responder_id);
+ if (proc->validation_bits & CPER_PROC_VALID_IP)
+ printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char *cper_mem_err_type_strs[] = {
+ "unknown",
+ "no error",
+ "single-bit ECC",
+ "multi-bit ECC",
+ "single-symbol chipkill ECC",
+ "multi-symbol chipkill ECC",
+ "master abort",
+ "target abort",
+ "parity error",
+ "watchdog timeout",
+ "invalid address",
+ "mirror Broken",
+ "memory sparing",
+ "scrub corrected error",
+ "scrub uncorrected error",
+};
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+{
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+ printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+ if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+ printk("%s""physical_address: 0x%016llx\n",
+ pfx, mem->physical_addr);
+ if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+ printk("%s""physical_address_mask: 0x%016llx\n",
+ pfx, mem->physical_addr_mask);
+ if (mem->validation_bits & CPER_MEM_VALID_NODE)
+ printk("%s""node: %d\n", pfx, mem->node);
+ if (mem->validation_bits & CPER_MEM_VALID_CARD)
+ printk("%s""card: %d\n", pfx, mem->card);
+ if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+ printk("%s""module: %d\n", pfx, mem->module);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK)
+ printk("%s""bank: %d\n", pfx, mem->bank);
+ if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+ printk("%s""device: %d\n", pfx, mem->device);
+ if (mem->validation_bits & CPER_MEM_VALID_ROW)
+ printk("%s""row: %d\n", pfx, mem->row);
+ if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+ printk("%s""column: %d\n", pfx, mem->column);
+ if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+ printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+ if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+ printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+ if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+ printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+ if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+ printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ u8 etype = mem->error_type;
+ printk("%s""error_type: %d, %s\n", pfx, etype,
+ etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
+ cper_mem_err_type_strs[etype] : "unknown");
+ }
+}
+
+static const char *cper_pcie_port_type_strs[] = {
+ "PCIe end point",
+ "legacy PCI end point",
+ "unknown",
+ "unknown",
+ "root port",
+ "upstream switch port",
+ "downstream switch port",
+ "PCIe to PCI/PCI-X bridge",
+ "PCI/PCI-X to PCIe bridge",
+ "root complex integrated endpoint device",
+ "root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+{
+ if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+ printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+ pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
+ cper_pcie_port_type_strs[pcie->port_type] : "unknown");
+ if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+ printk("%s""version: %d.%d\n", pfx,
+ pcie->version.major, pcie->version.minor);
+ if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+ printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+ pcie->command, pcie->status);
+ if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+ const __u8 *p;
+ printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+ pcie->device_id.segment, pcie->device_id.bus,
+ pcie->device_id.device, pcie->device_id.function);
+ printk("%s""slot: %d\n", pfx,
+ pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+ printk("%s""secondary_bus: 0x%02x\n", pfx,
+ pcie->device_id.secondary_bus);
+ printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+ pcie->device_id.vendor_id, pcie->device_id.device_id);
+ p = pcie->device_id.class_code;
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+ }
+ if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+ printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+ pcie->serial_number.lower, pcie->serial_number.upper);
+ if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+ printk(
+ "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+ pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+}
+
+static const char *apei_estatus_section_flag_strs[] = {
+ "primary",
+ "containment warning",
+ "reset",
+ "threshold exceeded",
+ "resource not accessible",
+ "latent error",
+};
+
+static void apei_estatus_print_section(
+ const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+{
+ uuid_le *sec_type = (uuid_le *)gdata->section_type;
+ __u16 severity;
+
+ severity = gdata->error_severity;
+ printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+ cper_severity_str(severity));
+ printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
+ cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
+ ARRAY_SIZE(apei_estatus_section_flag_strs));
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+ printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+ printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+ if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
+ struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
+ printk("%s""section_type: general processor error\n", pfx);
+ if (gdata->error_data_length >= sizeof(*proc_err))
+ cper_print_proc_generic(pfx, proc_err);
+ else
+ goto err_section_too_small;
+ } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+ struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
+ printk("%s""section_type: memory error\n", pfx);
+ if (gdata->error_data_length >= sizeof(*mem_err))
+ cper_print_mem(pfx, mem_err);
+ else
+ goto err_section_too_small;
+ } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie = (void *)(gdata + 1);
+ printk("%s""section_type: PCIe error\n", pfx);
+ if (gdata->error_data_length >= sizeof(*pcie))
+ cper_print_pcie(pfx, pcie);
+ else
+ goto err_section_too_small;
+ } else
+ printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+
+ return;
+
+err_section_too_small:
+ pr_err(FW_WARN "error section length is too small\n");
+}
+
+void apei_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int sec_no = 0;
+ __u16 severity;
+
+ printk("%s""APEI generic hardware error status\n", pfx);
+ severity = estatus->error_severity;
+ printk("%s""severity: %d, %s\n", pfx, severity,
+ cper_severity_str(severity));
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ apei_estatus_print_section(pfx, gdata, sec_no);
+ data_len -= gedata_len + sizeof(*gdata);
+ sec_no++;
+ }
+}
+EXPORT_SYMBOL_GPL(apei_estatus_print);
+
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
if (estatus->data_length &&
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index cf29df69380b..096aebfe7f32 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -39,7 +39,7 @@
#define EINJ_PFX "EINJ: "
#define SPIN_UNIT 100 /* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
/*
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 5850d320404c..cf6db6b7662a 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -53,7 +53,7 @@
sizeof(struct acpi_table_erst)))
#define SPIN_UNIT 100 /* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
#define FIRMWARE_MAX_STALL 50 /* 50us */
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d505e59214d..d1d484d4a06a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,10 +12,6 @@
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
- * Now, only SCI notification type and memory errors are
- * supported. More notification type and hardware error type will be
- * added later.
- *
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
@@ -39,14 +35,18 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
+#include <asm/tlbflush.h>
#include "apei-internal.h"
@@ -55,42 +55,131 @@
#define GHES_ESTATUS_MAX_SIZE 65536
/*
- * One struct ghes is created for each generic hardware error
- * source.
- *
+ * One struct ghes is created for each generic hardware error source.
* It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler. Handler for one generic hardware error source is only
- * triggered after the previous one is done. So handler can uses
- * struct ghes without locking.
+ * handler.
*
* estatus: memory buffer for error status block, allocated during
* HEST parsing.
*/
#define GHES_TO_CLEAR 0x0001
+#define GHES_EXITING 0x0002
struct ghes {
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- struct list_head list;
u64 buffer_paddr;
unsigned long flags;
+ union {
+ struct list_head list;
+ struct timer_list timer;
+ unsigned int irq;
+ };
};
+static int ghes_panic_timeout __read_mostly = 30;
+
/*
- * Error source lists, one list for each notification method. The
- * members in lists are struct ghes.
+ * All error sources notified via SCI share one notifier function, so
+ * they need to be linked and checked one by one. The same applies to
+ * NMI.
*
- * The list members are only added in HEST parsing and deleted during
- * module_exit, that is, single-threaded. So no lock is needed for
- * that.
- *
- * But the mutual exclusion is needed between members adding/deleting
- * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
- * used for that.
+ * RCU is used for these lists, so ghes_list_mutex is only needed when
+ * changing the lists, not when traversing them.
*/
static LIST_HEAD(ghes_sci);
+static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);
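A sketch of the locking discipline described in the comment above (illustration only; example_add_source and example_scan_sources are hypothetical, and the real add/traverse sites appear later in this patch): writers take ghes_list_mutex around the RCU list update, while handlers traverse under RCU without the mutex.

static void example_add_source(struct ghes *ghes)
{
	/* writer side, process context (probe/remove) */
	mutex_lock(&ghes_list_mutex);
	list_add_rcu(&ghes->list, &ghes_sci);
	mutex_unlock(&ghes_list_mutex);
}

static void example_scan_sources(void)
{
	struct ghes *ghes;

	/* reader side, notifier/handler context */
	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list)
		ghes_proc(ghes);	/* defined later in this file */
	rcu_read_unlock();
}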
+/*
+ * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
+ * mutual exclusion.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+
+/*
+ * The memory area used to transfer hardware error information from
+ * BIOS to Linux can be determined only in the NMI, IRQ or timer
+ * handler, but the general ioremap cannot be used in atomic context,
+ * so a special version of atomic ioremap is implemented for that.
+ */
+
+/*
+ * Two virtual pages are used, one for NMI context and the other for
+ * IRQ/process context.
+ */
+#define GHES_IOREMAP_PAGES 2
+#define GHES_IOREMAP_NMI_PAGE(base) (base)
+#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE)
+
+/* virtual memory area for atomic ioremap */
+static struct vm_struct *ghes_ioremap_area;
+/*
+ * These two spinlocks are used to prevent the atomic ioremap virtual
+ * memory areas from being mapped simultaneously.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
+static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+
+static int ghes_ioremap_init(void)
+{
+ ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+ VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+ if (!ghes_ioremap_area) {
+ pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ghes_ioremap_exit(void)
+{
+ free_vm_area(ghes_ioremap_area);
+}
+
+static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+{
+ unsigned long vaddr;
+
+ vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
+ ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+ pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+ return (void __iomem *)vaddr;
+}
+
+static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
+{
+ unsigned long vaddr;
+
+ vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
+ ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+ pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+ return (void __iomem *)vaddr;
+}
+
+static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+{
+ unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+ void *base = ghes_ioremap_area->addr;
+
+ BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
+ unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+ __flush_tlb_one(vaddr);
+}
+
+static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+{
+ unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+ void *base = ghes_ioremap_area->addr;
+
+ BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
+ unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+ __flush_tlb_one(vaddr);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -101,7 +190,6 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- INIT_LIST_HEAD(&ghes->list);
rc = acpi_pre_map_gar(&generic->error_status_address);
if (rc)
goto err_free;
@@ -158,22 +246,41 @@ static inline int ghes_severity(int severity)
}
}
-/* SCI handler run in work queue, so ioremap can be used here */
-static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
- int from_phys)
+static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
{
- void *vaddr;
-
- vaddr = ioremap_cache(paddr, len);
- if (!vaddr)
- return -ENOMEM;
- if (from_phys)
- memcpy(buffer, vaddr, len);
- else
- memcpy(vaddr, buffer, len);
- iounmap(vaddr);
-
- return 0;
+ void __iomem *vaddr;
+ unsigned long flags = 0;
+ int in_nmi = in_nmi();
+ u64 offset;
+ u32 trunk;
+
+ while (len > 0) {
+ offset = paddr - (paddr & PAGE_MASK);
+ if (in_nmi) {
+ raw_spin_lock(&ghes_ioremap_lock_nmi);
+ vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
+ } else {
+ spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
+ vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
+ }
+ trunk = PAGE_SIZE - offset;
+ trunk = min(trunk, len);
+ if (from_phys)
+ memcpy_fromio(buffer, vaddr + offset, trunk);
+ else
+ memcpy_toio(vaddr + offset, buffer, trunk);
+ len -= trunk;
+ paddr += trunk;
+ buffer += trunk;
+ if (in_nmi) {
+ ghes_iounmap_nmi(vaddr);
+ raw_spin_unlock(&ghes_ioremap_lock_nmi);
+ } else {
+ ghes_iounmap_irq(vaddr);
+ spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
+ }
+ }
}
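A worked example of the chunking above (numbers are illustrative, assuming PAGE_SIZE is 0x1000): copying len = 0x100 bytes from paddr = 0x1ffc0 takes two passes.

	pass 1: offset = 0x1ffc0 - 0x1f000 = 0xfc0, trunk = min(0x1000 - 0xfc0, 0x100) = 0x40
	pass 2: paddr = 0x20000, offset = 0,        trunk = min(0x1000, 0xc0)          = 0xc0

Each mapping therefore only ever covers bytes within a single page, which is what the one-page atomic ioremap slots require.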
static int ghes_read_estatus(struct ghes *ghes, int silent)
@@ -194,10 +301,8 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
if (!buf_paddr)
return -ENOENT;
- rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
- sizeof(*ghes->estatus), 1);
- if (rc)
- return rc;
+ ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
if (!ghes->estatus->block_status)
return -ENOENT;
@@ -212,17 +317,15 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
goto err_read_block;
if (apei_estatus_check_header(ghes->estatus))
goto err_read_block;
- rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
- buf_paddr + sizeof(*ghes->estatus),
- len - sizeof(*ghes->estatus), 1);
- if (rc)
- return rc;
+ ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
if (apei_estatus_check(ghes->estatus))
goto err_read_block;
rc = 0;
err_read_block:
- if (rc && !silent)
+ if (rc && !silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
"Failed to read error status block!\n");
return rc;
@@ -255,11 +358,26 @@ static void ghes_do_proc(struct ghes *ghes)
}
#endif
}
+}
- if (!processed && printk_ratelimit())
- pr_warning(GHES_PFX
- "Unknown error record from generic hardware error source: %d\n",
- ghes->generic->header.source_id);
+static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
+
+ if (pfx == NULL) {
+ if (ghes_severity(ghes->estatus->error_severity) <=
+ GHES_SEV_CORRECTED)
+ pfx = KERN_WARNING HW_ERR;
+ else
+ pfx = KERN_ERR HW_ERR;
+ }
+ if (__ratelimit(&ratelimit)) {
+ printk(
+ "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx, ghes->generic->header.source_id);
+ apei_estatus_print(pfx, ghes->estatus);
+ }
}
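The DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2) above encodes an interval of five seconds and a burst of two; a standalone sketch of the same pattern (example_rs and example_log are hypothetical names):

#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 2);	/* interval, burst */

static void example_log(void)
{
	if (__ratelimit(&example_rs))	/* true while still under the limit */
		pr_warn("example: at most two messages every five seconds\n");
}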
static int ghes_proc(struct ghes *ghes)
@@ -269,6 +387,7 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
+ ghes_print_estatus(NULL, ghes);
ghes_do_proc(ghes);
out:
@@ -276,6 +395,42 @@ out:
return 0;
}
+static void ghes_add_timer(struct ghes *ghes)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ unsigned long expire;
+
+ if (!g->notify.poll_interval) {
+ pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+ g->header.source_id);
+ return;
+ }
+ expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
+ ghes->timer.expires = round_jiffies_relative(expire);
+ add_timer(&ghes->timer);
+}
+
+static void ghes_poll_func(unsigned long data)
+{
+ struct ghes *ghes = (void *)data;
+
+ ghes_proc(ghes);
+ if (!(ghes->flags & GHES_EXITING))
+ ghes_add_timer(ghes);
+}
+
+static irqreturn_t ghes_irq_func(int irq, void *data)
+{
+ struct ghes *ghes = data;
+ int rc;
+
+ rc = ghes_proc(ghes);
+ if (rc)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
static int ghes_notify_sci(struct notifier_block *this,
unsigned long event, void *data)
{
@@ -292,10 +447,63 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static int ghes_notify_nmi(struct notifier_block *this,
+ unsigned long cmd, void *data)
+{
+ struct ghes *ghes, *ghes_global = NULL;
+ int sev, sev_global = -1;
+ int ret = NOTIFY_DONE;
+
+ if (cmd != DIE_NMI)
+ return ret;
+
+ raw_spin_lock(&ghes_nmi_lock);
+ list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+ if (ghes_read_estatus(ghes, 1)) {
+ ghes_clear_estatus(ghes);
+ continue;
+ }
+ sev = ghes_severity(ghes->estatus->error_severity);
+ if (sev > sev_global) {
+ sev_global = sev;
+ ghes_global = ghes;
+ }
+ ret = NOTIFY_STOP;
+ }
+
+ if (ret == NOTIFY_DONE)
+ goto out;
+
+ if (sev_global >= GHES_SEV_PANIC) {
+ oops_begin();
+ ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ /* reboot to log the error! */
+ if (panic_timeout == 0)
+ panic_timeout = ghes_panic_timeout;
+ panic("Fatal hardware error!");
+ }
+
+ list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ continue;
+ /* Do not print estatus because printk is not NMI safe */
+ ghes_do_proc(ghes);
+ ghes_clear_estatus(ghes);
+ }
+
+out:
+ raw_spin_unlock(&ghes_nmi_lock);
+ return ret;
+}
+
static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
+static struct notifier_block ghes_notifier_nmi = {
+ .notifier_call = ghes_notify_nmi,
+};
+
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
@@ -306,18 +514,27 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
if (!generic->enabled)
return -ENODEV;
- if (generic->error_block_length <
- sizeof(struct acpi_hest_generic_status)) {
- pr_warning(FW_BUG GHES_PFX
-"Invalid error block length: %u for generic hardware error source: %d\n",
- generic->error_block_length,
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_SCI:
+ case ACPI_HEST_NOTIFY_NMI:
+ break;
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
generic->header.source_id);
goto err;
+ default:
+ pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ goto err;
}
- if (generic->records_to_preallocate == 0) {
- pr_warning(FW_BUG GHES_PFX
-"Invalid records to preallocate: %u for generic hardware error source: %d\n",
- generic->records_to_preallocate,
+
+ rc = -EIO;
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
generic->header.source_id);
goto err;
}
@@ -327,38 +544,43 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
ghes = NULL;
goto err;
}
- if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ ghes->timer.function = ghes_poll_func;
+ ghes->timer.data = (unsigned long)ghes;
+ init_timer_deferrable(&ghes->timer);
+ ghes_add_timer(ghes);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ /* External interrupt vector is GSI */
+ if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+ pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
+ generic->header.source_id);
+ goto err;
+ }
+ if (request_irq(ghes->irq, ghes_irq_func,
+ 0, "GHES IRQ", ghes)) {
+ pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
+ generic->header.source_id);
+ goto err;
+ }
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_sci))
register_acpi_hed_notifier(&ghes_notifier_sci);
list_add_rcu(&ghes->list, &ghes_sci);
mutex_unlock(&ghes_list_mutex);
- } else {
- unsigned char *notify = NULL;
-
- switch (generic->notify.type) {
- case ACPI_HEST_NOTIFY_POLLED:
- notify = "POLL";
- break;
- case ACPI_HEST_NOTIFY_EXTERNAL:
- case ACPI_HEST_NOTIFY_LOCAL:
- notify = "IRQ";
- break;
- case ACPI_HEST_NOTIFY_NMI:
- notify = "NMI";
- break;
- }
- if (notify) {
- pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via %s is not supported!\n",
- generic->header.source_id, notify);
- } else {
- pr_warning(FW_WARN GHES_PFX
-"Unknown notification type: %u for generic hardware error source: %d\n",
- generic->notify.type, generic->header.source_id);
- }
- rc = -ENODEV;
- goto err;
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ mutex_lock(&ghes_list_mutex);
+ if (list_empty(&ghes_nmi))
+ register_die_notifier(&ghes_notifier_nmi);
+ list_add_rcu(&ghes->list, &ghes_nmi);
+ mutex_unlock(&ghes_list_mutex);
+ break;
+ default:
+ BUG();
}
platform_set_drvdata(ghes_dev, ghes);
@@ -379,7 +601,14 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
+ ghes->flags |= GHES_EXITING;
switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ del_timer_sync(&ghes->timer);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ free_irq(ghes->irq, ghes);
+ break;
case ACPI_HEST_NOTIFY_SCI:
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
@@ -387,12 +616,23 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
break;
+ case ACPI_HEST_NOTIFY_NMI:
+ mutex_lock(&ghes_list_mutex);
+ list_del_rcu(&ghes->list);
+ if (list_empty(&ghes_nmi))
+ unregister_die_notifier(&ghes_notifier_nmi);
+ mutex_unlock(&ghes_list_mutex);
+ /*
+ * To synchronize with the NMI handler, ghes can only be
+ * freed after the NMI handler finishes.
+ */
+ synchronize_rcu();
+ break;
default:
BUG();
break;
}
- synchronize_rcu();
ghes_fini(ghes);
kfree(ghes);
@@ -412,6 +652,8 @@ static struct platform_driver ghes_platform_driver = {
static int __init ghes_init(void)
{
+ int rc;
+
if (acpi_disabled)
return -ENODEV;
@@ -420,12 +662,25 @@ static int __init ghes_init(void)
return -EINVAL;
}
- return platform_driver_register(&ghes_platform_driver);
+ rc = ghes_ioremap_init();
+ if (rc)
+ goto err;
+
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_ioremap_exit;
+
+ return 0;
+err_ioremap_exit:
+ ghes_ioremap_exit();
+err:
+ return rc;
}
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_ioremap_exit();
}
module_init(ghes_init);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index daa7bc63f1d4..abda3786a5d7 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -195,24 +195,24 @@ static int __init setup_hest_disable(char *str)
__setup("hest_disable", setup_hest_disable);
-static int __init hest_init(void)
+void __init acpi_hest_init(void)
{
acpi_status status;
int rc = -ENODEV;
unsigned int ghes_count = 0;
- if (acpi_disabled)
- goto err;
-
if (hest_disable) {
- pr_info(HEST_PFX "HEST tabling parsing is disabled.\n");
- goto err;
+ pr_info(HEST_PFX "Table parsing disabled.\n");
+ return;
}
+ if (acpi_disabled)
+ goto err;
+
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND) {
- pr_info(HEST_PFX "Table is not found!\n");
+ pr_info(HEST_PFX "Table not found.\n");
goto err;
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
@@ -226,15 +226,11 @@ static int __init hest_init(void)
goto err;
rc = hest_ghes_dev_register(ghes_count);
- if (rc)
- goto err;
-
- pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+ if (!rc) {
+ pr_info(HEST_PFX "Table parsing has been initialized.\n");
+ return;
+ }
- return 0;
err:
hest_disable = 1;
- return rc;
}
-
-subsys_initcall(hest_init);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 95649d373071..ac1a599f5147 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -631,6 +631,17 @@ static int acpi_battery_update(struct acpi_battery *battery)
return result;
}
+static void acpi_battery_refresh(struct acpi_battery *battery)
+{
+ if (!battery->bat.dev)
+ return;
+
+ acpi_battery_get_info(battery);
+ /* The battery may have changed its reporting units. */
+ sysfs_remove_battery(battery);
+ sysfs_add_battery(battery);
+}
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -868,6 +879,8 @@ static int acpi_battery_add_fs(struct acpi_device *device)
struct proc_dir_entry *entry = NULL;
int i;
+ printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+ " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
@@ -914,6 +927,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
if (!battery)
return;
old = battery->bat.dev;
+ if (event == ACPI_BATTERY_NOTIFY_INFO)
+ acpi_battery_refresh(battery);
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d68bd61072bb..7ced61f39492 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir);
#define STRUCT_TO_INT(s) (*((int*)&s))
-static int set_power_nocheck(const struct dmi_system_id *id)
-{
- printk(KERN_NOTICE PREFIX "%s detected - "
- "disable power check in power transition\n", id->ident);
- acpi_power_nocheck = 1;
- return 0;
-}
-static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
- {
- set_power_nocheck, "HP Pavilion 05", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
- {},
-};
-
#ifdef CONFIG_X86
static int set_copy_dsdt(const struct dmi_system_id *id)
@@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
Power Management
-------------------------------------------------------------------------- */
-int acpi_bus_get_power(acpi_handle handle, int *state)
+static int __acpi_bus_get_power(struct acpi_device *device, int *state)
{
int result = 0;
acpi_status status = 0;
- struct acpi_device *device = NULL;
unsigned long long psc = 0;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
+ if (!device || !state)
+ return -EINVAL;
*state = ACPI_STATE_UNKNOWN;
- if (!device->flags.power_manageable) {
- /* TBD: Non-recursive algorithm for walking up hierarchy */
- if (device->parent)
- *state = device->parent->power.state;
- else
- *state = ACPI_STATE_D0;
- } else {
+ if (device->flags.power_manageable) {
/*
* Get the device's power state either directly (via _PSC) or
* indirectly (via power resources).
*/
if (device->power.flags.power_resources) {
- result = acpi_power_get_inferred_state(device);
+ result = acpi_power_get_inferred_state(device, state);
if (result)
return result;
} else if (device->power.flags.explicit_get) {
@@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
NULL, &psc);
if (ACPI_FAILURE(status))
return -ENODEV;
- device->power.state = (int)psc;
+ *state = (int)psc;
}
-
- *state = device->power.state;
+ } else {
+ /* TBD: Non-recursive algorithm for walking up hierarchy. */
+ *state = device->parent ?
+ device->parent->power.state : ACPI_STATE_D0;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
- device->pnp.bus_id, device->power.state));
+ device->pnp.bus_id, *state));
return 0;
}
-EXPORT_SYMBOL(acpi_bus_get_power);
-int acpi_bus_set_power(acpi_handle handle, int state)
+static int __acpi_bus_set_power(struct acpi_device *device, int state)
{
int result = 0;
acpi_status status = AE_OK;
- struct acpi_device *device = NULL;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
return -EINVAL;
/* Make sure this is a valid target state */
- if (!device->flags.power_manageable) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
- kobject_name(&device->dev.kobj)));
- return -ENODEV;
- }
- /*
- * Get device's current power state
- */
- if (!acpi_power_nocheck) {
- /*
- * Maybe the incorrect power state is returned on the bogus
- * bios, which is different with the real power state.
- * For example: the bios returns D0 state and the real power
- * state is D3. OS expects to set the device to D0 state. In
- * such case if OS uses the power state returned by the BIOS,
- * the device can't be transisted to the correct power state.
- * So if the acpi_power_nocheck is set, it is unnecessary to
- * get the power state by calling acpi_bus_get_power.
- */
- acpi_bus_get_power(device->handle, &device->power.state);
- }
- if ((state == device->power.state) && !device->flags.force_power_state) {
+ if (state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
return 0;
@@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state)
return result;
}
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ if (!device->flags.power_manageable) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] is not power manageable\n",
+ dev_name(&device->dev)));
+ return -ENODEV;
+ }
+
+ return __acpi_bus_set_power(device, state);
+}
EXPORT_SYMBOL(acpi_bus_set_power);
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ device->power.state = ACPI_STATE_UNKNOWN;
+
+ result = __acpi_bus_get_power(device, &state);
+ if (result)
+ return result;
+
+ if (device->power.flags.power_resources)
+ result = acpi_power_on_resources(device, state);
+
+ if (!result)
+ device->power.state = state;
+
+ return result;
+}
+
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+ struct acpi_device *device;
+ int state;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ result = __acpi_bus_get_power(device, &state);
+ if (result)
+ return result;
+
+ result = __acpi_bus_set_power(device, state);
+ if (!result && state_p)
+ *state_p = state;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+
bool acpi_bus_power_manageable(acpi_handle handle)
{
struct acpi_device *device;
@@ -1023,15 +1039,8 @@ static int __init acpi_init(void)
if (acpi_disabled)
return result;
- /*
- * If the laptop falls into the DMI check table, the power state check
- * will be disabled in the course of device power transition.
- */
- dmi_check_system(power_nocheck_dmi_table);
-
acpi_scan_init();
acpi_ec_init();
- acpi_power_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 71ef9cd0735f..76bbb78a5ad9 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device)
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
+ if (state)
+ pm_wakeup_event(&device->dev, 0);
+
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
+
+ pm_wakeup_event(&device->dev, 0);
}
acpi_bus_generate_proc_event(device, event, ++button->pushed);
@@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device)
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
device->wakeup.run_wake_count++;
- device->wakeup.state.enabled = 1;
+ device_set_wakeup_enable(&device->dev, true);
}
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
device->wakeup.run_wake_count--;
- device->wakeup.state.enabled = 0;
+ device_set_wakeup_enable(&device->dev, false);
}
acpi_button_remove_fs(device);
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1d6c61..384f7abcff77 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
- static int uncopied_bytes;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
struct acpi_table_header table;
acpi_status status;
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
- uncopied_bytes = table.length;
- buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
- if (uncopied_bytes < count) {
- kfree(buf);
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
return -EINVAL;
- }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
+ buf = NULL;
return -EFAULT;
}
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
+ buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 81514a4918cc..1864ad3cf895 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
dock_lock(ds, 1);
- acpi_update_gpes();
+ acpi_update_all_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 302b31ed31f1..fa848c4116a8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
return 0;
}
-static u32 acpi_ec_gpe_handler(void *data)
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, void *data)
{
struct acpi_ec *ec = data;
@@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data)
wake_up(&ec->wait);
ec_check_sci(ec, acpi_ec_read_status(ec));
}
- return ACPI_INTERRUPT_HANDLED;
+ return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 60049080c869..467479f07c1f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
if (!device)
return -EINVAL;
- result = acpi_bus_get_power(device->handle, &acpi_state);
+ result = acpi_bus_update_power(device->handle, &acpi_state);
if (result)
return result;
@@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
static int acpi_fan_add(struct acpi_device *device)
{
int result = 0;
- int state = 0;
struct thermal_cooling_device *cdev;
if (!device)
@@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device)
strcpy(acpi_device_name(device), "Fan");
strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
- result = acpi_bus_get_power(device->handle, &state);
+ result = acpi_bus_update_power(device->handle, NULL);
if (result) {
- printk(KERN_ERR PREFIX "Reading power state\n");
+ printk(KERN_ERR PREFIX "Setting initial power state\n");
goto end;
}
- device->flags.force_power_state = 1;
- acpi_bus_set_power(device->handle, state);
- device->flags.force_power_state = 0;
-
cdev = thermal_cooling_device_register("Fan", device,
&fan_cooling_ops);
if (IS_ERR(cdev)) {
@@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
static int acpi_fan_resume(struct acpi_device *device)
{
- int result = 0;
- int power_state = 0;
+ int result;
if (!device)
return -EINVAL;
- result = acpi_bus_get_power(device->handle, &power_state);
- if (result) {
- printk(KERN_ERR PREFIX
- "Error reading fan power state\n");
- return result;
- }
-
- device->flags.force_power_state = 1;
- acpi_bus_set_power(device->handle, power_state);
- device->flags.force_power_state = 0;
+ result = acpi_bus_update_power(device->handle, NULL);
+ if (result)
+ printk(KERN_ERR PREFIX "Error updating fan power state\n");
return result;
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 78b0164c35b2..7c47ed55e528 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
"firmware_node");
ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
"physical_node");
- if (acpi_dev->wakeup.flags.valid) {
+ if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
- device_set_wakeup_enable(dev,
- acpi_dev->wakeup.state.enabled);
- }
}
return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a212bfeddf8c..b1cc81a0431b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; }
int acpi_power_init(void);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
-int acpi_power_get_inferred_state(struct acpi_device *device);
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
+int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
-extern int acpi_power_nocheck;
+int acpi_bus_init_power(struct acpi_device *device);
int acpi_wakeup_device_init(void);
void acpi_early_processor_set_pdc(void);
@@ -82,8 +83,16 @@ extern int acpi_sleep_init(void);
#ifdef CONFIG_ACPI_SLEEP
int acpi_sleep_proc_init(void);
+int suspend_nvs_alloc(void);
+void suspend_nvs_free(void);
+int suspend_nvs_save(void);
+void suspend_nvs_restore(void);
#else
static inline int acpi_sleep_proc_init(void) { return 0; }
+static inline int suspend_nvs_alloc(void) { return 0; }
+static inline void suspend_nvs_free(void) {}
+static inline int suspend_nvs_save(void) { return 0; }
+static inline void suspend_nvs_restore(void) {}
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926afec110..5eb25eb3ea48 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
int ret = 0;
- int nr_cpu_entries = nr_cpu_ids;
-#ifdef CONFIG_X86
/*
* Do not limit the number of entries by NR_CPUS or nr_cpus=, because
* SRAT cpu entries may be ordered differently from those in the MADT.
* So go over all cpu entries in SRAT to get the apicid to node mapping.
*/
- nr_cpu_entries = MAX_LOCAL_APIC;
-#endif
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, nr_cpu_entries);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, nr_cpu_entries);
+ acpi_parse_processor_affinity, 0);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
new file mode 100644
index 000000000000..fa5a1df42b79
--- /dev/null
+++ b/drivers/acpi/nvs.c
@@ -0,0 +1,145 @@
+/*
+ * nvs.c - Routines for saving and restoring ACPI NVS memory region
+ *
+ * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include <acpi/acpiosxf.h>
+
+/*
+ * Platforms, like ACPI, may want us to save some memory used by them during
+ * suspend and to restore the contents of this memory during the subsequent
+ * resume. The code below implements a mechanism allowing us to do that.
+ */
+
+struct nvs_page {
+ unsigned long phys_start;
+ unsigned int size;
+ void *kaddr;
+ void *data;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ * suspend_nvs_register - register platform NVS memory region to save
+ * @start - physical address of the region
+ * @size - size of the region
+ *
+ * The NVS region need not be page-aligned at either end, and we arrange
+ * things so that the data from page-aligned addresses in this region will
+ * be copied into separate RAM pages.
+ */
+int suspend_nvs_register(unsigned long start, unsigned long size)
+{
+ struct nvs_page *entry, *next;
+
+ while (size > 0) {
+ unsigned int nr_bytes;
+
+ entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+ if (!entry)
+ goto Error;
+
+ list_add_tail(&entry->node, &nvs_list);
+ entry->phys_start = start;
+ nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+ entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+ start += entry->size;
+ size -= entry->size;
+ }
+ return 0;
+
+ Error:
+ list_for_each_entry_safe(entry, next, &nvs_list, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ return -ENOMEM;
+}
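A worked example of the splitting above (addresses are illustrative, assuming PAGE_SIZE is 0x1000): registering start = 0x3f7e0800, size = 0x1000 produces two entries.

	entry 1: phys_start = 0x3f7e0800, nr_bytes = 0x1000 - 0x800 = 0x800, size = 0x800
	entry 2: phys_start = 0x3f7e1000, nr_bytes = 0x1000,                 size = 0x800

No entry crosses a page boundary, so each one can later be backed by a single RAM page.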
+
+/**
+ * suspend_nvs_free - free data pages allocated for saving NVS regions
+ */
+void suspend_nvs_free(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ free_page((unsigned long)entry->data);
+ entry->data = NULL;
+ if (entry->kaddr) {
+ iounmap(entry->kaddr);
+ entry->kaddr = NULL;
+ }
+ }
+}
+
+/**
+ * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int suspend_nvs_alloc(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node) {
+ entry->data = (void *)__get_free_page(GFP_KERNEL);
+ if (!entry->data) {
+ suspend_nvs_free();
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * suspend_nvs_save - save NVS memory regions
+ */
+int suspend_nvs_save(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ entry->kaddr = acpi_os_ioremap(entry->phys_start,
+ entry->size);
+ if (!entry->kaddr) {
+ suspend_nvs_free();
+ return -ENOMEM;
+ }
+ memcpy(entry->data, entry->kaddr, entry->size);
+ }
+
+ return 0;
+}
+
+/**
+ * suspend_nvs_restore - restore NVS memory regions
+ *
+ * This function is going to be called with interrupts disabled, so it
+ * cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void suspend_nvs_restore(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data)
+ memcpy(entry->kaddr, entry->data, entry->size);
+}
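
For orientation, the suspend_nvs_* helpers introduced above are meant to be chained in a fixed order around a sleep transition; the sleep.c hunks further down wire the save/free calls into the ACPI suspend path. The sketch below is illustrative only and is not part of this patch: the wrapper function is hypothetical, the suspend_nvs_* calls are the ones defined in nvs.c above, and the prototypes are assumed to come from <linux/suspend.h>.

#include <linux/suspend.h>

/* Hypothetical wrapper, for illustration only. */
static int example_nvs_cycle(unsigned long nvs_start, unsigned long nvs_size)
{
	int error;

	/* Register the region once, e.g. while parsing the firmware memory map. */
	error = suspend_nvs_register(nvs_start, nvs_size);
	if (error)
		return error;

	/* Before entering the sleep state: back the region up. */
	error = suspend_nvs_alloc();	/* one RAM page per registered chunk */
	if (error)
		return error;		/* alloc/save clean up after themselves */
	error = suspend_nvs_save();	/* map each chunk and copy it out */
	if (error)
		return error;

	/* ... the platform enters and leaves the sleep state here ... */

	suspend_nvs_restore();		/* copy saved data back; runs with IRQs off */
	suspend_nvs_free();		/* drop the backup pages and the mappings */
	return 0;
}
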
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 055d7b701fff..c90c76aa7f8b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -38,6 +38,7 @@
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
@@ -302,9 +303,10 @@ void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map, *tmp_map;
- unsigned long flags, pg_sz;
+ unsigned long flags;
void __iomem *virt;
- phys_addr_t pg_off;
+ acpi_physical_address pg_off;
+ acpi_size pg_sz;
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -320,7 +322,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = ioremap(pg_off, pg_sz);
+ virt = acpi_os_ioremap(pg_off, pg_sz);
if (!virt) {
kfree(map);
return NULL;
@@ -634,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
- u32 dummy;
void __iomem *virt_addr;
- int size = width / 8, unmap = 0;
+ unsigned int size = width / 8;
+ bool unmap = false;
+ u32 dummy;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
- rcu_read_unlock();
if (!virt_addr) {
- virt_addr = ioremap(phys_addr, size);
- unmap = 1;
+ rcu_read_unlock();
+ virt_addr = acpi_os_ioremap(phys_addr, size);
+ if (!virt_addr)
+ return AE_BAD_ADDRESS;
+ unmap = true;
}
+
if (!value)
value = &dummy;
@@ -664,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
if (unmap)
iounmap(virt_addr);
+ else
+ rcu_read_unlock();
return AE_OK;
}
@@ -672,14 +680,17 @@ acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
void __iomem *virt_addr;
- int size = width / 8, unmap = 0;
+ unsigned int size = width / 8;
+ bool unmap = false;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
- rcu_read_unlock();
if (!virt_addr) {
- virt_addr = ioremap(phys_addr, size);
- unmap = 1;
+ rcu_read_unlock();
+ virt_addr = acpi_os_ioremap(phys_addr, size);
+ if (!virt_addr)
+ return AE_BAD_ADDRESS;
+ unmap = true;
}
switch (width) {
@@ -698,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
if (unmap)
iounmap(virt_addr);
+ else
+ rcu_read_unlock();
return AE_OK;
}
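
The two hunks above follow one pattern: the RCU read lock taken around acpi_map_vaddr_lookup() is now held across the actual MMIO access and released only afterwards, so a cached mapping cannot be torn down while it is in use; the lock is dropped early only on the fallback path that creates, and later unmaps, a transient mapping. Below is a condensed restatement of that pattern as an illustrative sketch; the helper name is hypothetical, while acpi_map_vaddr_lookup() and acpi_os_ioremap() are the routines used in osl.c.

/* Illustration only: read one 32-bit value using the lookup-or-map pattern. */
static acpi_status example_read32(acpi_physical_address phys, u32 *val)
{
	void __iomem *virt;
	bool unmap = false;

	rcu_read_lock();
	virt = acpi_map_vaddr_lookup(phys, sizeof(*val));
	if (!virt) {
		/* No cached mapping: drop the lock and map temporarily. */
		rcu_read_unlock();
		virt = acpi_os_ioremap(phys, sizeof(*val));
		if (!virt)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	*val = readl(virt);

	if (unmap)
		iounmap(virt);
	else
		rcu_read_unlock();	/* cached mapping stays valid until here */

	return AE_OK;
}
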
@@ -1233,8 +1246,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
int acpi_check_resource_conflict(const struct resource *res)
{
struct acpi_res_list *res_list_elem;
- int ioport;
- int clash = 0;
+ int ioport = 0, clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
@@ -1264,9 +1276,13 @@ int acpi_check_resource_conflict(const struct resource *res)
if (clash) {
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
printk(KERN_WARNING "ACPI: resource %s %pR"
- " conflicts with ACPI region %s %pR\n",
+ " conflicts with ACPI region %s "
+ "[%s 0x%zx-0x%zx]\n",
res->name, res, res_list_elem->name,
- res_list_elem);
+ (res_list_elem->resource_type ==
+ ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
+ (size_t) res_list_elem->start,
+ (size_t) res_list_elem->end);
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
printk(KERN_NOTICE "ACPI: This conflict may"
" cause random problems and system"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 96668ad09622..85249395623b 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
#define PREFIX "ACPI: "
@@ -47,6 +48,11 @@ static int acpi_pci_root_add(struct acpi_device *device);
static int acpi_pci_root_remove(struct acpi_device *device, int type);
static int acpi_pci_root_start(struct acpi_device *device);
+#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
+ | OSC_ACTIVE_STATE_PWR_SUPPORT \
+ | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
+ | OSC_MSI_SUPPORT)
+
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
@@ -566,6 +572,33 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
+ if (!pcie_ports_disabled
+ && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
+ flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (aer_acpi_firmware_first())
+ dev_dbg(root->bus->bridge,
+ "PCIe errors handled by BIOS.\n");
+ else
+ flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ dev_info(root->bus->bridge,
+ "Requesting ACPI _OSC control (0x%02x)\n", flags);
+
+ status = acpi_pci_osc_control_set(device->handle, &flags,
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ if (ACPI_SUCCESS(status))
+ dev_info(root->bus->bridge,
+ "ACPI _OSC control (0x%02x) granted\n", flags);
+ else
+ dev_dbg(root->bus->bridge,
+ "ACPI _OSC request failed (code %d)\n", status);
+ }
+
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
@@ -600,6 +633,8 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
static int __init acpi_pci_root_init(void)
{
+ acpi_hest_init();
+
if (acpi_pci_disabled)
return 0;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4c9c2fb5d98f..9ac2a9fa90ff 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
-int acpi_power_nocheck;
-module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
-
static int acpi_power_add(struct acpi_device *device);
static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
@@ -148,9 +145,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
{
- int result = 0, state1;
- u32 i = 0;
-
+ int cur_state;
+ int i = 0;
if (!list || !state)
return -EINVAL;
@@ -158,25 +154,33 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
/* The state of the list is 'on' IFF all resources are 'on'. */
for (i = 0; i < list->count; i++) {
- /*
- * The state of the power resource can be obtained by
- * using the ACPI handle. In such case it is unnecessary to
- * get the Power resource first and then get its state again.
- */
- result = acpi_power_get_state(list->handles[i], &state1);
+ struct acpi_power_resource *resource;
+ acpi_handle handle = list->handles[i];
+ int result;
+
+ result = acpi_power_get_context(handle, &resource);
if (result)
return result;
- *state = state1;
+ mutex_lock(&resource->resource_lock);
- if (*state != ACPI_POWER_RESOURCE_STATE_ON)
+ result = acpi_power_get_state(handle, &cur_state);
+
+ mutex_unlock(&resource->resource_lock);
+
+ if (result)
+ return result;
+
+ if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
break;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
- *state ? "on" : "off"));
+ cur_state ? "on" : "off"));
- return result;
+ *state = cur_state;
+
+ return 0;
}
static int __acpi_power_on(struct acpi_power_resource *resource)
@@ -222,7 +226,7 @@ static int acpi_power_on(acpi_handle handle)
return result;
}
-static int acpi_power_off_device(acpi_handle handle)
+static int acpi_power_off(acpi_handle handle)
{
int result = 0;
acpi_status status = AE_OK;
@@ -266,6 +270,35 @@ static int acpi_power_off_device(acpi_handle handle)
return result;
}
+static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+{
+ int i;
+
+ for (i = num_res - 1; i >= 0 ; i--)
+ acpi_power_off(list->handles[i]);
+}
+
+static void acpi_power_off_list(struct acpi_handle_list *list)
+{
+ __acpi_power_off_list(list, list->count);
+}
+
+static int acpi_power_on_list(struct acpi_handle_list *list)
+{
+ int result = 0;
+ int i;
+
+ for (i = 0; i < list->count; i++) {
+ result = acpi_power_on(list->handles[i]);
+ if (result) {
+ __acpi_power_off_list(list, i);
+ break;
+ }
+ }
+
+ return result;
+}
+
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
* ACPI 3.0) _PSW (Power State Wake)
@@ -404,8 +437,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
/* Close power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_off_device(
- dev->wakeup.resources.handles[i]);
+ int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
@@ -423,19 +455,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
Device Power Management
-------------------------------------------------------------------------- */
-int acpi_power_get_inferred_state(struct acpi_device *device)
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
int result = 0;
struct acpi_handle_list *list = NULL;
int list_state = 0;
int i = 0;
-
- if (!device)
+ if (!device || !state)
return -EINVAL;
- device->power.state = ACPI_STATE_UNKNOWN;
-
/*
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
@@ -450,22 +479,26 @@ int acpi_power_get_inferred_state(struct acpi_device *device)
return result;
if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
- device->power.state = i;
+ *state = i;
return 0;
}
}
- device->power.state = ACPI_STATE_D3;
-
+ *state = ACPI_STATE_D3;
return 0;
}
+int acpi_power_on_resources(struct acpi_device *device, int state)
+{
+ if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+ return -EINVAL;
+
+ return acpi_power_on_list(&device->power.states[state].resources);
+}
+
int acpi_power_transition(struct acpi_device *device, int state)
{
- int result = 0;
- struct acpi_handle_list *cl = NULL; /* Current Resources */
- struct acpi_handle_list *tl = NULL; /* Target Resources */
- int i = 0;
+ int result;
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
return -EINVAL;
@@ -477,37 +510,20 @@ int acpi_power_transition(struct acpi_device *device, int state)
|| (device->power.state > ACPI_STATE_D3))
return -ENODEV;
- cl = &device->power.states[device->power.state].resources;
- tl = &device->power.states[state].resources;
-
/* TBD: Resources must be ordered. */
/*
* First we reference all power resources required in the target list
- * (e.g. so the device doesn't lose power while transitioning).
+ * (e.g. so the device doesn't lose power while transitioning). Then,
+ * we dereference all power resources used in the current list.
*/
- for (i = 0; i < tl->count; i++) {
- result = acpi_power_on(tl->handles[i]);
- if (result)
- goto end;
- }
+ result = acpi_power_on_list(&device->power.states[state].resources);
+ if (!result)
+ acpi_power_off_list(
+ &device->power.states[device->power.state].resources);
- /*
- * Then we dereference all power resources used in the current list.
- */
- for (i = 0; i < cl->count; i++) {
- result = acpi_power_off_device(cl->handles[i]);
- if (result)
- goto end;
- }
-
- end:
- if (result)
- device->power.state = ACPI_STATE_UNKNOWN;
- else {
- /* We shouldn't change the state till all above operations succeed */
- device->power.state = state;
- }
+ /* We shouldn't change the state unless the above operations succeed. */
+ device->power.state = result ? ACPI_STATE_UNKNOWN : state;
return result;
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index afad67769db6..f5f986991b52 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state,
dev->wakeup.flags.run_wake ? '*' : ' ',
- dev->wakeup.state.enabled ? "enabled" : "disabled");
+ (device_may_wakeup(&dev->dev)
+ || (ldev && device_may_wakeup(ldev))) ?
+ "enabled" : "disabled");
if (ldev)
seq_printf(seq, "%s:%s",
ldev->bus ? ldev->bus->name : "no-bus",
@@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct device *dev = acpi_get_physical_device(adev->handle);
- if (dev && device_can_wakeup(dev))
- device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+ if (dev && device_can_wakeup(dev)) {
+ bool enable = !device_may_wakeup(dev);
+ device_set_wakeup_enable(dev, enable);
+ }
}
static ssize_t
@@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file,
char strbuf[5];
char str[5] = "";
unsigned int len = count;
- struct acpi_device *found_dev = NULL;
if (len > 4)
len = 4;
@@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file,
continue;
if (!strncmp(dev->pnp.bus_id, str, 4)) {
- dev->wakeup.state.enabled =
- dev->wakeup.state.enabled ? 0 : 1;
- found_dev = dev;
- break;
- }
- }
- if (found_dev) {
- physical_device_enable_wakeup(found_dev);
- list_for_each_safe(node, next, &acpi_wakeup_device_list) {
- struct acpi_device *dev = container_of(node,
- struct
- acpi_device,
- wakeup_list);
-
- if ((dev != found_dev) &&
- (dev->wakeup.gpe_number ==
- found_dev->wakeup.gpe_number)
- && (dev->wakeup.gpe_device ==
- found_dev->wakeup.gpe_device)) {
- printk(KERN_WARNING
- "ACPI: '%s' and '%s' have the same GPE, "
- "can't disable/enable one separately\n",
- dev->pnp.bus_id, found_dev->pnp.bus_id);
- dev->wakeup.state.enabled =
- found_dev->wakeup.state.enabled;
+ if (device_can_wakeup(&dev->dev)) {
+ bool enable = !device_may_wakeup(&dev->dev);
+ device_set_wakeup_enable(&dev->dev, enable);
+ } else {
physical_device_enable_wakeup(dev);
}
+ break;
}
}
mutex_unlock(&acpi_device_lock);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index bec561c14beb..3c1a2fec8cda 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -23,7 +23,7 @@ static int set_no_mwait(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
"disabling mwait for CPU C-states\n", id->ident);
- idle_nomwait = 1;
+ boot_option_idle_override = IDLE_NOMWAIT;
return 0;
}
@@ -283,7 +283,7 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
acpi_status status = AE_OK;
- if (idle_nomwait) {
+ if (boot_option_idle_override == IDLE_NOMWAIT) {
/*
* If mwait is disabled for CPU C-states, the C2C3_FFH access
* mode will be disabled in the parameter of _PDC object.
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 85e48047d7b0..360a74e6add0 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,10 +40,6 @@
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
@@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr)
return result;
}
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_processor_dir = NULL;
-
-static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_processor_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'throttling' [R/W] */
- entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_processor_throttling_fops,
- acpi_driver_data(device));
- if (!entry)
- return -EIO;
- return 0;
-}
-static int acpi_processor_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
- acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static inline int acpi_processor_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_processor_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
if (action == CPU_ONLINE && pr) {
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_cst_has_changed(pr);
+ acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
+ if (action == CPU_DEAD && pr) {
+		/* invalidate flags.throttling after a CPU goes offline */
+ acpi_processor_reevaluate_tstate(pr, action);
+ }
return NOTIFY_OK;
}
@@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
per_cpu(processors, pr->id) = pr;
- result = acpi_processor_add_fs(device);
- if (result)
- goto err_free_cpumask;
-
sysdev = get_cpu_sysdev(pr->id);
if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
result = -EFAULT;
- goto err_remove_fs;
+ goto err_free_cpumask;
}
#ifdef CONFIG_CPU_FREQ
@@ -590,8 +540,6 @@ err_thermal_unregister:
thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
acpi_processor_power_exit(pr, device);
-err_remove_fs:
- acpi_processor_remove_fs(device);
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
sysfs_remove_link(&device->dev.kobj, "sysdev");
- acpi_processor_remove_fs(device);
-
if (pr->cdev) {
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
sysfs_remove_link(&pr->cdev->device.kobj, "device");
@@ -854,12 +800,6 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
-#ifdef CONFIG_ACPI_PROCFS
- acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
- if (!acpi_processor_dir)
- return -ENOMEM;
-#endif
-
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
@@ -885,10 +825,6 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
return result;
}
@@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
return;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index a765b823aa9e..d615b7d69bca 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,13 @@ module_param(bm_check_disable, uint, 0000);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
+static int disabled_by_idle_boot_param(void)
+{
+ return boot_option_idle_override == IDLE_POLL ||
+ boot_option_idle_override == IDLE_FORCE_MWAIT ||
+ boot_option_idle_override == IDLE_HALT;
+}
+
/*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else.
@@ -455,7 +462,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
continue;
}
if (cx.type == ACPI_STATE_C1 &&
- (idle_halt || idle_nomwait)) {
+ (boot_option_idle_override == IDLE_NOMWAIT)) {
/*
* In most cases the C1 space_id obtained from
* _CST object is FIXED_HARDWARE access mode.
@@ -1016,7 +1023,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
- state->flags |= CPUIDLE_FLAG_SHALLOW;
if (cx->entry_method == ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_TIME_VALID;
@@ -1025,16 +1031,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
break;
case ACPI_STATE_C2:
- state->flags |= CPUIDLE_FLAG_BALANCED;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
dev->safe_state = state;
break;
case ACPI_STATE_C3:
- state->flags |= CPUIDLE_FLAG_DEEP;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
- state->flags |= CPUIDLE_FLAG_CHECK_BM;
state->enter = pr->flags.bm_check ?
acpi_idle_enter_bm :
acpi_idle_enter_simple;
@@ -1058,7 +1061,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
int ret = 0;
- if (boot_option_idle_override)
+ if (disabled_by_idle_boot_param())
return 0;
if (!pr)
@@ -1089,19 +1092,10 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
acpi_status status = 0;
static int first_run;
- if (boot_option_idle_override)
+ if (disabled_by_idle_boot_param())
return 0;
if (!first_run) {
- if (idle_halt) {
- /*
- * When the boot option of "idle=halt" is added, halt
- * is used for CPU IDLE.
- * In such case C2/C3 is meaningless. So the max_cstate
- * is set to one.
- */
- max_cstate = 1;
- }
dmi_check_system(processor_power_dmi_table);
max_cstate = acpi_processor_cstate_check(max_cstate);
if (max_cstate < ACPI_C_STATES_MAX)
@@ -1142,7 +1136,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
int acpi_processor_power_exit(struct acpi_processor *pr,
struct acpi_device *device)
{
- if (boot_option_idle_override)
+ if (disabled_by_idle_boot_param())
return 0;
cpuidle_unregister_device(&pr->power.dev);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ff3632717c51..fa84e9744330 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,10 +32,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -370,6 +366,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
}
/*
+ * This function reevaluates whether the T-state is still valid after
+ * a CPU has been brought online or taken offline.
+ * Note that it does not reevaluate the following properties of
+ * the T-state:
+ * 1. Control method.
+ * 2. The number of supported T-states.
+ * 3. TSD domain.
+ */
+void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+ unsigned long action)
+{
+ int result = 0;
+
+ if (action == CPU_DEAD) {
+		/* When a CPU goes offline, its T-state throttling
+		 * is invalidated.
+ */
+ pr->flags.throttling = 0;
+ return;
+ }
+	/* Recheck whether the T-state is valid for the CPU that has
+	 * just come online.
+ */
+ if (!pr->throttling.state_count) {
+		/* If no T-states are reported, throttling is
+		 * invalidated.
+ */
+ pr->flags.throttling = 0;
+ return;
+ }
+ pr->flags.throttling = 1;
+
+ /* Disable throttling (if enabled). We'll let subsequent
+	 * policy (e.g. thermal) decide to lower performance if it
+ * so chooses, but for now we'll crank up the speed.
+ */
+
+ result = acpi_processor_get_throttling(pr);
+ if (result)
+ goto end;
+
+ if (pr->throttling.state) {
+ result = acpi_processor_set_throttling(pr, 0, false);
+ if (result)
+ goto end;
+ }
+
+end:
+ if (result)
+ pr->flags.throttling = 0;
+}
+/*
* _PTC - Processor Throttling Control (and status) register location
*/
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
@@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
*/
cpumask_copy(saved_mask, &current->cpus_allowed);
/* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+ if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+ /* Can't migrate to the target pr->id CPU. Exit */
+ free_cpumask_var(saved_mask);
+ return -ENODEV;
+ }
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
set_cpus_allowed_ptr(current, saved_mask);
@@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENOMEM;
}
+ if (cpu_is_offline(pr->id)) {
+ /*
+		 * The CPU pointed to by pr->id is offline, so there is no
+		 * need to change the throttling state any more.
+ */
+ return -ENODEV;
+ }
+
cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
@@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
/* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+ if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+ /* Can't migrate to the pr->id CPU. Exit */
+ ret = -ENODEV;
+ goto exit;
+ }
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state, force);
} else {
@@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
}
t_state.cpu = i;
/* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, cpumask_of(i));
+ if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+ continue;
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state, force);
@@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
/* restore the previous state */
/* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, saved_mask);
+exit:
free_cpumask_var(online_throttling_cpus);
free_cpumask_var(saved_mask);
return ret;
@@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
return result;
}
-#ifdef CONFIG_ACPI_PROCFS
-/* proc interface */
-static int acpi_processor_throttling_seq_show(struct seq_file *seq,
- void *offset)
-{
- struct acpi_processor *pr = seq->private;
- int i = 0;
- int result = 0;
-
- if (!pr)
- goto end;
-
- if (!(pr->throttling.state_count > 0)) {
- seq_puts(seq, "<not supported>\n");
- goto end;
- }
-
- result = acpi_processor_get_throttling(pr);
-
- if (result) {
- seq_puts(seq,
- "Could not determine current throttling state.\n");
- goto end;
- }
-
- seq_printf(seq, "state count: %d\n"
- "active state: T%d\n"
- "state available: T%d to T%d\n",
- pr->throttling.state_count, pr->throttling.state,
- pr->throttling_platform_limit,
- pr->throttling.state_count - 1);
-
- seq_puts(seq, "states:\n");
- if (pr->throttling.acpi_processor_get_throttling ==
- acpi_processor_get_throttling_fadt) {
- for (i = 0; i < pr->throttling.state_count; i++)
- seq_printf(seq, " %cT%d: %02d%%\n",
- (i == pr->throttling.state ? '*' : ' '), i,
- (pr->throttling.states[i].performance ? pr->
- throttling.states[i].performance / 10 : 0));
- } else {
- for (i = 0; i < pr->throttling.state_count; i++)
- seq_printf(seq, " %cT%d: %02d%%\n",
- (i == pr->throttling.state ? '*' : ' '), i,
- (int)pr->throttling.states_tss[i].
- freqpercentage);
- }
-
- end:
- return 0;
-}
-
-static int acpi_processor_throttling_open_fs(struct inode *inode,
- struct file *file)
-{
- return single_open(file, acpi_processor_throttling_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t acpi_processor_write_throttling(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int result = 0;
- struct seq_file *m = file->private_data;
- struct acpi_processor *pr = m->private;
- char state_string[5] = "";
- char *charp = NULL;
- size_t state_val = 0;
- char tmpbuf[5] = "";
-
- if (!pr || (count > sizeof(state_string) - 1))
- return -EINVAL;
-
- if (copy_from_user(state_string, buffer, count))
- return -EFAULT;
-
- state_string[count] = '\0';
- if ((count > 0) && (state_string[count-1] == '\n'))
- state_string[count-1] = '\0';
-
- charp = state_string;
- if ((state_string[0] == 't') || (state_string[0] == 'T'))
- charp++;
-
- state_val = simple_strtoul(charp, NULL, 0);
- if (state_val >= pr->throttling.state_count)
- return -EINVAL;
-
- snprintf(tmpbuf, 5, "%zu", state_val);
-
- if (strcmp(tmpbuf, charp) != 0)
- return -EINVAL;
-
- result = acpi_processor_set_throttling(pr, state_val, false);
- if (result)
- return result;
-
- return count;
-}
-
-const struct file_operations acpi_processor_throttling_fops = {
- .owner = THIS_MODULE,
- .open = acpi_processor_throttling_open_fs,
- .read = seq_read,
- .write = acpi_processor_write_throttling,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e5dbedb16bbf..51ae3794ec7f 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
const struct file_operations *state_fops,
const struct file_operations *alarm_fops, void *data)
{
+	printk(KERN_WARNING PREFIX "Deprecated procfs interface for SBS is loaded,"
+	       " please rebuild with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!*dir) {
*dir = proc_mkdir(dir_name, parent_dir);
if (!*dir) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 29ef505c487b..b99e62494607 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -778,7 +778,7 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
wakeup->resources.handles[i] = element->reference.handle;
}
- acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
out:
kfree(buffer.pointer);
@@ -803,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
device->wakeup.flags.run_wake = 1;
- device->wakeup.flags.always_enabled = 1;
+ device_set_wakeup_capable(&device->dev, true);
return;
}
@@ -815,16 +815,22 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
}
-static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
+ acpi_handle temp;
acpi_status status = 0;
int psw_error;
+ /* Presence of _PRW indicates wake capable */
+ status = acpi_get_handle(device->handle, "_PRW", &temp);
+ if (ACPI_FAILURE(status))
+ return;
+
status = acpi_bus_extract_wakeup_device_power_package(device->handle,
&device->wakeup);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
- goto end;
+ return;
}
device->wakeup.flags.valid = 1;
@@ -840,13 +846,10 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (psw_error)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"error in _DSW or _PSW evaluation\n"));
-
-end:
- if (ACPI_FAILURE(status))
- device->flags.wake_capable = 0;
- return 0;
}
+static void acpi_bus_add_power_resource(acpi_handle handle);
+
static int acpi_bus_get_power_flags(struct acpi_device *device)
{
acpi_status status = 0;
@@ -875,8 +878,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
acpi_evaluate_reference(device->handle, object_name, NULL,
&ps->resources);
if (ps->resources.count) {
+ int j;
+
device->power.flags.power_resources = 1;
ps->flags.valid = 1;
+ for (j = 0; j < ps->resources.count; j++)
+ acpi_bus_add_power_resource(ps->resources.handles[j]);
}
/* Evaluate "_PSx" to see if we can do explicit sets */
@@ -901,10 +908,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
device->power.states[ACPI_STATE_D3].flags.valid = 1;
device->power.states[ACPI_STATE_D3].power = 0;
- /* TBD: System wake support and resource requirements. */
-
- device->power.state = ACPI_STATE_UNKNOWN;
- acpi_bus_get_power(device->handle, &(device->power.state));
+ acpi_bus_init_power(device);
return 0;
}
@@ -947,11 +951,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.power_manageable = 1;
- /* Presence of _PRW indicates wake capable */
- status = acpi_get_handle(device->handle, "_PRW", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.wake_capable = 1;
-
/* TBD: Performance management */
return 0;
@@ -1278,11 +1277,7 @@ static int acpi_add_single_object(struct acpi_device **child,
* Wakeup device management
*-----------------------
*/
- if (device->flags.wake_capable) {
- result = acpi_bus_get_wakeup_device_flags(device);
- if (result)
- goto end;
- }
+ acpi_bus_get_wakeup_device_flags(device);
/*
* Performance Management
@@ -1326,6 +1321,20 @@ end:
#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
+static void acpi_bus_add_power_resource(acpi_handle handle)
+{
+ struct acpi_bus_ops ops = {
+ .acpi_op_add = 1,
+ .acpi_op_start = 1,
+ };
+ struct acpi_device *device = NULL;
+
+ acpi_bus_get_device(handle, &device);
+ if (!device)
+ acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
+ ACPI_STA_DEFAULT, &ops);
+}
+
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
unsigned long long *sta)
{
@@ -1371,7 +1380,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
struct acpi_bus_ops *ops = context;
int type;
unsigned long long sta;
- struct acpi_device_wakeup wakeup;
struct acpi_device *device;
acpi_status status;
int result;
@@ -1382,7 +1390,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
- acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
+ struct acpi_device_wakeup wakeup;
+ acpi_handle temp;
+
+ status = acpi_get_handle(handle, "_PRW", &temp);
+ if (ACPI_SUCCESS(status))
+ acpi_bus_extract_wakeup_device_power_package(handle,
+ &wakeup);
return AE_CTRL_DEPTH;
}
@@ -1467,7 +1481,7 @@ int acpi_bus_start(struct acpi_device *device)
result = acpi_bus_scan(device->handle, &ops, NULL);
- acpi_update_gpes();
+ acpi_update_all_gpes();
return result;
}
@@ -1573,6 +1587,8 @@ int __init acpi_scan_init(void)
printk(KERN_ERR PREFIX "Could not register bus type\n");
}
+ acpi_power_init();
+
/*
* Enumerate devices in the ACPI namespace.
*/
@@ -1584,7 +1600,7 @@ int __init acpi_scan_init(void)
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
else
- acpi_update_gpes();
+ acpi_update_all_gpes();
return result;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index febb153b5a68..d6a8cd14de2e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,8 +124,7 @@ static int acpi_pm_freeze(void)
static int acpi_pm_pre_suspend(void)
{
acpi_pm_freeze();
- suspend_nvs_save();
- return 0;
+ return suspend_nvs_save();
}
/**
@@ -151,7 +150,7 @@ static int acpi_pm_prepare(void)
{
int error = __acpi_pm_prepare();
if (!error)
- acpi_pm_pre_suspend();
+ error = acpi_pm_pre_suspend();
return error;
}
@@ -167,6 +166,7 @@ static void acpi_pm_finish(void)
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
+ suspend_nvs_free();
if (acpi_state == ACPI_STATE_S0)
return;
@@ -187,7 +187,6 @@ static void acpi_pm_finish(void)
*/
static void acpi_pm_end(void)
{
- suspend_nvs_free();
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
@@ -319,7 +318,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
}
}
-static struct platform_suspend_ops acpi_suspend_ops = {
+static const struct platform_suspend_ops acpi_suspend_ops = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin,
.prepare_late = acpi_pm_prepare,
@@ -347,7 +346,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
-static struct platform_suspend_ops acpi_suspend_ops_old = {
+static const struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
.prepare_late = acpi_pm_pre_suspend,
@@ -435,6 +434,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Averatec AV1020-ED2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -506,7 +513,7 @@ static void acpi_pm_thaw(void)
acpi_enable_all_runtime_gpes();
}
-static struct platform_hibernation_ops acpi_hibernation_ops = {
+static const struct platform_hibernation_ops acpi_hibernation_ops = {
.begin = acpi_hibernation_begin,
.end = acpi_pm_end,
.pre_snapshot = acpi_pm_prepare,
@@ -549,7 +556,7 @@ static int acpi_hibernation_begin_old(void)
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
.pre_snapshot = acpi_pm_pre_suspend,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index f8588f81048a..61891e75583d 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void)
return;
}
-void acpi_os_gpe_count(u32 gpe_number)
+static void gpe_count(u32 gpe_number)
{
acpi_gpe_count++;
@@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number)
return;
}
-void acpi_os_fixed_event_count(u32 event_number)
+static void fixed_event_count(u32 event_number)
{
if (!all_counters)
return;
@@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number)
return;
}
+static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+ u32 event_number, void *context)
+{
+ if (event_type == ACPI_EVENT_TYPE_GPE)
+ gpe_count(event_number);
+
+ if (event_type == ACPI_EVENT_TYPE_FIXED)
+ fixed_event_count(event_number);
+}
+
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
@@ -601,6 +611,7 @@ end:
void acpi_irq_stats_init(void)
{
+ acpi_status status;
int i;
if (all_counters)
@@ -619,6 +630,10 @@ void acpi_irq_stats_init(void)
if (all_counters == NULL)
goto fail;
+ status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+ if (ACPI_FAILURE(status))
+ goto fail;
+
counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
GFP_KERNEL);
if (counter_attrs == NULL)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5a27b0a31315..2607e17b520f 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device)
break;
tz->trips.active[i].flags.enabled = 1;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
- result = acpi_bus_get_power(tz->trips.active[i].devices.
- handles[j], &power_state);
+ result = acpi_bus_update_power(
+ tz->trips.active[i].devices.handles[j],
+ &power_state);
if (result || (power_state != ACPI_STATE_D0)) {
tz->trips.active[i].flags.enabled = 0;
break;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5cd0228d2daa..90f8f7676d1f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -33,7 +33,6 @@
#include <linux/input.h>
#include <linux/backlight.h>
#include <linux/thermal.h>
-#include <linux/video_output.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
@@ -81,6 +80,13 @@ module_param(brightness_switch_enabled, bool, 0644);
static int allow_duplicates;
module_param(allow_duplicates, bool, 0644);
+/*
+ * Some BIOSes claim they use the minimum backlight at boot,
+ * which may leave the screen dimmed after boot.
+ */
+static int use_bios_initial_backlight = 1;
+module_param(use_bios_initial_backlight, bool, 0644);
+
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -172,9 +178,6 @@ struct acpi_video_device_cap {
u8 _BQC:1; /* Get current brightness level */
u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */
u8 _DDC:1; /*Return the EDID for this device */
- u8 _DCS:1; /*Return status of output device */
- u8 _DGS:1; /*Query graphics state */
- u8 _DSS:1; /*Device state set */
};
struct acpi_video_brightness_flags {
@@ -202,7 +205,6 @@ struct acpi_video_device {
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
struct thermal_cooling_device *cooling_dev;
- struct output_device *output_dev;
};
static const char device_decode[][30] = {
@@ -226,10 +228,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
-static int acpi_video_device_get_state(struct acpi_video_device *device,
- unsigned long long *state);
-static int acpi_video_output_get(struct output_device *od);
-static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
/*backlight device sysfs support*/
static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -260,35 +258,11 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
vd->brightness->levels[request_level]);
}
-static struct backlight_ops acpi_backlight_ops = {
+static const struct backlight_ops acpi_backlight_ops = {
.get_brightness = acpi_video_get_brightness,
.update_status = acpi_video_set_brightness,
};
-/*video output device sysfs support*/
-static int acpi_video_output_get(struct output_device *od)
-{
- unsigned long long state;
- struct acpi_video_device *vd =
- (struct acpi_video_device *)dev_get_drvdata(&od->dev);
- acpi_video_device_get_state(vd, &state);
- return (int)state;
-}
-
-static int acpi_video_output_set(struct output_device *od)
-{
- unsigned long state = od->request_state;
- struct acpi_video_device *vd=
- (struct acpi_video_device *)dev_get_drvdata(&od->dev);
- return acpi_video_device_set_state(vd, state);
-}
-
-static struct output_properties acpi_output_properties = {
- .set_state = acpi_video_output_set,
- .get_status = acpi_video_output_get,
-};
-
-
/* thermal cooling device callbacks */
static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
long *state)
@@ -344,34 +318,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
Video Management
-------------------------------------------------------------------------- */
-/* device */
-
-static int
-acpi_video_device_get_state(struct acpi_video_device *device,
- unsigned long long *state)
-{
- int status;
-
- status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
-
- return status;
-}
-
-static int
-acpi_video_device_set_state(struct acpi_video_device *device, int state)
-{
- int status;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
- unsigned long long ret;
-
-
- arg0.integer.value = state;
- status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
-
- return status;
-}
-
static int
acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
union acpi_object **levels)
@@ -766,9 +712,11 @@ acpi_video_init_brightness(struct acpi_video_device *device)
* when invoked for the first time, i.e. level_old is invalid.
* set the backlight to max_level in this case
*/
- for (i = 2; i < br->count; i++)
- if (level_old == br->levels[i])
- level = level_old;
+ if (use_bios_initial_backlight) {
+ for (i = 2; i < br->count; i++)
+ if (level_old == br->levels[i])
+ level = level_old;
+ }
goto set_level;
}
@@ -831,15 +779,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
device->cap._DDC = 1;
}
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
- device->cap._DCS = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
- device->cap._DGS = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
- device->cap._DSS = 1;
- }
if (acpi_video_backlight_support()) {
struct backlight_properties props;
@@ -904,21 +843,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Create sysfs link\n");
}
-
- if (acpi_video_display_switch_support()) {
-
- if (device->cap._DCS && device->cap._DSS) {
- static int count;
- char *name;
- name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
- if (!name)
- return;
- count++;
- device->output_dev = video_output_register(name,
- NULL, device, &acpi_output_properties);
- kfree(name);
- }
- }
}
/*
@@ -1360,6 +1284,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
if (!video_device)
continue;
+ if (!video_device->cap._DDC)
+ continue;
+
if (type) {
switch (type) {
case ACPI_VIDEO_DISPLAY_CRT:
@@ -1452,7 +1379,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
thermal_cooling_device_unregister(device->cooling_dev);
device->cooling_dev = NULL;
}
- video_output_unregister(device->output_dev);
return 0;
}
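
The new use_bios_initial_backlight parameter above defaults to 1, i.e. the backlight level reported by the BIOS at boot is trusted. On machines where the firmware reports a bogus minimum level (the dim-screen-after-boot case the comment describes), it can be overridden from the kernel command line; assuming the driver is built as video.ko (built-in code uses the same "video." prefix), for example:

	video.use_bios_initial_backlight=0

With the parameter cleared, acpi_video_init_brightness() skips the level_old match above and keeps max_level, so the device starts at full brightness instead of the possibly bogus BIOS value.
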
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b83676126598..5af3479714f6 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,15 +17,14 @@
* capabilities the graphics cards plugged in support. The check for general
* video capabilities will be triggered by the first caller of
* acpi_video_get_capabilities(NULL); which will happen when the first
- * backlight (or display output) switching supporting driver calls:
+ * driver that supports backlight switching calls:
* acpi_video_backlight_support();
*
* Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
* are available, video.ko should be used to handle the device.
*
* Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
- * sony_acpi,... can take care about backlight brightness and display output
- * switching.
+ * sony_acpi,... can take care of backlight brightness.
*
* If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
* this file will not be compiled, acpi_video_get_capabilities() and
@@ -161,8 +160,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
*
* if (dmi_name_in_vendors("XY")) {
* acpi_video_support |=
- * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
- * acpi_video_support |=
* ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
*}
*/
@@ -212,33 +209,8 @@ int acpi_video_backlight_support(void)
EXPORT_SYMBOL(acpi_video_backlight_support);
/*
- * Returns true if video.ko can do display output switching.
- * This does not work well/at all with binary graphics drivers
- * which disable system io ranges and do it on their own.
- */
-int acpi_video_display_switch_support(void)
-{
- if (!acpi_video_caps_checked)
- acpi_video_get_capabilities(NULL);
-
- if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
- return 0;
- else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
- return 1;
-
- if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
- return 0;
- else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
- return 1;
-
- return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
-}
-EXPORT_SYMBOL(acpi_video_display_switch_support);
-
-/*
- * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
- * To force that backlight or display output switching is processed by vendor
- * specific acpi drivers or video.ko driver.
+ * Use acpi_backlight=vendor/video to force backlight switching to be
+ * handled by vendor-specific ACPI drivers or by the video.ko driver.
*/
static int __init acpi_backlight(char *str)
{
@@ -255,19 +227,3 @@ static int __init acpi_backlight(char *str)
return 1;
}
__setup("acpi_backlight=", acpi_backlight);
-
-static int __init acpi_display_output(char *str)
-{
- if (str == NULL || *str == '\0')
- return 1;
- else {
- if (!strcmp("vendor", str))
- acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
- if (!strcmp("video", str))
- acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
- }
- return 1;
-}
-__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index f62a50c3ed34..7bfbe40bc43b 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid
- || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
- || sleep_state > (u32) dev->wakeup.sleep_state)
+ || sleep_state > (u32) dev->wakeup.sleep_state
+ || !(device_may_wakeup(&dev->dev)
+ || dev->wakeup.prepare_count))
continue;
- if (dev->wakeup.state.enabled)
+ if (device_may_wakeup(&dev->dev))
acpi_enable_wakeup_device_power(dev, sleep_state);
/* The wake-up power should have been enabled already. */
- acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_ENABLE);
}
}
@@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid
- || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
- || (sleep_state > (u32) dev->wakeup.sleep_state))
+ || sleep_state > (u32) dev->wakeup.sleep_state
+ || !(device_may_wakeup(&dev->dev)
+ || dev->wakeup.prepare_count))
continue;
- acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+ acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_DISABLE);
- if (dev->wakeup.state.enabled)
+ if (device_may_wakeup(&dev->dev))
acpi_disable_wakeup_device_power(dev);
}
}
@@ -84,8 +86,12 @@ int __init acpi_wakeup_device_init(void)
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
- if (dev->wakeup.flags.always_enabled)
- dev->wakeup.state.enabled = 1;
+ if (device_can_wakeup(&dev->dev)) {
+ /* Button GPEs are supposed to be always enabled. */
+ acpi_enable_gpe(dev->wakeup.gpe_device,
+ dev->wakeup.gpe_number);
+ device_set_wakeup_enable(&dev->dev, true);
+ }
}
mutex_unlock(&acpi_device_lock);
return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 36e2319264bd..c2328aed0836 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -2,6 +2,14 @@
# SATA/PATA driver configuration
#
+config HAVE_PATA_PLATFORM
+ bool
+ help
+ This is an internal configuration node for any machine that
+	  uses the pata-platform driver to enable the relevant driver in the
+ configuration structure without having to submit endless patches
+ to update the PATA_PLATFORM entry.
+
menuconfig ATA
tristate "Serial ATA and Parallel ATA drivers"
depends on HAS_IOMEM
@@ -90,6 +98,14 @@ config SATA_INIC162X
help
This option enables support for Initio 162x Serial ATA.
+config SATA_ACARD_AHCI
+ tristate "ACard AHCI variant (ATP 8620)"
+ depends on PCI
+ help
+	  This option enables support for the ACard ATP 8620 AHCI SATA controller.
+
+ If unsure, say N.
+
config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI
@@ -400,11 +416,11 @@ config PATA_HPT37X
If unsure, say N.
config PATA_HPT3X2N
- tristate "HPT 372N/302N PATA support"
+ tristate "HPT 371N/372N/302N PATA support"
depends on PCI
help
This option enables support for the N variant HPT PATA
- controllers via the new ATA layer
+ controllers via the new ATA layer.
If unsure, say N.
@@ -765,17 +781,9 @@ config PATA_PCMCIA
If unsure, say N.
-config HAVE_PATA_PLATFORM
- bool
- help
- This is an internal configuration node for any machine that
- uses pata-platform driver to enable the relevant driver in the
- configuration structure without having to submit endless patches
- to update the PATA_PLATFORM entry.
-
config PATA_PLATFORM
tristate "Generic platform device PATA support"
- depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
+ depends on EXPERT || PPC || HAVE_PATA_PLATFORM
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 2b67c900a459..27291aad6ca7 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATA) += libata.o
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
new file mode 100644
index 000000000000..339c210f03a6
--- /dev/null
+++ b/drivers/ata/acard-ahci.c
@@ -0,0 +1,528 @@
+
+/*
+ * acard-ahci.c - ACard AHCI SATA support
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME "acard-ahci"
+#define DRV_VERSION "1.0"
+
+/*
+ * Received FIS structure limited to 80h.
+ */
+
+#define ACARD_AHCI_RX_FIS_SZ 128
+
+enum {
+ AHCI_PCI_BAR = 5,
+};
+
+enum board_ids {
+ board_acard_ahci,
+};
+
+struct acard_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
+};
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int acard_ahci_port_start(struct ata_port *ap);
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef CONFIG_PM
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static struct scsi_host_template acard_ahci_sht = {
+ AHCI_SHT("acard-ahci"),
+};
+
+static struct ata_port_operations acard_ops = {
+ .inherits = &ahci_ops,
+ .qc_prep = acard_ahci_qc_prep,
+ .qc_fill_rtf = acard_ahci_qc_fill_rtf,
+ .port_start = acard_ahci_port_start,
+};
+
+#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
+
+static const struct ata_port_info acard_ahci_port_info[] = {
+ [board_acard_ahci] =
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &acard_ops,
+ },
+};
+
+static const struct pci_device_id acard_ahci_pci_tbl[] = {
+ /* ACard */
+ { PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
+
+ { } /* terminate list */
+};
+
+static struct pci_driver acard_ahci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = acard_ahci_pci_tbl,
+ .probe = acard_ahci_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = acard_ahci_pci_device_suspend,
+ .resume = acard_ahci_pci_device_resume,
+#endif
+};
+
+#ifdef CONFIG_PM
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+
+ if (mesg.event & PM_EVENT_SUSPEND &&
+ hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ if (mesg.event & PM_EVENT_SLEEP) {
+ /* AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ return ata_pci_device_suspend(pdev, mesg);
+}
+
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
+
+static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+ int rc;
+
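+ /* prefer 64-bit streaming and coherent DMA masks when the HBA supports them, otherwise fall back to 32-bit */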
+ if (using_dac &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void acard_ahci_pci_print_info(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u16 cc;
+ const char *scc_s;
+
+ pci_read_config_word(pdev, 0x0a, &cc);
+ if (cc == PCI_CLASS_STORAGE_IDE)
+ scc_s = "IDE";
+ else if (cc == PCI_CLASS_STORAGE_SATA)
+ scc_s = "SATA";
+ else if (cc == PCI_CLASS_STORAGE_RAID)
+ scc_s = "RAID";
+ else
+ scc_s = "unknown";
+
+ ahci_print_info(host, scc_s);
+}
+
+static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si, last_si = 0;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ /*
+ * ACard note:
+ * We must set an end-of-table (EOT) bit,
+ * and the segment cannot exceed 64k (0x10000)
+ */
+ acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
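+ /* high 32 bits; the double 16-bit shift stays well defined when dma_addr_t is only 32 bits wide */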
+ acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ acard_sg[si].size = cpu_to_le32(sg_len);
+ last_si = si;
+ }
+
+ acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */
+
+ return si;
+}
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
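+ /* each of the 32 command slots has its own command table, indexed by the command tag */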
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ *
+ * ACard note: prd table length not filled in
+ */
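+ /* command header DW0: bits 4:0 = FIS length in dwords, bits 15:12 = PMP port number */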
+ opts = cmd_fis_len | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *rx_fis = pp->rx_fis;
+
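+ /* with FIS-based switching, each PMP device number gets its own received-FIS area */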
+ if (pp->fbs_enabled)
+ rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
+
+ /*
+ * After a successful execution of an ATA PIO data-in command,
+ * the device doesn't send D2H Reg FIS to update the TF and
+ * the host should take TF and E_Status from the preceding PIO
+ * Setup FIS.
+ */
+ if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+ !(qc->flags & ATA_QCFLAG_FAILED)) {
+ ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
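+ /* byte 15 of the PIO Setup FIS holds E_Status, the ending device status */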
+ qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ } else
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+ return true;
+}
+
+static int acard_ahci_port_start(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+ dev_printk(KERN_INFO, dev,
+ "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
+ pp->fbs_supported = true;
+ } else
+ dev_printk(KERN_WARNING, dev,
+ "port %d is not capable of FBS\n",
+ ap->port_no);
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
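+ /* FIS-based switching keeps one received-FIS area per possible PMP device, 16 in total */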
+ rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, dma_sz);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_id = ent->driver_data;
+ struct ata_port_info pi = acard_ahci_port_info[board_id];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ int n_ports, i, rc;
+
+ VPRINTK("ENTER\n");
+
+ WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
+ pci_enable_msi(pdev);
+
+ hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
+ /* save initial config */
+ ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+ ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ 0x100 + ap->port_no * 0x80, "port");
+
+ /* set initial link pm policy */
+ /*
+ ap->pm_policy = NOT_AVAILABLE;
+ */
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ /* initialize adapter */
+ rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ if (rc)
+ return rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ acard_ahci_pci_print_info(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
+ &acard_ahci_sht);
+}
+
+static int __init acard_ahci_init(void)
+{
+ return pci_register_driver(&acard_ahci_pci_driver);
+}
+
+static void __exit acard_ahci_exit(void)
+{
+ pci_unregister_driver(&acard_ahci_pci_driver);
+}
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(acard_ahci_init);
+module_exit(acard_ahci_exit);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 328826381a2d..b8d96ce37fc9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,6 +380,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
{ PCI_DEVICE(0x1b4b, 0x9123),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
/* Promise */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 329cbbb91284..3e606c34f57b 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -311,6 +311,8 @@ extern struct device_attribute *ahci_sdev_attrs[];
extern struct ata_port_operations ahci_ops;
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
void ahci_save_initial_config(struct device *dev,
struct ahci_host_priv *hpriv,
unsigned int force_port_map,
@@ -326,6 +328,7 @@ int ahci_stop_engine(struct ata_port *ap);
void ahci_start_engine(struct ata_port *ap);
int ahci_check_ready(struct ata_link *link);
int ahci_kick_engine(struct ata_port *ap);
+int ahci_port_resume(struct ata_port *ap);
void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ebc08d65b3dd..26d452339e98 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -87,10 +87,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
@@ -1133,8 +1130,8 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
return ata_dev_classify(&tf);
}
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts)
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
{
dma_addr_t cmd_tbl_dma;
@@ -1145,6 +1142,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
+EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
int ahci_kick_engine(struct ata_port *ap)
{
@@ -1918,7 +1916,7 @@ static void ahci_pmp_detach(struct ata_port *ap)
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
-static int ahci_port_resume(struct ata_port *ap)
+int ahci_port_resume(struct ata_port *ap)
{
ahci_power_up(ap);
ahci_start_port(ap);
@@ -1930,6 +1928,7 @@ static int ahci_port_resume(struct ata_port *ap)
return 0;
}
+EXPORT_SYMBOL_GPL(ahci_port_resume);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0a6a943b3779..d4e52e214859 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2240,7 +2240,7 @@ int ata_dev_configure(struct ata_device *dev)
if (id[ATA_ID_CFA_KEY_MGMT] & 1)
ata_dev_printk(dev, KERN_WARNING,
"supports DRM functions and may "
- "not be fully accessable.\n");
+ "not be fully accessible.\n");
snprintf(revbuf, 7, "CFA");
} else {
snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
@@ -2248,7 +2248,7 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_id_has_tpm(id))
ata_dev_printk(dev, KERN_WARNING,
"supports DRM functions and may "
- "not be fully accessable.\n");
+ "not be fully accessible.\n");
}
dev->n_sectors = ata_id_n_sectors(id);
@@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
/* End Marker */
{ }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 66aa4bee80a6..600f6353ecf8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -346,12 +346,11 @@ struct device_attribute *ata_common_sdev_attrs[] = {
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
-static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
{
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
/* "Invalid field in cbd" */
- done(cmd);
+ cmd->scsi_done(cmd);
}
/**
@@ -719,7 +718,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
* ata_scsi_qc_new - acquire new ata_queued_cmd reference
* @dev: ATA device to which the new command is attached
* @cmd: SCSI command that originated this ATA command
- * @done: SCSI command completion function
*
* Obtain a reference to an unused ata_queued_cmd structure,
* which is the basic libata structure representing a single
@@ -736,21 +734,20 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
* Command allocated, or %NULL if none available.
*/
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
struct ata_queued_cmd *qc;
qc = ata_qc_new_init(dev);
if (qc) {
qc->scsicmd = cmd;
- qc->scsidone = done;
+ qc->scsidone = cmd->scsi_done;
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
} else {
cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
- done(cmd);
+ cmd->scsi_done(cmd);
}
return qc;
@@ -1102,9 +1099,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
struct request_queue *q = sdev->request_queue;
void *buf;
- /* set the min alignment and padding */
- blk_queue_update_dma_alignment(sdev->request_queue,
- ATA_DMA_PAD_SZ - 1);
+ sdev->sector_size = ATA_SECT_SIZE;
+
+ /* set DMA padding */
blk_queue_update_dma_pad(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
@@ -1118,13 +1115,25 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
- /* ATA devices must be sector aligned */
sdev->sector_size = ata_id_logical_sector_size(dev->id);
- blk_queue_update_dma_alignment(sdev->request_queue,
- sdev->sector_size - 1);
sdev->manage_start_stop = 1;
}
+ /*
+ * ata_pio_sectors() expects buffer for each sector to not cross
+ * page boundary. Enforce it by requiring buffers to be sector
+ * aligned, which works iff sector_size is not larger than
+ * PAGE_SIZE. ATAPI devices also need the alignment as
+ * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+ */
+ if (sdev->sector_size > PAGE_SIZE)
+ ata_dev_printk(dev, KERN_WARNING,
+ "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+ sdev->sector_size);
+
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ sdev->sector_size - 1);
+
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1735,7 +1744,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
* ata_scsi_translate - Translate then issue SCSI command to ATA device
* @dev: ATA device to which the command is addressed
* @cmd: SCSI command to execute
- * @done: SCSI command completion function
* @xlat_func: Actor which translates @cmd to an ATA taskfile
*
* Our ->queuecommand() function has decided that the SCSI
@@ -1759,7 +1767,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
* needs to be deferred.
*/
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *),
ata_xlat_func_t xlat_func)
{
struct ata_port *ap = dev->link->ap;
@@ -1768,7 +1775,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
VPRINTK("ENTER\n");
- qc = ata_scsi_qc_new(dev, cmd, done);
+ qc = ata_scsi_qc_new(dev, cmd);
if (!qc)
goto err_mem;
@@ -1804,14 +1811,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
early_finish:
ata_qc_free(qc);
- qc->scsidone(cmd);
+ cmd->scsi_done(cmd);
DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
ata_qc_free(qc);
cmd->result = (DID_ERROR << 16);
- qc->scsidone(cmd);
+ cmd->scsi_done(cmd);
err_mem:
DPRINTK("EXIT - internal\n");
return 0;
@@ -3116,7 +3123,6 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
}
static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
- void (*done)(struct scsi_cmnd *),
struct ata_device *dev)
{
u8 scsi_op = scmd->cmnd[0];
@@ -3150,9 +3156,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
}
if (xlat_func)
- rc = ata_scsi_translate(dev, scmd, done, xlat_func);
+ rc = ata_scsi_translate(dev, scmd, xlat_func);
else
- ata_scsi_simulate(dev, scmd, done);
+ ata_scsi_simulate(dev, scmd);
return rc;
@@ -3160,7 +3166,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
@@ -3199,7 +3205,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
- rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev);
+ rc = __ata_scsi_queuecmd(cmd, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
cmd->scsi_done(cmd);
@@ -3214,7 +3220,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
* ata_scsi_simulate - simulate SCSI command on ATA device
* @dev: the target device
* @cmd: SCSI command being sent to device.
- * @done: SCSI command completion function.
*
* Interprets and directly executes a select list of SCSI commands
* that can be handled internally.
@@ -3223,8 +3228,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
* spin_lock_irqsave(host lock)
*/
-void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
- void (*done)(struct scsi_cmnd *))
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
@@ -3233,17 +3237,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
args.dev = dev;
args.id = dev->id;
args.cmd = cmd;
- args.done = done;
+ args.done = cmd->scsi_done;
switch(scsicmd[0]) {
/* TODO: worth improving? */
case FORMAT_UNIT:
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
break;
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else switch (scsicmd[2]) {
@@ -3269,7 +3273,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
default:
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
break;
}
break;
@@ -3281,7 +3285,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
case MODE_SELECT: /* unconditionally return */
case MODE_SELECT_10: /* bad-field-in-cdb */
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
break;
case READ_CAPACITY:
@@ -3292,7 +3296,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
break;
case REPORT_LUNS:
@@ -3302,7 +3306,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
case REQUEST_SENSE:
ata_scsi_set_sense(cmd, 0, 0, 0);
cmd->result = (DRIVER_SENSE << 24);
- done(cmd);
+ cmd->scsi_done(cmd);
break;
/* if we reach this, then writeback caching is disabled,
@@ -3324,14 +3328,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
else
- ata_scsi_invalid_field(cmd, done);
+ ata_scsi_invalid_field(cmd);
break;
/* all other commands */
default:
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
/* "Invalid command operation code" */
- done(cmd);
+ cmd->scsi_done(cmd);
break;
}
}
@@ -3858,7 +3862,6 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
- * @done: Completion function, called when command is complete
* @ap: ATA port to which the command is being sent
*
* RETURNS:
@@ -3866,18 +3869,17 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
* 0 otherwise.
*/
-int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
- struct ata_port *ap)
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
{
int rc = 0;
ata_scsi_dump_cdb(ap, cmd);
if (likely(ata_dev_enabled(ap->link.device)))
- rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
+ rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
+ cmd->scsi_done(cmd);
}
return rc;
}
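
The libata-scsi.c hunks above complete the switch to the host-lock-less queuecommand convention: the completion callback is no longer threaded through as a parameter, every path simply calls cmd->scsi_done(). A rough sketch of a SAS-glue ->queuecommand() built on the reworked ata_sas_queuecmd() (not part of the patch; example_to_ata_port() is a hypothetical lookup helper):

	#include <scsi/scsi_host.h>
	#include <scsi/scsi_cmnd.h>
	#include <linux/libata.h>

	/* two-argument ->queuecommand(): no completion callback is passed in */
	static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		struct ata_port *ap = example_to_ata_port(shost);	/* hypothetical */

		/* libata completes the command through cmd->scsi_done() */
		return ata_sas_queuecmd(cmd, ap);
	}
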
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 7688868557b9..538ec38ba995 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.6.8"
+#define DRV_VERSION "0.6.10"
struct hpt_clock {
u8 xfer_mode;
@@ -110,18 +110,23 @@ static const struct hpt_clock hpt366_25[] = {
{ 0, 0x01208585 }
};
-static const char *bad_ata33[] = {
- "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
- "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
- "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+static const char * const bad_ata33[] = {
+ "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+ "Maxtor 90845U3", "Maxtor 90650U2",
+ "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+ "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+ "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+ "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
"Maxtor 90510D4",
"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
- "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
- "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+ "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+ "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+ "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+ "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
NULL
};
-static const char *bad_ata66_4[] = {
+static const char * const bad_ata66_4[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
"IBM-DTLA-307045",
@@ -140,12 +145,13 @@ static const char *bad_ata66_4[] = {
NULL
};
-static const char *bad_ata66_3[] = {
+static const char * const bad_ata66_3[] = {
"WDC AC310200R",
NULL
};
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i = 0;
@@ -154,8 +160,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
while (list[i] != NULL) {
if (!strcmp(list[i], model_num)) {
- printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
- modestr, list[i]);
+ pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+ modestr, list[i]);
return 1;
}
i++;
@@ -288,6 +294,7 @@ static struct ata_port_operations hpt366_port_ops = {
static void hpt36x_init_chipset(struct pci_dev *dev)
{
u8 drive_fast;
+
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
@@ -349,16 +356,16 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
- switch((reg1 & 0x700) >> 8) {
- case 9:
- hpriv = &hpt366_40;
- break;
- case 5:
- hpriv = &hpt366_25;
- break;
- default:
- hpriv = &hpt366_33;
- break;
+ switch ((reg1 & 0x700) >> 8) {
+ case 9:
+ hpriv = &hpt366_40;
+ break;
+ case 5:
+ hpriv = &hpt366_25;
+ break;
+ default:
+ hpriv = &hpt366_33;
+ break;
}
/* Now kick off ATA set up */
return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
@@ -385,9 +392,9 @@ static const struct pci_device_id hpt36x[] = {
};
static struct pci_driver hpt36x_pci_driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
.id_table = hpt36x,
- .probe = hpt36x_init_one,
+ .probe = hpt36x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9ae4c0830577..4c5b5183225e 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,7 +8,7 @@
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
*
* TODO
* Look into engine reset on timeout errors. Should not be required.
@@ -24,7 +24,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.15"
+#define DRV_VERSION "0.6.22"
struct hpt_clock {
u8 xfer_speed;
@@ -210,7 +210,7 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = ap->host->private_data;
- while(clocks->xfer_speed) {
+ while (clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
@@ -219,7 +219,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
return 0xffffffffU; /* silence compiler warning */
}
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i = 0;
@@ -228,8 +229,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
while (list[i] != NULL) {
if (!strcmp(list[i], model_num)) {
- printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
- modestr, list[i]);
+ pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+ modestr, list[i]);
return 1;
}
i++;
@@ -237,18 +238,23 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
return 0;
}
-static const char *bad_ata33[] = {
- "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
- "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
- "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+static const char * const bad_ata33[] = {
+ "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+ "Maxtor 90845U3", "Maxtor 90650U2",
+ "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+ "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+ "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+ "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
"Maxtor 90510D4",
"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
- "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
- "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+ "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+ "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+ "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+ "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
NULL
};
-static const char *bad_ata100_5[] = {
+static const char * const bad_ata100_5[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
"IBM-DTLA-307045",
@@ -302,6 +308,22 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
}
/**
+ * hpt372_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: mode mask
+ *
+ * The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ * to support the UltraDMA modes 1, 2, and 3, nor any of the MWDMA modes.
+ */
+static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask)
+{
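+ /* 0xE << ATA_SHIFT_UDMA clears UDMA1-UDMA3; UDMA0, UDMA4+ and the PIO bits are left intact */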
+ if (ata_id_is_sata(adev->id))
+ mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+ return mask;
+}
+
+/**
* hpt37x_cable_detect - Detect the cable type
* @ap: ATA port to detect on
*
@@ -373,6 +395,7 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
{ 0x50, 1, 0x04, 0x04 },
{ 0x54, 1, 0x04, 0x04 }
};
+
if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
return -ENOENT;
@@ -586,11 +609,11 @@ static struct ata_port_operations hpt370a_port_ops = {
};
/*
- * Configuration for HPT372, HPT371, HPT302. Slightly different PIO
- * and DMA mode setting functionality.
+ * Configuration for HPT371 and HPT302. Slightly different PIO and DMA
+ * mode setting functionality.
*/
-static struct ata_port_operations hpt372_port_ops = {
+static struct ata_port_operations hpt302_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt37x_bmdma_stop,
@@ -602,14 +625,23 @@ static struct ata_port_operations hpt372_port_ops = {
};
/*
- * Configuration for HPT374. Mode setting works like 372 and friends
+ * Configuration for HPT372. Mode setting works like 371 and 302
+ * but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372_port_ops = {
+ .inherits = &hpt302_port_ops,
+ .mode_filter = hpt372_filter,
+};
+
+/*
+ * Configuration for HPT374. Mode setting and filtering works like 372
* but we have a different cable detection procedure for function 1.
*/
static struct ata_port_operations hpt374_fn1_port_ops = {
.inherits = &hpt372_port_ops,
.cable_detect = hpt374_fn1_cable_detect,
- .prereset = hpt37x_pre_reset,
};
/**
@@ -647,12 +679,12 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
u32 reg5c;
int tries;
- for(tries = 0; tries < 0x5000; tries++) {
+ for (tries = 0; tries < 0x5000; tries++) {
udelay(50);
pci_read_config_byte(dev, 0x5b, &reg5b);
if (reg5b & 0x80) {
/* See if it stays set */
- for(tries = 0; tries < 0x1000; tries ++) {
+ for (tries = 0; tries < 0x1000; tries++) {
pci_read_config_byte(dev, 0x5b, &reg5b);
/* Failed ? */
if ((reg5b & 0x80) == 0)
@@ -660,7 +692,7 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
}
/* Turn off tuning, we have the DPLL set */
pci_read_config_dword(dev, 0x5c, &reg5c);
- pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+ pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
return 1;
}
}
@@ -672,6 +704,7 @@ static u32 hpt374_read_freq(struct pci_dev *pdev)
{
u32 freq;
unsigned long io_base = pci_resource_start(pdev, 4);
+
if (PCI_FUNC(pdev->devfn) & 1) {
struct pci_dev *pdev_0;
@@ -737,23 +770,23 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.udma_mask = ATA_UDMA5,
.port_ops = &hpt370a_port_ops
};
- /* HPT370 - UDMA100 */
+ /* HPT370 - UDMA66 */
static const struct ata_port_info info_hpt370_33 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
+ .udma_mask = ATA_UDMA4,
.port_ops = &hpt370_port_ops
};
- /* HPT370A - UDMA100 */
+ /* HPT370A - UDMA66 */
static const struct ata_port_info info_hpt370a_33 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
+ .udma_mask = ATA_UDMA4,
.port_ops = &hpt370a_port_ops
};
- /* HPT371, 372 and friends - UDMA133 */
+ /* HPT372 - UDMA133 */
static const struct ata_port_info info_hpt372 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -761,7 +794,15 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.udma_mask = ATA_UDMA6,
.port_ops = &hpt372_port_ops
};
- /* HPT374 - UDMA100, function 1 uses different prereset method */
+ /* HPT371, 302 - UDMA133 */
+ static const struct ata_port_info info_hpt302 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &hpt302_port_ops
+ };
+ /* HPT374 - UDMA100, function 1 uses different cable_detect method */
static const struct ata_port_info info_hpt374_fn0 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -796,7 +837,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (rc)
return rc;
- if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+ switch (dev->device) {
+ case PCI_DEVICE_ID_TTI_HPT366:
/* May be a later chip in disguise. Check */
/* Older chips are in the HPT366 driver. Ignore them */
if (rev < 3)
@@ -805,66 +847,66 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (rev == 6)
return -ENODEV;
- switch(rev) {
- case 3:
- ppi[0] = &info_hpt370;
- chip_table = &hpt370;
- prefer_dpll = 0;
- break;
- case 4:
- ppi[0] = &info_hpt370a;
- chip_table = &hpt370a;
- prefer_dpll = 0;
- break;
- case 5:
- ppi[0] = &info_hpt372;
- chip_table = &hpt372;
- break;
- default:
- printk(KERN_ERR "pata_hpt37x: Unknown HPT366 "
- "subtype, please report (%d).\n", rev);
- return -ENODEV;
- }
- } else {
- switch(dev->device) {
- case PCI_DEVICE_ID_TTI_HPT372:
- /* 372N if rev >= 2*/
- if (rev >= 2)
- return -ENODEV;
- ppi[0] = &info_hpt372;
- chip_table = &hpt372a;
- break;
- case PCI_DEVICE_ID_TTI_HPT302:
- /* 302N if rev > 1 */
- if (rev > 1)
- return -ENODEV;
- ppi[0] = &info_hpt372;
- /* Check this */
- chip_table = &hpt302;
- break;
- case PCI_DEVICE_ID_TTI_HPT371:
- if (rev > 1)
- return -ENODEV;
- ppi[0] = &info_hpt372;
- chip_table = &hpt371;
- /* Single channel device, master is not present
- but the BIOS (or us for non x86) must mark it
- absent */
- pci_read_config_byte(dev, 0x50, &mcr1);
- mcr1 &= ~0x04;
- pci_write_config_byte(dev, 0x50, mcr1);
- break;
- case PCI_DEVICE_ID_TTI_HPT374:
- chip_table = &hpt374;
- if (!(PCI_FUNC(dev->devfn) & 1))
- *ppi = &info_hpt374_fn0;
- else
- *ppi = &info_hpt374_fn1;
- break;
- default:
- printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
- return -ENODEV;
+ switch (rev) {
+ case 3:
+ ppi[0] = &info_hpt370;
+ chip_table = &hpt370;
+ prefer_dpll = 0;
+ break;
+ case 4:
+ ppi[0] = &info_hpt370a;
+ chip_table = &hpt370a;
+ prefer_dpll = 0;
+ break;
+ case 5:
+ ppi[0] = &info_hpt372;
+ chip_table = &hpt372;
+ break;
+ default:
+ pr_err(DRV_NAME ": Unknown HPT366 subtype, "
+ "please report (%d).\n", rev);
+ return -ENODEV;
}
+ break;
+ case PCI_DEVICE_ID_TTI_HPT372:
+ /* 372N if rev >= 2 */
+ if (rev >= 2)
+ return -ENODEV;
+ ppi[0] = &info_hpt372;
+ chip_table = &hpt372a;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT302:
+ /* 302N if rev > 1 */
+ if (rev > 1)
+ return -ENODEV;
+ ppi[0] = &info_hpt302;
+ /* Check this */
+ chip_table = &hpt302;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT371:
+ if (rev > 1)
+ return -ENODEV;
+ ppi[0] = &info_hpt302;
+ chip_table = &hpt371;
+ /*
+ * Single channel device, master is not present but the BIOS
+ * (or us for non x86) must mark it absent
+ */
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ break;
+ case PCI_DEVICE_ID_TTI_HPT374:
+ chip_table = &hpt374;
+ if (!(PCI_FUNC(dev->devfn) & 1))
+ *ppi = &info_hpt374_fn0;
+ else
+ *ppi = &info_hpt374_fn1;
+ break;
+ default:
+ pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
+ dev->device);
+ return -ENODEV;
}
/* Ok so this is a chip we support */
@@ -893,9 +935,11 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (chip_table == &hpt372a)
outb(0x0e, iobase + 0x9c);
- /* Some devices do not let this value be accessed via PCI space
- according to the old driver. In addition we must use the value
- from FN 0 on the HPT374 */
+ /*
+ * Some devices do not let this value be accessed via PCI space
+ * according to the old driver. In addition we must use the value
+ * from FN 0 on the HPT374.
+ */
if (chip_table == &hpt374) {
freq = hpt374_read_freq(dev);
@@ -909,10 +953,10 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
u8 sr;
u32 total = 0;
- printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
+ pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
/* This is the process the HPT371 BIOS is reported to use */
- for(i = 0; i < 128; i++) {
+ for (i = 0; i < 128; i++) {
pci_read_config_byte(dev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
@@ -947,20 +991,25 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* Select the DPLL clock. */
pci_write_config_byte(dev, 0x5b, 0x21);
- pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+ pci_write_config_dword(dev, 0x5C,
+ (f_high << 16) | f_low | 0x100);
- for(adjust = 0; adjust < 8; adjust++) {
+ for (adjust = 0; adjust < 8; adjust++) {
if (hpt37x_calibrate_dpll(dev))
break;
- /* See if it'll settle at a fractionally different clock */
+ /*
+ * See if it'll settle at a fractionally
+ * different clock
+ */
if (adjust & 1)
f_low -= adjust >> 1;
else
f_high += adjust >> 1;
- pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+ pci_write_config_dword(dev, 0x5C,
+ (f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
- printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
+ pr_err(DRV_NAME ": DPLL did not stabilize!\n");
return -ENODEV;
}
if (dpll == 3)
@@ -968,22 +1017,23 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
else
private_data = (void *)hpt37x_timings_50;
- printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
- MHz[clock_slot], MHz[dpll]);
+ pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
+ MHz[clock_slot], MHz[dpll]);
} else {
private_data = (void *)chip_table->clocks[clock_slot];
/*
* Perform a final fixup. Note that we will have used the
* DPLL on the HPT372 which means we don't have to worry
* about lack of UDMA133 support on lower clocks
- */
+ */
if (clock_slot < 2 && ppi[0] == &info_hpt370)
ppi[0] = &info_hpt370_33;
if (clock_slot < 2 && ppi[0] == &info_hpt370a)
ppi[0] = &info_hpt370a_33;
- printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
- chip_table->name, MHz[clock_slot]);
+
+ pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
+ chip_table->name, MHz[clock_slot]);
}
/* Now kick off ATA set up */
@@ -1001,9 +1051,9 @@ static const struct pci_device_id hpt37x[] = {
};
static struct pci_driver hpt37x_pci_driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
.id_table = hpt37x,
- .probe = hpt37x_init_one,
+ .probe = hpt37x_init_one,
.remove = ata_pci_remove_one
};
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 32f3463216b8..eca68caf5f46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -1,5 +1,5 @@
/*
- * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
+ * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
@@ -8,7 +8,7 @@
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
*
*
* TODO
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.10"
+#define DRV_VERSION "0.3.14"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -103,7 +103,7 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = hpt3x2n_clocks;
- while(clocks->xfer_speed) {
+ while (clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
@@ -113,6 +113,22 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
}
/**
+ * hpt372n_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: mode mask
+ *
+ * The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ * to support the UltraDMA modes 1, 2, and 3, nor any of the MWDMA modes.
+ */
+static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask)
+{
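+ /* 0xE << ATA_SHIFT_UDMA clears UDMA1-UDMA3; UDMA0, UDMA4+ and the PIO bits are left intact */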
+ if (ata_id_is_sata(adev->id))
+ mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+ return mask;
+}
+
+/**
* hpt3x2n_cable_detect - Detect the cable type
* @ap: ATA port to detect on
*
@@ -153,6 +169,7 @@ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
@@ -328,10 +345,10 @@ static struct scsi_host_template hpt3x2n_sht = {
};
/*
- * Configuration for HPT3x2n.
+ * Configuration for HPT302N/371N.
*/
-static struct ata_port_operations hpt3x2n_port_ops = {
+static struct ata_port_operations hpt3xxn_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt3x2n_bmdma_stop,
@@ -345,6 +362,15 @@ static struct ata_port_operations hpt3x2n_port_ops = {
.prereset = hpt3x2n_pre_reset,
};
+/*
+ * Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372n_port_ops = {
+ .inherits = &hpt3xxn_port_ops,
+ .mode_filter = &hpt372n_filter,
+};
+
/**
* hpt3xn_calibrate_dpll - Calibrate the DPLL loop
* @dev: PCI device
@@ -359,12 +385,12 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
u32 reg5c;
int tries;
- for(tries = 0; tries < 0x5000; tries++) {
+ for (tries = 0; tries < 0x5000; tries++) {
udelay(50);
pci_read_config_byte(dev, 0x5b, &reg5b);
if (reg5b & 0x80) {
/* See if it stays set */
- for(tries = 0; tries < 0x1000; tries ++) {
+ for (tries = 0; tries < 0x1000; tries++) {
pci_read_config_byte(dev, 0x5b, &reg5b);
/* Failed ? */
if ((reg5b & 0x80) == 0)
@@ -372,7 +398,7 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
}
/* Turn off tuning, we have the DPLL set */
pci_read_config_dword(dev, 0x5c, &reg5c);
- pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+ pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
return 1;
}
}
@@ -388,8 +414,19 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */
if ((fcnt >> 12) != 0xABCDE) {
- printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
- return 33; /* Not BIOS set */
+ int i;
+ u16 sr;
+ u32 total = 0;
+
+ pr_warning(DRV_NAME ": BIOS clock data not set.\n");
+
+ /* This is the process the HPT371 BIOS is reported to use */
+ for (i = 0; i < 128; i++) {
+ pci_read_config_word(pdev, 0x78, &sr);
+ total += sr & 0x1FF;
+ udelay(15);
+ }
+ fcnt = total / 128;
}
fcnt &= 0x1FF;
@@ -431,21 +468,27 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
* HPT372N 9 (HPT372N) * UDMA133
*
* (1) UDMA133 support depends on the bus clock
- *
- * To pin down HPT371N
*/
static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- /* HPT372N and friends - UDMA133 */
- static const struct ata_port_info info = {
+ /* HPT372N - UDMA133 */
+ static const struct ata_port_info info_hpt372n = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &hpt372n_port_ops
+ };
+ /* HPT302N and HPT371N - UDMA133 */
+ static const struct ata_port_info info_hpt3xxn = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
- .port_ops = &hpt3x2n_port_ops
+ .port_ops = &hpt3xxn_port_ops
};
- const struct ata_port_info *ppi[] = { &info, NULL };
+ const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL };
u8 rev = dev->revision;
u8 irqmask;
unsigned int pci_mhz;
@@ -459,30 +502,35 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (rc)
return rc;
- switch(dev->device) {
- case PCI_DEVICE_ID_TTI_HPT366:
- if (rev < 6)
- return -ENODEV;
- break;
- case PCI_DEVICE_ID_TTI_HPT371:
- if (rev < 2)
- return -ENODEV;
- /* 371N if rev > 1 */
- break;
- case PCI_DEVICE_ID_TTI_HPT372:
- /* 372N if rev >= 2*/
- if (rev < 2)
- return -ENODEV;
- break;
- case PCI_DEVICE_ID_TTI_HPT302:
- if (rev < 2)
- return -ENODEV;
- break;
- case PCI_DEVICE_ID_TTI_HPT372N:
- break;
- default:
- printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
+ switch (dev->device) {
+ case PCI_DEVICE_ID_TTI_HPT366:
+ /* 372N if rev >= 6 */
+ if (rev < 6)
return -ENODEV;
+ goto hpt372n;
+ case PCI_DEVICE_ID_TTI_HPT371:
+ /* 371N if rev >= 2 */
+ if (rev < 2)
+ return -ENODEV;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT372:
+ /* 372N if rev >= 2 */
+ if (rev < 2)
+ return -ENODEV;
+ goto hpt372n;
+ case PCI_DEVICE_ID_TTI_HPT302:
+ /* 302N if rev >= 2 */
+ if (rev < 2)
+ return -ENODEV;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT372N:
+hpt372n:
+ ppi[0] = &info_hpt372n;
+ break;
+ default:
+ pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
+ dev->device);
+ return -ENODEV;
}
/* Ok so this is a chip we support */
@@ -509,8 +557,10 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_write_config_byte(dev, 0x50, mcr1);
}
- /* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
- 50 for UDMA100. Right now we always use 66 */
+ /*
+ * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
+ * 50 for UDMA100. Right now we always use 66
+ */
pci_mhz = hpt3x2n_pci_clock(dev);
@@ -522,20 +572,22 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_write_config_byte(dev, 0x5B, 0x21);
/* Unlike the 37x we don't try jiggling the frequency */
- for(adjust = 0; adjust < 8; adjust++) {
+ for (adjust = 0; adjust < 8; adjust++) {
if (hpt3xn_calibrate_dpll(dev))
break;
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8) {
- printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
+ pr_err(DRV_NAME ": DPLL did not stabilize!\n");
return -ENODEV;
}
- printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
- pci_mhz);
- /* Set our private data up. We only need a few flags so we use
- it directly */
+ pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
+
+ /*
+ * Set our private data up. We only need a few flags
+ * so we use it directly.
+ */
if (pci_mhz > 60)
hpriv = (void *)(PCI66 | USE_DPLL);
@@ -562,9 +614,9 @@ static const struct pci_device_id hpt3x2n[] = {
};
static struct pci_driver hpt3x2n_pci_driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
.id_table = hpt3x2n,
- .probe = hpt3x2n_init_one,
+ .probe = hpt3x2n_init_one,
.remove = ata_pci_remove_one
};
@@ -579,7 +631,7 @@ static void __exit hpt3x2n_exit(void)
}
MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x2n);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 8cc536e49a0a..d7d8026cde99 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
.set_piomode = mpc52xx_ata_set_piomode,
.set_dmamode = mpc52xx_ata_set_dmamode,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index b777176ff494..e079cf29ed5d 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -370,7 +370,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
if (pci_resource_len(pdev, 0) == 0)
return -ENODEV;
- /* map IO regions and intialize host accordingly */
+ /* map IO regions and initialize host accordingly */
rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b655292e..9f47e8625266 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
+ const char *errmsg = NULL;
int res;
-
+
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
if (res) {
PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
/* First record contains just the start address */
rec = (const struct ihex_binrec *)fw->data;
if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
- PRINTK (KERN_ERR, "Bad microcode data (no start record)");
- return -EINVAL;
+ errmsg = "no start record";
+ goto fail;
}
start_address = be32_to_cpup((__be32 *)rec->data);
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
be16_to_cpu(rec->len));
if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
- PRINTK (KERN_ERR, "Bad microcode data (record too long)");
- return -EINVAL;
+ errmsg = "record too long";
+ goto fail;
}
if (be16_to_cpu(rec->len) & 3) {
- PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
- return -EINVAL;
+ errmsg = "odd number of bytes";
+ goto fail;
}
res = loader_write(lb, dev, rec);
if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
res = loader_start(lb, dev, start_address);
return res;
+fail:
+ release_firmware(fw);
+ PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+ return -EINVAL;
}
/********** give adapter parameters **********/
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bca9cb89a118..487a54739854 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -151,7 +151,7 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
if (arg == NULL)
return 0;
- return copy_to_user(arg, &PRIV(dev)->stats,
+ return copy_to_user(arg, &stats,
sizeof(struct idt77105_stats)) ? -EFAULT : 0;
}
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 5042bb2dab15..f53a43ae2bbe 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -572,7 +572,7 @@ struct idt77252_dev
#define SAR_STAT_TSQF 0x00001000 /* Transmit Status Queue full */
#define SAR_STAT_TMROF 0x00000800 /* Timer overflow */
#define SAR_STAT_PHYI 0x00000400 /* PHY device Interrupt flag */
-#define SAR_STAT_CMDBZ 0x00000200 /* ABR SAR Comand Busy Flag */
+#define SAR_STAT_CMDBZ 0x00000200 /* ABR SAR Command Busy Flag */
#define SAR_STAT_FBQ3A 0x00000100 /* Free Buffer Queue 3 Attention */
#define SAR_STAT_FBQ2A 0x00000080 /* Free Buffer Queue 2 Attention */
#define SAR_STAT_RSQF 0x00000040 /* Receive Status Queue full */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 729254053758..d80d51b62a1a 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2063,7 +2063,7 @@ static int tx_init(struct atm_dev *dev)
- UBR Table size is 4K
- UBR wait queue is 4K
since the table and wait queues are contiguous, all the bytes
- can be intialized by one memeset.
+ can be initialized by one memeset.
*/
vcsize_sel = 0;
@@ -2089,7 +2089,7 @@ static int tx_init(struct atm_dev *dev)
- ABR Table size is 2K
- ABR wait queue is 2K
since the table and wait queues are contiguous, all the bytes
- can be intialized by one memeset.
+ can be initialized by one memeset.
*/
i = ABR_SCHED_TABLE * iadev->memSize;
writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4f4cd4..25ef1a4556e6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
}
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
- if (!skb && net_ratelimit()) {
- dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+ if (!skb) {
+ if (net_ratelimit())
+ dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd96345bc35c..d57e8d0fb823 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,7 +70,7 @@ config PREVENT_FIRMWARE_BUILD
If unsure say Y here.
config FW_LOADER
- tristate "Userspace firmware loading support" if EMBEDDED
+ tristate "Userspace firmware loading support" if EXPERT
default y
---help---
This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2ca7f5b7b824..19f49e41ce5d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,31 +1,46 @@
/**
- * struct bus_type_private - structure to hold the private to the driver core portions of the bus_type structure.
+ * struct subsys_private - structure to hold the driver-core-private portions of the bus_type/class structure.
*
- * @subsys - the struct kset that defines this bus. This is the main kobject
- * @drivers_kset - the list of drivers associated with this bus
- * @devices_kset - the list of devices associated with this bus
+ * @subsys - the struct kset that defines this subsystem
+ * @devices_kset - the list of devices associated with this subsystem
+ *
+ * @drivers_kset - the list of drivers associated with this subsystem
* @klist_devices - the klist to iterate over the @devices_kset
* @klist_drivers - the klist to iterate over the @drivers_kset
* @bus_notifier - the bus notifier list for anything that cares about things
- * on this bus.
+ * on this bus.
* @bus - pointer back to the struct bus_type that this structure is associated
- * with.
+ * with.
+ *
+ * @class_interfaces - list of class_interfaces associated
+ * @glue_dirs - "glue" directory to put in-between the parent device to
+ * avoid namespace conflicts
+ * @class_mutex - mutex to protect the children, devices, and interfaces lists.
+ * @class - pointer back to the struct class that this structure is associated
+ * with.
*
* This structure is the one that is the actual kobject allowing struct
- * bus_type to be statically allocated safely. Nothing outside of the driver
- * core should ever touch these fields.
+ * bus_type/class to be statically allocated safely. Nothing outside of the
+ * driver core should ever touch these fields.
*/
-struct bus_type_private {
+struct subsys_private {
struct kset subsys;
- struct kset *drivers_kset;
struct kset *devices_kset;
+
+ struct kset *drivers_kset;
struct klist klist_devices;
struct klist klist_drivers;
struct blocking_notifier_head bus_notifier;
unsigned int drivers_autoprobe:1;
struct bus_type *bus;
+
+ struct list_head class_interfaces;
+ struct kset glue_dirs;
+ struct mutex class_mutex;
+ struct class *class;
};
+#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
struct driver_private {
struct kobject kobj;
@@ -36,33 +51,6 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
-
-/**
- * struct class_private - structure to hold the private to the driver core portions of the class structure.
- *
- * @class_subsys - the struct kset that defines this class. This is the main kobject
- * @class_devices - list of devices associated with this class
- * @class_interfaces - list of class_interfaces associated with this class
- * @class_dirs - "glue" directory for virtual devices associated with this class
- * @class_mutex - mutex to protect the children, devices, and interfaces lists.
- * @class - pointer back to the struct class that this structure is associated
- * with.
- *
- * This structure is the one that is the actual kobject allowing struct
- * class to be statically allocated safely. Nothing outside of the driver
- * core should ever touch these fields.
- */
-struct class_private {
- struct kset class_subsys;
- struct klist class_devices;
- struct list_head class_interfaces;
- struct kset class_dirs;
- struct mutex class_mutex;
- struct class *class;
-};
-#define to_class(obj) \
- container_of(obj, struct class_private, class_subsys.kobj)
-
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
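With the consolidation above, bus_type and class keep their driver-core state in the same struct subsys_private, and the kobject embedded in ->subsys can be mapped back with the new to_subsys_private() macro. A minimal sketch, assuming the drivers/base/base.h definitions above are in scope (the helper itself is hypothetical):

#include <linux/device.h>
#include <linux/kobject.h>

static struct bus_type *kobj_to_bus(struct kobject *kobj)
{
	struct subsys_private *priv = to_subsys_private(kobj);

	/* priv->bus is set for buses, priv->class for classes */
	return priv->bus;
}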
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 33c270a64db7..000e7b2006f8 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -20,7 +20,6 @@
#include "power/power.h"
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
-#define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj)
/*
* sysfs bindings for drivers
@@ -96,11 +95,11 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
- struct bus_type_private *bus_priv = to_bus(kobj);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
if (bus_attr->show)
- ret = bus_attr->show(bus_priv->bus, buf);
+ ret = bus_attr->show(subsys_priv->bus, buf);
return ret;
}
@@ -108,11 +107,11 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
- struct bus_type_private *bus_priv = to_bus(kobj);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
if (bus_attr->store)
- ret = bus_attr->store(bus_priv->bus, buf, count);
+ ret = bus_attr->store(subsys_priv->bus, buf, count);
return ret;
}
@@ -858,9 +857,9 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
int bus_register(struct bus_type *bus)
{
int retval;
- struct bus_type_private *priv;
+ struct subsys_private *priv;
- priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL);
+ priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -976,7 +975,7 @@ struct klist *bus_get_device_klist(struct bus_type *bus)
EXPORT_SYMBOL_GPL(bus_get_device_klist);
/*
- * Yes, this forcably breaks the klist abstraction temporarily. It
+ * Yes, this forcibly breaks the klist abstraction temporarily. It
* just wants to sort the klist, not change reference counts and
* take/drop locks rapidly in the process. It does all this while
* holding the lock for the list, so objects can't otherwise be
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c63a5687d69..4f1df2e8fd74 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -27,7 +27,7 @@ static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct class_attribute *class_attr = to_class_attr(attr);
- struct class_private *cp = to_class(kobj);
+ struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->show)
@@ -39,7 +39,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct class_attribute *class_attr = to_class_attr(attr);
- struct class_private *cp = to_class(kobj);
+ struct subsys_private *cp = to_subsys_private(kobj);
ssize_t ret = -EIO;
if (class_attr->store)
@@ -49,7 +49,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
static void class_release(struct kobject *kobj)
{
- struct class_private *cp = to_class(kobj);
+ struct subsys_private *cp = to_subsys_private(kobj);
struct class *class = cp->class;
pr_debug("class '%s': release.\n", class->name);
@@ -65,7 +65,7 @@ static void class_release(struct kobject *kobj)
static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
{
- struct class_private *cp = to_class(kobj);
+ struct subsys_private *cp = to_subsys_private(kobj);
struct class *class = cp->class;
return class->ns_type;
@@ -82,7 +82,7 @@ static struct kobj_type class_ktype = {
.child_ns_type = class_child_ns_type,
};
-/* Hotplug events for classes go to the class class_subsys */
+/* Hotplug events for classes go to the class subsys */
static struct kset *class_kset;
@@ -90,7 +90,7 @@ int class_create_file(struct class *cls, const struct class_attribute *attr)
{
int error;
if (cls)
- error = sysfs_create_file(&cls->p->class_subsys.kobj,
+ error = sysfs_create_file(&cls->p->subsys.kobj,
&attr->attr);
else
error = -EINVAL;
@@ -100,20 +100,20 @@ int class_create_file(struct class *cls, const struct class_attribute *attr)
void class_remove_file(struct class *cls, const struct class_attribute *attr)
{
if (cls)
- sysfs_remove_file(&cls->p->class_subsys.kobj, &attr->attr);
+ sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
}
static struct class *class_get(struct class *cls)
{
if (cls)
- kset_get(&cls->p->class_subsys);
+ kset_get(&cls->p->subsys);
return cls;
}
static void class_put(struct class *cls)
{
if (cls)
- kset_put(&cls->p->class_subsys);
+ kset_put(&cls->p->subsys);
}
static int add_class_attrs(struct class *cls)
@@ -162,7 +162,7 @@ static void klist_class_dev_put(struct klist_node *n)
int __class_register(struct class *cls, struct lock_class_key *key)
{
- struct class_private *cp;
+ struct subsys_private *cp;
int error;
pr_debug("device class '%s': registering\n", cls->name);
@@ -170,11 +170,11 @@ int __class_register(struct class *cls, struct lock_class_key *key)
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
- klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
+ klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->class_interfaces);
- kset_init(&cp->class_dirs);
+ kset_init(&cp->glue_dirs);
__mutex_init(&cp->class_mutex, "struct class mutex", key);
- error = kobject_set_name(&cp->class_subsys.kobj, "%s", cls->name);
+ error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error) {
kfree(cp);
return error;
@@ -187,15 +187,15 @@ int __class_register(struct class *cls, struct lock_class_key *key)
#if defined(CONFIG_BLOCK)
/* let the block class directory show up in the root of sysfs */
if (!sysfs_deprecated || cls != &block_class)
- cp->class_subsys.kobj.kset = class_kset;
+ cp->subsys.kobj.kset = class_kset;
#else
- cp->class_subsys.kobj.kset = class_kset;
+ cp->subsys.kobj.kset = class_kset;
#endif
- cp->class_subsys.kobj.ktype = &class_ktype;
+ cp->subsys.kobj.ktype = &class_ktype;
cp->class = cls;
cls->p = cp;
- error = kset_register(&cp->class_subsys);
+ error = kset_register(&cp->subsys);
if (error) {
kfree(cp);
return error;
@@ -210,7 +210,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
- kset_unregister(&cls->p->class_subsys);
+ kset_unregister(&cls->p->subsys);
}
static void class_create_release(struct class *cls)
@@ -295,7 +295,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
if (start)
start_knode = &start->knode_class;
- klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
+ klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);
@@ -482,8 +482,8 @@ void class_interface_unregister(struct class_interface *class_intf)
class_put(parent);
}
-ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf)
+ssize_t show_class_attr_string(struct class *class,
+ struct class_attribute *attr, char *buf)
{
struct class_attribute_string *cs;
cs = container_of(attr, struct class_attribute_string, attr);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6ed645411c40..080e9ca11017 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -338,6 +338,35 @@ static void device_remove_attributes(struct device *dev,
device_remove_file(dev, &attrs[i]);
}
+static int device_add_bin_attributes(struct device *dev,
+ struct bin_attribute *attrs)
+{
+ int error = 0;
+ int i;
+
+ if (attrs) {
+ for (i = 0; attr_name(attrs[i]); i++) {
+ error = device_create_bin_file(dev, &attrs[i]);
+ if (error)
+ break;
+ }
+ if (error)
+ while (--i >= 0)
+ device_remove_bin_file(dev, &attrs[i]);
+ }
+ return error;
+}
+
+static void device_remove_bin_attributes(struct device *dev,
+ struct bin_attribute *attrs)
+{
+ int i;
+
+ if (attrs)
+ for (i = 0; attr_name(attrs[i]); i++)
+ device_remove_bin_file(dev, &attrs[i]);
+}
+
static int device_add_groups(struct device *dev,
const struct attribute_group **groups)
{
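device_add_bin_attributes() above walks a bin_attribute array until it hits an entry whose attr.name is NULL (that is what attr_name() evaluates), mirroring the existing dev_attrs handling. A hedged sketch of what a class could now hand to the new dev_bin_attrs field; the "eeprom" attribute and its callback are made up for illustration:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
			   struct bin_attribute *attr,
			   char *buf, loff_t off, size_t count)
{
	return 0;	/* nothing to report in this sketch */
}

static struct bin_attribute foo_dev_bin_attrs[] = {
	{
		.attr	= { .name = "eeprom", .mode = S_IRUGO },
		.size	= 256,
		.read	= eeprom_read,
	},
	{ }	/* NULL name terminates the array for device_add_bin_attributes() */
};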
@@ -378,12 +407,15 @@ static int device_add_attrs(struct device *dev)
error = device_add_attributes(dev, class->dev_attrs);
if (error)
return error;
+ error = device_add_bin_attributes(dev, class->dev_bin_attrs);
+ if (error)
+ goto err_remove_class_attrs;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
- goto err_remove_class_attrs;
+ goto err_remove_class_bin_attrs;
}
error = device_add_groups(dev, dev->groups);
@@ -395,6 +427,9 @@ static int device_add_attrs(struct device *dev)
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
+ err_remove_class_bin_attrs:
+ if (class)
+ device_remove_bin_attributes(dev, class->dev_bin_attrs);
err_remove_class_attrs:
if (class)
device_remove_attributes(dev, class->dev_attrs);
@@ -412,8 +447,10 @@ static void device_remove_attrs(struct device *dev)
if (type)
device_remove_groups(dev, type->groups);
- if (class)
+ if (class) {
device_remove_attributes(dev, class->dev_attrs);
+ device_remove_bin_attributes(dev, class->dev_bin_attrs);
+ }
}
@@ -610,7 +647,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
dir->class = class;
kobject_init(&dir->kobj, &class_dir_ktype);
- dir->kobj.kset = &class->p->class_dirs;
+ dir->kobj.kset = &class->p->glue_dirs;
retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
if (retval < 0) {
@@ -635,7 +672,7 @@ static struct kobject *get_device_parent(struct device *dev,
if (sysfs_deprecated && dev->class == &block_class) {
if (parent && parent->class == &block_class)
return &parent->kobj;
- return &block_class.p->class_subsys.kobj;
+ return &block_class.p->subsys.kobj;
}
#endif
@@ -654,13 +691,13 @@ static struct kobject *get_device_parent(struct device *dev,
mutex_lock(&gdp_mutex);
/* find our class-directory at the parent and reference it */
- spin_lock(&dev->class->p->class_dirs.list_lock);
- list_for_each_entry(k, &dev->class->p->class_dirs.list, entry)
+ spin_lock(&dev->class->p->glue_dirs.list_lock);
+ list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
if (k->parent == parent_kobj) {
kobj = kobject_get(k);
break;
}
- spin_unlock(&dev->class->p->class_dirs.list_lock);
+ spin_unlock(&dev->class->p->glue_dirs.list_lock);
if (kobj) {
mutex_unlock(&gdp_mutex);
return kobj;
@@ -682,7 +719,7 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
/* see if we live in a "glue" directory */
if (!glue_dir || !dev->class ||
- glue_dir->kset != &dev->class->p->class_dirs)
+ glue_dir->kset != &dev->class->p->glue_dirs)
return;
kobject_put(glue_dir);
@@ -709,7 +746,7 @@ static int device_add_class_symlinks(struct device *dev)
return 0;
error = sysfs_create_link(&dev->kobj,
- &dev->class->p->class_subsys.kobj,
+ &dev->class->p->subsys.kobj,
"subsystem");
if (error)
goto out;
@@ -728,7 +765,7 @@ static int device_add_class_symlinks(struct device *dev)
#endif
/* link in the class directory pointing to the device */
- error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
+ error = sysfs_create_link(&dev->class->p->subsys.kobj,
&dev->kobj, dev_name(dev));
if (error)
goto out_device;
@@ -756,7 +793,7 @@ static void device_remove_class_symlinks(struct device *dev)
if (sysfs_deprecated && dev->class == &block_class)
return;
#endif
- sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
/**
@@ -947,7 +984,7 @@ int device_add(struct device *dev)
mutex_lock(&dev->class->p->class_mutex);
/* tie the class to the device */
klist_add_tail(&dev->knode_class,
- &dev->class->p->class_devices);
+ &dev->class->p->klist_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf,
@@ -1513,6 +1550,8 @@ EXPORT_SYMBOL_GPL(device_destroy);
* exclusion between two different calls of device_rename
* on the same device to ensure that new_name is valid and
* won't conflict with other devices.
+ *
+ * "Never use this function, bad things will happen" - gregkh
*/
int device_rename(struct device *dev, const char *new_name)
{
@@ -1535,7 +1574,7 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+ error = sysfs_rename_link(&dev->class->p->subsys.kobj,
&dev->kobj, old_device_name, new_name);
if (error)
goto out;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ce012a9c6201..36b43052001d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -117,12 +117,21 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d WritebackTmp: %8lu kB\n"
"Node %d Slab: %8lu kB\n"
"Node %d SReclaimable: %8lu kB\n"
- "Node %d SUnreclaim: %8lu kB\n",
+ "Node %d SUnreclaim: %8lu kB\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ "Node %d AnonHugePages: %8lu kB\n"
+#endif
+ ,
nid, K(node_page_state(nid, NR_FILE_DIRTY)),
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
- nid, K(node_page_state(nid, NR_ANON_PAGES)),
+ nid, K(node_page_state(nid, NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+ HPAGE_PMD_NR
+#endif
+ ),
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
@@ -133,7 +142,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+ nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ , nid,
+ K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+ HPAGE_PMD_NR)
+#endif
+ );
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
}
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 81f2c84697f4..42f97f925629 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_suspend(), execute it and return its error code. Otherwise,
- * return -EINVAL.
+ * return 0.
*/
int pm_generic_runtime_suspend(struct device *dev)
{
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_resume(), execute it and return its error code. Otherwise,
- * return -EINVAL.
+ * return 0.
*/
int pm_generic_runtime_resume(struct device *dev)
{
@@ -185,7 +185,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;
ret = callback(dev);
- if (!ret) {
+ if (!ret && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
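The comment fixes above track the helpers' current behaviour: with no runtime callbacks in the driver's dev_pm_ops they report success (0) rather than -EINVAL, and the resume-side re-sync of the runtime-PM status is now skipped when runtime PM is disabled for the device. A hypothetical driver wiring the generic helpers into its dev_pm_ops might look like:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume	 = pm_generic_runtime_resume,
	.runtime_idle	 = pm_generic_runtime_idle,
};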
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ead3e79d6fcf..83404973f97a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -8,7 +8,7 @@
*
*
* The driver model core calls device_pm_add() when a device is registered.
- * This will intialize the embedded device_pm_info object in the device
+ * This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
* controlling device power management will also be added.
*
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
+#include <linux/suspend.h>
#include "../base.h"
#include "power.h"
@@ -41,16 +42,13 @@
*/
LIST_HEAD(dpm_list);
+LIST_HEAD(dpm_prepared_list);
+LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
-/*
- * Set once the preparation of devices for a PM transition has started, reset
- * before starting to resume devices. Protected by dpm_list_mtx.
- */
-static bool transition_started;
-
static int async_error;
/**
@@ -59,7 +57,7 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.status = DPM_ON;
+ dev->power.in_suspend = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -90,22 +88,11 @@ void device_pm_unlock(void)
void device_pm_add(struct device *dev)
{
pr_debug("PM: Adding info for %s:%s\n",
- dev->bus ? dev->bus->name : "No Bus",
- kobject_name(&dev->kobj));
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent) {
- if (dev->parent->power.status >= DPM_SUSPENDING)
- dev_warn(dev, "parent %s should not be sleeping\n",
- dev_name(dev->parent));
- } else if (transition_started) {
- /*
- * We refuse to register parentless devices while a PM
- * transition is in progress in order to avoid leaving them
- * unhandled down the road
- */
- dev_WARN(dev, "Parentless device registered during a PM transaction\n");
- }
-
+ if (dev->parent && dev->parent->power.in_suspend)
+ dev_warn(dev, "parent %s should not be sleeping\n",
+ dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
mutex_unlock(&dpm_list_mtx);
}
@@ -117,8 +104,7 @@ void device_pm_add(struct device *dev)
void device_pm_remove(struct device *dev)
{
pr_debug("PM: Removing info for %s:%s\n",
- dev->bus ? dev->bus->name : "No Bus",
- kobject_name(&dev->kobj));
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
@@ -135,10 +121,8 @@ void device_pm_remove(struct device *dev)
void device_pm_move_before(struct device *deva, struct device *devb)
{
pr_debug("PM: Moving %s:%s before %s:%s\n",
- deva->bus ? deva->bus->name : "No Bus",
- kobject_name(&deva->kobj),
- devb->bus ? devb->bus->name : "No Bus",
- kobject_name(&devb->kobj));
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
list_move_tail(&deva->power.entry, &devb->power.entry);
}
@@ -151,10 +135,8 @@ void device_pm_move_before(struct device *deva, struct device *devb)
void device_pm_move_after(struct device *deva, struct device *devb)
{
pr_debug("PM: Moving %s:%s after %s:%s\n",
- deva->bus ? deva->bus->name : "No Bus",
- kobject_name(&deva->kobj),
- devb->bus ? devb->bus->name : "No Bus",
- kobject_name(&devb->kobj));
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
list_move(&deva->power.entry, &devb->power.entry);
}
@@ -166,8 +148,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
void device_pm_move_last(struct device *dev)
{
pr_debug("PM: Moving %s:%s to end of list\n",
- dev->bus ? dev->bus->name : "No Bus",
- kobject_name(&dev->kobj));
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -303,7 +284,7 @@ static int pm_noirq_op(struct device *dev,
pm_message_t state)
{
int error = 0;
- ktime_t calltime, delta, rettime;
+ ktime_t calltime = ktime_set(0, 0), delta, rettime;
if (initcall_debug) {
pr_info("calling %s+ @ %i, parent: %s\n",
@@ -405,7 +386,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
int error)
{
printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+ dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
@@ -475,33 +456,24 @@ End:
*/
void dpm_resume_noirq(pm_message_t state)
{
- struct list_head list;
ktime_t starttime = ktime_get();
- INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- transition_started = false;
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.next);
+ while (!list_empty(&dpm_noirq_list)) {
+ struct device *dev = to_device(dpm_noirq_list.next);
+ int error;
get_device(dev);
- if (dev->power.status > DPM_OFF) {
- int error;
-
- dev->power.status = DPM_OFF;
- mutex_unlock(&dpm_list_mtx);
+ list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
+ error = device_resume_noirq(dev, state);
+ if (error)
+ pm_dev_err(dev, state, " early", error);
- mutex_lock(&dpm_list_mtx);
- if (error)
- pm_dev_err(dev, state, " early", error);
- }
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &list);
+ mutex_lock(&dpm_list_mtx);
put_device(dev);
}
- list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
resume_device_irqs();
@@ -544,7 +516,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.status = DPM_RESUMING;
+ dev->power.in_suspend = false;
if (dev->bus) {
if (dev->bus->pm) {
@@ -610,19 +582,14 @@ static bool is_async(struct device *dev)
*/
static void dpm_resume(pm_message_t state)
{
- struct list_head list;
struct device *dev;
ktime_t starttime = ktime_get();
- INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_list, power.entry) {
- if (dev->power.status < DPM_OFF)
- continue;
-
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
INIT_COMPLETION(dev->power.completion);
if (is_async(dev)) {
get_device(dev);
@@ -630,28 +597,24 @@ static void dpm_resume(pm_message_t state)
}
}
- while (!list_empty(&dpm_list)) {
- dev = to_device(dpm_list.next);
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
get_device(dev);
- if (dev->power.status >= DPM_OFF && !is_async(dev)) {
+ if (!is_async(dev)) {
int error;
mutex_unlock(&dpm_list_mtx);
error = device_resume(dev, state, false);
-
- mutex_lock(&dpm_list_mtx);
if (error)
pm_dev_err(dev, state, "", error);
- } else if (dev->power.status == DPM_SUSPENDING) {
- /* Allow new children of the device to be registered */
- dev->power.status = DPM_RESUMING;
+
+ mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &list);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
}
- list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
@@ -697,22 +660,18 @@ static void dpm_complete(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- transition_started = false;
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.prev);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- if (dev->power.status > DPM_ON) {
- dev->power.status = DPM_ON;
- mutex_unlock(&dpm_list_mtx);
+ dev->power.in_suspend = false;
+ list_move(&dev->power.entry, &list);
+ mutex_unlock(&dpm_list_mtx);
- device_complete(dev, state);
- pm_runtime_put_sync(dev);
+ device_complete(dev, state);
+ pm_runtime_put_sync(dev);
- mutex_lock(&dpm_list_mtx);
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &list);
+ mutex_lock(&dpm_list_mtx);
put_device(dev);
}
list_splice(&list, &dpm_list);
@@ -802,15 +761,13 @@ End:
*/
int dpm_suspend_noirq(pm_message_t state)
{
- struct list_head list;
ktime_t starttime = ktime_get();
int error = 0;
- INIT_LIST_HEAD(&list);
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.prev);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -823,12 +780,10 @@ int dpm_suspend_noirq(pm_message_t state)
put_device(dev);
break;
}
- dev->power.status = DPM_OFF_IRQ;
if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &list);
+ list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
}
- list_splice_tail(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));
@@ -876,6 +831,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (async_error)
goto End;
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto End;
+ }
+
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
@@ -907,9 +867,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
}
- if (!error)
- dev->power.status = DPM_OFF;
-
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -951,16 +908,14 @@ static int device_suspend(struct device *dev)
*/
static int dpm_suspend(pm_message_t state)
{
- struct list_head list;
ktime_t starttime = ktime_get();
int error = 0;
- INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.prev);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -974,12 +929,11 @@ static int dpm_suspend(pm_message_t state)
break;
}
if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &list);
+ list_move(&dev->power.entry, &dpm_suspended_list);
put_device(dev);
if (async_error)
break;
}
- list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
@@ -1038,22 +992,20 @@ static int device_prepare(struct device *dev, pm_message_t state)
*/
static int dpm_prepare(pm_message_t state)
{
- struct list_head list;
int error = 0;
- INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- transition_started = true;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
- dev->power.status = DPM_PREPARING;
mutex_unlock(&dpm_list_mtx);
pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
- /* Wake-up requested during system sleep transition. */
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
error = -EBUSY;
} else {
@@ -1062,24 +1014,22 @@ static int dpm_prepare(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
- dev->power.status = DPM_ON;
if (error == -EAGAIN) {
put_device(dev);
error = 0;
continue;
}
- printk(KERN_ERR "PM: Failed to prepare device %s "
- "for power transition: error %d\n",
- kobject_name(&dev->kobj), error);
+ printk(KERN_INFO "PM: Device %s not prepared "
+ "for power transition: code %d\n",
+ dev_name(dev), error);
put_device(dev);
break;
}
- dev->power.status = DPM_SUSPENDING;
+ dev->power.in_suspend = true;
if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &list);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
}
- list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
return error;
}
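The rework above retires the per-device power.status state machine in favour of a single in_suspend flag plus three extra lists, so a device's progress through a sleep transition is encoded by which list it currently sits on. A summary of the flow, reconstructed from the hunks above rather than quoted from the patch:

/*
 *   dpm_prepare():        dpm_list           -> dpm_prepared_list
 *   dpm_suspend():        dpm_prepared_list  -> dpm_suspended_list
 *   dpm_suspend_noirq():  dpm_suspended_list -> dpm_noirq_list
 *   dpm_resume_noirq():   dpm_noirq_list     -> dpm_suspended_list
 *   dpm_resume():         dpm_suspended_list -> dpm_prepared_list
 *   dpm_complete():       dpm_prepared_list  -> dpm_list
 */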
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 02c652be83e7..42615b419dfb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,13 +250,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.irq_safe) {
+ retval = cb(dev);
+ } else {
+ spin_unlock_irq(&dev->power.lock);
- retval = cb(dev);
+ retval = cb(dev);
- spin_lock_irq(&dev->power.lock);
+ spin_lock_irq(&dev->power.lock);
+ }
dev->power.runtime_error = retval;
-
return retval;
}
@@ -404,12 +407,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
- if (parent && !parent->power.ignore_children) {
- spin_unlock_irq(&dev->power.lock);
+ /* Maybe the parent is now able to suspend. */
+ if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
- pm_request_idle(parent);
+ spin_lock(&parent->power.lock);
+ rpm_idle(parent, RPM_ASYNC);
+ spin_unlock(&parent->power.lock);
- spin_lock_irq(&dev->power.lock);
+ spin_lock(&dev->power.lock);
}
out:
@@ -527,10 +533,13 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (!parent && dev->parent) {
/*
- * Increment the parent's resume counter and resume it if
- * necessary.
+ * Increment the parent's usage counter and resume it if
+ * necessary. Not needed if dev is irq-safe; then the
+ * parent is permanently resumed.
*/
parent = dev->parent;
+ if (dev->power.irq_safe)
+ goto skip_parent;
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
@@ -553,6 +562,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
goto repeat;
}
+ skip_parent:
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
@@ -584,7 +594,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
rpm_idle(dev, RPM_ASYNC);
out:
- if (parent) {
+ if (parent && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);
pm_runtime_put(parent);
@@ -1065,7 +1075,6 @@ EXPORT_SYMBOL_GPL(pm_runtime_allow);
* Set the power.no_callbacks flag, which tells the PM core that this
* device is power-managed through its parent and has no run-time PM
* callbacks of its own. The run-time sysfs attributes will be removed.
- *
*/
void pm_runtime_no_callbacks(struct device *dev)
{
@@ -1078,6 +1087,27 @@ void pm_runtime_no_callbacks(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled. It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 1;
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
+/**
* update_autosuspend - Handle a change to a device's autosuspend settings.
* @dev: Device to handle.
* @old_delay: The former autosuspend_delay value.
@@ -1199,4 +1229,6 @@ void pm_runtime_remove(struct device *dev)
/* Change the status back to 'suspended' to match the initial status. */
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
+ if (dev->power.irq_safe && dev->parent)
+ pm_runtime_put_sync(dev->parent);
}
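pm_runtime_irq_safe() added above is a one-time opt-in a driver makes, typically at probe time before enabling runtime PM; afterwards its ->runtime_suspend() and ->runtime_resume() run with the power spinlock held and interrupts off, and the parent is pinned active for the lifetime of the device. A hypothetical probe sketch (foo_probe and the platform device are illustrative only):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* runtime PM callbacks may now be invoked from atomic context */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}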
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 71c5528e1c35..8ec406d8f548 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -542,26 +542,26 @@ static void pm_wakeup_update_hit_counts(void)
}
/**
- * pm_check_wakeup_events - Check for new wakeup events.
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
*
* Compare the current number of registered wakeup events with its preserved
- * value from the past to check if new wakeup events have been registered since
- * the old value was stored. Check if the current number of wakeup events being
- * processed is zero.
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored. Also return true if the current number of
+ * wakeup events being processed is different from zero.
*/
-bool pm_check_wakeup_events(void)
+bool pm_wakeup_pending(void)
{
unsigned long flags;
- bool ret = true;
+ bool ret = false;
spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
- ret = ((unsigned int)atomic_read(&event_count) == saved_count)
- && !atomic_read(&events_in_progress);
- events_check_enabled = ret;
+ ret = ((unsigned int)atomic_read(&event_count) != saved_count)
+ || atomic_read(&events_in_progress);
+ events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
- if (!ret)
+ if (ret)
pm_wakeup_update_hit_counts();
return ret;
}
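Note the polarity flip above: pm_check_wakeup_events() returned true when it was still safe to proceed, whereas pm_wakeup_pending() returns true when the transition should be aborted, which is why the callers added in drivers/base/power/main.c translate a true result into -EBUSY. A minimal sketch of the new calling convention (helper name hypothetical, declaration assumed to come from linux/suspend.h):

#include <linux/errno.h>
#include <linux/suspend.h>

static int abort_if_wakeup_pending(void)
{
	if (pm_wakeup_pending())
		return -EBUSY;	/* a wakeup event raced with the transition */
	return 0;
}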
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a6f6ca..83c32cb72582 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@ config XEN_BLKDEV_FRONTEND
tristate "Xen virtual block device support"
depends on XEN
default y
+ select XEN_XENBUS_FRONTEND
help
This driver implements the front-end of the Xen virtual
block device driver. It communicates with a back-end driver
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d6312d..40528ba56d1b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
-swim_mod-objs := swim.o swim_asm.o
+swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997183c6..06ea82cdf27d 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_ATA_OVER_ETH) += aoe.o
-aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8e0f9256eb58..9279272b3732 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -238,9 +238,9 @@ static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
/*
* Enqueuing and dequeuing functions for cmdlists.
*/
-static inline void addQ(struct hlist_head *list, CommandList_struct *c)
+static inline void addQ(struct list_head *list, CommandList_struct *c)
{
- hlist_add_head(&c->list, list);
+ list_add_tail(&c->list, list);
}
static inline void removeQ(CommandList_struct *c)
@@ -253,12 +253,12 @@ static inline void removeQ(CommandList_struct *c)
* them off as 'stale' to prevent the driver from
* falling over.
*/
- if (WARN_ON(hlist_unhashed(&c->list))) {
+ if (WARN_ON(list_empty(&c->list))) {
c->cmd_type = CMD_MSG_STALE;
return;
}
- hlist_del_init(&c->list);
+ list_del_init(&c->list);
}
static void enqueue_cmd_and_start_io(ctlr_info_t *h,
@@ -905,7 +905,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h)
c->cmdindex = i;
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
temp64.val = (__u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -944,7 +944,7 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
}
memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
temp64.val = (__u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
sector_t total_size;
InquiryData_struct *inq_buff = NULL;
- for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+ for (logvol = 0; logvol <= h->highest_lun; logvol++) {
if (!h->drv[logvol])
continue;
if (memcmp(h->drv[logvol]->LunID, drv->LunID,
@@ -2888,8 +2888,8 @@ static void start_io(ctlr_info_t *h)
{
CommandList_struct *c;
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, CommandList_struct, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
@@ -3402,11 +3402,10 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
{
u32 tag;
CommandList_struct *c = NULL;
- struct hlist_node *tmp;
__u32 busaddr_masked, tag_masked;
tag = cciss_tag_discard_error_bits(raw_tag);
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ list_for_each_entry(c, &h->cmpQ, list) {
busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
tag_masked = cciss_tag_discard_error_bits(tag);
if (busaddr_masked == tag_masked) {
@@ -4572,8 +4571,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
h = hba[i];
h->pdev = pdev;
h->busy_initializing = 1;
- INIT_HLIST_HEAD(&h->cmpQ);
- INIT_HLIST_HEAD(&h->reqQ);
+ INIT_LIST_HEAD(&h->cmpQ);
+ INIT_LIST_HEAD(&h->reqQ);
mutex_init(&h->busy_shutting_down);
if (cciss_pci_init(h) != 0)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 4b8933d778f1..579f74918493 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -103,8 +103,8 @@ struct ctlr_info
struct access_method access;
/* queue and queue Info */
- struct hlist_head reqQ;
- struct hlist_head cmpQ;
+ struct list_head reqQ;
+ struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index eb060f1b00b6..35463d2f0ee7 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -195,7 +195,7 @@ typedef struct _CommandList_struct {
int ctlr;
int cmd_type;
long cmdindex;
- struct hlist_node list;
+ struct list_head list;
struct request * rq;
struct completion *waiting;
int retry_count;
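The cciss conversion above from hlist_head to list_head is more than renaming: a bare hlist has no tail pointer, so hlist_add_head() could only prepend, while list_add_tail() on a list_head enqueues at the end in O(1), and list_empty()/list_del_init() take over the unhashed checks. A generic sketch with a made-up command structure standing in for CommandList_struct:

#include <linux/list.h>

struct cmd {
	struct list_head entry;
};

static LIST_HEAD(reqQ);

static void enqueue_cmd(struct cmd *c)
{
	/* O(1) tail insert; hlist_add_head() could only push at the front */
	list_add_tail(&c->entry, &reqQ);
}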
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1ea1a34e78b2..3803a0348937 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -911,8 +911,6 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
- struct file *lo_file;
- struct file *md_file;
struct drbd_md md;
struct disk_conf dc; /* The user provided config... */
sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6be5401d0e88..29cd0dc9fe4f 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3372,11 +3372,8 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
if (ldev == NULL)
return;
- bd_release(ldev->backing_bdev);
- bd_release(ldev->md_bdev);
-
- fput(ldev->lo_file);
- fput(ldev->md_file);
+ blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(ldev);
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 29e5c70e4e26..8cbfaa687d72 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -855,7 +855,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
- struct inode *inode, *inode2;
+ struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
union drbd_state ns, os;
unsigned int max_seg_s;
@@ -907,46 +907,40 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
}
- nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
- if (IS_ERR(nbc->lo_file)) {
+ bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+ if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
- PTR_ERR(nbc->lo_file));
- nbc->lo_file = NULL;
+ PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
}
+ nbc->backing_bdev = bdev;
- inode = nbc->lo_file->f_dentry->d_inode;
-
- if (!S_ISBLK(inode->i_mode)) {
- retcode = ERR_DISK_NOT_BDEV;
- goto fail;
- }
-
- nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
- if (IS_ERR(nbc->md_file)) {
+ /*
+ * meta_dev_idx >= 0: external fixed size, possibly multiple
+ * drbd sharing one meta device. TODO in that case, paranoia
+ * check that [md_bdev, meta_dev_idx] is not yet used by some
+ * other drbd minor! (if you use drbd.conf + drbdadm, that
+ * should check it for you already; but if you don't, or
+ * someone fooled it, we need to double check here)
+ */
+ bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+ (nbc->dc.meta_dev_idx < 0) ?
+ (void *)mdev : (void *)drbd_m_holder);
+ if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
- PTR_ERR(nbc->md_file));
- nbc->md_file = NULL;
+ PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
}
+ nbc->md_bdev = bdev;
- inode2 = nbc->md_file->f_dentry->d_inode;
-
- if (!S_ISBLK(inode2->i_mode)) {
- retcode = ERR_MD_NOT_BDEV;
- goto fail;
- }
-
- nbc->backing_bdev = inode->i_bdev;
- if (bd_claim(nbc->backing_bdev, mdev)) {
- printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
- nbc->backing_bdev, mdev,
- nbc->backing_bdev->bd_holder,
- nbc->backing_bdev->bd_contains->bd_holder,
- nbc->backing_bdev->bd_holders);
- retcode = ERR_BDCLAIM_DISK;
+ if ((nbc->backing_bdev == nbc->md_bdev) !=
+ (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+ retcode = ERR_MD_IDX_INVALID;
goto fail;
}
@@ -955,28 +949,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
- goto release_bdev_fail;
- }
-
- /* meta_dev_idx >= 0: external fixed size,
- * possibly multiple drbd sharing one meta device.
- * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
- * not yet used by some other drbd minor!
- * (if you use drbd.conf + drbdadm,
- * that should check it for you already; but if you don't, or someone
- * fooled it, we need to double check here) */
- nbc->md_bdev = inode2->i_bdev;
- if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
- : (void *) drbd_m_holder)) {
- retcode = ERR_BDCLAIM_MD_DISK;
- goto release_bdev_fail;
- }
-
- if ((nbc->backing_bdev == nbc->md_bdev) !=
- (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
- nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
- retcode = ERR_MD_IDX_INVALID;
- goto release_bdev2_fail;
+ goto fail;
}
/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
@@ -987,7 +960,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
retcode = ERR_DISK_TO_SMALL;
- goto release_bdev2_fail;
+ goto fail;
}
if (nbc->dc.meta_dev_idx < 0) {
@@ -1004,7 +977,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
- goto release_bdev2_fail;
+ goto fail;
}
/* Make sure the new disk is big enough
@@ -1012,7 +985,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
retcode = ERR_DISK_TO_SMALL;
- goto release_bdev2_fail;
+ goto fail;
}
nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
@@ -1035,7 +1008,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
drbd_resume_io(mdev);
if (retcode < SS_SUCCESS)
- goto release_bdev2_fail;
+ goto fail;
if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;
@@ -1269,18 +1242,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
force_diskless:
drbd_force_state(mdev, NS(disk, D_FAILED));
drbd_md_sync(mdev);
- release_bdev2_fail:
- if (nbc)
- bd_release(nbc->md_bdev);
- release_bdev_fail:
- if (nbc)
- bd_release(nbc->backing_bdev);
fail:
if (nbc) {
- if (nbc->lo_file)
- fput(nbc->lo_file);
- if (nbc->md_file)
- fput(nbc->md_file);
+ if (nbc->backing_bdev)
+ blkdev_put(nbc->backing_bdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ if (nbc->md_bdev)
+ blkdev_put(nbc->md_bdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
lc_destroy(resync_lru);
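The drbd rework above follows the block-layer API change in this release: instead of filp_open() on the node, digging out the inode's i_bdev and bd_claim()-ing it, the backing and meta devices are opened by path with blkdev_get_by_path(), exclusivity is requested through FMODE_EXCL with the holder passed in the same call, and the matching release is a blkdev_put() with the same mode flags. A minimal sketch under those assumptions (helper names are illustrative):

#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *open_exclusive(const char *path, void *holder)
{
	/* caller checks the result with IS_ERR()/PTR_ERR() */
	return blkdev_get_by_path(path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
}

static void close_exclusive(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}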
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 25e4dffa0aad..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -597,6 +597,11 @@ static unsigned char fsector_t; /* sector in track */
static unsigned char in_sector_offset; /* offset within physical sector,
* expressed in units of 512 bytes */
+static inline bool drive_no_geom(int drive)
+{
+ return !current_type[drive] && !ITYPE(UDRS->fd_device);
+}
+
#ifndef fd_eject
static inline int fd_eject(int drive)
{
@@ -3276,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev);
+ __invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
@@ -3782,7 +3787,7 @@ static int check_floppy_change(struct gendisk *disk)
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
test_bit(drive, &fake_change) ||
- (!ITYPE(UDRS->fd_device) && !current_type[drive]))
+ drive_no_geom(drive))
return 1;
return 0;
}
@@ -3848,13 +3853,13 @@ static int __floppy_read_block_0(struct block_device *bdev)
static int floppy_revalidate(struct gendisk *disk)
{
int drive = (long)disk->private_data;
-#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device))
int cf;
int res = 0;
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
- test_bit(drive, &fake_change) || NO_GEOM) {
+ test_bit(drive, &fake_change) ||
+ drive_no_geom(drive)) {
if (WARN(atomic_read(&usage_count) == 0,
"VFS: revalidate called on non-open device.\n"))
return -EFAULT;
@@ -3862,7 +3867,7 @@ static int floppy_revalidate(struct gendisk *disk)
lock_fdc(drive, false);
cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags));
- if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) {
+ if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
process_fd_request(); /*already done by another thread */
return 0;
}
@@ -3874,7 +3879,7 @@ static int floppy_revalidate(struct gendisk *disk)
clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
if (cf)
UDRS->generation++;
- if (NO_GEOM) {
+ if (drive_no_geom(drive)) {
/* auto-sensing */
res = __floppy_read_block_0(opened_bdev[drive]);
} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7ea0bea2f7e3..dbf31ec9114d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
#include <asm/uaccess.h>
-static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
@@ -395,11 +394,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct loop_device *lo = p->lo;
struct page *page = buf->page;
sector_t IV;
- int size, ret;
-
- ret = buf->ops->confirm(pipe, buf);
- if (unlikely(ret))
- return ret;
+ int size;
IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
(buf->offset >> 9);
@@ -1505,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo = bdev->bd_disk->private_data;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
- mutex_unlock(&loop_mutex);
return 0;
}
@@ -1519,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
int err;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt)
@@ -1544,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
- mutex_unlock(&loop_mutex);
return 0;
}
@@ -1645,6 +1636,9 @@ out:
static void loop_free(struct loop_device *lo)
{
+ if (!lo->lo_queue->queue_lock)
+ lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
+
blk_cleanup_queue(lo->lo_queue);
put_disk(lo->lo_disk);
list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41246f8..e6fc716aca45 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
#define DBG_BLKDEV 0x0100
#define DBG_RX 0x0200
#define DBG_TX 0x0400
-static DEFINE_MUTEX(nbd_mutex);
static unsigned int debugflags;
#endif /* NDEBUG */
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
- mutex_lock(&nbd_mutex);
mutex_lock(&lo->tx_lock);
error = __nbd_ioctl(bdev, lo, cmd, arg);
mutex_unlock(&lo->tx_lock);
- mutex_unlock(&nbd_mutex);
return error;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 19b3568e9326..77d70eebb6b2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2296,15 +2296,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
+ if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
goto out;
- if ((ret = bd_claim(pd->bdev, pd)))
- goto out_putdev;
-
if ((ret = pkt_get_last_written(pd, &lba))) {
printk(DRIVER_NAME": pkt_get_last_written failed\n");
- goto out_unclaim;
+ goto out_putdev;
}
set_capacity(pd->disk, lba << 2);
@@ -2314,7 +2311,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
q = bdev_get_queue(pd->bdev);
if (write) {
if ((ret = pkt_open_write(pd)))
- goto out_unclaim;
+ goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
@@ -2329,23 +2326,21 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
}
if ((ret = pkt_set_segment_merging(pd, q)))
- goto out_unclaim;
+ goto out_putdev;
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
printk(DRIVER_NAME": not enough memory for buffers\n");
ret = -ENOMEM;
- goto out_unclaim;
+ goto out_putdev;
}
printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
}
return 0;
-out_unclaim:
- bd_release(pd->bdev);
out_putdev:
- blkdev_put(pd->bdev, FMODE_READ);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
return ret;
}
@@ -2362,8 +2357,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- bd_release(pd->bdev);
- blkdev_put(pd->bdev, FMODE_READ);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
pkt_shrink_pktlist(pd);
}
@@ -2733,7 +2727,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
if (ret)
return ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 008d4a00b50d..e1e38b11f48a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1790,18 +1790,29 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
rc = rbd_bus_add_dev(rbd_dev);
if (rc)
- goto err_out_disk;
+ goto err_out_blkdev;
+
/* set up and announce blkdev mapping */
rc = rbd_init_disk(rbd_dev);
if (rc)
- goto err_out_blkdev;
+ goto err_out_bus;
return count;
+err_out_bus:
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ list_del_init(&rbd_dev->node);
+ mutex_unlock(&ctl_mutex);
+
+ /* this will also clean up rest of rbd_dev stuff */
+
+ rbd_bus_del_dev(rbd_dev);
+ kfree(options);
+ kfree(mon_dev_name);
+ return rc;
+
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_disk:
- rbd_free_disk(rbd_dev);
err_out_client:
rbd_put_client(rbd_dev);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 949ed09c6361..6dcd55a74c0a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
{ } /* Terminating entry */
};
@@ -47,46 +52,40 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
-struct ath3k_data {
- struct usb_device *udev;
- u8 *fw_data;
- u32 fw_size;
- u32 fw_sent;
-};
-
-static int ath3k_load_firmware(struct ath3k_data *data,
- unsigned char *firmware,
- int count)
+static int ath3k_load_firmware(struct usb_device *udev,
+ const struct firmware *firmware)
{
u8 *send_buf;
int err, pipe, len, size, sent = 0;
+ int count = firmware->size;
- BT_DBG("ath3k %p udev %p", data, data->udev);
+ BT_DBG("udev %p", udev);
- pipe = usb_sndctrlpipe(data->udev, 0);
+ pipe = usb_sndctrlpipe(udev, 0);
- if ((usb_control_msg(data->udev, pipe,
+ send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ if (!send_buf) {
+ BT_ERR("Can't allocate memory chunk for firmware");
+ return -ENOMEM;
+ }
+
+ memcpy(send_buf, firmware->data, 20);
+ if ((err = usb_control_msg(udev, pipe,
USB_REQ_DFU_DNLOAD,
USB_TYPE_VENDOR, 0, 0,
- firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+ send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
BT_ERR("Can't change to loading configuration err");
- return -EBUSY;
+ goto error;
}
sent += 20;
count -= 20;
- send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
- if (!send_buf) {
- BT_ERR("Can't allocate memory chunk for firmware");
- return -ENOMEM;
- }
-
while (count) {
size = min_t(uint, count, BULK_SIZE);
- pipe = usb_sndbulkpipe(data->udev, 0x02);
- memcpy(send_buf, firmware + sent, size);
+ pipe = usb_sndbulkpipe(udev, 0x02);
+ memcpy(send_buf, firmware->data + sent, size);
- err = usb_bulk_msg(data->udev, pipe, send_buf, size,
+ err = usb_bulk_msg(udev, pipe, send_buf, size,
&len, 3000);
if (err || (len != size)) {
@@ -112,57 +111,28 @@ static int ath3k_probe(struct usb_interface *intf,
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
- struct ath3k_data *data;
- int size;
BT_DBG("intf %p id %p", intf, id);
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->udev = udev;
-
if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- kfree(data);
return -EIO;
}
- size = max_t(uint, firmware->size, 4096);
- data->fw_data = kmalloc(size, GFP_KERNEL);
- if (!data->fw_data) {
+ if (ath3k_load_firmware(udev, firmware)) {
release_firmware(firmware);
- kfree(data);
- return -ENOMEM;
- }
-
- memcpy(data->fw_data, firmware->data, firmware->size);
- data->fw_size = firmware->size;
- data->fw_sent = 0;
- release_firmware(firmware);
-
- usb_set_intfdata(intf, data);
- if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
- usb_set_intfdata(intf, NULL);
- kfree(data->fw_data);
- kfree(data);
return -EIO;
}
+ release_firmware(firmware);
return 0;
}
static void ath3k_disconnect(struct usb_interface *intf)
{
- struct ath3k_data *data = usb_get_intfdata(intf);
-
BT_DBG("ath3k_disconnect intf %p", intf);
-
- kfree(data->fw_data);
- kfree(data);
}
static struct usb_driver ath3k_driver = {
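
The ath3k rework above drops the driver-private firmware copy and feeds firmware->data straight from request_firmware() to the transfer code. A hedged kernel-style sketch of that request/release pairing; load_chunks() is a placeholder for ath3k_load_firmware():

#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/usb.h>

/* Sketch only: load_chunks() stands in for ath3k_load_firmware(). */
static int example_load(struct usb_device *udev,
			int (*load_chunks)(struct usb_device *,
					   const struct firmware *))
{
	const struct firmware *fw;
	int ret;

	/* The firmware core hands back a read-only image; no private copy. */
	ret = request_firmware(&fw, "ath3k-1.fw", &udev->dev);
	if (ret < 0)
		return -EIO;

	ret = load_chunks(udev, fw);	/* uses fw->data and fw->size directly */

	release_firmware(fw);		/* released on success and on failure */
	return ret ? -EIO : 0;
}
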
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f899a2..700a3840fddc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc);
+ err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc);
+ usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
}
}
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
- usb_enable_autosuspend(interface_to_usbdev(intf));
-
return 0;
}
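
The btusb hunks above take the runtime-PM reference on the primary interface whenever no isochronous interface is present, so autosuspend is still held off on such devices. A small sketch of that fallback, reusing the ->isoc/->intf pair from the hunk (the struct and helper names are illustrative):

#include <linux/usb.h>

struct example_data {
	struct usb_interface *intf;	/* primary interface, always present */
	struct usb_interface *isoc;	/* isochronous interface, may be NULL */
};

static int example_pm_get(struct example_data *data)
{
	/* Fall back to the primary interface so the device is still kept
	 * awake when it exposes no separate isochronous interface. */
	return usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
}

static void example_pm_put(struct example_data *data)
{
	usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
}
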
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index af13c62dc473..e2c48a7eccff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi)
}
ENSURE(drive_status, CDC_DRIVE_STATUS );
- ENSURE(media_changed, CDC_MEDIA_CHANGED);
+ if (cdo->check_events == NULL && cdo->media_changed == NULL)
+ *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(lock_door, CDC_LOCK);
ENSURE(select_speed, CDC_SELECT_SPEED);
@@ -1348,7 +1349,10 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
if (!CDROM_CAN(CDC_SELECT_DISC))
return -EDRIVE_CANT_DO_THIS;
- (void) cdi->ops->media_changed(cdi, slot);
+ if (cdi->ops->check_events)
+ cdi->ops->check_events(cdi, 0, slot);
+ else
+ cdi->ops->media_changed(cdi, slot);
if (slot == CDSL_NONE) {
/* set media changed bits, on both queues */
@@ -1392,6 +1396,42 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
return slot;
}
+/*
+ * As cdrom implements an extra ioctl consumer for media changed
+ * event, it needs to buffer ->check_events() output, such that event
+ * is not lost for both the usual VFS and ioctl paths.
+ * cdi->{vfs|ioctl}_events are used to buffer pending events for each
+ * path.
+ *
+ * XXX: Locking is non-existent. cdi->ops->check_events() can be
+ * called in parallel and buffering fields are accessed without any
+ * exclusion. The original media_changed code had the same problem.
+ * It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl
+ * and remove this cruft altogether. It doesn't have much usefulness
+ * at this point.
+ */
+static void cdrom_update_events(struct cdrom_device_info *cdi,
+ unsigned int clearing)
+{
+ unsigned int events;
+
+ events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
+ cdi->vfs_events |= events;
+ cdi->ioctl_events |= events;
+}
+
+unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing)
+{
+ unsigned int events;
+
+ cdrom_update_events(cdi, clearing);
+ events = cdi->vfs_events;
+ cdi->vfs_events = 0;
+ return events;
+}
+EXPORT_SYMBOL(cdrom_check_events);
+
/* We want to make media_changed accessible to the user through an
* ioctl. The main problem now is that we must double-buffer the
* low-level implementation, to assure that the VFS and the user both
@@ -1403,15 +1443,26 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
{
unsigned int mask = (1 << (queue & 1));
int ret = !!(cdi->mc_flags & mask);
+ bool changed;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
- return ret;
+ return ret;
+
/* changed since last call? */
- if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) {
+ if (cdi->ops->check_events) {
+ BUG_ON(!queue); /* shouldn't be called from VFS path */
+ cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
+ changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
+ cdi->ioctl_events = 0;
+ } else
+ changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
+
+ if (changed) {
cdi->mc_flags = 0x3; /* set bit on both queues */
ret |= 1;
cdi->media_written = 0;
}
+
cdi->mc_flags &= ~mask; /* clear bit */
return ret;
}
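
The comment above cdrom_update_events() describes buffering one ->check_events() result for two consumers, the VFS path and the CDROM_MEDIA_CHANGED ioctl, so that neither loses an event the other has already consumed. A runnable C model of that double buffering, with a fake hardware poll standing in for ->check_events():

#include <stdio.h>

#define EV_MEDIA_CHANGE 0x1

static unsigned int vfs_events, ioctl_events;

/* Stand-in for cdi->ops->check_events(): report one media change, once. */
static unsigned int hw_check_events(void)
{
	static int fired;
	return fired++ ? 0 : EV_MEDIA_CHANGE;
}

/* One hardware poll feeds both pending-event buffers. */
static void update_events(void)
{
	unsigned int ev = hw_check_events();

	vfs_events |= ev;
	ioctl_events |= ev;
}

static unsigned int consume(unsigned int *pending)
{
	unsigned int ev;

	update_events();
	ev = *pending;
	*pending = 0;		/* clearing one buffer leaves the other intact */
	return ev;
}

int main(void)
{
	printf("vfs sees:   %#x\n", consume(&vfs_events));
	printf("ioctl sees: %#x\n", consume(&ioctl_events)); /* still reported */
	return 0;
}
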
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4a7776f4b77..b7980a83ce2d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -5,7 +5,7 @@
menu "Character devices"
config VT
- bool "Virtual terminal" if EMBEDDED
+ bool "Virtual terminal" if EXPERT
depends on !S390
select INPUT
default y
@@ -39,13 +39,13 @@ config VT
config CONSOLE_TRANSLATIONS
depends on VT
default y
- bool "Enable character translations in console" if EMBEDDED
+ bool "Enable character translations in console" if EXPERT
---help---
This enables support for font mapping and Unicode translation
on virtual consoles.
config VT_CONSOLE
- bool "Support for console on virtual terminal" if EMBEDDED
+ bool "Support for console on virtual terminal" if EXPERT
depends on VT
default y
---help---
@@ -426,10 +426,10 @@ config SGI_MBCS
If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N.
-source "drivers/serial/Kconfig"
+source "drivers/tty/serial/Kconfig"
config UNIX98_PTYS
- bool "Unix98 PTY support" if EMBEDDED
+ bool "Unix98 PTY support" if EXPERT
default y
---help---
A pseudo terminal (PTY) is a software device consisting of two
@@ -495,7 +495,7 @@ config LEGACY_PTY_COUNT
config TTY_PRINTK
bool "TTY driver to output user messages via printk"
- depends on EMBEDDED
+ depends on EXPERT
default n
---help---
If you say Y here, the support for writing user messages (i.e.
@@ -1047,15 +1047,6 @@ config NSC_GPIO
pc8736x_gpio drivers. If those drivers are built as
modules, this one will be too, named nsc_gpio
-config CS5535_GPIO
- tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
- depends on X86_32
- help
- Give userspace access to the GPIO pins on the AMD CS5535 and
- CS5536 Geode companion devices.
-
- If compiled as a module, it will be called cs5535_gpio.
-
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fa0b824b7a65..8238f89f73c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,17 +30,6 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
-obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
-obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
-obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
-obj-$(CONFIG_HVC_TILE) += hvc_tile.o
-obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
-obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
-obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
-obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
-obj-$(CONFIG_HVC_XEN) += hvc_xen.o
-obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
-obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
@@ -48,7 +37,6 @@ obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_VIOTAPE) += viotape.o
-obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_IBM_BSR) += bsr.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
@@ -82,7 +70,6 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
-obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index fcd867d923ba..d8b1b576556c 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@ config AGP_ATI
config AGP_AMD
tristate "AMD Irongate, 761, and 762 chipset support"
- depends on AGP && (X86_32 || ALPHA)
+ depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065f3c79..3e67ddde9e16 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
- void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b1b4362bc648..45681c0ff3b6 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,22 +41,8 @@ static int amd_create_page_map(struct amd_page_map *page_map)
if (page_map->real == NULL)
return -ENOMEM;
-#ifndef CONFIG_X86
- SetPageReserved(virt_to_page(page_map->real));
- global_cache_flush();
- page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
- PAGE_SIZE);
- if (page_map->remapped == NULL) {
- ClearPageReserved(virt_to_page(page_map->real));
- free_page((unsigned long) page_map->real);
- page_map->real = NULL;
- return -ENOMEM;
- }
- global_cache_flush();
-#else
set_memory_uc((unsigned long)page_map->real, 1);
page_map->remapped = page_map->real;
-#endif
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
static void amd_free_page_map(struct amd_page_map *page_map)
{
-#ifndef CONFIG_X86
- iounmap(page_map->remapped);
- ClearPageReserved(virt_to_page(page_map->real));
-#else
set_memory_wb((unsigned long)page_map->real, 1);
-#endif
free_page((unsigned long) page_map->real);
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85706ef..780498d76581 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids))
+ if (!pci_dev_present(amd_nb_misc_ids)) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
+ }
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0)
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
+ }
}
return err;
}
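
The agp_amd64_init() hunk above adds pci_unregister_driver() to each early-exit path so a failed initialization does not leave the PCI driver registered. A hedged kernel-style sketch of that pattern (example_pci_driver and bridges_found are placeholders; .name, .id_table and .probe are assumed to be filled in elsewhere):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver;	/* fields set elsewhere in the sketch */
static int bridges_found;			/* incremented by the probe routine */

static int __init example_init(void)
{
	int err = pci_register_driver(&example_pci_driver);

	if (err < 0)
		return err;

	/* Registration succeeded but probing bound no usable device:
	 * undo the registration before reporting -ENODEV. */
	if (bridges_found == 0) {
		pci_unregister_driver(&example_pci_driver);
		return -ENODEV;
	}
	return 0;
}
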
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a69cdd..a48e05b31593 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case AGPIOC_CHIPSET_FLUSH32:
- ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678ac0371..f30e0fd97963 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539a98b2..2e044338753c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
- DBG("");
- agp_flush_chipset(agp_bridge);
- return 0;
-}
-
static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
break;
case AGPIOC_CHIPSET_FLUSH:
- ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c8f9d5..012cba0d6d96 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@ static int agp_get_key(void)
return -1;
}
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
- if (bridge->driver->chipset_flush)
- bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
}
EXPORT_SYMBOL(agp_unbind_memory);
-/**
- * agp_rebind_emmory - Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
- struct agp_memory *curr;
- int ret_val = 0;
-
- spin_lock(&agp_bridge->mapped_lock);
- list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
- ret_val = curr->bridge->driver->insert_memory(curr,
- curr->pg_start,
- curr->type);
- if (ret_val != 0)
- break;
- }
- spin_unlock(&agp_bridge->mapped_lock);
- return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
/* End - Routines for handling swapping of agp_memory into the GATT */
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d52202..b0a0dccc98c1 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -717,8 +717,8 @@ static const struct intel_agp_driver_description {
{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
- { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
- { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
@@ -774,20 +774,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
- * If the device has not been properly setup, the following will catch
- * the problem and should stop the system from crashing.
- * 20030610 - hamish@zot.org
- */
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "can't enable PCI device\n");
- agp_put_bridge(bridge);
- return -ENODEV;
- }
-
- /*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
+ * This happens before pci_enable_device() intentionally;
+ * calling pci_enable_device() before assigning the resource
+ * will result in the GART being disabled on machines with such
+ * BIOSs (the GART ends up with a BAR starting at 0, which
+ * conflicts with a lot of other devices).
*/
r = &pdev->resource[0];
if (!r->start && r->end) {
@@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
}
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "can't enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
@@ -828,14 +833,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
- int ret_val;
bridge->driver->configure();
- ret_val = agp_rebind_memory();
- if (ret_val != 0)
- return ret_val;
-
return 0;
}
#endif
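
The agp_intel_probe() reordering above assigns the GART BAR before calling pci_enable_device(), for the reason spelled out in the moved comment: enabling the device while the BAR still reads as 0 leaves the GART conflicting with other devices. A hedged sketch of that ordering with the same PCI helpers (error handling trimmed, the function name is illustrative):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	struct resource *r = &pdev->resource[0];

	/* BIOS left the BAR unassigned (start == 0 but a size is known):
	 * assign it before enabling the device. */
	if (!r->start && r->end) {
		if (pci_assign_resource(pdev, 0)) {
			dev_err(&pdev->dev, "could not assign resource 0\n");
			return -ENODEV;
		}
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "can't enable PCI device\n");
		return -ENODEV;
	}
	return 0;
}
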
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df02504..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2 0x20c4
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
@@ -82,9 +84,17 @@
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK (3 << 8)
-#define G33_PGETBL_SIZE_1M (1 << 8)
-#define G33_PGETBL_SIZE_2M (2 << 8)
+#define G33_GMCH_SIZE_MASK (3 << 8)
+#define G33_GMCH_SIZE_1M (1 << 8)
+#define G33_GMCH_SIZE_2M (2 << 8)
+#define G4x_GMCH_SIZE_MASK (0xf << 8)
+#define G4x_GMCH_SIZE_1M (0x1 << 8)
+#define G4x_GMCH_SIZE_2M (0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+
+#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -120,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
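
The I965_PGETBL_SIZE_* encodings above are decoded by i965_gtt_total_entries() in the intel-gtt.c diff below: the size field selects a page-table size in KB, and dividing by 4 bytes per PTE gives the number of GTT entries. A small runnable C model of that decode (only the size values visible in this header are included):

#include <stdio.h>

#define KB(x) ((x) * 1024u)

/* Encodings from intel-agp.h above (bits of PGETBL_CTL). */
#define PGETBL_SIZE_MASK	0x0000000e
#define PGETBL_SIZE_512KB	(0 << 1)
#define PGETBL_SIZE_256KB	(1 << 1)
#define PGETBL_SIZE_1MB		(3 << 1)
#define PGETBL_SIZE_2MB		(4 << 1)
#define PGETBL_SIZE_1_5MB	(5 << 1)

/* Number of GTT entries: table size in bytes divided by 4 bytes per PTE. */
static unsigned int gtt_total_entries(unsigned int pgetbl_ctl)
{
	unsigned int size;

	switch (pgetbl_ctl & PGETBL_SIZE_MASK) {
	case PGETBL_SIZE_256KB:	size = KB(256);		break;
	case PGETBL_SIZE_512KB:	size = KB(512);		break;
	case PGETBL_SIZE_1MB:	size = KB(1024);	break;
	case PGETBL_SIZE_1_5MB:	size = KB(1024 + 512);	break;
	case PGETBL_SIZE_2MB:	size = KB(2048);	break;
	default:		size = KB(512);		break;	/* unknown: assume 512KB */
	}
	return size / 4;
}

int main(void)
{
	/* A 1MB table holds 256K PTEs, i.e. 1GB of GTT space at 4KB per page. */
	printf("%u entries\n", gtt_total_entries(PGETBL_SIZE_1MB));
	return 0;
}
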
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 29ac6d499fa6..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,10 +21,10 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>
/*
@@ -39,40 +39,12 @@
#define USE_PCI_DMA_API 0
#endif
-/* Max amount of stolen space, anything above will be returned to Linux */
-int intel_max_stolen = 32 * 1024 * 1024;
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
- .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-#define INTEL_AGP_UNCACHED_MEMORY 0
-#define INTEL_AGP_CACHED_MEMORY_LLC 1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-
struct intel_gtt_driver {
unsigned int gen : 8;
unsigned int is_g33 : 1;
unsigned int is_pineview : 1;
unsigned int is_ironlake : 1;
+ unsigned int has_pgtbl_enable : 1;
unsigned int dma_mask_size : 8;
/* Chipset specific GTT setup */
int (*setup)(void);
@@ -95,14 +67,12 @@ static struct _intel_private {
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
phys_addr_t gma_bus_addr;
- phys_addr_t pte_bus_addr;
+ u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
+ bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
- struct page *i8xx_page;
+ void __iomem *i9xx_flush_page;
+ char *i81x_gtt_table;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -113,42 +83,31 @@ static struct _intel_private {
#define IS_G33 intel_private.driver->is_g33
#define IS_PINEVIEW intel_private.driver->is_pineview
#define IS_IRONLAKE intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
- struct sg_table st;
-
- st.sgl = mem->sg_list;
- st.orig_nents = st.nents = mem->page_count;
-
- sg_free_table(&st);
-
- mem->sg_list = NULL;
- mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+ struct scatterlist **sg_list, int *num_sg)
{
struct sg_table st;
struct scatterlist *sg;
int i;
- if (mem->sg_list)
+ if (*sg_list)
return 0; /* already mapped (e.g. for resume) */
- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+ DBG("try mapping %lu pages\n", (unsigned long)num_entries);
- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
goto err;
- mem->sg_list = sg = st.sgl;
+ *sg_list = sg = st.sgl;
- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+ for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg))
+ *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+ num_entries, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!*num_sg))
goto err;
return 0;
@@ -157,90 +116,22 @@ err:
sg_free_table(&st);
return -ENOMEM;
}
+EXPORT_SYMBOL(intel_gtt_map_memory);
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
+ struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
- pci_unmap_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- intel_agp_free_sglist(mem);
-}
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- struct aper_size_info_fixed *values;
-
- pci_read_config_dword(intel_private.bridge_dev,
- I810_SMRAM_MISCC, &smram_miscc);
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge->current_size = (void *) (values);
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- if (!intel_private.registers) {
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- dev_err(&intel_private.pcidev->dev,
- "can't remap memory\n");
- return -ENOMEM;
- }
- }
+ pci_unmap_sg(intel_private.pcidev, sg_list,
+ num_sg, PCI_DMA_BIDIRECTIONAL);
- if ((readl(intel_private.registers+I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- dev_info(&intel_private.pcidev->dev,
- "detected 4MB dedicated video ram\n");
- intel_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = 0; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
- }
- global_cache_flush();
- return 0;
-}
+ st.sgl = sg_list;
+ st.orig_nents = st.nents = num_sg;
-static void intel_i810_cleanup(void)
-{
- writel(0, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers); /* PCI Posting. */
- iounmap(intel_private.registers);
+ sg_free_table(&st);
}
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
@@ -277,80 +168,64 @@ static void i8xx_destroy_pages(struct page *page)
atomic_dec(&agp_bridge->current_memory_agp);
}
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
+ u32 reg_addr;
+ char *gtt_table;
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
+ /* i81x does not preallocate the gtt. It's always 64kb in size. */
+ gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+ if (gtt_table == NULL)
+ return -ENOMEM;
+ intel_private.i81x_gtt_table = gtt_table;
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
- ret = -EBUSY;
- goto out_err;
- }
- }
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
+ return -ENOMEM;
- if (type != mem->type)
- goto out_err;
+ writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- switch (mask_type) {
- case AGP_DCACHE_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
- intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
- break;
- case AGP_PHYS_MEMORY:
- case AGP_NORMAL_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- break;
- default:
- goto out_err;
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
}
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
+ return 0;
}
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
- if (mem->page_count == 0)
- return 0;
+ if ((pg_start + mem->page_count)
+ > intel_private.num_dcache_entries)
+ return -EINVAL;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ dma_addr_t addr = i << PAGE_SHIFT;
+ intel_private.driver->write_entry(addr,
+ i, type);
}
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ readl(intel_private.gtt+i-1);
return 0;
}
@@ -397,29 +272,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
return new;
}
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- struct agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_private.num_dcache_entries)
- return NULL;
-
- new = agp_create_memory(1);
- if (new == NULL)
- return NULL;
-
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- agp_free_page_array(new);
- return new;
- }
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- return NULL;
-}
-
static void intel_i810_free_by_type(struct agp_memory *curr)
{
agp_free_key(curr->key);
@@ -437,13 +289,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
kfree(curr);
}
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
static int intel_gtt_setup_scratch_page(void)
{
struct page *page;
@@ -455,7 +300,7 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ if (intel_private.base.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +315,45 @@ static int intel_gtt_setup_scratch_page(void)
return 0;
}
-static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
+
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {32, 8192, 3},
+ {64, 16384, 4},
{128, 32768, 5},
- /* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
{256, 65536, 6},
{512, 131072, 7},
};
-static unsigned int intel_gtt_stolen_entries(void)
+static unsigned int intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- unsigned int overhead_entries, stolen_entries;
unsigned int stolen_size = 0;
+ if (INTEL_GTT_GEN == 1)
+ return 0; /* no stolen mem on i81x */
+
pci_read_config_word(intel_private.bridge_dev,
I830_GMCH_CTRL, &gmch_ctrl);
- if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
- overhead_entries = 0;
- else
- overhead_entries = intel_private.base.gtt_mappable_entries
- / 1024;
-
- overhead_entries += 1; /* BIOS popup */
-
if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +479,7 @@ static unsigned int intel_gtt_stolen_entries(void)
}
}
- if (!local && stolen_size > intel_max_stolen) {
- dev_info(&intel_private.bridge_dev->dev,
- "detected %dK stolen memory, trimming to %dK\n",
- stolen_size / KB(1), intel_max_stolen / KB(1));
- stolen_size = intel_max_stolen;
- } else if (stolen_size > 0) {
+ if (stolen_size > 0) {
dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
stolen_size / KB(1), local ? "local" : "stolen");
} else {
@@ -637,46 +488,88 @@ static unsigned int intel_gtt_stolen_entries(void)
stolen_size = 0;
}
- stolen_entries = stolen_size/KB(4) - overhead_entries;
+ return stolen_size;
+}
- return stolen_entries;
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+ u32 pgetbl_ctl, pgetbl_ctl2;
+
+ /* ensure that ppgtt is disabled */
+ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+ pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+ writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+ /* write the new ggtt size */
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+ pgetbl_ctl |= size_flag;
+ writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
-static unsigned int intel_gtt_total_entries(void)
+static unsigned int i965_gtt_total_entries(void)
{
int size;
+ u32 pgetbl_ctl;
+ u16 gmch_ctl;
- if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctl);
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = KB(128);
- break;
- case I965_PGETBL_SIZE_256KB:
- size = KB(256);
- break;
- case I965_PGETBL_SIZE_512KB:
- size = KB(512);
+ if (INTEL_GTT_GEN == 5) {
+ switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+ case G4x_GMCH_SIZE_1M:
+ case G4x_GMCH_SIZE_VT_1M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
break;
- case I965_PGETBL_SIZE_1MB:
- size = KB(1024);
+ case G4x_GMCH_SIZE_VT_1_5M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
break;
- case I965_PGETBL_SIZE_2MB:
- size = KB(2048);
+ case G4x_GMCH_SIZE_2M:
+ case G4x_GMCH_SIZE_VT_2M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
break;
- case I965_PGETBL_SIZE_1_5MB:
- size = KB(1024 + 512);
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = KB(512);
}
+ }
- return size/4;
- } else if (INTEL_GTT_GEN == 6) {
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
+ return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+ return i965_gtt_total_entries();
+ else if (INTEL_GTT_GEN == 6) {
u16 snb_gmch_ctl;
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +599,18 @@ static unsigned int intel_gtt_mappable_entries(void)
{
unsigned int aperture_size;
- if (INTEL_GTT_GEN == 2) {
+ if (INTEL_GTT_GEN == 1) {
+ u32 smram_miscc;
+
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
+
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+ == I810_GFX_MEM_WIN_32M)
+ aperture_size = MB(32);
+ else
+ aperture_size = MB(64);
+ } else if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +643,7 @@ static void intel_gtt_cleanup(void)
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
-
+
intel_gtt_teardown_scratch_page();
}
@@ -755,6 +659,14 @@ static int intel_gtt_init(void)
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+ /* save the PGETBL reg for resume */
+ intel_private.PGETBL_save =
+ readl(intel_private.registers+I810_PGETBL_CTL)
+ & ~I810_PGETBL_ENABLED;
+ /* we only ever restore the register when enabling the PGTBL... */
+ if (HAS_PGTBL_EN)
+ intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
intel_private.base.gtt_total_entries * 4,
@@ -772,14 +684,9 @@ static int intel_gtt_init(void)
global_cache_flush(); /* FIXME: ? */
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
- if (intel_private.base.gtt_stolen_entries == 0) {
- intel_private.driver->cleanup();
- iounmap(intel_private.registers);
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
+ intel_private.base.stolen_size = intel_gtt_stolen_size();
+
+ intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@@ -812,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
- if (intel_private.i8xx_flush_page) {
- kunmap(intel_private.i8xx_flush_page);
- intel_private.i8xx_flush_page = NULL;
- }
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -848,39 +733,46 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
- unsigned int *pg = intel_private.i8xx_flush_page;
-
- memset(pg, 0, 1024);
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+ /* So far we have only seen documentation for this magic bit on 855GM;
+ * we hope it exists for the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
+
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ udelay(50);
+ }
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
u32 pte_flags = I810_PTE_VALID;
-
- switch (flags) {
- case AGP_DCACHE_MEMORY:
- pte_flags |= I810_PTE_LOCAL;
- break;
- case AGP_USER_CACHED_MEMORY:
+
+ if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED;
- break;
- }
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static void intel_enable_gtt(void)
+static bool intel_enable_gtt(void)
{
u32 gma_addr;
- u16 gmch_ctrl;
+ u8 __iomem *reg;
- if (INTEL_GTT_GEN == 2)
+ if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
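
The new i830_chipset_flush() above sets bit 31 of I830_HIC and then polls until the hardware clears it again, giving up after roughly one second. A runnable userspace model of that bounded polling loop; the fake register read stands in for the MMIO access:

#include <stdio.h>
#include <time.h>

#define FLUSH_BIT (1u << 31)

static unsigned int fake_hic = FLUSH_BIT;	/* pretend the flush is in progress */

static unsigned int read_hic(void)
{
	/* Hardware clears the bit when the flush completes; simulate that
	 * after a few polls. */
	static int polls;

	if (++polls > 3)
		fake_hic &= ~FLUSH_BIT;
	return fake_hic;
}

int main(void)
{
	time_t timeout = time(NULL) + 1;	/* ~1 second budget, like the driver */

	while (read_hic() & FLUSH_BIT) {
		if (time(NULL) > timeout) {
			puts("flush timed out");
			return 1;
		}
		/* the driver udelay()s 50us here before polling again */
	}
	puts("flush completed");
	return 0;
}
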
@@ -889,13 +781,47 @@ static void intel_enable_gtt(void)
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+ if (INTEL_GTT_GEN >= 6)
+ return true;
- writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
- intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, gmch_ctrl);
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: GMCH_CTRL=%x\n",
+ gmch_ctrl);
+ return false;
+ }
+ }
+
+ /* On the resume path we may be adjusting the PGTBL value, so
+ * be paranoid and flush all chipset write buffers...
+ */
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ reg = intel_private.registers+I810_PGETBL_CTL;
+ writel(intel_private.PGETBL_save, reg);
+ if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: PGETBL=%x [expected %x]\n",
+ readl(reg), intel_private.PGETBL_save);
+ return false;
+ }
+
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ return true;
}
static int i830_setup(void)
@@ -910,10 +836,6 @@ static int i830_setup(void)
return -ENOMEM;
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_private.pte_bus_addr =
- readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
- intel_i830_setup_flush();
return 0;
}
@@ -934,21 +856,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
static int intel_fake_agp_configure(void)
{
- int i;
-
- intel_enable_gtt();
+ if (!intel_enable_gtt())
+ return -EIO;
+ intel_private.clear_fake_agp = true;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- for (i = intel_private.base.gtt_stolen_entries;
- i < intel_private.base.gtt_total_entries; i++) {
- intel_private.driver->write_entry(intel_private.scratch_page_dma,
- i, 0);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
-
- global_cache_flush();
-
return 0;
}
@@ -965,10 +878,10 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
- unsigned int sg_len,
- unsigned int pg_start,
- unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
{
struct scatterlist *sg;
unsigned int len, m;
@@ -989,27 +902,41 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
}
readl(intel_private.gtt+j-1);
}
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ struct page **pages, unsigned int flags)
+{
+ int i, j;
+
+ for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+ dma_addr_t addr = page_to_phys(pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ }
+ readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
- int i, j;
int ret = -EINVAL;
- if (mem->page_count == 0)
- goto out;
+ if (intel_private.clear_fake_agp) {
+ int start = intel_private.base.stolen_size / PAGE_SIZE;
+ int end = intel_private.base.gtt_mappable_entries;
+ intel_gtt_clear_range(start, end - start);
+ intel_private.clear_fake_agp = false;
+ }
- if (pg_start < intel_private.base.gtt_stolen_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
- pg_start, intel_private.base.gtt_stolen_entries);
+ if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+ return i810_insert_dcache_entries(mem, pg_start, type);
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
+ if (mem->page_count == 0)
+ goto out;
- if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+ if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
goto out_err;
if (type != mem->type)
@@ -1021,21 +948,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
- ret = intel_agp_map_memory(mem);
+ if (intel_private.base.needs_dmar) {
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+ &mem->sg_list, &mem->num_sg);
if (ret != 0)
return ret;
intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
pg_start, type);
- } else {
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- dma_addr_t addr = page_to_phys(mem->pages[i]);
- intel_private.driver->write_entry(addr,
- j, type);
- }
- readl(intel_private.gtt+j-1);
- }
+ } else
+ intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
out:
ret = 0;
@@ -1044,40 +967,54 @@ out_err:
return ret;
}
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+ unsigned int i;
+
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
+ }
+ readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
- int i;
-
if (mem->page_count == 0)
return 0;
- if (pg_start < intel_private.base.gtt_stolen_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
- intel_agp_unmap_memory(mem);
+ intel_gtt_clear_range(pg_start, mem->page_count);
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- intel_private.driver->write_entry(intel_private.scratch_page_dma,
- i, 0);
+ if (intel_private.base.needs_dmar) {
+ intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
}
- readl(intel_private.gtt+i-1);
return 0;
}
-static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-{
- intel_private.driver->chipset_flush();
-}
-
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
int type)
{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
/* always return NULL for other allocation types for now */
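
With intel_gtt_map_memory(), intel_gtt_insert_sg_entries(), intel_gtt_clear_range() and intel_gtt_unmap_memory() exported by this patch, a GPU driver can bind and unbind a range of pages without going through the fake-AGP layer. A hedged sketch of that flow, assuming the declarations live in <drm/intel-gtt.h> (the object struct and its field names are hypothetical):

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <drm/intel-gtt.h>

struct example_obj {
	struct page **pages;
	unsigned int num_pages;
	unsigned int gtt_offset;	/* in GTT entries, i.e. 4KB pages */
	struct scatterlist *sg_list;
	int num_sg;
};

static int example_bind(struct example_obj *obj, unsigned int flags)
{
	int ret;

	/* DMA-map the pages (required when needs_dmar is set)... */
	ret = intel_gtt_map_memory(obj->pages, obj->num_pages,
				   &obj->sg_list, &obj->num_sg);
	if (ret)
		return ret;

	/* ...then write a PTE for every mapped segment. */
	intel_gtt_insert_sg_entries(obj->sg_list, obj->num_sg,
				    obj->gtt_offset, flags);
	return 0;
}

static void example_unbind(struct example_obj *obj)
{
	/* Point the range back at the scratch page, then drop the mapping. */
	intel_gtt_clear_range(obj->gtt_offset, obj->num_pages);
	intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
	obj->sg_list = NULL;
	obj->num_sg = 0;
}
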
@@ -1274,40 +1211,11 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
- intel_private.pte_bus_addr =
- readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
intel_i9xx_setup_flush();
return 0;
}
-static const struct agp_bridge_driver intel_810_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i810_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
- .needs_scratch_page = true,
- .configure = intel_i810_configure,
- .fetch_size = intel_i810_fetch_size,
- .cleanup = intel_i810_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_fake_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = agp_generic_create_gatt_table,
- .free_gatt_table = agp_generic_free_gatt_table,
- .insert_memory = intel_i810_insert_entries,
- .remove_memory = intel_i810_remove_entries,
- .alloc_by_type = intel_i810_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
@@ -1328,15 +1236,20 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .chipset_flush = intel_fake_agp_chipset_flush,
};
static const struct intel_gtt_driver i81x_gtt_driver = {
.gen = 1,
+ .has_pgtbl_enable = 1,
.dma_mask_size = 32,
+ .setup = i810_setup,
+ .cleanup = i810_cleanup,
+ .check_flags = i830_check_flags,
+ .write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
+ .has_pgtbl_enable = 1,
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
@@ -1346,10 +1259,11 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
};
static const struct intel_gtt_driver i915_gtt_driver = {
.gen = 3,
+ .has_pgtbl_enable = 1,
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
- .write_entry = i830_write_entry,
+ .write_entry = i830_write_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1376,6 +1290,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
};
static const struct intel_gtt_driver i965_gtt_driver = {
.gen = 4,
+ .has_pgtbl_enable = 1,
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
@@ -1419,93 +1334,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
static const struct intel_gtt_driver_description {
unsigned int gmch_chip_id;
char *name;
- const struct agp_bridge_driver *gmch_driver;
const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
&i81x_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
&i81x_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
- { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
- &intel_fake_agp_driver, &i8xx_gtt_driver},
+ &i8xx_gtt_driver},
{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
- &intel_fake_agp_driver, &i915_gtt_driver },
+ &i915_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
- &intel_fake_agp_driver, &i965_gtt_driver },
+ &i965_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
- &intel_fake_agp_driver, &g33_gtt_driver },
+ &g33_gtt_driver },
{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
- &intel_fake_agp_driver, &pineview_gtt_driver },
+ &pineview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
- &intel_fake_agp_driver, &pineview_gtt_driver },
+ &pineview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
- &intel_fake_agp_driver, &g4x_gtt_driver },
+ &g4x_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
- "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ "HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ "HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ "Sandybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1530,21 +1444,20 @@ int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
int i, mask;
- bridge->driver = NULL;
+ intel_private.driver = NULL;
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_gtt_chipsets[i].gmch_driver;
- intel_private.driver =
+ intel_private.driver =
intel_gtt_chipsets[i].gtt_driver;
break;
}
}
- if (!bridge->driver)
+ if (!intel_private.driver)
return 0;
+ bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = pdev;
@@ -1560,8 +1473,8 @@ int intel_gmch_probe(struct pci_dev *pdev,
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask));
- if (bridge->driver == &intel_810_driver)
- return 1;
+ /*if (bridge->driver == &intel_810_driver)
+ return 1;*/
if (intel_gtt_init() != 0)
return 0;
@@ -1570,12 +1483,19 @@ int intel_gmch_probe(struct pci_dev *pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+const struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
+void intel_gtt_chipset_flush(void)
+{
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
void intel_gmch_remove(struct pci_dev *pdev)
{
if (intel_private.pcidev)
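
intel_gtt_chipset_flush() above replaces the bridge ->chipset_flush hook that the AGP frontend used to call, so GTT users flush chipset write buffers directly after updating PTEs. A brief hedged sketch of such a caller (the wrapper function is hypothetical):

#include <linux/mm.h>
#include <drm/intel-gtt.h>

/* After writing new GTT entries, flush chipset write buffers so the GPU
 * sees coherent data; this is a no-op on chipsets without a flush hook. */
static void example_finish_bind(unsigned int first, unsigned int count,
				struct page **pages, unsigned int flags)
{
	intel_gtt_insert_pages(first, count, pages, flags);
	intel_gtt_chipset_flush();
}
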
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index e397df3ad98e..16402445f2b2 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -183,16 +183,16 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
}
#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
-# define acquire_console_sem()
-# define release_console_sem()
+# define console_lock()
+# define console_unlock()
#endif
static int
bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
int i;
- acquire_console_sem();
+ console_lock();
i = bfin_jc_circ_write(buf, count);
- release_console_sem();
+ console_unlock();
wake_up_process(bfin_jc_kthread);
return i;
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 794aacb715c1..d0387a84eec1 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -24,6 +24,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hw_random.h>
@@ -34,7 +35,6 @@
#include <asm/i387.h>
-#define PFX KBUILD_MODNAME ": "
enum {
@@ -81,8 +81,7 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
- :"=m"(*addr), "=a"(eax_out)
- :"D"(addr), "d"(edx_in));
+ : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
irq_ts_restore(ts_state);
return eax_out;
@@ -90,8 +89,10 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
static int via_rng_data_present(struct hwrng *rng, int wait)
{
+ char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+ ((aligned(STACK_ALIGN)));
+ u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
u32 bytes_out;
- u32 *via_rng_datum = (u32 *)(&rng->priv);
int i;
/* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -115,6 +116,7 @@ static int via_rng_data_present(struct hwrng *rng, int wait)
break;
udelay(10);
}
+ rng->priv = *via_rng_datum;
return bytes_out ? 1 : 0;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2fe72f8edf44..38223e93aa98 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -970,6 +970,33 @@ out_kfree:
}
EXPORT_SYMBOL(ipmi_create_user);
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+ int rv = 0;
+ ipmi_smi_t intf;
+ struct ipmi_smi_handlers *handlers;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
+
+found:
+ handlers = intf->handlers;
+ rv = -ENOSYS;
+ if (handlers->get_smi_info)
+ rv = handlers->get_smi_info(intf->send_info, data);
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
static void free_user(struct kref *ref)
{
ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 035da9e64a17..7855f9f45b8e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -57,6 +57,7 @@
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
@@ -69,6 +70,8 @@
#ifdef CONFIG_PPC_OF
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#endif
#define PFX "ipmi_si: "
@@ -107,10 +110,6 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
-enum ipmi_addr_src {
- SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
- SI_PCI, SI_DEVICETREE, SI_DEFAULT
-};
static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
"ACPI", "SMBIOS", "PCI",
"device-tree", "default" };
@@ -291,6 +290,7 @@ struct smi_info {
struct task_struct *thread;
struct list_head link;
+ union ipmi_smi_info_union addr_info;
};
#define smi_inc_stat(smi, stat) \
@@ -320,6 +320,7 @@ static int unload_when_empty = 1;
static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
@@ -1186,6 +1187,18 @@ static int smi_start_processing(void *send_info,
return 0;
}
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct smi_info *smi = send_info;
+
+ data->addr_src = smi->addr_source;
+ data->dev = smi->dev;
+ data->addr_info = smi->addr_info;
+ get_device(smi->dev);
+
+ return 0;
+}
+
static void set_maintenance_mode(void *send_info, int enable)
{
struct smi_info *smi_info = send_info;
@@ -1197,6 +1210,7 @@ static void set_maintenance_mode(void *send_info, int enable)
static struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
+ .get_smi_info = get_smi_info,
.sender = sender,
.request_events = request_events,
.set_maintenance_mode = set_maintenance_mode,
@@ -1928,7 +1942,8 @@ static void __devinit hardcode_find_bmc(void)
static int acpi_failure;
/* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(void *context)
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
@@ -2156,6 +2171,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
printk(KERN_INFO PFX "probing via ACPI\n");
handle = acpi_dev->handle;
+ info->addr_info.acpi_info.acpi_handle = handle;
/* _IFT tells us the interface type: KCS, BT, etc */
status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
@@ -2546,7 +2562,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
{
struct smi_info *info;
struct resource resource;
- const int *regsize, *regspacing, *regshift;
+ const __be32 *regsize, *regspacing, *regshift;
struct device_node *np = dev->dev.of_node;
int ret;
int proplen;
@@ -2599,9 +2615,9 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
info->io.addr_data = resource.start;
- info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
- info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
- info->io.regshift = regshift ? *regshift : 0;
+ info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
info->dev = &dev->dev;
@@ -3435,16 +3451,7 @@ static int __devinit init_ipmi_si(void)
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
- if (pci_registered)
- pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
- if (of_registered)
- of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
- driver_unregister(&ipmi_driver.driver);
+ cleanup_ipmi_si();
printk(KERN_WARNING PFX
"Unable to find any System Interface(s)\n");
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f2536e..320668f4c3aa 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
{
struct die_args *args = data;
- if (val != DIE_NMI)
+ if (val != DIE_NMIUNKNOWN)
return NOTIFY_OK;
/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a2e603..bcbbc71febb7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = ptsreq.protocol =
- (0x01 << dev->proto);
+ ptsreq.protocol = (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4d691d..444155a305ae 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
- struct resource *io_resource;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
if (ret)
return ret;
- io_resource = request_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit;
+ }
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
- goto exit2;
+ goto exit1;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
- ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
- request_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit2;
+ }
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
- goto exit2;
+ goto exit3;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
- request_mem_region(p_dev->resource[3]->start,
- resource_size(p_dev->resource[3]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit4;
+ }
return 0;
+exit4:
+ iounmap(ipw->attr_memory);
exit3:
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
exit2:
- if (ipw->common_memory) {
- release_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]));
- iounmap(ipw->common_memory);
- }
+ iounmap(ipw->common_memory);
exit1:
- release_resource(io_resource);
+ release_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
+exit:
pcmcia_disable_device(p_dev);
- return -1;
+ return ret;
}
static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
+ release_region(ipw->link->resource[0]->start,
+ resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index d3d63be2cd37..1a9f5f6d6ac5 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -30,7 +30,7 @@
#define RAMOOPS_KERNMSG_HDR "===="
-#define RECORD_SIZE 4096
+#define RECORD_SIZE 4096UL
static ulong mem_address;
module_param(mem_address, ulong, 0400);
@@ -68,11 +68,16 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
char *buf, *buf_orig;
struct timeval timestamp;
+ if (reason != KMSG_DUMP_OOPS &&
+ reason != KMSG_DUMP_PANIC &&
+ reason != KMSG_DUMP_KEXEC)
+ return;
+
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
- buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+ buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
buf_orig = buf;
memset(buf, '\0', RECORD_SIZE);
@@ -83,8 +88,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
buf += res;
hdr_size = buf - buf_orig;
- l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
- l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
+ l2_cpy = min(l2, RECORD_SIZE - hdr_size);
+ l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index bfe25ea9766b..b4b9d5a47885 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -65,15 +65,12 @@ static int raw_open(struct inode *inode, struct file *filp)
if (!bdev)
goto out;
igrab(bdev->bd_inode);
- err = blkdev_get(bdev, filp->f_mode);
+ err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
if (err)
goto out;
- err = bd_claim(bdev, raw_open);
- if (err)
- goto out1;
err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
- goto out2;
+ goto out1;
filp->f_flags |= O_DIRECT;
filp->f_mapping = bdev->bd_inode->i_mapping;
if (++raw_devices[minor].inuse == 1)
@@ -83,10 +80,8 @@ static int raw_open(struct inode *inode, struct file *filp)
mutex_unlock(&raw_mutex);
return 0;
-out2:
- bd_release(bdev);
out1:
- blkdev_put(bdev, filp->f_mode);
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
mutex_unlock(&raw_mutex);
return err;
@@ -110,8 +105,7 @@ static int raw_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&raw_mutex);
- bd_release(bdev);
- blkdev_put(bdev, filp->f_mode);
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
return 0;
}
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
index 4be62eda9fbc..e8c52c882b21 100644
--- a/drivers/char/snsc.h
+++ b/drivers/char/snsc.h
@@ -19,7 +19,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
-#include <linux/kobject.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/semaphore.h>
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c17a305ecb28..dd21df55689d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -493,9 +493,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
- if (is_itpm(to_pnp_dev(dev)))
- itpm = 1;
-
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
@@ -637,6 +634,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
else
interrupts = 0;
+ if (is_itpm(pnp_dev))
+ itpm = 1;
+
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ced1d27..84b164d1eb2b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
- * Copyright (C) 2009, 2010 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
#include <linux/virtio_console.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
-#include "hvc_console.h"
+#include "../tty/hvc/hvc_console.h"
/*
* This is a global struct for storing common data for all the devices
@@ -387,6 +388,10 @@ static void discard_port_data(struct port *port)
unsigned int len;
int ret;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
vq = port->in_vq;
if (port->inbuf)
buf = port->inbuf;
@@ -469,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
void *buf;
unsigned int len;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
kfree(buf);
port->outvq_full = false;
@@ -1462,6 +1471,17 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->cvq_lock);
}
+static void out_intr(struct virtqueue *vq)
+{
+ struct port *port;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port)
+ return;
+
+ wake_up_interruptible(&port->waitqueue);
+}
+
static void in_intr(struct virtqueue *vq)
{
struct port *port;
@@ -1566,7 +1586,7 @@ static int init_vqs(struct ports_device *portdev)
*/
j = 0;
io_callbacks[j] = in_intr;
- io_callbacks[j + 1] = NULL;
+ io_callbacks[j + 1] = out_intr;
io_names[j] = "input";
io_names[j + 1] = "output";
j += 2;
@@ -1580,7 +1600,7 @@ static int init_vqs(struct ports_device *portdev)
for (i = 1; i < nr_ports; i++) {
j += 2;
io_callbacks[j] = in_intr;
- io_callbacks[j + 1] = NULL;
+ io_callbacks[j + 1] = out_intr;
io_names[j] = "input";
io_names[j + 1] = "output";
}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index cfb0f5278415..effe7974aa9a 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -202,17 +202,21 @@ static int __init init_acpi_pm_clocksource(void)
printk(KERN_INFO "PM-Timer had inconsistent results:"
" 0x%#llx, 0x%#llx - aborting.\n",
value1, value2);
+ pmtmr_ioport = 0;
return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
printk(KERN_INFO "PM-Timer failed consistency check "
" (0x%#llx) - aborting.\n", value1);
+ pmtmr_ioport = 0;
return -ENODEV;
}
}
- if (verify_pmtmr_rate() != 0)
+ if (verify_pmtmr_rate() != 0) {
+ pmtmr_ioport = 0;
return -ENODEV;
+ }
return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 01b886e68822..79c47e88d5d1 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -196,9 +196,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);
- setup_irq(irq, &tc_irqaction);
-
clockevents_register_device(&clkevt.clkevt);
+
+ setup_irq(irq, &tc_irqaction);
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a8c8d9c19d74..ca8ee8093d6c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
config CPU_FREQ_DEFAULT_GOV_POWERSAVE
bool "powersave"
- depends on EMBEDDED
+ depends on EXPERT
select CPU_FREQ_GOV_POWERSAVE
help
Use the CPUFreq governor 'powersave' as default. This sets
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
+ if (ret)
+ goto err_null_driver;
- if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
dprintk("no CPU initialized for driver %s\n",
driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ goto err_sysdev_unreg;
}
}
- if (!ret) {
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
- }
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ dprintk("driver %s up and running\n", driver_data->name);
+ cpufreq_debug_enable_ratelimit();
+ return 0;
+err_sysdev_unreg:
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+err_null_driver:
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 386888f10df0..bf5092455a8f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -96,7 +96,15 @@ static void cpuidle_idle_call(void)
/* enter the state and update stats */
dev->last_state = target_state;
+
+ trace_power_start(POWER_CSTATE, next_state, dev->cpu);
+ trace_cpu_idle(next_state, dev->cpu);
+
dev->last_residency = target_state->enter(dev, target_state);
+
+ trace_power_end(dev->cpu);
+ trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+
if (dev->last_state)
target_state = dev->last_state;
@@ -106,8 +114,6 @@ static void cpuidle_idle_call(void)
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
- trace_power_end(smp_processor_id());
- trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
/**
@@ -155,6 +161,45 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+ ktime_t t1, t2;
+ s64 diff;
+ int ret;
+
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
+
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ ret = (int) diff;
+ return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+ struct cpuidle_state *state = &dev->states[0];
+
+ cpuidle_set_statedata(state, NULL);
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = 0;
+ state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* cpuidle_enable_device - enables idle PM for a CPU
* @dev: the CPU
@@ -179,6 +224,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret;
}
+ poll_idle_init(dev);
+
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
@@ -233,45 +280,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
-{
- ktime_t t1, t2;
- s64 diff;
- int ret;
-
- t1 = ktime_get();
- local_irq_enable();
- while (!need_resched())
- cpu_relax();
-
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- ret = (int) diff;
- return ret;
-}
-
-static void poll_idle_init(struct cpuidle_device *dev)
-{
- struct cpuidle_state *state = &dev->states[0];
-
- cpuidle_set_statedata(state, NULL);
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->flags = CPUIDLE_FLAG_POLL;
- state->enter = poll_idle;
-}
-#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* __cpuidle_register_device - internal register function called before register
* and enable routines
@@ -292,8 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
init_completion(&dev->kobj_unregister);
- poll_idle_init(dev);
-
/*
* cpuidle driver should set the dev->power_specified bit
* before registering the device if the driver provides
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e578df5..c99305afa58a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
printk(KERN_WARNING MV_CESA
"Base driver '%s' could not be loaded!\n",
base_hash_name);
- err = PTR_ERR(fallback_tfm);
+ err = PTR_ERR(base_hash);
goto err_bad_base;
}
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 76141262ea1d..80dc094e78c6 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@ out:
return err;
}
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
{
mutex_lock(&spu_lock);
if (!--algs_registered)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca517c121..add2a1a72ba4 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
#define FLAGS_CBC BIT(1)
#define FLAGS_GIV BIT(2)
-#define FLAGS_NEW_KEY BIT(4)
-#define FLAGS_NEW_IV BIT(5)
-#define FLAGS_INIT BIT(6)
-#define FLAGS_FAST BIT(7)
-#define FLAGS_BUSY 8
+#define FLAGS_INIT BIT(4)
+#define FLAGS_FAST BIT(5)
+#define FLAGS_BUSY BIT(6)
struct omap_aes_ctx {
struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@ struct omap_aes_reqctx {
struct omap_aes_dev {
struct list_head list;
unsigned long phys_base;
- void __iomem *io_base;
+ void __iomem *io_base;
struct clk *iclk;
struct omap_aes_ctx *ctx;
struct device *dev;
unsigned long flags;
+ int err;
- u32 *iv;
- u32 ctrl;
+ spinlock_t lock;
+ struct crypto_queue queue;
- spinlock_t lock;
- struct crypto_queue queue;
-
- struct tasklet_struct task;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
struct ablkcipher_request *req;
size_t total;
@@ -179,9 +176,13 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
- int err = 0;
-
+ /*
+ * Clocks are enabled when a request starts and disabled when it finishes.
+ * There may be long delays between requests.
+ * The device might go to off mode to save power.
+ */
clk_enable(dd->iclk);
+
if (!(dd->flags & FLAGS_INIT)) {
/* is it necessary to reset before every operation? */
omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
__asm__ __volatile__("nop");
__asm__ __volatile__("nop");
- err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
- AES_REG_SYSSTATUS_RESETDONE);
- if (!err)
- dd->flags |= FLAGS_INIT;
- }
+ if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+ AES_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
- return err;
-}
+ dd->flags |= FLAGS_INIT;
+ dd->err = 0;
+ }
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
- clk_disable(dd->iclk);
+ return 0;
}
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
unsigned int key32;
- int i;
+ int i, err;
u32 val, mask;
- val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
- if (dd->flags & FLAGS_CBC)
- val |= AES_REG_CTRL_CBC;
- if (dd->flags & FLAGS_ENCRYPT)
- val |= AES_REG_CTRL_DIRECTION;
-
- if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
- !(dd->ctx->flags & FLAGS_NEW_KEY))
- goto out;
-
- /* only need to write control registers for new settings */
-
- dd->ctrl = val;
+ err = omap_aes_hw_init(dd);
+ if (err)
+ return err;
val = 0;
if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@ static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
- pr_debug("Set key\n");
key32 = dd->ctx->keylen / sizeof(u32);
- /* set a key */
+
+ /* it seems a key should always be set even if it has not changed */
for (i = 0; i < key32; i++) {
omap_aes_write(dd, AES_REG_KEY(i),
__le32_to_cpu(dd->ctx->key[i]));
}
- dd->ctx->flags &= ~FLAGS_NEW_KEY;
- if (dd->flags & FLAGS_NEW_IV) {
- pr_debug("Set IV\n");
- omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
- dd->flags &= ~FLAGS_NEW_IV;
- }
+ if ((dd->flags & FLAGS_CBC) && dd->req->info)
+ omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+ val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+ if (dd->flags & FLAGS_CBC)
+ val |= AES_REG_CTRL_CBC;
+ if (dd->flags & FLAGS_ENCRYPT)
+ val |= AES_REG_CTRL_DIRECTION;
mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
AES_REG_CTRL_KEY_SIZE;
- omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+ omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
-out:
- /* start DMA or disable idle mode */
- omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
- AES_REG_MASK_START);
+ /* IN */
+ omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + AES_REG_DATA, 0, 4);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+ omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+ /* OUT */
+ omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + AES_REG_DATA, 0, 4);
+
+ omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+ omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+ return 0;
}
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
struct omap_aes_dev *dd = data;
- if (lch == dd->dma_lch_out)
- tasklet_schedule(&dd->task);
+ if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+ pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+ dd->err = -EIO;
+ dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+ } else if (lch == dd->dma_lch_in) {
+ return;
+ }
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
goto err_dma_out;
}
- omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
-
- omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
- omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + AES_REG_DATA, 0, 4);
-
- omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
- omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
return 0;
err_dma_out:
@@ -406,6 +403,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
if (!count)
return off;
+ /*
+ * buflen and total are AES_BLOCK_SIZE aligned,
+ * so count should also be aligned
+ */
+
sg_copy_buf(buf + off, *sg, *offset, count, out);
off += count;
@@ -461,7 +463,9 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
omap_start_dma(dd->dma_lch_in);
omap_start_dma(dd->dma_lch_out);
- omap_aes_write_ctrl(dd);
+ /* start DMA or disable idle mode */
+ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+ AES_REG_MASK_START);
return 0;
}
@@ -488,8 +492,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
- if (count != dd->total)
+ if (count != dd->total) {
+ pr_err("request length != buffer length\n");
return -EINVAL;
+ }
pr_debug("fast\n");
@@ -525,23 +531,25 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->total -= count;
- err = omap_aes_hw_init(dd);
-
err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+ if (err) {
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ }
return err;
}
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
- struct omap_aes_ctx *ctx;
+ struct ablkcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+ clk_disable(dd->iclk);
+ dd->flags &= ~FLAGS_BUSY;
- if (!dd->total)
- dd->req->base.complete(&dd->req->base, err);
+ req->base.complete(&req->base, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
- omap_aes_hw_cleanup(dd);
-
omap_stop_dma(dd->dma_lch_in);
omap_stop_dma(dd->dma_lch_out);
@@ -574,40 +580,39 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
}
}
- if (err || !dd->total)
- omap_aes_finish_req(dd, err);
-
return err;
}
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ struct ablkcipher_request *req)
{
struct crypto_async_request *async_req, *backlog;
struct omap_aes_ctx *ctx;
struct omap_aes_reqctx *rctx;
- struct ablkcipher_request *req;
unsigned long flags;
-
- if (dd->total)
- goto start;
+ int err, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ablkcipher_enqueue_request(&dd->queue, req);
+ if (dd->flags & FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
- if (!async_req)
- clear_bit(FLAGS_BUSY, &dd->flags);
+ if (async_req)
+ dd->flags |= FLAGS_BUSY;
spin_unlock_irqrestore(&dd->lock, flags);
if (!async_req)
- return 0;
+ return ret;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
req = ablkcipher_request_cast(async_req);
- pr_debug("get new req\n");
-
/* assign new request to device */
dd->req = req;
dd->total = req->nbytes;
@@ -621,27 +626,22 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd)
rctx->mode &= FLAGS_MODE_MASK;
dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- dd->iv = req->info;
- if ((dd->flags & FLAGS_CBC) && dd->iv)
- dd->flags |= FLAGS_NEW_IV;
- else
- dd->flags &= ~FLAGS_NEW_IV;
-
+ dd->ctx = ctx;
ctx->dd = dd;
- if (dd->ctx != ctx) {
- /* assign new context to device */
- dd->ctx = ctx;
- ctx->flags |= FLAGS_NEW_KEY;
- }
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
- pr_err("request size is not exact amount of AES blocks\n");
+ err = omap_aes_write_ctrl(dd);
+ if (!err)
+ err = omap_aes_crypt_dma_start(dd);
+ if (err) {
+ /* aes_task will not finish it, so do it here */
+ omap_aes_finish_req(dd, err);
+ tasklet_schedule(&dd->queue_task);
+ }
-start:
- return omap_aes_crypt_dma_start(dd);
+ return ret; /* return ret, which is enqueue return value */
}
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
int err;
@@ -650,40 +650,50 @@ static void omap_aes_task(unsigned long data)
err = omap_aes_crypt_dma_stop(dd);
- err = omap_aes_handle_req(dd);
+ err = dd->err ? : err;
+
+ if (dd->total && !err) {
+ err = omap_aes_crypt_dma_start(dd);
+ if (!err)
+ return; /* DMA started. Not finishing. */
+ }
+
+ omap_aes_finish_req(dd, err);
+ omap_aes_handle_queue(dd, NULL);
pr_debug("exit\n");
}
+static void omap_aes_queue_task(unsigned long data)
+{
+ struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+ omap_aes_handle_queue(dd, NULL);
+}
+
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct omap_aes_dev *dd;
- unsigned long flags;
- int err;
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
rctx->mode = mode;
- spin_lock_irqsave(&dd->lock, flags);
- err = ablkcipher_enqueue_request(&dd->queue, req);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
- omap_aes_handle_req(dd);
-
- pr_debug("exit\n");
-
- return err;
+ return omap_aes_handle_queue(dd, req);
}
/* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- ctx->flags |= FLAGS_NEW_KEY;
return 0;
}
@@ -750,7 +759,7 @@ static struct crypto_alg algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = omap_aes_cra_init,
@@ -770,7 +779,7 @@ static struct crypto_alg algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = omap_aes_cra_init,
@@ -849,7 +858,8 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
clk_disable(dd->iclk);
- tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+ tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
if (err)
@@ -876,7 +886,8 @@ err_algs:
crypto_unregister_alg(&algs[j]);
omap_aes_dma_cleanup(dd);
err_dma:
- tasklet_kill(&dd->task);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
iounmap(dd->io_base);
err_io:
clk_put(dd->iclk);
@@ -903,7 +914,8 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++)
crypto_unregister_alg(&algs[i]);
- tasklet_kill(&dd->task);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
iounmap(dd->io_base);
clk_put(dd->iclk);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c7d03f..2e71123516e0 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
#define DEFAULT_TIMEOUT_INTERVAL HZ
-#define FLAGS_FIRST 0x0001
#define FLAGS_FINUP 0x0002
#define FLAGS_FINAL 0x0004
-#define FLAGS_FAST 0x0008
+#define FLAGS_SG 0x0008
#define FLAGS_SHA1 0x0010
#define FLAGS_DMA_ACTIVE 0x0020
#define FLAGS_OUTPUT_READY 0x0040
@@ -83,13 +82,17 @@
#define FLAGS_INIT 0x0100
#define FLAGS_CPU 0x0200
#define FLAGS_HMAC 0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY 16
+#define FLAGS_ERROR 0x0800
+#define FLAGS_BUSY 0x1000
#define OP_UPDATE 1
#define OP_FINAL 2
+#define OMAP_ALIGN_MASK (sizeof(u32)-1)
+#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN PAGE_SIZE
+
struct omap_sham_dev;
struct omap_sham_reqctx {
@@ -97,8 +100,8 @@ struct omap_sham_reqctx {
unsigned long flags;
unsigned long op;
+ u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
- u8 *buffer;
size_t bufcnt;
size_t buflen;
dma_addr_t dma_addr;
@@ -107,6 +110,8 @@ struct omap_sham_reqctx {
struct scatterlist *sg;
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
+
+ u8 buffer[0] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@ struct omap_sham_dev {
int irq;
struct clk *iclk;
spinlock_t lock;
+ int err;
int dma;
int dma_lch;
struct tasklet_struct done_task;
@@ -194,53 +200,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)ctx->digest;
+ int i;
+
+ /* MD5 is almost unused, so copy the SHA1 digest size to reduce code */
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (out)
+ hash[i] = omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i));
+ else
+ omap_sham_write(ctx->dd,
+ SHA_REG_DIGEST(i), hash[i]);
+ }
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *in = (u32 *)ctx->digest;
u32 *hash = (u32 *)req->result;
int i;
+ if (!hash)
+ return;
+
if (likely(ctx->flags & FLAGS_SHA1)) {
/* SHA1 results are in big endian */
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
- if (out)
- hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
- SHA_REG_DIGEST(i)));
- else
- omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
- cpu_to_be32(hash[i]));
+ hash[i] = be32_to_cpu(in[i]);
} else {
/* MD5 results are in little endian */
for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
- if (out)
- hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
- SHA_REG_DIGEST(i)));
- else
- omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
- cpu_to_le32(hash[i]));
+ hash[i] = le32_to_cpu(in[i]);
}
}
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
- int final, int dma)
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- u32 val = length << 5, mask;
+ clk_enable(dd->iclk);
- if (unlikely(!ctx->digcnt)) {
+ if (!(dd->flags & FLAGS_INIT)) {
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
- clk_enable(dd->iclk);
+ if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+ SHA_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
- if (!(dd->flags & FLAGS_INIT)) {
- omap_sham_write_mask(dd, SHA_REG_MASK,
- SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+ dd->flags |= FLAGS_INIT;
+ dd->err = 0;
+ }
- if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
- SHA_REG_SYSSTATUS_RESETDONE))
- return -ETIMEDOUT;
+ return 0;
+}
- dd->flags |= FLAGS_INIT;
- }
- } else {
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val = length << 5, mask;
+
+ if (likely(ctx->digcnt))
omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
- }
omap_sham_write_mask(dd, SHA_REG_MASK,
SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
- return 0;
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
size_t length, int final)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- int err, count, len32;
+ int count, len32;
const u32 *buffer = (const u32 *)buf;
dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- err = omap_sham_write_ctrl(dd, length, final, 0);
- if (err)
- return err;
+ omap_sham_write_ctrl(dd, length, final, 0);
+
+ /* digcnt must be non-zero before the lines below so the clocks get disabled later */
+ ctx->digcnt += length;
if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
return -ETIMEDOUT;
- ctx->digcnt += length;
-
if (final)
ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
@@ -298,16 +316,11 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
size_t length, int final)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- int err, len32;
+ int len32;
dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
- /* flush cache entries related to our page */
- if (dma_addr == ctx->dma_addr)
- dma_sync_single_for_device(dd->dev, dma_addr, length,
- DMA_TO_DEVICE);
-
len32 = DIV_ROUND_UP(length, sizeof(u32));
omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
dma_addr, 0, 0);
- err = omap_sham_write_ctrl(dd, length, final, 1);
- if (err)
- return err;
+ omap_sham_write_ctrl(dd, length, final, 1);
ctx->digcnt += length;
@@ -371,15 +382,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
return 0;
}
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+ struct omap_sham_reqctx *ctx,
+ size_t length, int final)
+{
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+ return -EINVAL;
+ }
+
+ ctx->flags &= ~FLAGS_SG;
+
+ /* the next call does not fail, so no unmap is needed on error */
+ return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
unsigned int final;
size_t count;
- if (!ctx->total)
- return 0;
-
omap_sham_append_sg(ctx);
final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+ return omap_sham_xmit_dma_map(dd, ctx, count, final);
}
return 0;
}
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- unsigned int length;
+ unsigned int length, final, tail;
+ struct scatterlist *sg;
- ctx->flags |= FLAGS_FAST;
+ if (!ctx->total)
+ return 0;
+
+ if (ctx->bufcnt || ctx->offset)
+ return omap_sham_update_dma_slow(dd);
+
+ dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+ ctx->digcnt, ctx->bufcnt, ctx->total);
+
+ sg = ctx->sg;
- length = min(ctx->total, sg_dma_len(ctx->sg));
- ctx->total = length;
+ if (!SG_AA(sg))
+ return omap_sham_update_dma_slow(dd);
+
+ if (!sg_is_last(sg) && !SG_SA(sg))
+ /* size is not SHA1_BLOCK_SIZE aligned */
+ return omap_sham_update_dma_slow(dd);
+
+ length = min(ctx->total, sg->length);
+
+ if (sg_is_last(sg)) {
+ if (!(ctx->flags & FLAGS_FINUP)) {
+ /* a non-last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+ tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+ /* without finup() we need one block to close hash */
+ if (!tail)
+ tail = SHA1_MD5_BLOCK_SIZE;
+ length -= tail;
+ }
+ }
if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
dev_err(dd->dev, "dma_map_sg error\n");
return -EINVAL;
}
+ ctx->flags |= FLAGS_SG;
+
ctx->total -= length;
+ ctx->offset = length; /* offset where to start slow */
- return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+ final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+ /* the next call does not fail, so no unmap is needed on error */
+ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
}
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
omap_stop_dma(dd->dma_lch);
- if (ctx->flags & FLAGS_FAST)
+ if (ctx->flags & FLAGS_SG) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+ if (ctx->sg->length == ctx->offset) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ }
+ } else {
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+ }
return 0;
}
@@ -454,14 +526,7 @@ static void omap_sham_cleanup(struct ahash_request *req)
spin_unlock_irqrestore(&dd->lock, flags);
if (ctx->digcnt)
- clk_disable(dd->iclk);
-
- if (ctx->dma_addr)
- dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
- DMA_TO_DEVICE);
-
- if (ctx->buffer)
- free_page((unsigned long)ctx->buffer);
+ omap_sham_copy_ready_hash(req);
dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
}
@@ -489,8 +554,6 @@ static int omap_sham_init(struct ahash_request *req)
ctx->flags = 0;
- ctx->flags |= FLAGS_FIRST;
-
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
@@ -499,21 +562,7 @@ static int omap_sham_init(struct ahash_request *req)
ctx->bufcnt = 0;
ctx->digcnt = 0;
-
- ctx->buflen = PAGE_SIZE;
- ctx->buffer = (void *)__get_free_page(
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!ctx->buffer)
- return -ENOMEM;
-
- ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
- dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
- free_page((unsigned long)ctx->buffer);
- return -EINVAL;
- }
+ ctx->buflen = BUFLEN;
if (tctx->flags & FLAGS_HMAC) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
if (ctx->flags & FLAGS_CPU)
err = omap_sham_update_cpu(dd);
- else if (ctx->flags & FLAGS_FAST)
- err = omap_sham_update_dma_fast(dd);
else
- err = omap_sham_update_dma_slow(dd);
+ err = omap_sham_update_dma_start(dd);
/* wait for dma completion before can take more data */
dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
use_dma = 0;
if (use_dma)
- err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+ err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
else
err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
ctx->bufcnt = 0;
- if (err != -EINPROGRESS)
- omap_sham_cleanup(req);
-
dev_dbg(dd->dev, "final_req: err: %d\n", err);
return err;
@@ -576,6 +620,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
static int omap_sham_finish_req_hmac(struct ahash_request *req)
{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
return crypto_shash_init(&desc.shash) ?:
crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
- crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+ crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
if (!err) {
omap_sham_copy_hash(ctx->dd->req, 1);
if (ctx->flags & FLAGS_HMAC)
err = omap_sham_finish_req_hmac(req);
+ } else {
+ ctx->flags |= FLAGS_ERROR;
}
- if (ctx->flags & FLAGS_FINAL)
+ if ((ctx->flags & FLAGS_FINAL) || err)
omap_sham_cleanup(req);
- clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+ clk_disable(dd->iclk);
+ dd->flags &= ~FLAGS_BUSY;
if (req->base.complete)
req->base.complete(&req->base, err);
}
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+ struct ahash_request *req)
{
struct crypto_async_request *async_req, *backlog;
struct omap_sham_reqctx *ctx;
- struct ahash_request *req, *prev_req;
+ struct ahash_request *prev_req;
unsigned long flags;
- int err = 0;
-
- if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
- return 0;
+ int err = 0, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->queue, req);
+ if (dd->flags & FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
- if (!async_req)
- clear_bit(FLAGS_BUSY, &dd->flags);
+ if (async_req)
+ dd->flags |= FLAGS_BUSY;
spin_unlock_irqrestore(&dd->lock, flags);
if (!async_req)
- return 0;
+ return ret;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
ctx->op, req->nbytes);
- if (req != prev_req && ctx->digcnt)
+
+ err = omap_sham_hw_init(dd);
+ if (err)
+ goto err1;
+
+ omap_set_dma_dest_params(dd->dma_lch, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_16);
+
+ omap_set_dma_src_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_4);
+
+ if (ctx->digcnt)
/* request has changed - restore hash */
omap_sham_copy_hash(req, 0);
@@ -658,7 +726,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
} else if (ctx->op == OP_FINAL) {
err = omap_sham_final_req(dd);
}
-
+err1:
if (err != -EINPROGRESS) {
/* done_task will not finish it, so do it here */
omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
dev_dbg(dd->dev, "exit, err: %d\n", err);
- return err;
+ return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct omap_sham_dev *dd = tctx->dd;
- unsigned long flags;
- int err;
ctx->op = op;
- spin_lock_irqsave(&dd->lock, flags);
- err = ahash_enqueue_request(&dd->queue, req);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- omap_sham_handle_queue(dd);
-
- return err;
+ return omap_sham_handle_queue(dd, req);
}
static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@ static int omap_sham_update(struct ahash_request *req)
*/
omap_sham_append_sg(ctx);
return 0;
- } else if (ctx->bufcnt + ctx->total <= 64) {
+ } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+ /*
+ * faster to use CPU for short transfers
+ */
ctx->flags |= FLAGS_CPU;
- } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
- /* may be can use faster functions */
- int aligned = IS_ALIGNED((u32)ctx->sg->offset,
- sizeof(u32));
-
- if (aligned && (ctx->flags & FLAGS_FIRST))
- /* digest: first and final */
- ctx->flags |= FLAGS_FAST;
-
- ctx->flags &= ~FLAGS_FIRST;
}
- } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
- /* if not finaup -> not fast */
+ } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
omap_sham_append_sg(ctx);
return 0;
}
@@ -761,12 +813,14 @@ static int omap_sham_final(struct ahash_request *req)
ctx->flags |= FLAGS_FINUP;
- /* OMAP HW accel works only with buffers >= 9 */
- /* HMAC is always >= 9 because of ipad */
- if ((ctx->digcnt + ctx->bufcnt) < 9)
- err = omap_sham_final_shash(req);
- else if (ctx->bufcnt)
- return omap_sham_enqueue(req, OP_FINAL);
+ if (!(ctx->flags & FLAGS_ERROR)) {
+ /* OMAP HW accel works only with buffers >= 9 */
+ /* HMAC is always >= 9 because of ipad */
+ if ((ctx->digcnt + ctx->bufcnt) < 9)
+ err = omap_sham_final_shash(req);
+ else if (ctx->bufcnt)
+ return omap_sham_enqueue(req, OP_FINAL);
+ }
omap_sham_cleanup(req);
@@ -836,6 +890,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
const char *alg_name = crypto_tfm_alg_name(tfm);
+ pr_info("enter\n");
+
/* Allocate a fallback and abort if it failed. */
tctx->fallback = crypto_alloc_shash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct omap_sham_reqctx));
+ sizeof(struct omap_sham_reqctx) + BUFLEN);
if (alg_base) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@ static struct ahash_alg algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -956,7 +1012,7 @@ static struct ahash_alg algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha1_init,
.cra_exit = omap_sham_cra_exit,
@@ -980,7 +1036,7 @@ static struct ahash_alg algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_md5_init,
.cra_exit = omap_sham_cra_exit,
@@ -993,7 +1049,7 @@ static void omap_sham_done_task(unsigned long data)
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- int ready = 1;
+ int ready = 0, err = 0;
if (ctx->flags & FLAGS_OUTPUT_READY) {
ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@ static void omap_sham_done_task(unsigned long data)
if (dd->flags & FLAGS_DMA_ACTIVE) {
dd->flags &= ~FLAGS_DMA_ACTIVE;
omap_sham_update_dma_stop(dd);
- omap_sham_update_dma_slow(dd);
+ if (!dd->err)
+ err = omap_sham_update_dma_start(dd);
}
- if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
- dev_dbg(dd->dev, "update done\n");
+ err = dd->err ? : err;
+
+ if (err != -EINPROGRESS && (ready || err)) {
+ dev_dbg(dd->dev, "update done: err: %d\n", err);
/* finish curent request */
- omap_sham_finish_req(req, 0);
+ omap_sham_finish_req(req, err);
/* start new request */
- omap_sham_handle_queue(dd);
+ omap_sham_handle_queue(dd, NULL);
}
}
@@ -1019,7 +1078,7 @@ static void omap_sham_queue_task(unsigned long data)
{
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
- omap_sham_handle_queue(dd);
+ omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
omap_sham_read(dd, SHA_REG_CTRL);
ctx->flags |= FLAGS_OUTPUT_READY;
+ dd->err = 0;
tasklet_schedule(&dd->done_task);
return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
struct omap_sham_dev *dd = data;
- if (likely(lch == dd->dma_lch))
- tasklet_schedule(&dd->done_task);
+ if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+ pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+ dd->err = -EIO;
+ dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+ }
+
+ tasklet_schedule(&dd->done_task);
}
static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
dev_err(dd->dev, "Unable to request DMA channel\n");
return err;
}
- omap_set_dma_dest_params(dd->dma_lch, 0,
- OMAP_DMA_AMODE_CONSTANT,
- dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
- omap_set_dma_dest_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_16);
-
- omap_set_dma_src_burst_mode(dd->dma_lch,
- OMAP_DMA_DATA_BURST_4);
return 0;
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515baa38f7..db33d300aa23 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -21,7 +22,6 @@
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
-#include "padlock.h"
/*
* Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0119bc..adf075b6b9a8 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -22,13 +23,6 @@
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
struct padlock_sha_desc {
struct shash_desc fallback;
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
deleted file mode 100644
index b728e4518bd1..000000000000
--- a/drivers/crypto/padlock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Driver for VIA PadLock
- *
- * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_PADLOCK_H
-#define _CRYPTO_PADLOCK_H
-
-#define PADLOCK_ALIGNMENT 16
-
-#define PFX "padlock: "
-
-#define PADLOCK_CRA_PRIORITY 300
-#define PADLOCK_COMPOSITE_PRIORITY 400
-
-#endif /* _CRYPTO_PADLOCK_H */
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index b98c67664ae7..c461eda62411 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -110,8 +110,6 @@ static void unregister_dca_providers(void)
/* at this point only one domain in the list is expected */
domain = list_first_entry(&dca_domains, struct dca_domain, node);
- if (!domain)
- return;
list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
list_del(&dca->node);
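The NULL check removed above was dead code; a minimal sketch of why, assuming the list.h definitions of this era (list_first_entry() is container_of() over head->next, so it can never yield NULL - the names below are illustrative, not part of the patch):

/* Illustrative only - mirrors include/linux/list.h, not part of this patch. */
#define sketch_list_entry(ptr, type, member) \
	container_of(ptr, type, member)
#define sketch_list_first_entry(ptr, type, member) \
	sketch_list_entry((ptr)->next, type, member)
/*
 * Even for an empty list, head->next points back at the head itself, so
 * the macro returns a non-NULL (if bogus) pointer - a NULL check on the
 * result can never trigger.
 */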
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee23592700a..1c28816152fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@ config FSL_DMA
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"
- depends on PPC_MPC512x
+ depends on PPC_MPC512x || PPC_MPC831x
select DMA_ENGINE
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
@@ -200,11 +200,16 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Topcliff (Intel EG20T) PCH DMA support"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
depends on PCI && X86
select DMA_ENGINE
help
- Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+ Enable support for Intel EG20T PCH DMA engine.
+
+ This driver can also be used for the OKI SEMICONDUCTOR ML7213 IOH
+ (Input/Output Hub), which is intended for IVI (In-Vehicle Infotainment) use.
+ The ML7213 is a companion chip for the Intel Atom E6xx series and is
+ fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..07bca4970e50 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
*
* Documentation: ARM DDI 0196G == PL080
- * Documentation: ARM DDI 0218E == PL081
+ * Documentation: ARM DDI 0218E == PL081
*
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
*
* The PL080 has 8 channels available for simultaneous use, and the PL081
* has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
*
* ASSUMES default (little) endianness for DMA transfers
*
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ * - DMAC flow control: the transfer size defines the number of transfers
+ * which occur for the current LLI entry, and the DMAC raises TC at the
+ * end of every LLI entry. Observed behaviour shows the DMAC listening
+ * to both the BREQ and SREQ signals (contrary to documented),
+ * transferring data if either is active. The LBREQ and LSREQ signals
+ * are ignored.
+ *
+ * - Peripheral flow control: the transfer size is ignored (and should be
+ * zero). The data is transferred from the current LLI entry, until
+ * after the final transfer signalled by LBREQ or LSREQ. The DMAC
+ * will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method. However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, and this
+ * hardware restriction prevents them from using scatter DMA.
*
* Global TODO:
* - Break out common code from arch/arm/mach-s3c64xx and share
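A minimal sketch (not part of the patch) of how the DMAC flow-control setting described above reaches the hardware for slave transfers: the direction selects MEM2PER or PER2MEM in the channel config word, and error/terminal-count interrupts are always enabled. The PL080_* names are the ones this patch uses further down; the helper itself is illustrative only.

/* Illustrative sketch, not part of the patch. */
static u32 sketch_slave_ccfg(enum dma_data_direction dir)
{
	/* Error and terminal-count interrupts are always wanted */
	u32 ccfg = PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK;

	if (dir == DMA_TO_DEVICE)
		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	else	/* DMA_FROM_DEVICE */
		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	/* DMAC flow control only - the peripheral flow-control variants are not used */
	return ccfg;
}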
@@ -61,50 +77,40 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/dmapool.h>
-#include <linux/amba/bus.h>
#include <linux/dmaengine.h>
+#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/hardware/pl080.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
#define DRIVER_NAME "pl08xdmac"
/**
- * struct vendor_data - vendor-specific config parameters
- * for PL08x derivates
- * @name: the name of this specific variant
+ * struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
- * @dualmaster: whether this version supports dual AHB masters
- * or not.
+ * @dualmaster: whether this version supports dual AHB masters or not.
*/
struct vendor_data {
- char *name;
u8 channels;
bool dualmaster;
};
/*
* PL08X private data structures
- * An LLI struct - see pl08x TRM
- * Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info
- * is in cctl
+ * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info is in cctl. Also note that these
+ * are fixed 32-bit quantities.
*/
-struct lli {
- dma_addr_t src;
- dma_addr_t dst;
- dma_addr_t next;
+struct pl08x_lli {
+ u32 src;
+ u32 dst;
+ u32 lli;
u32 cctl;
};
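A short sketch (not from the patch) of how these fixed 32-bit LLI entries are chained, in the same way pl08x_fill_lli_for_desc() does later in this diff: the bus address of the next entry goes into ->lli, optionally tagged with PL080_LLI_LM_AHB2 so the DMAC fetches it over AHB2, and a zero pointer terminates the list.

/* Illustrative only. */
static void sketch_chain_llis(struct pl08x_lli *llis_va, dma_addr_t llis_bus,
			      bool fetch_on_ahb2)
{
	/* Entry 0 points at entry 1 by its bus address ... */
	llis_va[0].lli = llis_bus + 1 * sizeof(struct pl08x_lli);
	if (fetch_on_ahb2)
		llis_va[0].lli |= PL080_LLI_LM_AHB2;

	/* ... and a zero pointer in the last entry terminates the list. */
	llis_va[1].lli = 0;
}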
@@ -119,6 +125,8 @@ struct lli {
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct
*/
struct pl08x_driver_data {
@@ -126,11 +134,13 @@ struct pl08x_driver_data {
struct dma_device memcpy;
void __iomem *base;
struct amba_device *adev;
- struct vendor_data *vd;
+ const struct vendor_data *vd;
struct pl08x_platform_data *pd;
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
int pool_ctr;
+ u8 lli_buses;
+ u8 mem_buses;
spinlock_t lock;
};
@@ -152,9 +162,9 @@ struct pl08x_driver_data {
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS 0x40
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +172,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
return container_of(chan, struct pl08x_dma_chan, chan);
}
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct pl08x_txd, tx);
+}
+
/*
* Physical channel handling
*/
@@ -177,103 +192,63 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
/*
* Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed. Poke them into the hardware
+ * and start the transfer.
*/
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
- struct pl08x_phy_chan *ch)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
{
- /* Wait for channel inactive */
- while (pl08x_phy_channel_busy(ch))
- ;
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=%08x, cdst=%08x, "
- "cctl=%08x, clli=%08x, ccfg=%08x\n",
- ch->id,
- ch->csrc,
- ch->cdst,
- ch->cctl,
- ch->clli,
- ch->ccfg);
-
- writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
- writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
- writel(ch->clli, ch->base + PL080_CH_LLI);
- writel(ch->cctl, ch->base + PL080_CH_CONTROL);
- writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
-{
- struct pl08x_channel_data *cd = plchan->cd;
+ struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
- struct pl08x_txd *txd = plchan->at;
-
- /* Copy the basic control register calculated at transfer config */
- phychan->csrc = txd->csrc;
- phychan->cdst = txd->cdst;
- phychan->clli = txd->clli;
- phychan->cctl = txd->cctl;
-
- /* Assign the signal to the proper control registers */
- phychan->ccfg = cd->ccfg;
- phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
- phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
- /* If it wasn't set from AMBA, ignore it */
- if (txd->direction == DMA_TO_DEVICE)
- /* Select signal as destination */
- phychan->ccfg |=
- (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
- else if (txd->direction == DMA_FROM_DEVICE)
- /* Select signal as source */
- phychan->ccfg |=
- (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
- /* Always enable error interrupts */
- phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
- /* Always enable terminal interrupts */
- phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
- struct pl08x_phy_chan *ch)
-{
+ struct pl08x_lli *lli = &txd->llis_va[0];
u32 val;
- /*
- * Do not access config register until channel shows as disabled
- */
- while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
- ;
+ plchan->at = txd;
- /*
- * Do not access config register until channel shows as inactive
- */
- val = readl(ch->base + PL080_CH_CONFIG);
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(phychan))
+ cpu_relax();
+
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+ txd->ccfg);
+
+ writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+ writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+ writel(lli->lli, phychan->base + PL080_CH_LLI);
+ writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+ writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+ /* Enable the DMA channel */
+ /* Do not access config register until channel shows as disabled */
+ while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+ cpu_relax();
+
+ /* Do not access config register until channel shows as inactive */
+ val = readl(phychan->base + PL080_CH_CONFIG);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(phychan->base + PL080_CH_CONFIG);
- writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
- * Overall DMAC remains enabled always.
- *
- * Disabling individual channels could lose data.
+ * Pause the channel by setting the HALT bit.
*
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still time out if the DMAC FIFO never drains of data.)
*
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
*/
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
u32 val;
+ int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
val = readl(ch->base + PL080_CH_CONFIG);
@@ -281,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
writel(val, ch->base + PL080_CH_CONFIG);
/* Wait for channel inactive */
- while (pl08x_phy_channel_busy(ch))
- ;
+ for (timeout = 1000; timeout; timeout--) {
+ if (!pl08x_phy_channel_busy(ch))
+ break;
+ udelay(1);
+ }
+ if (pl08x_phy_channel_busy(ch))
+ pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -296,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
}
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status. This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
{
- u32 val;
+ u32 val = readl(ch->base + PL080_CH_CONFIG);
- pl08x_pause_phy_chan(ch);
+ val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+ PL080_CONFIG_TC_IRQ_MASK);
- /* Disable channel */
- val = readl(ch->base + PL080_CH_CONFIG);
- val &= ~PL080_CONFIG_ENABLE;
- val &= ~PL080_CONFIG_ERR_IRQ_MASK;
- val &= ~PL080_CONFIG_TC_IRQ_MASK;
writel(val, ch->base + PL080_CH_CONFIG);
+
+ writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+ writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -333,54 +318,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
struct pl08x_phy_chan *ch;
- struct pl08x_txd *txdi = NULL;
struct pl08x_txd *txd;
unsigned long flags;
- u32 bytes = 0;
+ size_t bytes = 0;
spin_lock_irqsave(&plchan->lock, flags);
-
ch = plchan->phychan;
txd = plchan->at;
/*
- * Next follow the LLIs to get the number of pending bytes in the
- * currently active transaction.
+ * Follow the LLIs to get the number of remaining
+ * bytes in the currently active transaction.
*/
if (ch && txd) {
- struct lli *llis_va = txd->llis_va;
- struct lli *llis_bus = (struct lli *) txd->llis_bus;
- u32 clli = readl(ch->base + PL080_CH_LLI);
+ u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the bytes in the current active LLI */
+ /* First get the remaining bytes in the active transfer */
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
if (clli) {
- int i = 0;
+ struct pl08x_lli *llis_va = txd->llis_va;
+ dma_addr_t llis_bus = txd->llis_bus;
+ int index;
+
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ index = (clli - llis_bus) / sizeof(struct pl08x_lli);
- /* Forward to the LLI pointed to by clli */
- while ((clli != (u32) &(llis_bus[i])) &&
- (i < MAX_NUM_TSFR_LLIS))
- i++;
+ for (; index < MAX_NUM_TSFR_LLIS; index++) {
+ bytes += get_bytes_in_cctl(llis_va[index].cctl);
- while (clli) {
- bytes += get_bytes_in_cctl(llis_va[i].cctl);
/*
- * A clli of 0x00000000 will terminate the
- * LLI list
+ * A LLI pointer of 0 terminates the LLI list
*/
- clli = llis_va[i].next;
- i++;
+ if (!llis_va[index].lli)
+ break;
}
}
}
/* Sum up all queued transactions */
- if (!list_empty(&plchan->desc_list)) {
- list_for_each_entry(txdi, &plchan->desc_list, node) {
+ if (!list_empty(&plchan->pend_list)) {
+ struct pl08x_txd *txdi;
+ list_for_each_entry(txdi, &plchan->pend_list, node) {
bytes += txdi->len;
}
-
}
spin_unlock_irqrestore(&plchan->lock, flags);
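A worked example of the "simple maths" index calculation above, with illustrative numbers only: since the LLIs live in one contiguous pool, the entry the DMAC is currently fetching falls straight out of pointer arithmetic.

/*
 * Illustrative only: with llis_bus == 0x40000000 and sizeof(struct pl08x_lli)
 * == 16, a CLLI readback of 0x40000030 means the DMAC is on entry index 3.
 */
static int sketch_clli_to_index(u32 clli, dma_addr_t llis_bus)
{
	return (clli - llis_bus) / sizeof(struct pl08x_lli);
}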
@@ -390,6 +377,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
/*
* Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
*/
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +390,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
unsigned long flags;
int i;
- /*
- * Try to locate a physical channel to be used for
- * this transfer. If all are taken return NULL and
- * the requester will have to cope by using some fallback
- * PIO mode or retrying later.
- */
for (i = 0; i < pl08x->vd->channels; i++) {
ch = &pl08x->phy_chans[i];
@@ -433,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
{
unsigned long flags;
+ spin_lock_irqsave(&ch->lock, flags);
+
/* Stop the channel and clear its interrupts */
- pl08x_stop_phy_chan(ch);
- writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
- writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+ pl08x_terminate_phy_chan(pl08x, ch);
/* Mark it as free */
- spin_lock_irqsave(&ch->lock, flags);
ch->serving = NULL;
spin_unlock_irqrestore(&ch->lock, flags);
}
@@ -465,11 +449,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
- u32 tsize)
+ size_t tsize)
{
u32 retbits = cctl;
- /* Remove all src, dst and transfersize bits */
+ /* Remove all src, dst and transfer size bits */
retbits &= ~PL080_CONTROL_DWIDTH_MASK;
retbits &= ~PL080_CONTROL_SWIDTH_MASK;
retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +493,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
return retbits;
}
+struct pl08x_lli_build_data {
+ struct pl08x_txd *txd;
+ struct pl08x_driver_data *pl08x;
+ struct pl08x_bus_data srcbus;
+ struct pl08x_bus_data dstbus;
+ size_t remainder;
+};
+
/*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer. This prefers the
+ * destination bus if both are available; if there is a fixed address
+ * on one bus, the other will be chosen.
*/
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
- struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
- struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+ struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
if (!(cctl & PL080_CONTROL_DST_INCR)) {
- *mbus = src_bus;
- *sbus = dst_bus;
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
- *mbus = dst_bus;
- *sbus = src_bus;
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
} else {
- if (dst_bus->buswidth == 4) {
- *mbus = dst_bus;
- *sbus = src_bus;
- } else if (src_bus->buswidth == 4) {
- *mbus = src_bus;
- *sbus = dst_bus;
- } else if (dst_bus->buswidth == 2) {
- *mbus = dst_bus;
- *sbus = src_bus;
- } else if (src_bus->buswidth == 2) {
- *mbus = src_bus;
- *sbus = dst_bus;
+ if (bd->dstbus.buswidth == 4) {
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
+ } else if (bd->srcbus.buswidth == 4) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
+ } else if (bd->dstbus.buswidth == 2) {
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
+ } else if (bd->srcbus.buswidth == 2) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else {
- /* src_bus->buswidth == 1 */
- *mbus = dst_bus;
- *sbus = src_bus;
+ /* bd->srcbus.buswidth == 1 */
+ *mbus = &bd->dstbus;
+ *sbus = &bd->srcbus;
}
}
}
/*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fills in one LLI for a certain transfer descriptor and advances the counter
*/
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
- struct pl08x_txd *txd, int num_llis, int len,
- u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl)
{
- struct lli *llis_va = txd->llis_va;
- struct lli *llis_bus = (struct lli *) txd->llis_bus;
+ struct pl08x_lli *llis_va = bd->txd->llis_va;
+ dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = txd->srcbus.addr;
- llis_va[num_llis].dst = txd->dstbus.addr;
-
- /*
- * On versions with dual masters, you can optionally AND on
- * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
- * in new LLIs with that controller, but we always try to
- * choose AHB1 to point into memory. The idea is to have AHB2
- * fixed on the peripheral and AHB1 messing around in the
- * memory. So we don't manipulate this bit currently.
- */
-
- llis_va[num_llis].next =
- (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+ llis_va[num_llis].cctl = cctl;
+ llis_va[num_llis].src = bd->srcbus.addr;
+ llis_va[num_llis].dst = bd->dstbus.addr;
+ llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+ if (bd->pl08x->lli_buses & PL08X_AHB2)
+ llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
if (cctl & PL080_CONTROL_SRC_INCR)
- txd->srcbus.addr += len;
+ bd->srcbus.addr += len;
if (cctl & PL080_CONTROL_DST_INCR)
- txd->dstbus.addr += len;
+ bd->dstbus.addr += len;
- *remainder -= len;
+ BUG_ON(bd->remainder < len);
- return num_llis + 1;
+ bd->remainder -= len;
}
/*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
*/
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
- u32 boundary;
+ size_t boundary_len = PL08X_BOUNDARY_SIZE -
+ (addr & (PL08X_BOUNDARY_SIZE - 1));
- boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
- << PL08X_BOUNDARY_SHIFT;
-
- if (boundary < addr + len)
- return boundary - addr;
- else
- return len;
+ return min(boundary_len, len);
}
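A worked example of the rewritten pl08x_pre_boundary() above, assuming a 1KiB (0x400 byte) boundary size for the arithmetic; the values are illustrative and not from the patch.

/* Illustrative only. */
static size_t sketch_pre_boundary_example(void)
{
	u32 addr = 0x1003f0;		/* 0x10 bytes short of a 1KiB boundary */
	size_t len = 0x100;
	size_t boundary_len = 0x400 - (addr & (0x400 - 1));	/* == 0x10 */

	/* min(boundary_len, len) == 0x10: this LLI stops exactly on the boundary */
	return boundary_len < len ? boundary_len : len;
}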
/*
@@ -608,20 +584,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
- struct pl08x_channel_data *cd = txd->cd;
struct pl08x_bus_data *mbus, *sbus;
- u32 remainder;
+ struct pl08x_lli_build_data bd;
int num_llis = 0;
u32 cctl;
- int max_bytes_per_lli;
- int total_bytes = 0;
- struct lli *llis_va;
- struct lli *llis_bus;
-
- if (!txd) {
- dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
- return 0;
- }
+ size_t max_bytes_per_lli;
+ size_t total_bytes = 0;
+ struct pl08x_lli *llis_va;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
&txd->llis_bus);
@@ -632,121 +601,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x->pool_ctr++;
- /*
- * Initialize bus values for this transfer
- * from the passed optimal values
- */
- if (!cd) {
- dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
- return 0;
- }
-
- /* Get the default CCTL from the platform data */
- cctl = cd->cctl;
+ /* Get the default CCTL */
+ cctl = txd->cctl;
- /*
- * On the PL080 we have two bus masters and we
- * should select one for source and one for
- * destination. We try to use AHB2 for the
- * bus which does not increment (typically the
- * peripheral) else we just choose something.
- */
- cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
- if (pl08x->vd->dualmaster) {
- if (cctl & PL080_CONTROL_SRC_INCR)
- /* Source increments, use AHB2 for destination */
- cctl |= PL080_CONTROL_DST_AHB2;
- else if (cctl & PL080_CONTROL_DST_INCR)
- /* Destination increments, use AHB2 for source */
- cctl |= PL080_CONTROL_SRC_AHB2;
- else
- /* Just pick something, source AHB1 dest AHB2 */
- cctl |= PL080_CONTROL_DST_AHB2;
- }
+ bd.txd = txd;
+ bd.pl08x = pl08x;
+ bd.srcbus.addr = txd->src_addr;
+ bd.dstbus.addr = txd->dst_addr;
/* Find maximum width of the source bus */
- txd->srcbus.maxwidth =
+ bd.srcbus.maxwidth =
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
PL080_CONTROL_SWIDTH_SHIFT);
/* Find maximum width of the destination bus */
- txd->dstbus.maxwidth =
+ bd.dstbus.maxwidth =
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
PL080_CONTROL_DWIDTH_SHIFT);
/* Set up the bus widths to the maximum */
- txd->srcbus.buswidth = txd->srcbus.maxwidth;
- txd->dstbus.buswidth = txd->dstbus.maxwidth;
+ bd.srcbus.buswidth = bd.srcbus.maxwidth;
+ bd.dstbus.buswidth = bd.dstbus.maxwidth;
dev_vdbg(&pl08x->adev->dev,
"%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+ __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
- max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+ max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %d\n",
+ "%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
/* We need to count this down to zero */
- remainder = txd->len;
+ bd.remainder = txd->len;
dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %d\n",
- __func__, remainder);
+ "%s remainder = %zu\n",
+ __func__, bd.remainder);
/*
* Choose bus to align to
* - prefers destination bus if both available
* - if fixed address on one bus chooses other
- * - modifies cctl to choose an apropriate master
- */
- pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
- &mbus, &sbus, cctl);
-
-
- /*
- * The lowest bit of the LLI register
- * is also used to indicate which master to
- * use for reading the LLIs.
*/
+ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
if (txd->len < mbus->buswidth) {
- /*
- * Less than a bus width available
- * - send as single bytes
- */
- while (remainder) {
+ /* Less than a bus width available - send as single bytes */
+ while (bd.remainder) {
dev_vdbg(&pl08x->adev->dev,
"%s single byte LLIs for a transfer of "
- "less than a bus width (remain %08x)\n",
- __func__, remainder);
+ "less than a bus width (remain 0x%08x)\n",
+ __func__, bd.remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
- cctl, &remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
} else {
- /*
- * Make one byte LLIs until master bus is aligned
- * - slave will then be aligned also
- */
+ /* Make one byte LLIs until master bus is aligned */
while ((mbus->addr) % (mbus->buswidth)) {
dev_vdbg(&pl08x->adev->dev,
"%s adjustment lli for less than bus width "
- "(remain %08x)\n",
- __func__, remainder);
+ "(remain 0x%08x)\n",
+ __func__, bd.remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- num_llis = pl08x_fill_lli_for_desc
- (pl08x, txd, num_llis, 1, cctl, &remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
/*
- * Master now aligned
+ * Master now aligned
* - if slave is not then we must set its width down
*/
if (sbus->addr % sbus->buswidth) {
@@ -761,63 +688,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* Make largest possible LLIs until less than one bus
* width left
*/
- while (remainder > (mbus->buswidth - 1)) {
- int lli_len, target_len;
- int tsize;
- int odd_bytes;
+ while (bd.remainder > (mbus->buswidth - 1)) {
+ size_t lli_len, target_len, tsize, odd_bytes;
/*
* If enough left try to send max possible,
* otherwise try to send the remainder
*/
- target_len = remainder;
- if (remainder > max_bytes_per_lli)
- target_len = max_bytes_per_lli;
+ target_len = min(bd.remainder, max_bytes_per_lli);
/*
- * Set bus lengths for incrementing busses
- * to number of bytes which fill to next memory
- * boundary
+ * Set bus lengths for incrementing buses to the
+ * number of bytes which fill to next memory boundary,
+ * limiting on the target length calculated above.
*/
if (cctl & PL080_CONTROL_SRC_INCR)
- txd->srcbus.fill_bytes =
- pl08x_pre_boundary(
- txd->srcbus.addr,
- remainder);
+ bd.srcbus.fill_bytes =
+ pl08x_pre_boundary(bd.srcbus.addr,
+ target_len);
else
- txd->srcbus.fill_bytes =
- max_bytes_per_lli;
+ bd.srcbus.fill_bytes = target_len;
if (cctl & PL080_CONTROL_DST_INCR)
- txd->dstbus.fill_bytes =
- pl08x_pre_boundary(
- txd->dstbus.addr,
- remainder);
+ bd.dstbus.fill_bytes =
+ pl08x_pre_boundary(bd.dstbus.addr,
+ target_len);
else
- txd->dstbus.fill_bytes =
- max_bytes_per_lli;
+ bd.dstbus.fill_bytes = target_len;
- /*
- * Find the nearest
- */
- lli_len = min(txd->srcbus.fill_bytes,
- txd->dstbus.fill_bytes);
+ /* Find the nearest */
+ lli_len = min(bd.srcbus.fill_bytes,
+ bd.dstbus.fill_bytes);
- BUG_ON(lli_len > remainder);
+ BUG_ON(lli_len > bd.remainder);
if (lli_len <= 0) {
dev_err(&pl08x->adev->dev,
- "%s lli_len is %d, <= 0\n",
+ "%s lli_len is %zu, <= 0\n",
__func__, lli_len);
return 0;
}
if (lli_len == target_len) {
/*
- * Can send what we wanted
- */
- /*
- * Maintain alignment
+ * Can send what we wanted.
+ * Maintain alignment
*/
lli_len = (lli_len/mbus->buswidth) *
mbus->buswidth;
@@ -825,17 +740,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
} else {
/*
* So now we know how many bytes to transfer
- * to get to the nearest boundary
- * The next lli will past the boundary
- * - however we may be working to a boundary
- * on the slave bus
- * We need to ensure the master stays aligned
+ * to get to the nearest boundary. The next
+ * LLI will be past the boundary. However, we
+ * may be working to a boundary on the slave
+ * bus. We need to ensure the master stays
+ * aligned, and that we are working in
+ * multiples of the bus widths.
*/
odd_bytes = lli_len % mbus->buswidth;
- /*
- * - and that we are working in multiples
- * of the bus widths
- */
lli_len -= odd_bytes;
}
@@ -855,41 +767,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (target_len != lli_len) {
dev_vdbg(&pl08x->adev->dev,
- "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+ "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
__func__, target_len, lli_len, txd->len);
}
cctl = pl08x_cctl_bits(cctl,
- txd->srcbus.buswidth,
- txd->dstbus.buswidth,
+ bd.srcbus.buswidth,
+ bd.dstbus.buswidth,
tsize);
dev_vdbg(&pl08x->adev->dev,
- "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
- __func__, lli_len, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
- num_llis, lli_len, cctl,
- &remainder);
+ "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+ __func__, lli_len, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++,
+ lli_len, cctl);
total_bytes += lli_len;
}
if (odd_bytes) {
/*
- * Creep past the boundary,
- * maintaining master alignment
+ * Creep past the boundary, maintaining
+ * master alignment
*/
int j;
for (j = 0; (j < mbus->buswidth)
- && (remainder); j++) {
+ && (bd.remainder); j++) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev,
- "%s align with boundardy, single byte (remain %08x)\n",
- __func__, remainder);
- num_llis =
- pl08x_fill_lli_for_desc(pl08x,
- txd, num_llis, 1,
- cctl, &remainder);
+ "%s align with boundary, single byte (remain 0x%08zx)\n",
+ __func__, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd,
+ num_llis++, 1, cctl);
total_bytes++;
}
}
@@ -898,25 +807,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/*
* Send any odd bytes
*/
- if (remainder < 0) {
- dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
- __func__, remainder);
- return 0;
- }
-
- while (remainder) {
+ while (bd.remainder) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev,
- "%s align with boundardy, single odd byte (remain %d)\n",
- __func__, remainder);
- num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
- 1, cctl, &remainder);
+ "%s align with boundary, single odd byte (remain %zu)\n",
+ __func__, bd.remainder);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
total_bytes++;
}
}
if (total_bytes != txd->len) {
dev_err(&pl08x->adev->dev,
- "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
__func__, total_bytes, txd->len);
return 0;
}
@@ -927,41 +829,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
__func__, (u32) MAX_NUM_TSFR_LLIS);
return 0;
}
- /*
- * Decide whether this is a loop or a terminated transfer
- */
- llis_va = txd->llis_va;
- llis_bus = (struct lli *) txd->llis_bus;
-
- if (cd->circular_buffer) {
- /*
- * Loop the circular buffer so that the next element
- * points back to the beginning of the LLI.
- */
- llis_va[num_llis - 1].next =
- (dma_addr_t)((unsigned int)&(llis_bus[0]));
- } else {
- /*
- * On non-circular buffers, the final LLI terminates
- * the LLI.
- */
- llis_va[num_llis - 1].next = 0;
- /*
- * The final LLI element shall also fire an interrupt
- */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
- }
-
- /* Now store the channel register values */
- txd->csrc = llis_va[0].src;
- txd->cdst = llis_va[0].dst;
- if (num_llis > 1)
- txd->clli = llis_va[0].next;
- else
- txd->clli = 0;
- txd->cctl = llis_va[0].cctl;
- /* ccfg will be set at physical channel allocation time */
+ llis_va = txd->llis_va;
+ /* The final LLI terminates the LLI. */
+ llis_va[num_llis - 1].lli = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
#ifdef VERBOSE_DEBUG
{
@@ -969,13 +842,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+ "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
i,
&llis_va[i],
llis_va[i].src,
llis_va[i].dst,
llis_va[i].cctl,
- llis_va[i].next
+ llis_va[i].lli
);
}
}
@@ -988,14 +861,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
- if (!txd)
- dev_err(&pl08x->adev->dev,
- "%s no descriptor to free\n",
- __func__);
-
/* Free the LLI */
- dma_pool_free(pl08x->pool, txd->llis_va,
- txd->llis_bus);
+ dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pl08x->pool_ctr--;
@@ -1008,13 +875,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txdi = NULL;
struct pl08x_txd *next;
- if (!list_empty(&plchan->desc_list)) {
+ if (!list_empty(&plchan->pend_list)) {
list_for_each_entry_safe(txdi,
- next, &plchan->desc_list, node) {
+ next, &plchan->pend_list, node) {
list_del(&txdi->node);
pl08x_free_txd(pl08x, txdi);
}
-
}
}
@@ -1069,6 +935,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
+
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_TO_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_FROM_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +948,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
ch->signal,
plchan->name);
+ plchan->phychan_hold++;
plchan->phychan = ch;
return 0;
}
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ plchan->phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ plchan->phychan = NULL;
+}
+
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+ struct pl08x_txd *txd = to_pl08x_txd(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&plchan->lock, flags);
+
+ plchan->chan.cookie += 1;
+ if (plchan->chan.cookie < 0)
+ plchan->chan.cookie = 1;
+ tx->cookie = plchan->chan.cookie;
+
+ /* Put this onto the pending list */
+ list_add_tail(&txd->node, &plchan->pend_list);
+
+ /*
+ * If there was no physical channel available for this memcpy,
+ * stack the request up and indicate that the channel is waiting
+ * for a free physical channel.
+ */
+ if (!plchan->slave && !plchan->phychan) {
+ /* Do this memcpy whenever there is a channel ready */
+ plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting = txd;
+ } else {
+ plchan->phychan_hold--;
+ }
- atomic_inc(&plchan->last_issued);
- tx->cookie = atomic_read(&plchan->last_issued);
- /* This unlock follows the lock in the prep() function */
- spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ spin_unlock_irqrestore(&plchan->lock, flags);
return tx->cookie;
}
@@ -1102,10 +1009,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
}
/*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may give problems.
+ * If slaves are relying on interrupts to signal completion this function
+ * must not be called with interrupts disabled.
*/
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1024,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
u32 bytesleft = 0;
- last_used = atomic_read(&plchan->last_issued);
+ last_used = plchan->chan.cookie;
last_complete = plchan->lc;
ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1034,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
}
/*
- * schedule(); could be inserted here
- */
-
- /*
* This cookie not complete yet
*/
- last_used = atomic_read(&plchan->last_issued);
+ last_used = plchan->chan.cookie;
last_complete = plchan->lc;
/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1101,35 @@ static const struct burst_table burst_sizes[] = {
},
};
-static void dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
+ dma_addr_t addr;
u32 maxburst;
u32 cctl = 0;
- /* Mask out all except src and dst channel */
- u32 ccfg = cd->ccfg & 0x000003DEU;
- int i = 0;
+ int i;
+
+ if (!plchan->slave)
+ return -EINVAL;
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- plchan->runtime_addr = config->dst_addr;
- cctl |= PL080_CONTROL_SRC_INCR;
- ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- plchan->runtime_addr = config->src_addr;
- cctl |= PL080_CONTROL_DST_INCR;
- ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien transfer direction\n");
- return;
+ return -EINVAL;
}
switch (addr_width) {
@@ -1248,42 +1148,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
default:
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
- return;
+ return -EINVAL;
}
/*
* Now decide on a maxburst:
- * If this channel will only request single transfers, set
- * this down to ONE element.
+ * If this channel will only request single transfers, set this
+ * down to ONE element. Also select one element if no maxburst
+ * is specified.
*/
- if (plchan->cd->single) {
+ if (plchan->cd->single || maxburst == 0) {
cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
} else {
- while (i < ARRAY_SIZE(burst_sizes)) {
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
if (burst_sizes[i].burstwords <= maxburst)
break;
- i++;
- }
cctl |= burst_sizes[i].reg;
}
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- cctl &= ~PL080_CONTROL_PROT_MASK;
- cctl |= PL080_CONTROL_PROT_SYS;
+ plchan->runtime_addr = addr;
/* Modify the default channel data to fit PrimeCell request */
cd->cctl = cctl;
- cd->ccfg = ccfg;
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
- "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+ "maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
addr_width,
maxburst,
- cctl, ccfg);
+ cctl);
+
+ return 0;
}
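A sketch of the burst lookup in dma_set_runtime_config() above (not part of the patch), assuming burst_sizes[] is ordered largest-first as in this driver: the loop stops at the first burst that does not exceed the requested maxburst, and maxburst == 0 or cd->single never reach this path.

/* Illustrative only - a reduced, assumed subset of burst_sizes[]. */
static unsigned int sketch_pick_burst_index(u32 maxburst)
{
	static const u32 burstwords[] = { 8, 4, 1 };	/* largest first */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(burstwords) - 1; i++)
		if (burstwords[i] <= maxburst)
			break;

	/* maxburst == 7 stops at the 4-word entry; the 1-word entry is the fallback */
	return i;
}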
/*
@@ -1293,35 +1191,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
static void pl08x_issue_pending(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- struct pl08x_driver_data *pl08x = plchan->host;
unsigned long flags;
spin_lock_irqsave(&plchan->lock, flags);
- /* Something is already active */
- if (plchan->at) {
- spin_unlock_irqrestore(&plchan->lock, flags);
- return;
- }
-
- /* Didn't get a physical channel so waiting for it ... */
- if (plchan->state == PL08X_CHAN_WAITING)
+ /* Something is already active, or we're waiting for a channel... */
+ if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
return;
+ }
/* Take the first element in the queue and execute it */
- if (!list_empty(&plchan->desc_list)) {
+ if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *next;
- next = list_first_entry(&plchan->desc_list,
+ next = list_first_entry(&plchan->pend_list,
struct pl08x_txd,
node);
list_del(&next->node);
- plchan->at = next;
plchan->state = PL08X_CHAN_RUNNING;
- /* Configure the physical channel for the active txd */
- pl08x_config_phychan_for_txd(plchan);
- pl08x_set_cregs(pl08x, plchan->phychan);
- pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ pl08x_start_txd(plchan, next);
}
spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1219,17 @@ static void pl08x_issue_pending(struct dma_chan *chan)
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
struct pl08x_txd *txd)
{
- int num_llis;
struct pl08x_driver_data *pl08x = plchan->host;
- int ret;
+ unsigned long flags;
+ int num_llis, ret;
num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-
- if (!num_llis)
+ if (!num_llis) {
+ kfree(txd);
return -EINVAL;
+ }
- spin_lock_irqsave(&plchan->lock, plchan->lockflags);
-
- /*
- * If this device is not using a circular buffer then
- * queue this new descriptor for transfer.
- * The descriptor for a circular buffer continues
- * to be used until the channel is freed.
- */
- if (txd->cd->circular_buffer)
- dev_err(&pl08x->adev->dev,
- "%s attempting to queue a circular buffer\n",
- __func__);
- else
- list_add_tail(&txd->node,
- &plchan->desc_list);
+ spin_lock_irqsave(&plchan->lock, flags);
/*
* See if we already have a physical channel allocated,
@@ -1362,45 +1238,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
ret = prep_phy_channel(plchan, txd);
if (ret) {
/*
- * No physical channel available, we will
- * stack up the memcpy channels until there is a channel
- * available to handle it whereas slave transfers may
- * have been denied due to platform channel muxing restrictions
- * and since there is no guarantee that this will ever be
- * resolved, and since the signal must be aquired AFTER
- * aquiring the physical channel, we will let them be NACK:ed
- * with -EBUSY here. The drivers can alway retry the prep()
- * call if they are eager on doing this using DMA.
+ * No physical channel was available.
+ *
+ * memcpy transfers can be sorted out at submission time.
+ *
+ * Slave transfers may have been denied due to platform
+ * channel muxing restrictions. Since there is no guarantee
+ * that this will ever be resolved, and the signal must be
+ * acquired AFTER acquiring the physical channel, we will let
+ * them be NACK:ed with -EBUSY here. The drivers can retry
+ * the prep() call if they are eager on doing this using DMA.
*/
if (plchan->slave) {
pl08x_free_txd_list(pl08x, plchan);
- spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
return -EBUSY;
}
- /* Do this memcpy whenever there is a channel ready */
- plchan->state = PL08X_CHAN_WAITING;
- plchan->waiting = txd;
} else
/*
- * Else we're all set, paused and ready to roll,
- * status will switch to PL08X_CHAN_RUNNING when
- * we call issue_pending(). If there is something
- * running on the channel already we don't change
- * its state.
+ * Else we're all set, paused and ready to roll, status
+ * will switch to PL08X_CHAN_RUNNING when we call
+ * issue_pending(). If there is something running on the
+ * channel already we don't change its state.
*/
if (plchan->state == PL08X_CHAN_IDLE)
plchan->state = PL08X_CHAN_PAUSED;
- /*
- * Notice that we leave plchan->lock locked on purpose:
- * it will be unlocked in the subsequent tx_submit()
- * call. This is a consequence of the current API.
- */
+ spin_unlock_irqrestore(&plchan->lock, flags);
return 0;
}
/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
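A worked example of pl08x_select_bus() above (illustrative, not from the patch): when both masters are allowed on both sides, the destination is pushed onto AHB2 while the source stays on AHB1, keeping the two ends on separate ports.

/* Illustrative only. */
static u32 sketch_select_bus_example(struct pl08x_driver_data *pl08x)
{
	u8 both = PL08X_AHB1 | PL08X_AHB2;

	/* Yields PL080_CONTROL_DST_AHB2 only: src -> AHB1, dst -> AHB2. */
	return pl08x_select_bus(pl08x, both, both);
}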
+
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
+ unsigned long flags)
+{
+ struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+
+ if (txd) {
+ dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
+ txd->tx.flags = flags;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ INIT_LIST_HEAD(&txd->node);
+
+ /* Always enable error and terminal interrupts */
+ txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+ PL080_CONFIG_TC_IRQ_MASK;
+ }
+ return txd;
+}
+
+/*
* Initialize a descriptor to be used by memcpy submit
*/
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1317,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_txd *txd;
int ret;
- txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ txd = pl08x_get_txd(plchan, flags);
if (!txd) {
dev_err(&pl08x->adev->dev,
"%s no memory for descriptor\n", __func__);
return NULL;
}
- dma_async_tx_descriptor_init(&txd->tx, chan);
txd->direction = DMA_NONE;
- txd->srcbus.addr = src;
- txd->dstbus.addr = dest;
+ txd->src_addr = src;
+ txd->dst_addr = dest;
+ txd->len = len;
/* Set platform data for m2m */
- txd->cd = &pl08x->pd->memcpy_channel;
+ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl = pl08x->pd->memcpy_channel.cctl &
+ ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+
/* Both to be incremented or the code will break */
- txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
- txd->tx.tx_submit = pl08x_tx_submit;
- txd->tx.callback = NULL;
- txd->tx.callback_param = NULL;
- txd->len = len;
+ txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+
+ if (pl08x->vd->dualmaster)
+ txd->cctl |= pl08x_select_bus(pl08x,
+ pl08x->mem_buses, pl08x->mem_buses);
- INIT_LIST_HEAD(&txd->node);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
- /*
- * NB: the channel lock is held at this point so tx_submit()
- * must be called in direct succession.
- */
return &txd->tx;
}
-struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
@@ -1453,6 +1356,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ u8 src_buses, dst_buses;
int ret;
/*
@@ -1467,14 +1371,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
__func__, sgl->length, plchan->name);
- txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ txd = pl08x_get_txd(plchan, flags);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
return NULL;
}
- dma_async_tx_descriptor_init(&txd->tx, chan);
-
if (direction != plchan->runtime_direction)
dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
"the direction configured for the PrimeCell\n",
@@ -1486,37 +1388,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
* channel target address dynamically at runtime.
*/
txd->direction = direction;
+ txd->len = sgl->length;
+
+ txd->cctl = plchan->cd->cctl &
+ ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ txd->cctl |= PL080_CONTROL_PROT_SYS;
+
if (direction == DMA_TO_DEVICE) {
- txd->srcbus.addr = sgl->dma_address;
+ txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl |= PL080_CONTROL_SRC_INCR;
+ txd->src_addr = sgl->dma_address;
if (plchan->runtime_addr)
- txd->dstbus.addr = plchan->runtime_addr;
+ txd->dst_addr = plchan->runtime_addr;
else
- txd->dstbus.addr = plchan->cd->addr;
+ txd->dst_addr = plchan->cd->addr;
+ src_buses = pl08x->mem_buses;
+ dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_FROM_DEVICE) {
+ txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl |= PL080_CONTROL_DST_INCR;
if (plchan->runtime_addr)
- txd->srcbus.addr = plchan->runtime_addr;
+ txd->src_addr = plchan->runtime_addr;
else
- txd->srcbus.addr = plchan->cd->addr;
- txd->dstbus.addr = sgl->dma_address;
+ txd->src_addr = plchan->cd->addr;
+ txd->dst_addr = sgl->dma_address;
+ src_buses = plchan->cd->periph_buses;
+ dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cd = plchan->cd;
- txd->tx.tx_submit = pl08x_tx_submit;
- txd->tx.callback = NULL;
- txd->tx.callback_param = NULL;
- txd->len = sgl->length;
- INIT_LIST_HEAD(&txd->node);
+
+ txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
- /*
- * NB: the channel lock is held at this point so tx_submit()
- * must be called in direct succession.
- */
return &txd->tx;
}
@@ -1531,10 +1443,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* Controls applicable to inactive channels */
if (cmd == DMA_SLAVE_CONFIG) {
- dma_set_runtime_config(chan,
- (struct dma_slave_config *)
- arg);
- return 0;
+ return dma_set_runtime_config(chan,
+ (struct dma_slave_config *)arg);
}
/*
@@ -1552,22 +1462,14 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
plchan->state = PL08X_CHAN_IDLE;
if (plchan->phychan) {
- pl08x_stop_phy_chan(plchan->phychan);
+ pl08x_terminate_phy_chan(pl08x, plchan->phychan);
/*
* Mark physical channel as free and free any slave
* signal
*/
- if ((plchan->phychan->signal >= 0) &&
- pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- plchan->phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, plchan->phychan);
- plchan->phychan = NULL;
+ release_phy_channel(plchan);
}
- /* Stop any pending tasklet */
- tasklet_disable(&plchan->tasklet);
/* Dequeue jobs and free LLIs */
if (plchan->at) {
pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1511,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
/*
* Just check that the device is there and active
- * TODO: turn this bit on/off depending on the number of
- * physical channels actually used, if it is zero... well
- * shut it off. That will save some power. Cut the clock
- * at the same time.
+ * TODO: turn this bit on/off depending on the number of physical channels
+ * actually used, if it is zero... well shut it off. That will save some
+ * power. Cut the clock at the same time.
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
@@ -1620,78 +1521,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
val = readl(pl08x->base + PL080_CONFIG);
val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
- /* We implictly clear bit 1 and that means little-endian mode */
+ /* We implicitly clear bit 1 and that means little-endian mode */
val |= PL080_CONFIG_ENABLE;
writel(val, pl08x->base + PL080_CONFIG);
}
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+ struct device *dev = txd->tx.chan->device->dev;
+
+ if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, txd->src_addr, txd->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, txd->src_addr, txd->len,
+ DMA_TO_DEVICE);
+ }
+ if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, txd->dst_addr, txd->len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, txd->dst_addr, txd->len,
+ DMA_FROM_DEVICE);
+ }
+}
+
static void pl08x_tasklet(unsigned long data)
{
struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
- struct pl08x_phy_chan *phychan = plchan->phychan;
struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ unsigned long flags;
- if (!plchan)
- BUG();
-
- spin_lock(&plchan->lock);
-
- if (plchan->at) {
- dma_async_tx_callback callback =
- plchan->at->tx.callback;
- void *callback_param =
- plchan->at->tx.callback_param;
-
- /*
- * Update last completed
- */
- plchan->lc =
- (plchan->at->tx.cookie);
-
- /*
- * Callback to signal completion
- */
- if (callback)
- callback(callback_param);
+ spin_lock_irqsave(&plchan->lock, flags);
- /*
- * Device callbacks should NOT clear
- * the current transaction on the channel
- * Linus: sometimes they should?
- */
- if (!plchan->at)
- BUG();
+ txd = plchan->at;
+ plchan->at = NULL;
- /*
- * Free the descriptor if it's not for a device
- * using a circular buffer
- */
- if (!plchan->at->cd->circular_buffer) {
- pl08x_free_txd(pl08x, plchan->at);
- plchan->at = NULL;
- }
- /*
- * else descriptor for circular
- * buffers only freed when
- * client has disabled dma
- */
+ if (txd) {
+ /* Update last completed */
+ plchan->lc = txd->tx.cookie;
}
- /*
- * If a new descriptor is queued, set it up
- * plchan->at is NULL here
- */
- if (!list_empty(&plchan->desc_list)) {
+
+ /* If a new descriptor is queued, set it up; plchan->at is NULL here */
+ if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *next;
- next = list_first_entry(&plchan->desc_list,
+ next = list_first_entry(&plchan->pend_list,
struct pl08x_txd,
node);
list_del(&next->node);
- plchan->at = next;
- /* Configure the physical channel for the next txd */
- pl08x_config_phychan_for_txd(plchan);
- pl08x_set_cregs(pl08x, plchan->phychan);
- pl08x_enable_phy_chan(pl08x, plchan->phychan);
+
+ pl08x_start_txd(plchan, next);
+ } else if (plchan->phychan_hold) {
+ /*
+ * This channel is still in use - we have a new txd being
+ * prepared and will soon be queued. Don't give up the
+ * physical channel.
+ */
} else {
struct pl08x_dma_chan *waiting = NULL;
@@ -1699,20 +1588,14 @@ static void pl08x_tasklet(unsigned long data)
* No more jobs, so free up the physical channel
* Free any allocated signal on slave transfers too
*/
- if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, phychan);
- plchan->phychan = NULL;
+ release_phy_channel(plchan);
plchan->state = PL08X_CHAN_IDLE;
/*
- * And NOW before anyone else can grab that free:d
- * up physical channel, see if there is some memcpy
- * pending that seriously needs to start because of
- * being stacked up while we were choking the
- * physical channels with data.
+ * And NOW before anyone else can grab that freed-up
+ * physical channel, see if there is some memcpy pending
+ * that seriously needs to start because of being stacked
+ * up while we were choking the physical channels with data.
*/
list_for_each_entry(waiting, &pl08x->memcpy.channels,
chan.device_node) {
@@ -1724,6 +1607,7 @@ static void pl08x_tasklet(unsigned long data)
ret = prep_phy_channel(waiting,
waiting->waiting);
BUG_ON(ret);
+ waiting->phychan_hold--;
waiting->state = PL08X_CHAN_RUNNING;
waiting->waiting = NULL;
pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1616,25 @@ static void pl08x_tasklet(unsigned long data)
}
}
- spin_unlock(&plchan->lock);
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ if (txd) {
+ dma_async_tx_callback callback = txd->tx.callback;
+ void *callback_param = txd->tx.callback_param;
+
+ /* Don't try to unmap buffers on slave channels */
+ if (!plchan->slave)
+ pl08x_unmap_buffers(txd);
+
+ /* Free the descriptor */
+ spin_lock_irqsave(&plchan->lock, flags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ /* Callback to signal completion */
+ if (callback)
+ callback(callback_param);
+ }
}
static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1646,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
val = readl(pl08x->base + PL080_ERR_STATUS);
if (val) {
- /*
- * An error interrupt (on one or more channels)
- */
+ /* An error interrupt (on one or more channels) */
dev_err(&pl08x->adev->dev,
"%s error interrupt, register value 0x%08x\n",
__func__, val);
@@ -1770,9 +1670,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
mask |= (1 << i);
}
}
- /*
- * Clear only the terminal interrupts on channels we processed
- */
+ /* Clear only the terminal interrupts on channels we processed */
writel(mask, pl08x->base + PL080_TC_CLEAR);
return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1689,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
int i;
INIT_LIST_HEAD(&dmadev->channels);
+
/*
* Register as many memcpy channels as we have physical channels,
* we won't always be able to use all but the code will have
@@ -1819,16 +1718,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
return -ENOMEM;
}
}
+ if (chan->cd->circular_buffer) {
+ dev_err(&pl08x->adev->dev,
+ "channel %s: circular buffers not supported\n",
+ chan->name);
+ kfree(chan);
+ continue;
+ }
dev_info(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
chan->chan.device = dmadev;
- atomic_set(&chan->last_issued, 0);
- chan->lc = atomic_read(&chan->last_issued);
+ chan->chan.cookie = 0;
+ chan->lc = 0;
spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->desc_list);
+ INIT_LIST_HEAD(&chan->pend_list);
tasklet_init(&chan->tasklet, pl08x_tasklet,
(unsigned long) chan);
@@ -1898,7 +1804,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
- seq_printf(s, "%s\t\t\%s\n", chan->name,
+ seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1906,7 +1812,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
- seq_printf(s, "%s\t\t\%s\n", chan->name,
+ seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1942,7 +1848,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
- struct vendor_data *vd = id->data;
+ const struct vendor_data *vd = id->data;
int ret = 0;
int i;
@@ -1990,6 +1896,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
pl08x->adev = adev;
pl08x->vd = vd;
+ /* By default, AHB1 only. If dualmaster, from platform */
+ pl08x->lli_buses = PL08X_AHB1;
+ pl08x->mem_buses = PL08X_AHB1;
+ if (pl08x->vd->dualmaster) {
+ pl08x->lli_buses = pl08x->pd->lli_buses;
+ pl08x->mem_buses = pl08x->pd->mem_buses;
+ }
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1923,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
/* Turn on the PL08x */
pl08x_ensure_on(pl08x);
- /*
- * Attach the interrupt handler
- */
+ /* Attach the interrupt handler */
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
- vd->name, pl08x);
+ DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);
@@ -2087,8 +1999,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
- vd->name, adev->res.start);
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), amba_rev(adev),
+ (unsigned long long)adev->res.start, adev->irq[0]);
return 0;
out_no_slave_reg:
@@ -2115,13 +2028,11 @@ out_no_pl08x:
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
- .name = "PL080",
.channels = 8,
.dualmaster = true,
};
static struct vendor_data vendor_pl081 = {
- .name = "PL081",
.channels = 2,
.dualmaster = false,
};
@@ -2160,7 +2071,7 @@ static int __init pl08x_init(void)
retval = amba_driver_register(&pl08x_amba_driver);
if (retval)
printk(KERN_WARNING DRIVER_NAME
- "failed to register as an amba device (%d)\n",
+ "failed to register as an AMBA device (%d)\n",
retval);
return retval;
}
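
The reworked pl08x_tasklet above detaches the completed descriptor from the channel while holding the lock and only runs the client callback once the lock has been dropped. A minimal sketch of that pattern, with hypothetical my_chan/my_txd types standing in for the driver's structures (not the driver's own code):

	#include <linux/spinlock.h>

	struct my_txd {
		void (*callback)(void *param);
		void *param;
	};

	struct my_chan {
		spinlock_t lock;
		struct my_txd *at;	/* descriptor currently in flight */
	};

	/* Detach the finished descriptor under the lock, call back without it */
	static void complete_current_txd(struct my_chan *c)
	{
		struct my_txd *txd;
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		txd = c->at;
		c->at = NULL;
		spin_unlock_irqrestore(&c->lock, flags);

		if (txd && txd->callback)
			txd->callback(txd->param);
	}
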
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81cff53..3d7d705f026f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
- /* unmap dma addresses */
+ /* unmap dma addresses (not on slave channels) */
if (!atchan->chan_common.private) {
struct device *parent = chan2parent(&atchan->chan_common);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.ctrlb = ctrlb;
desc->txd.cookie = 0;
- async_tx_ack(&desc->txd);
if (!first) {
first = desc;
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
- desc->txd.flags = flags; /* client is in control of this ack */
+ first->txd.flags = flags; /* client is in control of this ack */
return &first->txd;
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc)
goto err_desc_get;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc)
goto err_desc_get;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
mem_width = 2;
if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first->txd.cookie = -EBUSY;
first->len = total_len;
- /* last link descriptor of list is responsible of flags */
- prev->txd.flags = flags; /* client is in control of this ack */
+ /* first link descriptor of list is responsible for flags */
+ first->txd.flags = flags; /* client is in control of this ack */
return &first->txd;
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
- spin_lock_bh(&atchan->lock);
atc_advance_work(atchan);
- spin_unlock_bh(&atchan->lock);
}
+ spin_unlock_bh(&atchan->lock);
}
/**
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void)
{
return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
-module_init(at_dma_init);
+subsys_initcall(at_dma_init);
static void __exit at_dma_exit(void)
{
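
atc_prep_slave_sg() above now reads sg_dma_address() instead of sg_phys(), i.e. the address produced by dma_map_sg() rather than the raw physical address. A minimal sketch of walking a mapped scatterlist that way; program_one_chunk() is a hypothetical placeholder for the descriptor setup:

	#include <linux/scatterlist.h>
	#include <linux/dma-mapping.h>

	static void program_sg(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			dma_addr_t mem = sg_dma_address(sg);	/* bus address from dma_map_sg() */
			unsigned int len = sg_dma_len(sg);

			/* program_one_chunk(mem, len); -- hypothetical helper */
			(void)mem;
			(void)len;
		}
	}
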
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d21692..4de947a450fc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
/*
* Freescale MPC85xx, MPC83xx DMA Engine support
*
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
*
* Author:
* Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+
dev_set_drvdata(&op->dev, fdev);
/*
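
The probe change above widens the device's DMA mask to 36 bits. A defensive variant, shown here only as a sketch and not as the driver's code, would also check the return value, since dma_set_mask() can fail on platforms that cannot address that range:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int set_36bit_mask(struct device *dev)
	{
		int err = dma_set_mask(dev, DMA_BIT_MASK(36));

		if (err)
			dev_warn(dev, "36-bit DMA addressing not available: %d\n", err);
		return err;
	}
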
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438142bb..e18eaabe92b9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
struct imxdma_engine {
struct device *dev;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_device;
struct imxdma_channel channel[MAX_DMA_CHANNELS];
};
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
else
dmamode = DMA_MODE_WRITE;
+ switch (imxdmac->word_size) {
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ if (sgl->length & 3 || sgl->dma_address & 3)
+ return NULL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ if (sgl->length & 1 || sgl->dma_address & 1)
+ return NULL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ break;
+ default:
+ return NULL;
+ }
+
ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
dma_length, imxdmac->per_address, dmamode);
if (ret)
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&imxdma->dma_device.channels);
+ dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
/* Initialize channel parameters */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdmac->imxdma = imxdma;
spin_lock_init(&imxdmac->lock);
- dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
- dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
imxdmac->chan.device = &imxdma->dma_device;
- imxdmac->chan.chan_id = i;
imxdmac->channel = i;
/* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxdma);
+ imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+ dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
ret = dma_async_device_register(&imxdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
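
The switch added to imxdma_prep_slave_sg() above rejects segments whose address or length is not aligned to the configured bus width. The rule it encodes can be written as a single mask test; the helper below is hypothetical and only illustrates the check (word_size in bytes):

	#include <linux/types.h>

	static bool imx_sg_aligned(dma_addr_t addr, size_t len, unsigned int word_size)
	{
		/* e.g. a 4-byte width needs both address and length to be multiples of 4 */
		return ((addr | len) & (word_size - 1)) == 0;
	}
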
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19b..b6d1455fa936 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
* struct sdma_channel - housekeeping for a SDMA channel
*
* @sdma pointer to the SDMA engine for this channel
- * @channel the channel number, matches dmaengine chan_id
+ * @channel the channel number, matches dmaengine chan_id + 1
* @direction transfer type. Needed for setting SDMA script
* @peripheral_type Peripheral type. Needed for setting SDMA script
* @event_id0 aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
struct sdma_engine {
struct device *dev;
+ struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
if (bd->mode.status & BD_RROR)
sdmac->status = DMA_ERROR;
else
- sdmac->status = DMA_SUCCESS;
+ sdmac->status = DMA_IN_PROGRESS;
bd->mode.status |= BD_DONE;
sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
}
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
{
- dma_cookie_t cookie = sdma->chan.cookie;
+ dma_cookie_t cookie = sdmac->chan.cookie;
if (++cookie < 0)
cookie = 1;
- sdma->chan.cookie = cookie;
- sdma->desc.cookie = cookie;
+ sdmac->chan.cookie = cookie;
+ sdmac->desc.cookie = cookie;
return cookie;
}
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = sdma_assign_cookie(sdmac);
- sdma_enable_channel(sdma, tx->chan->chan_id);
+ sdma_enable_channel(sdma, sdmac->channel);
spin_unlock_irq(&sdmac->lock);
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
struct imx_dma_data *data = chan->private;
int prio, ret;
- /* No need to execute this for internal channel 0 */
- if (chan->chan_id == 0)
- return 0;
-
if (!data)
return -EINVAL;
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int ret, i, count;
- int channel = chan->chan_id;
+ int channel = sdmac->channel;
struct scatterlist *sg;
if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
ret = -EINVAL;
goto err_out;
}
- if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+ switch (sdmac->word_size) {
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
bd->mode.command = 0;
- else
- bd->mode.command = sdmac->word_size;
+ if (count & 3 || sg->dma_address & 3)
+ return NULL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ bd->mode.command = 2;
+ if (count & 1 || sg->dma_address & 1)
+ return NULL;
+ break;
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ bd->mode.command = 1;
+ break;
+ default:
+ return NULL;
+ }
param = BD_DONE | BD_EXTD | BD_CONT;
- if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ if (i + 1 == sg_len) {
param |= BD_INTR;
- if (i + 1 == sg_len)
- param |= BD_WRAP;
+ param |= BD_LAST;
+ param &= ~BD_CONT;
}
- if (i + 1 == sg_len)
- param |= BD_INTR;
-
dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
i, count, sg->dma_address,
param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
return &sdmac->desc;
err_out:
+ sdmac->status = DMA_ERROR;
return NULL;
}
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int num_periods = buf_len / period_len;
- int channel = chan->chan_id;
+ int channel = sdmac->channel;
int ret, i = 0, buf = 0;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
dma_cookie_t last_used;
- enum dma_status ret;
last_used = chan->cookie;
- ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
- return ret;
+ return sdmac->status;
}
static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
- sdma->script_addrs->ram_code_start_addr);
+ addr->ram_code_start_addr);
clk_disable(sdma->clk);
sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
struct resource *iores;
struct sdma_platform_data *pdata = pdev->dev.platform_data;
int i;
- dma_cap_mask_t mask;
struct sdma_engine *sdma;
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma->version = pdata->sdma_version;
+ dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
sdmac->sdma = sdma;
spin_lock_init(&sdmac->lock);
- dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
- dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
sdmac->chan.device = &sdma->dma_device;
- sdmac->chan.chan_id = i;
sdmac->channel = i;
- /* Add the channel to the DMAC list */
- list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+ /*
+ * Add the channel to the DMAC list. Do not add channel 0 though
+ * because we need it internally in the SDMA driver. This also means
+ * that channel 0 in dmaengine counting matches sdma channel 1.
+ */
+ if (i)
+ list_add_tail(&sdmac->chan.device_node,
+ &sdma->dma_device.channels);
}
ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_control = sdma_control;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
+ sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+ dma_set_max_seg_size(sdma->dma_device.dev, 65535);
ret = dma_async_device_register(&sdma->dma_device);
if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_init;
}
- /* request channel 0. This is an internal control channel
- * to the SDMA engine and not available to clients.
- */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_request_channel(mask, NULL, NULL);
-
dev_info(sdma->dev, "initialized\n");
return 0;
@@ -1348,7 +1354,7 @@ err_clk:
err_request_region:
err_irq:
kfree(sdma);
- return 0;
+ return ret;
}
static int __exit sdma_remove(struct platform_device *pdev)
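
The switch added to sdma_prep_slave_sg() above encodes the configured bus width into the buffer descriptor's command field: 0 selects 32-bit, 2 selects 16-bit and 1 selects 8-bit transfers. A sketch of that mapping as a stand-alone helper (hypothetical, not part of the driver):

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	static int sdma_buswidth_to_command(enum dma_slave_buswidth width)
	{
		switch (width) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			return 0;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			return 2;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			return 1;
		default:
			return -EINVAL;
		}
	}
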
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3109bd94bc4f..798f46a4590d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_LO*/
ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1;
- ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
- ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
+ /*
+ * Here we need some translation from "enum dma_slave_buswidth"
+ * to the standard intel_mid_dmac format used by our DMA
+ * controller:
+ * 1 Byte 0b000
+ * 2 Bytes 0b001
+ * 4 Bytes 0b010
+ */
+ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+ ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0;
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
BUG_ON(!mids);
if (!midc->dma->pimr_mask) {
- pr_debug("MDMA: SG list is not supported by this controller\n");
- return NULL;
+ /* We can still handle sg list with only one item */
+ if (sg_len == 1) {
+ txd = intel_mid_dma_prep_memcpy(chan,
+ mids->dma_slave.dst_addr,
+ mids->dma_slave.src_addr,
+ sgl->length,
+ flags);
+ return txd;
+ } else {
+ pr_warn("MDMA: SG list is not supported by this controller\n");
+ return NULL;
+ }
}
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
}
+
desc = to_intel_mid_dma_desc(txd);
desc->dirn = direction;
ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
/*DMA Interrupt*/
pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
- if (!mid) {
- pr_err("ERR_MDMA:null pointer mid\n");
- return -EINVAL;
- }
-
pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
tfr_status &= mid->intr_mask;
if (tfr_status) {
@@ -1060,8 +1075,8 @@ static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
* mid_setup_dma - Setup the DMA controller
* @pdev: Controller PCI device structure
*
- * Initilize the DMA controller, channels, registers with DMA engine,
- * ISR. Initilize DMA controller channels.
+ * Initialize the DMA controller, channels, registers with DMA engine,
+ * ISR. Initialize DMA controller channels.
*/
static int mid_setup_dma(struct pci_dev *pdev)
{
@@ -1217,7 +1232,7 @@ static void middma_shutdown(struct pci_dev *pdev)
* @pdev: Controller PCI device structure
* @id: pci device id structure
*
- * Initilize the PCI device, map BARs, query driver data.
+ * Initialize the PCI device, map BARs, query driver data.
* Call setup_dma to complete controller and channel initialization
*/
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
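
The translation above relies on the enum bus widths (1, 2 and 4 bytes) dividing by two to yield the controller's 0b000/0b001/0b010 encoding. An explicit mapping, written as a hypothetical helper purely for illustration, shows the same relationship and rejects widths the hardware does not support:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	static int mid_buswidth_to_tr_width(enum dma_slave_buswidth width)
	{
		switch (width) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			return 0;	/* 0b000 */
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			return 1;	/* 0b001 */
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			return 2;	/* 0b010 */
		default:
			return -EINVAL;
		}
	}
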
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452923b8..c6b01f535b29 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@ out:
return err;
}
-#ifdef CONFIG_MD_RAID6_PQ
+#ifdef CONFIG_RAID6_PQ
static int __devinit
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
- #ifdef CONFIG_MD_RAID6_PQ
+ #ifdef CONFIG_RAID6_PQ
ret = iop_adma_pq_zero_sum_self_test(adev);
dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
#else
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9773d6..c1a125e7d1df 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
- /*
- * Problem (observed with channel DMAIC_7): after enabling the channel
- * and initialising buffers, there comes an interrupt with current still
- * pointing at buffer 0, whereas it should use buffer 0 first and only
- * generate an interrupt when it is done, then current should already
- * point to buffer 1. This spurious interrupt also comes on channel
- * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
- * first interrupt, there comes the second with current correctly
- * pointing to buffer 1 this time. But sometimes this second interrupt
- * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
- * the channel seems to prevent the channel from hanging, but it doesn't
- * prevent the spurious interrupt. This might also be unsafe. Think
- * about the IDMAC controller trying to switch to a buffer, when we
- * clear the ready bit, and re-enable it a moment later.
- */
- reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
- idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
- idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
- reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
- idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
- idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
- if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
- ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
- !list_is_last(ichan->queue.next, &ichan->queue))) {
- int i = 100;
-
- /* This doesn't help. See comment in ipu_disable_channel() */
- while (--i) {
- curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
- if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
- break;
- cpu_relax();
- }
-
- if (!i) {
- spin_unlock(&ichan->lock);
- dev_dbg(dev,
- "IRQ on active buffer on channel %x, active "
- "%d, ready %x, %x, current %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1, curbuf);
- return IRQ_NONE;
- } else
- dev_dbg(dev,
- "Buffer deactivated on channel %x, active "
- "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
- ichan->active_buffer, ready0, ready1, curbuf, i);
- }
-
if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
(!ichan->active_buffer && (ready0 >> chan_id) & 1)
)) {
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf300594..59c270192ccc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
#define MPC_DMA_DMAES_SBE (1 << 1)
#define MPC_DMA_DMAES_DBE (1 << 0)
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6)
+
#define MPC_DMA_TSIZE_1 0x00
#define MPC_DMA_TSIZE_2 0x01
#define MPC_DMA_TSIZE_4 0x02
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
/* 0x30 */
u32 dmahrsh; /* DMA hw request status high(ch63~32) */
u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
- u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ union {
+ u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ u32 dmagpor; /* (General purpose register on MPC8308) */
+ };
u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
/* 0x40 ~ 0xff */
u32 reserve0[48]; /* Reserved */
@@ -195,7 +201,9 @@ struct mpc_dma {
struct mpc_dma_regs __iomem *regs;
struct mpc_dma_tcd __iomem *tcd;
int irq;
+ int irq2;
uint error_status;
+ int is_mpc8308;
/* Lock for error_status field in this structure */
spinlock_t error_status_lock;
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
prev = mdesc;
}
- prev->tcd->start = 0;
prev->tcd->int_maj = 1;
/* Send first descriptor in chain into hardware */
memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+ if (first != prev)
+ mdma->tcd[cid].e_sg = 1;
out_8(&mdma->regs->dmassrt, cid);
}
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
spin_lock(&mchan->lock);
+ out_8(&mdma->regs->dmacint, ch + off);
+ out_8(&mdma->regs->dmacerr, ch + off);
+
/* Check error status */
if (es & (1 << ch))
list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
spin_unlock(&mdma->error_status_lock);
/* Handle interrupt on each channel */
- mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+ if (mdma->dma.chancnt > 32) {
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
in_be32(&mdma->regs->dmaerrh), 32);
+ }
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
in_be32(&mdma->regs->dmaerrl), 0);
- /* Ack interrupt on all channels */
- out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
/* Schedule tasklet */
tasklet_schedule(&mdma->tasklet);
return IRQ_HANDLED;
}
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* Process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
- struct mpc_dma *mdma = (void *)data;
dma_cookie_t last_cookie = 0;
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
struct dma_async_tx_descriptor *desc;
unsigned long flags;
LIST_HEAD(list);
- uint es;
int i;
+ for (i = 0; i < mdma->dma.chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!list_empty(&mchan->completed))
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (list_empty(&list))
+ continue;
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ desc = &mdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ list_splice_tail_init(&list, &mchan->free);
+ mchan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+ struct mpc_dma *mdma = (void *)data;
+ unsigned long flags;
+ uint es;
+
spin_lock_irqsave(&mdma->error_status_lock, flags);
es = mdma->error_status;
mdma->error_status = 0;
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data)
dev_err(mdma->dma.dev, "- Destination Bus Error\n");
}
- for (i = 0; i < mdma->dma.chancnt; i++) {
- mchan = &mdma->channels[i];
-
- /* Get all completed descriptors */
- spin_lock_irqsave(&mchan->lock, flags);
- if (!list_empty(&mchan->completed))
- list_splice_tail_init(&mchan->completed, &list);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- if (list_empty(&list))
- continue;
-
- /* Execute callbacks and run dependencies */
- list_for_each_entry(mdesc, &list, node) {
- desc = &mdesc->desc;
-
- if (desc->callback)
- desc->callback(desc->callback_param);
-
- last_cookie = desc->cookie;
- dma_run_dependencies(desc);
- }
-
- /* Free descriptors */
- spin_lock_irqsave(&mchan->lock, flags);
- list_splice_tail_init(&list, &mchan->free);
- mchan->completed_cookie = last_cookie;
- spin_unlock_irqrestore(&mchan->lock, flags);
- }
+ mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc = NULL;
struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
}
spin_unlock_irqrestore(&mchan->lock, iflags);
- if (!mdesc)
+ if (!mdesc) {
+ /* try to free completed descriptors */
+ mpc_dma_process_completed(mdma);
return NULL;
+ }
mdesc->error = 0;
tcd = mdesc->tcd;
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
tcd->dsize = MPC_DMA_TSIZE_32;
tcd->soff = 32;
tcd->doff = 32;
- } else if (IS_ALIGNED(src | dst | len, 16)) {
+ } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+ /* MPC8308 doesn't support 16 byte transfers */
tcd->ssize = MPC_DMA_TSIZE_16;
tcd->dsize = MPC_DMA_TSIZE_16;
tcd->soff = 16;
@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
return -EINVAL;
}
+ if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+ mdma->is_mpc8308 = 1;
+ mdma->irq2 = irq_of_parse_and_map(dn, 1);
+ if (mdma->irq2 == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+ }
+
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
return -EINVAL;
}
+ if (mdma->is_mpc8308) {
+ retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+ DRV_NAME, mdma);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ2!\n");
+ return -EINVAL;
+ }
+ }
+
spin_lock_init(&mdma->error_status_lock);
dma = &mdma->dma;
dma->dev = dev;
- dma->chancnt = MPC_DMA_CHANNELS;
+ if (!mdma->is_mpc8308)
+ dma->chancnt = MPC_DMA_CHANNELS;
+ else
+ dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
- out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
- MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
-
- /* Disable hardware DMA requests */
- out_be32(&mdma->regs->dmaerqh, 0);
- out_be32(&mdma->regs->dmaerql, 0);
-
- /* Disable error interrupts */
- out_be32(&mdma->regs->dmaeeih, 0);
- out_be32(&mdma->regs->dmaeeil, 0);
-
- /* Clear interrupts status */
- out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
- out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
- /* Route interrupts to IPIC */
- out_be32(&mdma->regs->dmaihsa, 0);
- out_be32(&mdma->regs->dmailsa, 0);
+ if (!mdma->is_mpc8308) {
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+ MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+ /* Disable hardware DMA requests */
+ out_be32(&mdma->regs->dmaerqh, 0);
+ out_be32(&mdma->regs->dmaerql, 0);
+
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeih, 0);
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Route interrupts to IPIC */
+ out_be32(&mdma->regs->dmaihsa, 0);
+ out_be32(&mdma->regs->dmailsa, 0);
+ } else {
+ /* MPC8308 has 16 channels and lacks some registers */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+ /* Enable snooping */
+ out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmaintl, 0xFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+ }
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89420d0..1c38418ae61f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
}
/* PCI Device ID of DMA device */
-#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810
-#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
+#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
+#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
+#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
static const struct pci_device_id pch_dma_id_table[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
{ 0, },
};
@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+ "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a553205..6e1d46a65d0e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
/*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
* Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
return d;
}
-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+ if (is_log) {
+ if (psize == STEDMA40_PSIZE_LOG_1)
+ return 1;
+ } else {
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ return 1;
+ }
+
+ return 2 << psize;
+}
+
+/*
+ * The DMA only supports transmitting packets up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * dma elements required to send the entire sg list
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+ int dmalen;
+ u32 max_w = max(data_width1, data_width2);
+ u32 min_w = min(data_width1, data_width2);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+ if (seg_max > STEDMA40_MAX_SEG_SIZE)
+ seg_max -= (1 << max_w);
+
+ if (!IS_ALIGNED(size, 1 << max_w))
+ return -EINVAL;
+
+ if (size <= seg_max)
+ dmalen = 1;
+ else {
+ dmalen = size / seg_max;
+ if (dmalen * seg_max < size)
+ dmalen++;
+ }
+ return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+ u32 data_width1, u32 data_width2)
+{
+ struct scatterlist *sg;
+ int i;
+ int len = 0;
+ int ret;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ ret = d40_size_2_dmalen(sg_dma_len(sg),
+ data_width1, data_width2);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ return len;
+}
+/* Support functions for logical channels */
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
res = -EINVAL;
}
+ if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+ (1 << conf->src_info.data_width) !=
+ d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+ (1 << conf->dst_info.data_width)) {
+ /*
+ * The DMAC hardware only supports
+ * src (burst x width) == dst (burst x width)
+ */
+
+ dev_err(&d40c->chan.dev->device,
+ "[%s] src (burst x width) != dst (burst x width)\n",
+ __func__);
+ res = -EINVAL;
+ }
+
return res;
}
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
if (d40d == NULL)
goto err;
- d40d->lli_len = sgl_len;
+ d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ goto err;
+ }
+
d40d->lli_current = 0;
d40d->txd.flags = dma_flags;
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
d40d->lli_log.src,
d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width);
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
(void) d40_log_sg_to_lli(sgl_dst,
sgl_len,
d40d->lli_log.dst,
d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width);
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width);
} else {
- if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
d40c->dma_cfg.src_info.psize);
if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.psize);
if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
struct d40_chan *d40c = container_of(chan, struct d40_chan,
chan);
unsigned long flags;
- int err = 0;
if (d40c->phy_chan == NULL) {
dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
}
d40d->txd.flags = dma_flags;
+ d40d->lli_len = d40_size_2_dmalen(size,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ goto err;
+ }
+
dma_async_tx_descriptor_init(&d40d->txd, chan);
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
}
- d40d->lli_len = 1;
d40d->lli_current = 0;
- d40_log_fill_lli(d40d->lli_log.src,
- src,
- size,
- d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- true);
+ if (d40_log_buf_to_lli(d40d->lli_log.src,
+ src,
+ size,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ true) == NULL)
+ goto err;
- d40_log_fill_lli(d40d->lli_log.dst,
- dst,
- size,
- d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- true);
+ if (d40_log_buf_to_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
+ true) == NULL)
+ goto err;
} else {
- if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
goto err;
}
- err = d40_phy_fill_lli(d40d->lli_phy.src,
+ if (d40_phy_buf_to_lli(d40d->lli_phy.src,
src,
size,
d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
d40c->src_def_cfg,
true,
d40c->dma_cfg.src_info.data_width,
- false);
- if (err)
- goto err_fill_lli;
+ d40c->dma_cfg.dst_info.data_width,
+ false) == NULL)
+ goto err;
- err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
dst,
size,
d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
d40c->dst_def_cfg,
true,
d40c->dma_cfg.dst_info.data_width,
- false);
-
- if (err)
- goto err_fill_lli;
+ d40c->dma_cfg.src_info.data_width,
+ false) == NULL)
+ goto err;
(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
-err_fill_lli:
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed filling in PHY LLI\n", __func__);
err:
if (d40d)
d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
dma_addr_t dev_addr = 0;
int total_size;
- if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
return -ENOMEM;
}
- d40d->lli_len = sg_len;
d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
dma_addr_t dst_dev_addr;
int res;
- if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width);
+ if (d40d->lli_len < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unaligned size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
"[%s] Out of memory\n", __func__);
return -ENOMEM;
}
- d40d->lli_len = sgl_len;
d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
d40c->dma_cfg.src_info.psize);
if (res < 0)
return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.psize);
if (res < 0)
return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
psize = STEDMA40_PSIZE_PHY_8;
else if (config_maxburst >= 4)
psize = STEDMA40_PSIZE_PHY_4;
+ else if (config_maxburst >= 2)
+ psize = STEDMA40_PSIZE_PHY_2;
else
psize = STEDMA40_PSIZE_PHY_1;
}
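
As a worked example of d40_size_2_dmalen() above (assuming STEDMA40_MAX_SEG_SIZE is 0xffff elements, a value not visible in these hunks): with a 4-byte source width (data_width1 = 2) and a 1-byte destination width (data_width2 = 0), max_w = 2 and min_w = 0, so seg_max = ALIGN(0xffff << 0, 4) = 0x10000; that exceeds 0xffff, so it is reduced by (1 << 2) to 0xfffc bytes. A 0x100000-byte (1 MiB) transfer is 4-byte aligned, so it is accepted and split into 0x100000 / 0xfffc rounded up, i.e. dmalen = 17 LLI elements.
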
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb88b255..0b096a38322d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
*/
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
*dst_cfg = dst;
}
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device)
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
{
int num_elems;
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
else
num_elems = 2 << psize;
- /*
- * Size is 16bit. data_width is 8, 16, 32 or 64 bit
- * Block large than 64 KiB must be split.
- */
- if (data_size > (0xffff << data_width))
- return -EINVAL;
-
/* Must be aligned */
if (!IS_ALIGNED(data, 0x1 << data_width))
return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
return 0;
}
+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+ u32 max_w = max(data_width1, data_width2);
+ u32 min_w = min(data_width1, data_width2);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+ if (seg_max > STEDMA40_MAX_SEG_SIZE)
+ seg_max -= (1 << max_w);
+
+ if (size <= seg_max)
+ return size;
+
+ if (size <= 2 * seg_max)
+ return ALIGN(size / 2, 1 << max_w);
+
+ return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+ dma_addr_t addr,
+ u32 size,
+ int psize,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width1,
+ u32 data_width2,
+ bool is_device)
+{
+ int err;
+ dma_addr_t next = lli_phys;
+ int size_rest = size;
+ int size_seg = 0;
+
+ do {
+ size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_rest -= size_seg;
+
+ if (term_int && size_rest == 0)
+ next = 0;
+ else
+ next = ALIGN(next + sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ err = d40_phy_fill_lli(lli,
+ addr,
+ size_seg,
+ psize,
+ next,
+ reg_cfg,
+ !next,
+ data_width1,
+ is_device);
+
+ if (err)
+ goto err;
+
+ lli++;
+ if (!is_device)
+ addr += size_seg;
+ } while (size_rest);
+
+ return lli;
+
+ err:
+ return NULL;
+}
+
int d40_phy_sg_to_lli(struct scatterlist *sg,
int sg_len,
dma_addr_t target,
- struct d40_phy_lli *lli,
+ struct d40_phy_lli *lli_sg,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width,
+ u32 data_width1,
+ u32 data_width2,
int psize)
{
int total_size = 0;
int i;
struct scatterlist *current_sg = sg;
- dma_addr_t next_lli_phys;
dma_addr_t dst;
- int err = 0;
+ struct d40_phy_lli *lli = lli_sg;
+ dma_addr_t l_phys = lli_phys;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /* If this scatter list entry is the last one, no next link */
- if (sg_len - 1 == i)
- next_lli_phys = 0;
- else
- next_lli_phys = ALIGN(lli_phys + (i + 1) *
- sizeof(struct d40_phy_lli),
- D40_LLI_ALIGN);
-
if (target)
dst = target;
else
dst = sg_phys(current_sg);
- err = d40_phy_fill_lli(&lli[i],
- dst,
- sg_dma_len(current_sg),
- psize,
- next_lli_phys,
- reg_cfg,
- !next_lli_phys,
- data_width,
- target == dst);
- if (err)
- goto err;
+ l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+ sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+ lli = d40_phy_buf_to_lli(lli,
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ l_phys,
+ reg_cfg,
+ sg_len - 1 == i,
+ data_width1,
+ data_width2,
+ target == dst);
+ if (lli == NULL)
+ return -EINVAL;
}
return total_size;
-err:
- return err;
}
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
writel(lli_dst->lcsp13, &lcla[1].lcsp13);
}
-void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data, u32 data_size,
- u32 reg_cfg,
- u32 data_width,
- bool addr_inc)
+static void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 reg_cfg,
+ u32 data_width,
+ bool addr_inc)
{
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
lli->lcsp02 = ((data_size >> data_width) <<
D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+ BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
/* 16 LSBs address of the current element */
lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
/* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
+ struct d40_log_lli *lli_src = lli->src;
+ struct d40_log_lli *lli_dst = lli->dst;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
if (direction == DMA_TO_DEVICE) {
- d40_log_fill_lli(&lli->src[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- true);
- d40_log_fill_lli(&lli->dst[i],
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- false);
+ lli_src =
+ d40_log_buf_to_lli(lli_src,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp->lcsp1, src_data_width,
+ dst_data_width,
+ true);
+ lli_dst =
+ d40_log_buf_to_lli(lli_dst,
+ dev_addr,
+ sg_dma_len(current_sg),
+ lcsp->lcsp3, dst_data_width,
+ src_data_width,
+ false);
} else {
- d40_log_fill_lli(&lli->dst[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp->lcsp3, dst_data_width,
- true);
- d40_log_fill_lli(&lli->src[i],
- dev_addr,
- sg_dma_len(current_sg),
- lcsp->lcsp1, src_data_width,
- false);
+ lli_dst =
+ d40_log_buf_to_lli(lli_dst,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp->lcsp3, dst_data_width,
+ src_data_width,
+ true);
+ lli_src =
+ d40_log_buf_to_lli(lli_src,
+ dev_addr,
+ sg_dma_len(current_sg),
+ lcsp->lcsp1, src_data_width,
+ dst_data_width,
+ false);
}
}
return total_size;
}
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+ dma_addr_t addr,
+ int size,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width1,
+ u32 data_width2,
+ bool addr_inc)
+{
+ struct d40_log_lli *lli = lli_sg;
+ int size_rest = size;
+ int size_seg = 0;
+
+ do {
+ size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+ size_rest -= size_seg;
+
+ d40_log_fill_lli(lli,
+ addr,
+ size_seg,
+ lcsp13, data_width1,
+ addr_inc);
+ if (addr_inc)
+ addr += size_seg;
+ lli++;
+ } while (size_rest);
+
+ return lli;
+}
+
int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width)
+ u32 data_width1, u32 data_width2)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
+ struct d40_log_lli *lli = lli_sg;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
-
- d40_log_fill_lli(&lli_sg[i],
- sg_phys(current_sg),
- sg_dma_len(current_sg),
- lcsp13, data_width,
- true);
+ lli = d40_log_buf_to_lli(lli,
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ lcsp13,
+ data_width1, data_width2, true);
}
return total_size;
}
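
d40_seg_size() above keeps split segments balanced: when a buffer fits in two segments it returns roughly half the size, aligned to the wider data width, rather than one maximal segment plus a small remainder. For example, again assuming seg_max works out to 0xfffc bytes (65532), a 100000-byte buffer is larger than seg_max but no larger than 2 * seg_max = 131064, so the first call returns ALIGN(50000, 4) = 50000 and the loops in d40_phy_buf_to_lli()/d40_log_buf_to_lli() emit two 50000-byte entries instead of 65532 + 34468.
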
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b907544..9cc43495bea2 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
struct d40_phy_lli *lli,
dma_addr_t lli_phys,
u32 reg_cfg,
- u32 data_width,
+ u32 data_width1,
+ u32 data_width2,
int psize);
-int d40_phy_fill_lli(struct d40_phy_lli *lli,
- dma_addr_t data,
- u32 data_size,
- int psize,
- dma_addr_t next_lli,
- u32 reg_cfg,
- bool term_int,
- u32 data_width,
- bool is_device);
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width1,
+ u32 data_width2,
+ bool is_device);
void d40_phy_lli_write(void __iomem *virtbase,
u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* Logical channels */
-void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data,
- u32 data_size,
- u32 reg_cfg,
- u32 data_width,
- bool addr_inc);
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+ dma_addr_t addr,
+ int size,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width1, u32 data_width2,
+ bool addr_inc);
int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width);
+ u32 data_width1, u32 data_width2);
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc58025d..23e03554f0d3 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
- int ganged;
-
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1(" DramHoleValid: %s\n",
(pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ amd64_debug_display_dimm_sizes(0, pvt);
+
/* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf) {
- amd64_debug_display_dimm_sizes(0, pvt);
+ if (boot_cpu_data.x86 == 0xf)
return;
- }
+
+ amd64_debug_display_dimm_sizes(1, pvt);
amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
- /*
- * Determine if ganged and then dump memory sizes for first controller,
- * and if NOT ganged dump info for 2nd controller.
- */
- ganged = dct_ganging_enabled(pvt);
-
- amd64_debug_display_dimm_sizes(0, pvt);
-
- if (!ganged)
- amd64_debug_display_dimm_sizes(1, pvt);
}
/* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
WARN_ON(ctrl != 0);
}
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
- ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+ dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
- dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
- dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+ debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
index 60e0d1c72dee..6f8b07131ec4 100644
--- a/drivers/edac/amd8131_edac.h
+++ b/drivers/edac/amd8131_edac.h
@@ -99,7 +99,7 @@ struct amd8131_dev_info {
/*
* AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are
+ * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
* four PCIX Bridges on ATCA-6101 altogether.
*
* These PCIX Bridges share the same PCI Device ID and are all of
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c973004c002c..db1df59ae2b6 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -47,7 +47,7 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
syndrome = (ar & 0x000000001fe00000ul) >> 21;
- /* TODO: Decoding of the error addresss */
+ /* TODO: Decoding of the error address */
edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
syndrome, 0, chan, "");
}
@@ -68,7 +68,7 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
pfn = address >> PAGE_SHIFT;
offset = address & ~PAGE_MASK;
- /* TODO: Decoding of the error addresss */
+ /* TODO: Decoding of the error address */
edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
}
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ff1eb7bb26c6..3d965347a673 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -259,7 +259,7 @@ enum scrub_type {
* for single channel are 64 bits, for dual channel 128
* bits.
*
- * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memmory.
+ * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
* Motherboards commonly drive two chip-select pins to
* a memory stick. A single-ranked stick, will occupy
* only one of those rows. The other will be unused.
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 362861c15779..81154ab296b6 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1,6 +1,6 @@
/* Intel i7 core/Nehalem Memory Controller kernel module
*
- * This driver supports yhe memory controllers found on the Intel
+ * This driver supports the memory controllers found on the Intel
* processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
* Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
* and Westmere-EP.
@@ -1271,7 +1271,7 @@ static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
int i;
/*
- * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
+ * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
* aren't announced by acpi. So, we need to use a legacy scan probing
* to detect them
*/
@@ -1864,7 +1864,7 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
if (mce->mcgstatus & 1)
i7core_check_error(mci);
- /* Advice mcelog that the error were handled */
+ /* Advise mcelog that the errors were handled */
return 1;
}
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 070cea41b661..b9f0c20df1aa 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -873,7 +873,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1)
}
/**
- * ppc4xx_edac_init_csrows - intialize driver instance rows
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
* @mci: A pointer to the EDAC memory controller instance
* associated with the ibm,sdram-4xx-ddr2 controller for which
* the csrows (i.e. banks/ranks) are being initialized.
@@ -881,7 +881,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1)
* currently set for the controller, from which bank width
* and memory typ information is derived.
*
- * This routine intializes the virtual "chip select rows" associated
+ * This routine initializes the virtual "chip select rows" associated
* with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
* controller bank/rank is mapped to a row.
*
@@ -992,7 +992,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
}
/**
- * ppc4xx_edac_mc_init - intialize driver instance
+ * ppc4xx_edac_mc_init - initialize driver instance
* @mci: A pointer to the EDAC memory controller instance being
* initialized.
* @op: A pointer to the OpenFirmware device tree node associated
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 40a222e19b2d..0c56989cd907 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -19,7 +19,7 @@ config FIREWIRE
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
- depends on PCI && FIREWIRE
+ depends on PCI && FIREWIRE && MMU
help
Enable this driver if you have a FireWire controller based
on the OHCI specification. For all practical purposes, this
@@ -49,15 +49,13 @@ config FIREWIRE_SBP2
configuration section.
config FIREWIRE_NET
- tristate "IP networking over 1394 (EXPERIMENTAL)"
- depends on FIREWIRE && INET && EXPERIMENTAL
+ tristate "IP networking over 1394"
+ depends on FIREWIRE && INET
help
This enables IPv4 over IEEE 1394, providing IP connectivity with
other implementations of RFC 2734 as found on several operating
systems. Multicast support is currently limited.
- NOTE, this driver is not stable yet!
-
To compile this driver as a module, say M here: The module will be
called firewire-net.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be0492398ef9..24ff35511e2b 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_IRMC ((1) << 31)
#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
+#define CANON_OUI 0x000085
+
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
@@ -284,6 +286,7 @@ static void bm_work(struct work_struct *work)
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
+ bool keep_this_irm;
spin_lock_irq(&card->lock);
@@ -305,6 +308,10 @@ static void bm_work(struct work_struct *work)
irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
(irm_device->config_rom[2] & 0x000000f0) == 0;
+ /* Canon MV5i works unreliably if it is not the root node. */
+ keep_this_irm = irm_device && irm_device->config_rom &&
+ irm_device->config_rom[3] >> 8 == CANON_OUI;
+
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
@@ -333,7 +340,7 @@ static void bm_work(struct work_struct *work)
goto pick_me;
}
- if (irm_is_1394_1995_only) {
+ if (irm_is_1394_1995_only && !keep_this_irm) {
new_root_id = local_id;
fw_notify("%s, making local node (%02x) root.\n",
"IRM is not 1394a compliant", new_root_id);
@@ -382,7 +389,7 @@ static void bm_work(struct work_struct *work)
spin_lock_irq(&card->lock);
- if (rcode != RCODE_COMPLETE) {
+ if (rcode != RCODE_COMPLETE && !keep_this_irm) {
/*
* The lock request failed, maybe the IRM
* isn't really IRM capable after all. Let's
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14bb7b7b5dd7..48ae712e2101 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1501,9 +1501,10 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
e->client = client;
e->p.speed = SCODE_100;
e->p.generation = a->generation;
- e->p.header[0] = a->data[0];
- e->p.header[1] = a->data[1];
- e->p.header_length = 8;
+ e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+ e->p.header[1] = a->data[0];
+ e->p.header[2] = a->data[1];
+ e->p.header_length = 12;
e->p.callback = outbound_phy_packet_callback;
e->phy_packet.closure = a->closure;
e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
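
As the hunk above shows, a user-submitted PHY packet is now queued with three header quadlets instead of two: quadlet 0 carries the link-internal tcode in AT DMA format, and quadlets 1 and 2 carry the PHY quadlet and (per IEEE 1394) its bitwise inverse, for a header_length of 12 bytes. A minimal sketch of that layout with the 0xe tcode written out — here the inverse is computed in the kernel, as fw_send_phy_config() does later in this patch, whereas the cdev path takes both quadlets from userspace; the function name is illustrative:

/* assumes u32 from <linux/types.h> */
static void fill_phy_packet_header(u32 header[3], u32 phy_quadlet)
{
	header[0] = 0xe << 4;		/* TCODE_LINK_INTERNAL << 4 */
	header[1] = phy_quadlet;	/* e.g. a PHY configuration quadlet */
	header[2] = ~phy_quadlet;	/* inverse check quadlet */
}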
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b42a0bde8494..d00f8ce902cc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -72,6 +72,15 @@
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+ if (t->is_split_transaction)
+ return del_timer(&t->split_timeout_timer);
+ else
+ return 1;
+}
+
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
@@ -81,7 +90,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- if (!del_timer(&t->split_timeout_timer)) {
+ if (!try_cancel_split_timeout(t)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
@@ -141,16 +150,28 @@ static void split_transaction_timeout_callback(unsigned long data)
card->tlabel_mask &= ~(1ULL << t->tlabel);
spin_unlock_irqrestore(&card->lock, flags);
- card->driver->cancel_packet(card, &t->packet);
-
- /*
- * At this point cancel_packet will never call the transaction
- * callback, since we just took the transaction out of the list.
- * So do it here.
- */
t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}
+static void start_split_transaction_timeout(struct fw_transaction *t,
+ struct fw_card *card)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+
+ t->is_split_transaction = true;
+ mod_timer(&t->split_timeout_timer,
+ jiffies + card->split_timeout_jiffies);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -162,7 +183,7 @@ static void transmit_complete_callback(struct fw_packet *packet,
close_transaction(t, card, RCODE_COMPLETE);
break;
case ACK_PENDING:
- t->timestamp = packet->timestamp;
+ start_split_transaction_timeout(t, card);
break;
case ACK_BUSY_X:
case ACK_BUSY_A:
@@ -250,7 +271,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
break;
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
}
common:
packet->speed = speed;
@@ -349,11 +370,9 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
t->node_id = destination_id;
t->tlabel = tlabel;
t->card = card;
+ t->is_split_transaction = false;
setup_timer(&t->split_timeout_timer,
split_transaction_timeout_callback, (unsigned long)t);
- /* FIXME: start this timer later, relative to t->timestamp */
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
t->callback = callback;
t->callback_data = callback_data;
@@ -423,7 +442,8 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
}
static struct fw_packet phy_config_packet = {
- .header_length = 8,
+ .header_length = 12,
+ .header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
@@ -451,8 +471,8 @@ void fw_send_phy_config(struct fw_card *card,
mutex_lock(&phy_config_mutex);
- phy_config_packet.header[0] = data;
- phy_config_packet.header[1] = ~data;
+ phy_config_packet.header[1] = data;
+ phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
INIT_COMPLETION(phy_config_done);
@@ -638,7 +658,7 @@ int fw_get_response_length(struct fw_request *r)
}
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
return 0;
}
}
@@ -694,7 +714,7 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
break;
default:
- WARN(1, "wrong tcode %d", tcode);
+ WARN(1, "wrong tcode %d\n", tcode);
}
response->payload_mapped = false;
@@ -925,7 +945,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- if (!del_timer(&t->split_timeout_timer)) {
+ if (!try_cancel_split_timeout(t)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
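
The split-transaction handling above changes when the timeout timer is armed: instead of starting it at submission time for every request, transmit_complete_callback() arms it only on ack_pending, i.e. only when a response is actually outstanding, and records that in t->is_split_transaction so the completion paths know whether there is a timer to race with. A condensed sketch of the check both completion paths now perform under card->lock (names taken from this file, body abbreviated):

/* caller holds card->lock; returns false if the split-timeout handler
 * already fired and now owns the completion of this transaction */
static bool claim_transaction(struct fw_card *card, struct fw_transaction *t)
{
	if (!try_cancel_split_timeout(t))
		return false;
	list_del_init(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	return true;
}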
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index e6239f971be6..f8dfcf1c6cbe 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -215,9 +215,11 @@ static inline bool is_next_generation(int new_generation, int old_generation)
/* -transaction */
+#define TCODE_LINK_INTERNAL 0xe
+
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
-#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == 0xe)
+#define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1a467a91fb0b..7ed08fd1214e 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -9,6 +9,7 @@
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/highmem.h>
@@ -179,6 +180,7 @@ struct fwnet_device {
/* Number of tx datagrams that have been queued but not yet acked */
int queued_datagrams;
+ int peer_count;
struct list_head peer_list;
struct fw_card *card;
struct net_device *netdev;
@@ -189,6 +191,7 @@ struct fwnet_peer {
struct fwnet_device *dev;
u64 guid;
u64 fifo;
+ __be32 ip;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
@@ -568,6 +571,8 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
peer->speed = sspd;
if (peer->max_payload > max_payload)
peer->max_payload = max_payload;
+
+ peer->ip = arp1394->sip;
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -996,15 +1001,23 @@ static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
static void fwnet_write_complete(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
- struct fwnet_packet_task *ptask;
-
- ptask = data;
+ struct fwnet_packet_task *ptask = data;
+ static unsigned long j;
+ static int last_rcode, errors_skipped;
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fw_error("fwnet_write_complete: failed: %x\n", rcode);
fwnet_transmit_packet_failed(ptask);
+
+ if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
+ fw_error("fwnet_write_complete: "
+ "failed: %x (skipped %d)\n", rcode, errors_skipped);
+
+ errors_skipped = 0;
+ last_rcode = rcode;
+ } else
+ errors_skipped++;
}
}
@@ -1213,6 +1226,14 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
return retval;
}
+static void set_carrier_state(struct fwnet_device *dev)
+{
+ if (dev->peer_count > 1)
+ netif_carrier_on(dev->netdev);
+ else
+ netif_carrier_off(dev->netdev);
+}
+
/* ifup */
static int fwnet_open(struct net_device *net)
{
@@ -1226,6 +1247,10 @@ static int fwnet_open(struct net_device *net)
}
netif_start_queue(net);
+ spin_lock_irq(&dev->lock);
+ set_carrier_state(dev);
+ spin_unlock_irq(&dev->lock);
+
return 0;
}
@@ -1397,6 +1422,10 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
return 0;
}
+static const struct ethtool_ops fwnet_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+};
+
static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
@@ -1415,6 +1444,7 @@ static void fwnet_init_dev(struct net_device *net)
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
net->tx_queue_len = FWNET_TX_QUEUE_LEN;
+ net->ethtool_ops = &fwnet_ethtool_ops;
}
/* caller must hold fwnet_device_mutex */
@@ -1443,6 +1473,7 @@ static int fwnet_add_peer(struct fwnet_device *dev,
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
peer->fifo = FWNET_NO_FIFO_ADDR;
+ peer->ip = 0;
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
@@ -1455,6 +1486,8 @@ static int fwnet_add_peer(struct fwnet_device *dev,
spin_lock_irq(&dev->lock);
list_add_tail(&peer->peer_link, &dev->peer_list);
+ dev->peer_count++;
+ set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
@@ -1535,13 +1568,15 @@ static int fwnet_probe(struct device *_dev)
return ret;
}
-static void fwnet_remove_peer(struct fwnet_peer *peer)
+static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
struct fwnet_partial_datagram *pd, *pd_next;
- spin_lock_irq(&peer->dev->lock);
+ spin_lock_irq(&dev->lock);
list_del(&peer->peer_link);
- spin_unlock_irq(&peer->dev->lock);
+ dev->peer_count--;
+ set_carrier_state(dev);
+ spin_unlock_irq(&dev->lock);
list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
fwnet_pd_delete(pd);
@@ -1558,10 +1593,13 @@ static int fwnet_remove(struct device *_dev)
mutex_lock(&fwnet_device_mutex);
- fwnet_remove_peer(peer);
+ net = dev->netdev;
+ if (net && peer->ip)
+ arp_invalidate(net, peer->ip);
+
+ fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
- net = dev->netdev;
unregister_netdev(net);
if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
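
Two behavioural notes on the firewire-net changes above: netif_carrier_on() is asserted only while more than one peer is known (set_carrier_state() is called at ifup and whenever a peer is added or removed), and transmit failures are no longer logged once per packet. A sketch of the rate-limited reporting pattern, using the same printk_timed_ratelimit() helper; the one-second interval and message text are illustrative:

/* printk_timed_ratelimit() is declared in <linux/printk.h>
 * (<linux/kernel.h> on older kernels) */
static void report_send_error(int rcode)
{
	static unsigned long last_report;	/* jiffies of the last message */
	static int last_rcode, errors_skipped;

	/* emit at most one line per second, or immediately when the error
	 * code changes; count what was suppressed in between */
	if (printk_timed_ratelimit(&last_report, 1000) || rcode != last_rcode) {
		pr_err("transmit failed: %x (skipped %d)\n",
		       rcode, errors_skipped);
		errors_skipped = 0;
		last_rcode = rcode;
	} else {
		errors_skipped++;
	}
}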
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index bf184fb59a5e..0618145376ad 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -302,7 +302,7 @@ nosy_open(struct inode *inode, struct file *file)
file->private_data = client;
- return 0;
+ return nonseekable_open(inode, file);
fail:
kfree(client);
lynx_put(lynx);
@@ -405,7 +405,6 @@ static const struct file_operations nosy_ops = {
.poll = nosy_poll,
.open = nosy_open,
.release = nosy_release,
- .llseek = noop_llseek,
};
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3c8b60bd86b..bd3c61b6dd8d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
@@ -40,6 +41,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -80,17 +82,23 @@ struct descriptor {
#define COMMAND_PTR(regs) ((regs) + 12)
#define CONTEXT_MATCH(regs) ((regs) + 16)
-struct ar_buffer {
- struct descriptor descriptor;
- struct ar_buffer *next;
- __le32 data[0];
-};
+#define AR_BUFFER_SIZE (32*1024)
+#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD 4096
+#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
struct ar_context {
struct fw_ohci *ohci;
- struct ar_buffer *current_buffer;
- struct ar_buffer *last_buffer;
+ struct page *pages[AR_BUFFERS];
+ void *buffer;
+ struct descriptor *descriptors;
+ dma_addr_t descriptors_bus;
void *pointer;
+ unsigned int last_buffer_index;
u32 regs;
struct tasklet_struct tasklet;
};
@@ -117,6 +125,8 @@ struct context {
struct fw_ohci *ohci;
u32 regs;
int total_allocation;
+ bool running;
+ bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
@@ -161,6 +171,9 @@ struct iso_context {
int excess_bytes;
void *header;
size_t header_length;
+
+ u8 sync;
+ u8 tags;
};
#define CONFIG_ROM_SIZE 1024
@@ -177,7 +190,8 @@ struct fw_ohci {
u32 bus_time;
bool is_root;
bool csr_state_setclear_abdicate;
-
+ int n_ir;
+ int n_it;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
@@ -186,6 +200,9 @@ struct fw_ohci {
struct mutex phy_reg_mutex;
+ void *misc_buffer;
+ dma_addr_t misc_buffer_bus;
+
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
struct context at_request_ctx;
@@ -411,10 +428,6 @@ static const char *tcodes[] = {
[0xc] = "-reserved-", [0xd] = "-reserved-",
[0xe] = "link internal", [0xf] = "-reserved-",
};
-static const char *phys[] = {
- [0x0] = "phy config packet", [0x1] = "link-on packet",
- [0x2] = "self-id packet", [0x3] = "-reserved-",
-};
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
@@ -433,12 +446,6 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
return;
}
- if (header[0] == ~header[1]) {
- fw_notify("A%c %s, %s, %08x\n",
- dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
- return;
- }
-
switch (tcode) {
case 0x0: case 0x6: case 0x8:
snprintf(specific, sizeof(specific), " = %08x",
@@ -453,9 +460,13 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
}
switch (tcode) {
- case 0xe: case 0xa:
+ case 0xa:
fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
break;
+ case 0xe:
+ fw_notify("A%c %s, PHY %08x %08x\n",
+ dir, evts[evt], header[1], header[2]);
+ break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
fw_notify("A%c spd %x tl %02x, "
"%04x -> %04x, %s, "
@@ -594,59 +605,150 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
return ret;
}
-static void ar_context_link_page(struct ar_context *ctx,
- struct ar_buffer *ab, dma_addr_t ab_bus)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
- size_t offset;
+ return page_private(ctx->pages[i]);
+}
- ab->next = NULL;
- memset(&ab->descriptor, 0, sizeof(ab->descriptor));
- ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
- DESCRIPTOR_STATUS |
- DESCRIPTOR_BRANCH_ALWAYS);
- offset = offsetof(struct ar_buffer, data);
- ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
- ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
- ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
- ab->descriptor.branch_address = 0;
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
+{
+ struct descriptor *d;
+
+ d = &ctx->descriptors[index];
+ d->branch_address &= cpu_to_le32(~0xf);
+ d->res_count = cpu_to_le16(PAGE_SIZE);
+ d->transfer_status = 0;
wmb(); /* finish init of new descriptors before branch_address update */
- ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
- ctx->last_buffer->next = ab;
- ctx->last_buffer = ab;
+ d = &ctx->descriptors[ctx->last_buffer_index];
+ d->branch_address |= cpu_to_le32(1);
+
+ ctx->last_buffer_index = index;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
flush_writes(ctx->ohci);
}
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_release(struct ar_context *ctx)
{
- struct device *dev = ctx->ohci->card.device;
- struct ar_buffer *ab;
- dma_addr_t uninitialized_var(ab_bus);
+ unsigned int i;
- ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
- if (ab == NULL)
- return -ENOMEM;
+ if (ctx->buffer)
+ vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
- ar_context_link_page(ctx, ab, ab_bus);
+ for (i = 0; i < AR_BUFFERS; i++)
+ if (ctx->pages[i]) {
+ dma_unmap_page(ctx->ohci->card.device,
+ ar_buffer_bus(ctx, i),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(ctx->pages[i]);
+ }
+}
- return 0;
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
+{
+ if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+ reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+ flush_writes(ctx->ohci);
+
+ fw_error("AR error: %s; DMA stopped\n", error_msg);
+ }
+ /* FIXME: restart? */
}
-static void ar_context_release(struct ar_context *ctx)
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+ return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+ return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+ return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+ unsigned int *buffer_offset)
{
- struct ar_buffer *ab, *ab_next;
- size_t offset;
- dma_addr_t ab_bus;
+ unsigned int i, next_i, last = ctx->last_buffer_index;
+ __le16 res_count, next_res_count;
+
+ i = ar_first_buffer_index(ctx);
+ res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+ /* A buffer that is not yet completely filled must be the last one. */
+ while (i != last && res_count == 0) {
+
+ /* Peek at the next descriptor. */
+ next_i = ar_next_buffer_index(i);
+ rmb(); /* read descriptors in order */
+ next_res_count = ACCESS_ONCE(
+ ctx->descriptors[next_i].res_count);
+ /*
+ * If the next descriptor is still empty, we must stop at this
+ * descriptor.
+ */
+ if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+ /*
+ * The exception is when the DMA data for one packet is
+ * split over three buffers; in this case, the middle
+ * buffer's descriptor might never be updated by the
+ * controller and still look empty, and we have to peek
+ * at the third one.
+ */
+ if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+ next_i = ar_next_buffer_index(next_i);
+ rmb();
+ next_res_count = ACCESS_ONCE(
+ ctx->descriptors[next_i].res_count);
+ if (next_res_count != cpu_to_le16(PAGE_SIZE))
+ goto next_buffer_is_active;
+ }
- for (ab = ctx->current_buffer; ab; ab = ab_next) {
- ab_next = ab->next;
- offset = offsetof(struct ar_buffer, data);
- ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
- dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
- ab, ab_bus);
+ break;
+ }
+
+next_buffer_is_active:
+ i = next_i;
+ res_count = next_res_count;
+ }
+
+ rmb(); /* read res_count before the DMA data */
+
+ *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+ if (*buffer_offset > PAGE_SIZE) {
+ *buffer_offset = 0;
+ ar_context_abort(ctx, "corrupted descriptor");
+ }
+
+ return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+ unsigned int end_buffer_index,
+ unsigned int end_buffer_offset)
+{
+ unsigned int i;
+
+ i = ar_first_buffer_index(ctx);
+ while (i != end_buffer_index) {
+ dma_sync_single_for_cpu(ctx->ohci->card.device,
+ ar_buffer_bus(ctx, i),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ i = ar_next_buffer_index(i);
}
+ if (end_buffer_offset > 0)
+ dma_sync_single_for_cpu(ctx->ohci->card.device,
+ ar_buffer_bus(ctx, i),
+ end_buffer_offset, DMA_FROM_DEVICE);
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
@@ -689,6 +791,10 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
p.payload_length = p.header[3] >> 16;
+ if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+ ar_context_abort(ctx, "invalid packet length");
+ return NULL;
+ }
break;
case TCODE_WRITE_RESPONSE:
@@ -699,9 +805,8 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;
default:
- /* FIXME: Stop context, discard everything, and restart? */
- p.header_length = 0;
- p.payload_length = 0;
+ ar_context_abort(ctx, "invalid tcode");
+ return NULL;
}
p.payload = (void *) buffer + p.header_length;
@@ -751,121 +856,147 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
return buffer + length + 1;
}
+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
+{
+ void *next;
+
+ while (p < end) {
+ next = handle_ar_packet(ctx, p);
+ if (!next)
+ return p;
+ p = next;
+ }
+
+ return p;
+}
+
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+ unsigned int i;
+
+ i = ar_first_buffer_index(ctx);
+ while (i != end_buffer) {
+ dma_sync_single_for_device(ctx->ohci->card.device,
+ ar_buffer_bus(ctx, i),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ ar_context_link_page(ctx, i);
+ i = ar_next_buffer_index(i);
+ }
+}
+
static void ar_context_tasklet(unsigned long data)
{
struct ar_context *ctx = (struct ar_context *)data;
- struct ar_buffer *ab;
- struct descriptor *d;
- void *buffer, *end;
- __le16 res_count;
+ unsigned int end_buffer_index, end_buffer_offset;
+ void *p, *end;
- ab = ctx->current_buffer;
- d = &ab->descriptor;
+ p = ctx->pointer;
+ if (!p)
+ return;
- res_count = ACCESS_ONCE(d->res_count);
- if (res_count == 0) {
- size_t size, size2, rest, pktsize, size3, offset;
- dma_addr_t start_bus;
- void *start;
+ end_buffer_index = ar_search_last_active_buffer(ctx,
+ &end_buffer_offset);
+ ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+ end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
+ if (end_buffer_index < ar_first_buffer_index(ctx)) {
/*
- * This descriptor is finished and we may have a
- * packet split across this and the next buffer. We
- * reuse the page for reassembling the split packet.
+ * The filled part of the overall buffer wraps around; handle
+ * all packets up to the buffer end here. If the last packet
+ * wraps around, its tail will be visible after the buffer end
+ * because the buffer start pages are mapped there again.
*/
+ void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+ p = handle_ar_packets(ctx, p, buffer_end);
+ if (p < buffer_end)
+ goto error;
+ /* adjust p to point back into the actual buffer */
+ p -= AR_BUFFERS * PAGE_SIZE;
+ }
- offset = offsetof(struct ar_buffer, data);
- start = ab;
- start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
- buffer = ab->data;
-
- ab = ab->next;
- d = &ab->descriptor;
- size = start + PAGE_SIZE - ctx->pointer;
- /* valid buffer data in the next page */
- rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
- /* what actually fits in this page */
- size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
- memmove(buffer, ctx->pointer, size);
- memcpy(buffer + size, ab->data, size2);
-
- while (size > 0) {
- void *next = handle_ar_packet(ctx, buffer);
- pktsize = next - buffer;
- if (pktsize >= size) {
- /*
- * We have handled all the data that was
- * originally in this page, so we can now
- * continue in the next page.
- */
- buffer = next;
- break;
- }
- /* move the next packet to the start of the buffer */
- memmove(buffer, next, size + size2 - pktsize);
- size -= pktsize;
- /* fill up this page again */
- size3 = min(rest - size2,
- (size_t)PAGE_SIZE - offset - size - size2);
- memcpy(buffer + size + size2,
- (void *) ab->data + size2, size3);
- size2 += size3;
- }
-
- if (rest > 0) {
- /* handle the packets that are fully in the next page */
- buffer = (void *) ab->data +
- (buffer - (start + offset + size));
- end = (void *) ab->data + rest;
-
- while (buffer < end)
- buffer = handle_ar_packet(ctx, buffer);
+ p = handle_ar_packets(ctx, p, end);
+ if (p != end) {
+ if (p > end)
+ ar_context_abort(ctx, "inconsistent descriptor");
+ goto error;
+ }
- ctx->current_buffer = ab;
- ctx->pointer = end;
+ ctx->pointer = p;
+ ar_recycle_buffers(ctx, end_buffer_index);
- ar_context_link_page(ctx, start, start_bus);
- } else {
- ctx->pointer = start + PAGE_SIZE;
- }
- } else {
- buffer = ctx->pointer;
- ctx->pointer = end =
- (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
+ return;
- while (buffer < end)
- buffer = handle_ar_packet(ctx, buffer);
- }
+error:
+ ctx->pointer = NULL;
}
-static int ar_context_init(struct ar_context *ctx,
- struct fw_ohci *ohci, u32 regs)
+static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
+ unsigned int descriptors_offset, u32 regs)
{
- struct ar_buffer ab;
+ unsigned int i;
+ dma_addr_t dma_addr;
+ struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+ struct descriptor *d;
ctx->regs = regs;
ctx->ohci = ohci;
- ctx->last_buffer = &ab;
tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
- ar_context_add_page(ctx);
- ar_context_add_page(ctx);
- ctx->current_buffer = ab.next;
- ctx->pointer = ctx->current_buffer->data;
+ for (i = 0; i < AR_BUFFERS; i++) {
+ ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+ if (!ctx->pages[i])
+ goto out_of_memory;
+ dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ohci->card.device, dma_addr)) {
+ __free_page(ctx->pages[i]);
+ ctx->pages[i] = NULL;
+ goto out_of_memory;
+ }
+ set_page_private(ctx->pages[i], dma_addr);
+ }
+
+ for (i = 0; i < AR_BUFFERS; i++)
+ pages[i] = ctx->pages[i];
+ for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+ pages[AR_BUFFERS + i] = ctx->pages[i];
+ ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+ -1, PAGE_KERNEL);
+ if (!ctx->buffer)
+ goto out_of_memory;
+
+ ctx->descriptors = ohci->misc_buffer + descriptors_offset;
+ ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
+
+ for (i = 0; i < AR_BUFFERS; i++) {
+ d = &ctx->descriptors[i];
+ d->req_count = cpu_to_le16(PAGE_SIZE);
+ d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+ DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS);
+ d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
+ d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+ ar_next_buffer_index(i) * sizeof(struct descriptor));
+ }
return 0;
+
+out_of_memory:
+ ar_context_release(ctx);
+
+ return -ENOMEM;
}
static void ar_context_run(struct ar_context *ctx)
{
- struct ar_buffer *ab = ctx->current_buffer;
- dma_addr_t ab_bus;
- size_t offset;
+ unsigned int i;
+
+ for (i = 0; i < AR_BUFFERS; i++)
+ ar_context_link_page(ctx, i);
- offset = offsetof(struct ar_buffer, data);
- ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+ ctx->pointer = ctx->buffer;
- reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+ reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
flush_writes(ctx->ohci);
}
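
The reworked AR DMA scheme above replaces chained per-page allocations with a fixed ring of AR_BUFFERS pages whose descriptors live in a shared buffer, and maps the first AR_WRAPAROUND_PAGES of the ring a second time directly after its end via vm_map_ram(), so a packet that straddles the ring boundary appears virtually contiguous and needs no memmove reassembly. A small standalone sketch of the index arithmetic and the pointer fold-back this relies on (page size and ring size are illustrative assumptions):

#define SKETCH_PAGE_SIZE	4096
#define SKETCH_AR_BUFFERS	8
#define SKETCH_RING_BYTES	(SKETCH_AR_BUFFERS * SKETCH_PAGE_SIZE)

static unsigned int ar_next_index(unsigned int i)
{
	return (i + 1) % SKETCH_AR_BUFFERS;	/* circular descriptor list */
}

/* after parsing a packet that ran past the nominal end of the ring, fold
 * the parse pointer back into the first (identical) mapping of the pages */
static char *ar_fold_back(char *p, char *ring_start)
{
	if (p >= ring_start + SKETCH_RING_BYTES)
		p -= SKETCH_RING_BYTES;
	return p;
}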
@@ -1042,6 +1173,7 @@ static void context_run(struct context *ctx, u32 extra)
le32_to_cpu(ctx->last->branch_address));
reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+ ctx->running = true;
flush_writes(ohci);
}
@@ -1069,6 +1201,7 @@ static void context_stop(struct context *ctx)
int i;
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+ ctx->running = false;
flush_writes(ctx->ohci);
for (i = 0; i < 10; i++) {
@@ -1099,7 +1232,6 @@ static int at_context_queue_packet(struct context *ctx,
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
- u32 reg;
d = context_get_descriptors(ctx, 4, &d_bus);
if (d == NULL) {
@@ -1113,21 +1245,27 @@ static int at_context_queue_packet(struct context *ctx,
/*
* The DMA format for asynchronous link packets is different
* from the IEEE1394 layout, so shift the fields around
- * accordingly. If header_length is 8, it's a PHY packet, to
- * which we need to prepend an extra quadlet.
+ * accordingly.
*/
+ tcode = (packet->header[0] >> 4) & 0x0f;
header = (__le32 *) &d[1];
- switch (packet->header_length) {
- case 16:
- case 12:
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_WRITE_RESPONSE:
+ case TCODE_READ_QUADLET_REQUEST:
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_READ_QUADLET_RESPONSE:
+ case TCODE_READ_BLOCK_RESPONSE:
+ case TCODE_LOCK_REQUEST:
+ case TCODE_LOCK_RESPONSE:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
(packet->header[0] & 0xffff0000));
header[2] = cpu_to_le32(packet->header[2]);
- tcode = (packet->header[0] >> 4) & 0x0f;
if (TCODE_IS_BLOCK_PACKET(tcode))
header[3] = cpu_to_le32(packet->header[3]);
else
@@ -1136,18 +1274,18 @@ static int at_context_queue_packet(struct context *ctx,
d[0].req_count = cpu_to_le16(packet->header_length);
break;
- case 8:
+ case TCODE_LINK_INTERNAL:
header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
(packet->speed << 16));
- header[1] = cpu_to_le32(packet->header[0]);
- header[2] = cpu_to_le32(packet->header[1]);
+ header[1] = cpu_to_le32(packet->header[1]);
+ header[2] = cpu_to_le32(packet->header[2]);
d[0].req_count = cpu_to_le16(12);
- if (is_ping_packet(packet->header))
+ if (is_ping_packet(&packet->header[1]))
d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
break;
- case 4:
+ case TCODE_STREAM_DATA:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
@@ -1197,6 +1335,8 @@ static int at_context_queue_packet(struct context *ctx,
* some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
* up stalling out. So we just bail out in software and try again
* later, and everyone is happy.
+ * FIXME: Test of IntEvent.busReset may no longer be necessary since we
+ * flush AT queues in bus_reset_tasklet.
* FIXME: Document how the locking works.
*/
if (ohci->generation != packet->generation ||
@@ -1210,14 +1350,23 @@ static int at_context_queue_packet(struct context *ctx,
context_append(ctx, d, z, 4 - z);
- /* If the context isn't already running, start it up. */
- reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
- if ((reg & CONTEXT_RUN) == 0)
+ if (!ctx->running)
context_run(ctx, 0);
return 0;
}
+static void at_context_flush(struct context *ctx)
+{
+ tasklet_disable(&ctx->tasklet);
+
+ ctx->flushing = true;
+ context_tasklet((unsigned long)ctx);
+ ctx->flushing = false;
+
+ tasklet_enable(&ctx->tasklet);
+}
+
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1227,7 +1376,7 @@ static int handle_at_packet(struct context *context,
struct fw_ohci *ohci = context->ohci;
int evt;
- if (last->transfer_status == 0)
+ if (last->transfer_status == 0 && !context->flushing)
/* This descriptor isn't done yet, stop iteration. */
return 0;
@@ -1261,11 +1410,15 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_missing_ack:
- /*
- * Using a valid (current) generation count, but the
- * node is not on the bus or not sending acks.
- */
- packet->ack = RCODE_NO_ACK;
+ if (context->flushing)
+ packet->ack = RCODE_GENERATION;
+ else {
+ /*
+ * Using a valid (current) generation count, but the
+ * node is not on the bus or not sending acks.
+ */
+ packet->ack = RCODE_NO_ACK;
+ }
break;
case ACK_COMPLETE + 0x10:
@@ -1278,6 +1431,13 @@ static int handle_at_packet(struct context *context,
packet->ack = evt - 0x10;
break;
+ case OHCI1394_evt_no_status:
+ if (context->flushing) {
+ packet->ack = RCODE_GENERATION;
+ break;
+ }
+ /* fall through */
+
default:
packet->ack = RCODE_SEND_ERROR;
break;
@@ -1583,9 +1743,23 @@ static void bus_reset_tasklet(unsigned long data)
/* FIXME: Document how the locking works. */
spin_lock_irqsave(&ohci->lock, flags);
- ohci->generation = generation;
+ ohci->generation = -1; /* prevent AT packet queueing */
context_stop(&ohci->at_request_ctx);
context_stop(&ohci->at_response_ctx);
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ /*
+ * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
+ * packets in the AT queues and software needs to drain them.
+ * Some OHCI 1.1 controllers (JMicron) apparently require this too.
+ */
+ at_context_flush(&ohci->at_request_ctx);
+ at_context_flush(&ohci->at_response_ctx);
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
if (ohci->quirks & QUIRK_RESET_PACKET)
@@ -1653,8 +1827,12 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
- reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+ /*
+ * busReset and postedWriteErr must not be cleared yet
+ * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+ */
+ reg_write(ohci, OHCI1394_IntEventClear,
+ event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(event);
if (event & OHCI1394_selfIDComplete)
@@ -1672,30 +1850,41 @@ static irqreturn_t irq_handler(int irq, void *data)
if (event & OHCI1394_respTxComplete)
tasklet_schedule(&ohci->at_response_ctx.tasklet);
- iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+ if (event & OHCI1394_isochRx) {
+ iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+ reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
- while (iso_event) {
- i = ffs(iso_event) - 1;
- tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
- iso_event &= ~(1 << i);
+ while (iso_event) {
+ i = ffs(iso_event) - 1;
+ tasklet_schedule(
+ &ohci->ir_context_list[i].context.tasklet);
+ iso_event &= ~(1 << i);
+ }
}
- iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+ if (event & OHCI1394_isochTx) {
+ iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+ reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
- while (iso_event) {
- i = ffs(iso_event) - 1;
- tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
- iso_event &= ~(1 << i);
+ while (iso_event) {
+ i = ffs(iso_event) - 1;
+ tasklet_schedule(
+ &ohci->it_context_list[i].context.tasklet);
+ iso_event &= ~(1 << i);
+ }
}
if (unlikely(event & OHCI1394_regAccessFail))
fw_error("Register access failure - "
"please notify linux1394-devel@lists.sf.net\n");
- if (unlikely(event & OHCI1394_postedWriteErr))
+ if (unlikely(event & OHCI1394_postedWriteErr)) {
+ reg_read(ohci, OHCI1394_PostedWriteAddressHi);
+ reg_read(ohci, OHCI1394_PostedWriteAddressLo);
+ reg_write(ohci, OHCI1394_IntEventClear,
+ OHCI1394_postedWriteErr);
fw_error("PCI posted write error\n");
+ }
if (unlikely(event & OHCI1394_cycleTooLong)) {
if (printk_ratelimit())
@@ -1719,7 +1908,8 @@ static irqreturn_t irq_handler(int irq, void *data)
spin_lock(&ohci->lock);
update_bus_time(ohci);
spin_unlock(&ohci->lock);
- }
+ } else
+ flush_writes(ohci);
return IRQ_HANDLED;
}
@@ -2495,6 +2685,10 @@ static int ohci_start_iso(struct fw_iso_context *base,
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
context_run(&ctx->context, control);
+
+ ctx->sync = sync;
+ ctx->tags = tags;
+
break;
}
@@ -2592,6 +2786,26 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
return ret;
}
+#ifdef CONFIG_PM
+static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+{
+ int i;
+ struct iso_context *ctx;
+
+ for (i = 0 ; i < ohci->n_ir ; i++) {
+ ctx = &ohci->ir_context_list[i];
+ if (ctx->context.running)
+ ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+ }
+
+ for (i = 0 ; i < ohci->n_it ; i++) {
+ ctx = &ohci->it_context_list[i];
+ if (ctx->context.running)
+ ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+ }
+}
+#endif
+
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2901,7 +3115,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int i, err, n_ir, n_it;
+ int i, err;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2955,31 +3169,55 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
- ar_context_init(&ohci->ar_request_ctx, ohci,
- OHCI1394_AsReqRcvContextControlSet);
+ /*
+ * Because dma_alloc_coherent() allocates at least one page,
+ * we save space by using a common buffer for the AR request/
+ * response descriptors and the self IDs buffer.
+ */
+ BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
+ BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
+ ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
+ PAGE_SIZE,
+ &ohci->misc_buffer_bus,
+ GFP_KERNEL);
+ if (!ohci->misc_buffer) {
+ err = -ENOMEM;
+ goto fail_iounmap;
+ }
+
+ err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
+ OHCI1394_AsReqRcvContextControlSet);
+ if (err < 0)
+ goto fail_misc_buf;
- ar_context_init(&ohci->ar_response_ctx, ohci,
- OHCI1394_AsRspRcvContextControlSet);
+ err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
+ OHCI1394_AsRspRcvContextControlSet);
+ if (err < 0)
+ goto fail_arreq_ctx;
- context_init(&ohci->at_request_ctx, ohci,
- OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+ err = context_init(&ohci->at_request_ctx, ohci,
+ OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+ if (err < 0)
+ goto fail_arrsp_ctx;
- context_init(&ohci->at_response_ctx, ohci,
- OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+ err = context_init(&ohci->at_response_ctx, ohci,
+ OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+ if (err < 0)
+ goto fail_atreq_ctx;
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
- n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * n_ir;
+ ohci->n_ir = hweight32(ohci->ir_context_mask);
+ size = sizeof(struct iso_context) * ohci->n_ir;
ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
- n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * n_it;
+ ohci->n_it = hweight32(ohci->it_context_mask);
+ size = sizeof(struct iso_context) * ohci->n_it;
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
@@ -2987,15 +3225,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_contexts;
}
- /* self-id dma buffer allocation */
- ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
- SELF_ID_BUF_SIZE,
- &ohci->self_id_bus,
- GFP_KERNEL);
- if (ohci->self_id_cpu == NULL) {
- err = -ENOMEM;
- goto fail_contexts;
- }
+ ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2;
+ ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
bus_options = reg_read(ohci, OHCI1394_BusOptions);
max_receive = (bus_options >> 12) & 0xf;
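
The single coherent page allocated above as ohci->misc_buffer is shared by three users, and the BUILD_BUG_ON checks guarantee that the pieces fit. The resulting layout, as used by the offsets in this patch:

/*
 * misc_buffer + 0           : AR request context descriptors
 * misc_buffer + PAGE_SIZE/4 : AR response context descriptors
 * misc_buffer + PAGE_SIZE/2 : self-ID receive buffer (SELF_ID_BUF_SIZE)
 */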
@@ -3005,26 +3236,30 @@ static int __devinit pci_probe(struct pci_dev *dev,
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
- goto fail_self_id;
+ goto fail_contexts;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
"%d IR + %d IT contexts, quirks 0x%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff,
- n_ir, n_it, ohci->quirks);
+ ohci->n_ir, ohci->n_it, ohci->quirks);
return 0;
- fail_self_id:
- dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
- ohci->self_id_cpu, ohci->self_id_bus);
fail_contexts:
kfree(ohci->ir_context_list);
kfree(ohci->it_context_list);
context_release(&ohci->at_response_ctx);
+ fail_atreq_ctx:
context_release(&ohci->at_request_ctx);
+ fail_arrsp_ctx:
ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
ar_context_release(&ohci->ar_request_ctx);
+ fail_misc_buf:
+ dma_free_coherent(ohci->card.device, PAGE_SIZE,
+ ohci->misc_buffer, ohci->misc_buffer_bus);
+ fail_iounmap:
pci_iounmap(dev, ohci->registers);
fail_iomem:
pci_release_region(dev, 0);
@@ -3063,10 +3298,10 @@ static void pci_remove(struct pci_dev *dev)
if (ohci->config_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
ohci->config_rom, ohci->config_rom_bus);
- dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
- ohci->self_id_cpu, ohci->self_id_bus);
ar_context_release(&ohci->ar_request_ctx);
ar_context_release(&ohci->ar_response_ctx);
+ dma_free_coherent(ohci->card.device, PAGE_SIZE,
+ ohci->misc_buffer, ohci->misc_buffer_bus);
context_release(&ohci->at_request_ctx);
context_release(&ohci->at_response_ctx);
kfree(ohci->it_context_list);
@@ -3117,7 +3352,20 @@ static int pci_resume(struct pci_dev *dev)
return err;
}
- return ohci_enable(&ohci->card, NULL, 0);
+ /* Some systems don't set up the GUID register on resume from RAM */
+ if (!reg_read(ohci, OHCI1394_GUIDLo) &&
+ !reg_read(ohci, OHCI1394_GUIDHi)) {
+ reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
+ reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
+ }
+
+ err = ohci_enable(&ohci->card, NULL, 0);
+ if (err)
+ return err;
+
+ ohci_resume_iso_dma(ohci);
+
+ return 0;
}
#endif
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e8b6a13515bd..e710424b59ea 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -27,7 +27,7 @@ config EDD_OFF
using the kernel parameter 'edd={on|skipmbr|off}'.
config FIRMWARE_MEMMAP
- bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+ bool "Add firmware-provided memory map to sysfs" if EXPERT
default X86
help
Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668177..bcb1126e3d00 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
static void __init dmi_dump_ids(void)
{
+ const char *board; /* Board Name is optional */
+
printk(KERN_DEBUG "DMI: ");
- print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
- printk(KERN_CONT "/");
+ print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
+ printk(KERN_CONT " ");
print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+ board = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board) {
+ printk(KERN_CONT "/");
+ print_filtered(board);
+ }
printk(KERN_CONT ", BIOS ");
print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
printk(KERN_CONT " ");
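
With the change above, the boot-time identification line leads with the system vendor, and the board name is appended only when the DMI table actually provides one. Two illustrative (entirely made-up) forms of the resulting output:

  DMI: ExampleVendor ExampleProduct/ExampleBoard, BIOS ...
  DMI: ExampleVendor ExampleProduct, BIOS ...        (no board name in DMI)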
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 082495bb08a7..664660e56335 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -118,7 +118,7 @@ config GPIO_SCH
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on GPIOLIB
+ depends on GPIOLIB && MFD_SUPPORT && PCI
select MFD_CORE
select MFD_VX855
help
@@ -295,7 +295,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && !CS5535_GPIO
+ depends on PCI && X86 && !CS5535_GPIO
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -333,6 +333,15 @@ config GPIO_PCH
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
+config GPIO_ML_IOH
+ tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+ depends on PCI
+ help
+ ML7213 is a companion chip for the Intel Atom E6xx series.
+ This driver can be used for the OKI SEMICONDUCTOR ML7213 IOH (Input/
+ Output Hub), which is intended for IVI (In-Vehicle Infotainment) use.
+ This driver can access the IOH's GPIO device.
+
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -342,6 +351,7 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
depends on PCI && GPIOLIB
+ select MFD_SUPPORT
select MFD_CORE
select MFD_RDC321X
help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 39bfd7a37650..3351cf87b0ed 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
+obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 0871f78af593..33fc685cb385 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -146,9 +146,10 @@ static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off)
return dev->irq_base + off;
}
-static void adp5588_irq_bus_lock(unsigned int irq)
+static void adp5588_irq_bus_lock(struct irq_data *d)
{
- struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+
mutex_lock(&dev->irq_lock);
}
@@ -160,9 +161,9 @@ static void adp5588_irq_bus_lock(unsigned int irq)
* and unlocks the bus.
*/
-static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
{
- struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
@@ -175,31 +176,31 @@ static void adp5588_irq_bus_sync_unlock(unsigned int irq)
mutex_unlock(&dev->irq_lock);
}
-static void adp5588_irq_mask(unsigned int irq)
+static void adp5588_irq_mask(struct irq_data *d)
{
- struct adp5588_gpio *dev = get_irq_chip_data(irq);
- unsigned gpio = irq - dev->irq_base;
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+ unsigned gpio = d->irq - dev->irq_base;
dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
}
-static void adp5588_irq_unmask(unsigned int irq)
+static void adp5588_irq_unmask(struct irq_data *d)
{
- struct adp5588_gpio *dev = get_irq_chip_data(irq);
- unsigned gpio = irq - dev->irq_base;
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+ unsigned gpio = d->irq - dev->irq_base;
dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
}
-static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct adp5588_gpio *dev = get_irq_chip_data(irq);
- uint16_t gpio = irq - dev->irq_base;
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+ uint16_t gpio = d->irq - dev->irq_base;
unsigned bank, bit;
if ((type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
- irq, type);
+ d->irq, type);
return -EINVAL;
}
@@ -222,11 +223,11 @@ static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
static struct irq_chip adp5588_irq_chip = {
.name = "adp5588",
- .mask = adp5588_irq_mask,
- .unmask = adp5588_irq_unmask,
- .bus_lock = adp5588_irq_bus_lock,
- .bus_sync_unlock = adp5588_irq_bus_sync_unlock,
- .set_type = adp5588_irq_set_type,
+ .irq_mask = adp5588_irq_mask,
+ .irq_unmask = adp5588_irq_unmask,
+ .irq_bus_lock = adp5588_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5588_irq_bus_sync_unlock,
+ .irq_set_type = adp5588_irq_set_type,
};
static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
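
The adp5588 conversion above follows the then-new genirq convention: chip callbacks receive a struct irq_data * rather than a bare IRQ number, fetch their driver state with irq_data_get_irq_chip_data(), read the IRQ number from d->irq, and are wired up through the irq_*-named members of struct irq_chip. A stripped-down sketch of that pattern (the example_chip type and mask_cache field are placeholders, not from this driver):

/* assumes <linux/irq.h> and <linux/bitops.h> */
struct example_chip {
	unsigned int irq_base;
	u32 mask_cache;
};

static void example_irq_mask(struct irq_data *d)
{
	struct example_chip *chip = irq_data_get_irq_chip_data(d);
	unsigned int offset = d->irq - chip->irq_base;

	chip->mask_cache &= ~BIT(offset);	/* written back on bus unlock */
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
};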
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index d3e55a0ae92b..0d05ea7d499b 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -11,13 +11,13 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/cs5535.h>
+#include <asm/msr.h>
#define DRV_NAME "cs5535-gpio"
-#define GPIO_BAR 1
/*
* Some GPIO pins
@@ -46,7 +46,7 @@ static struct cs5535_gpio_chip {
struct gpio_chip chip;
resource_size_t base;
- struct pci_dev *pdev;
+ struct platform_device *pdev;
spinlock_t lock;
} cs5535_gpio_chip;
@@ -144,6 +144,57 @@ int cs5535_gpio_isset(unsigned offset, unsigned int reg)
}
EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
+int cs5535_gpio_set_irq(unsigned group, unsigned irq)
+{
+ uint32_t lo, hi;
+
+ if (group > 7 || irq > 15)
+ return -EINVAL;
+
+ rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+
+ lo &= ~(0xF << (group * 4));
+ lo |= (irq & 0xF) << (group * 4);
+
+ wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq);
+
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ uint32_t shift = (offset % 8) * 4;
+ unsigned long flags;
+ uint32_t val;
+
+ if (offset >= 24)
+ offset = GPIO_MAP_W;
+ else if (offset >= 16)
+ offset = GPIO_MAP_Z;
+ else if (offset >= 8)
+ offset = GPIO_MAP_Y;
+ else
+ offset = GPIO_MAP_X;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ val = inl(chip->base + offset);
+
+ /* Clear whatever was there before */
+ val &= ~(0xF << shift);
+
+ /* Set the new value */
+ val |= ((pair & 7) << shift);
+
+ /* Set the PME bit if this is a PME event */
+ if (pme)
+ val |= (1 << (shift + 3));
+
+ outl(val, chip->base + offset);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event);
+
/*
* Generic gpio_chip API support.
*/
@@ -249,10 +300,10 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
},
};
-static int __init cs5535_gpio_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
{
- int err;
+ struct resource *res;
+ int err = -EIO;
ulong mask_orig = mask;
/* There are two ways to get the GPIO base address; one is by
@@ -262,25 +313,23 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev,
* it turns out to be unreliable in the face of crappy BIOSes, we
* can always go back to using MSRs.. */
- err = pci_enable_device_io(pdev);
- if (err) {
- dev_err(&pdev->dev, "can't enable device IO\n");
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
goto done;
}
- err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
+ if (!request_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "can't request region\n");
goto done;
}
/* set up the driver-specific struct */
- cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
+ cs5535_gpio_chip.base = res->start;
cs5535_gpio_chip.pdev = pdev;
spin_lock_init(&cs5535_gpio_chip.lock);
- dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
- (unsigned long long) cs5535_gpio_chip.base);
+ dev_info(&pdev->dev, "reserved resource region %pR\n", res);
/* mask out reserved pins */
mask &= 0x1F7FFFFF;
@@ -298,78 +347,49 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev,
if (err)
goto release_region;
- dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
+ dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
return 0;
release_region:
- pci_release_region(pdev, GPIO_BAR);
+ release_region(res->start, resource_size(res));
done:
return err;
}
-static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
+static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
{
+ struct resource *r;
int err;
err = gpiochip_remove(&cs5535_gpio_chip.chip);
if (err) {
/* uhh? */
dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
- }
- pci_release_region(pdev, GPIO_BAR);
-}
-
-static struct pci_device_id cs5535_gpio_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
-
-/*
- * We can't use the standard PCI driver registration stuff here, since
- * that allows only one driver to bind to each PCI device (and we want
- * multiple drivers to be able to bind to the device). Instead, manually
- * scan for the PCI device, request a single region, and keep track of the
- * devices that we're using.
- */
-
-static int __init cs5535_gpio_scan_pci(void)
-{
- struct pci_dev *pdev;
- int err = -ENODEV;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
- pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
- cs5535_gpio_pci_tbl[i].device, NULL);
- if (pdev) {
- err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
- if (err)
- pci_dev_put(pdev);
-
- /* we only support a single CS5535/6 southbridge */
- break;
- }
+ return err;
}
- return err;
+ r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ release_region(r->start, resource_size(r));
+ return 0;
}
-static void __exit cs5535_gpio_free_pci(void)
-{
- cs5535_gpio_remove(cs5535_gpio_chip.pdev);
- pci_dev_put(cs5535_gpio_chip.pdev);
-}
+static struct platform_driver cs5535_gpio_drv = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = cs5535_gpio_probe,
+ .remove = __devexit_p(cs5535_gpio_remove),
+};
static int __init cs5535_gpio_init(void)
{
- return cs5535_gpio_scan_pci();
+ return platform_driver_register(&cs5535_gpio_drv);
}
static void __exit cs5535_gpio_exit(void)
{
- cs5535_gpio_free_pci();
+ platform_driver_unregister(&cs5535_gpio_drv);
}
module_init(cs5535_gpio_init);
@@ -378,3 +398,4 @@ module_exit(cs5535_gpio_exit);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
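
With the PCI boilerplate gone, cs5535-gpio only binds to a platform device that carries its I/O range; in the mainline tree that device is created by a CS5535 multi-function (MFD) parent driver. A hedged sketch of what such a parent registration looks like — the demo_* names and the 0x6100 base are purely illustrative:

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Illustrative I/O window; the real base comes from the southbridge
 * and is owned by the parent MFD driver, not hard-coded like this. */
static struct resource demo_cs5535_gpio_res = {
	.start	= 0x6100,
	.end	= 0x6100 + 0xff,
	.flags	= IORESOURCE_IO,
};

static int demo_register_cs5535_gpio(void)
{
	struct platform_device *pdev;

	/* "cs5535-gpio" matches DRV_NAME, so the driver above binds and
	 * picks the range up via platform_get_resource(). */
	pdev = platform_device_register_simple("cs5535-gpio", -1,
					       &demo_cs5535_gpio_res, 1);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
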
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 64db9dc3a275..54d70a47afc1 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -134,10 +134,10 @@ static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return lnw->irq_base + offset;
}
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(struct irq_data *d, unsigned type)
{
- struct lnw_gpio *lnw = get_irq_chip_data(irq);
- u32 gpio = irq - lnw->irq_base;
+ struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+ u32 gpio = d->irq - lnw->irq_base;
unsigned long flags;
u32 value;
void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -162,19 +162,19 @@ static int lnw_irq_type(unsigned irq, unsigned type)
return 0;
}
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(struct irq_data *d)
{
}
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(struct irq_data *d)
{
}
static struct irq_chip lnw_irqchip = {
.name = "LNW-GPIO",
- .mask = lnw_irq_mask,
- .unmask = lnw_irq_unmask,
- .set_type = lnw_irq_type,
+ .irq_mask = lnw_irq_mask,
+ .irq_unmask = lnw_irq_unmask,
+ .irq_set_type = lnw_irq_type,
};
static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
+ struct lnw_gpio *lnw = get_irq_data(irq);
u32 base, gpio;
void __iomem *gedr;
u32 gedr_v;
@@ -206,7 +206,12 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
/* clear the edge detect status bit */
writel(gedr_v, gedr);
}
- desc->chip->eoi(irq);
+
+ if (desc->chip->irq_eoi)
+ desc->chip->irq_eoi(irq_get_irq_data(irq));
+ else
+ dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
+
}
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
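
The langwell change also hardens the chained handler: the parent controller's end-of-interrupt is now issued through the irq_data-based irq_eoi callback, with a warning rather than a NULL dereference if the parent chip does not provide one. The same defensive shape in isolation (demo_* name is illustrative, child dispatch elided):

#include <linux/irq.h>
#include <linux/kernel.h>

static void demo_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	/* ... read the hardware status and generic_handle_irq() each child ... */

	if (desc->chip->irq_eoi)
		desc->chip->irq_eoi(irq_get_irq_data(irq));
	else
		pr_warn("no irq_eoi for parent irq %d\n", irq);
}
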
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 9cad60f9e962..9e1d01f0071a 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -327,40 +327,40 @@ static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
return chip->irq_base + off;
}
-static void max732x_irq_mask(unsigned int irq)
+static void max732x_irq_mask(struct irq_data *d)
{
- struct max732x_chip *chip = get_irq_chip_data(irq);
+ struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+ chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base));
}
-static void max732x_irq_unmask(unsigned int irq)
+static void max732x_irq_unmask(struct irq_data *d)
{
- struct max732x_chip *chip = get_irq_chip_data(irq);
+ struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+ chip->irq_mask_cur |= 1 << (d->irq - chip->irq_base);
}
-static void max732x_irq_bus_lock(unsigned int irq)
+static void max732x_irq_bus_lock(struct irq_data *d)
{
- struct max732x_chip *chip = get_irq_chip_data(irq);
+ struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
mutex_lock(&chip->irq_lock);
chip->irq_mask_cur = chip->irq_mask;
}
-static void max732x_irq_bus_sync_unlock(unsigned int irq)
+static void max732x_irq_bus_sync_unlock(struct irq_data *d)
{
- struct max732x_chip *chip = get_irq_chip_data(irq);
+ struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
max732x_irq_update_mask(chip);
mutex_unlock(&chip->irq_lock);
}
-static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct max732x_chip *chip = get_irq_chip_data(irq);
- uint16_t off = irq - chip->irq_base;
+ struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+ uint16_t off = d->irq - chip->irq_base;
uint16_t mask = 1 << off;
if (!(mask & chip->dir_input)) {
@@ -371,7 +371,7 @@ static int max732x_irq_set_type(unsigned int irq, unsigned int type)
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
- irq, type);
+ d->irq, type);
return -EINVAL;
}
@@ -390,11 +390,11 @@ static int max732x_irq_set_type(unsigned int irq, unsigned int type)
static struct irq_chip max732x_irq_chip = {
.name = "max732x",
- .mask = max732x_irq_mask,
- .unmask = max732x_irq_unmask,
- .bus_lock = max732x_irq_bus_lock,
- .bus_sync_unlock = max732x_irq_bus_sync_unlock,
- .set_type = max732x_irq_set_type,
+ .irq_mask = max732x_irq_mask,
+ .irq_unmask = max732x_irq_unmask,
+ .irq_bus_lock = max732x_irq_bus_lock,
+ .irq_bus_sync_unlock = max732x_irq_bus_sync_unlock,
+ .irq_set_type = max732x_irq_set_type,
};
static uint8_t max732x_irq_pending(struct max732x_chip *chip)
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
new file mode 100644
index 000000000000..cead8e6ff345
--- /dev/null
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCI_VENDOR_ID_ROHM 0x10DB
+
+struct ioh_reg_comn {
+ u32 ien;
+ u32 istatus;
+ u32 idisp;
+ u32 iclr;
+ u32 imask;
+ u32 imaskclr;
+ u32 po;
+ u32 pi;
+ u32 pm;
+ u32 im_0;
+ u32 im_1;
+ u32 reserved;
+};
+
+struct ioh_regs {
+ struct ioh_reg_comn regs[8];
+ u32 reserve1[16];
+ u32 ioh_sel_reg[4];
+ u32 reserve2[11];
+ u32 srst;
+};
+
+/**
+ * struct ioh_gpio_reg_data - The register store data.
+ * @po_reg: To store contents of PO register.
+ * @pm_reg: To store contents of PM register.
+ */
+struct ioh_gpio_reg_data {
+ u32 po_reg;
+ u32 pm_reg;
+};
+
+/**
+ * struct ioh_gpio - GPIO private data structure.
+ * @base: PCI base address of Memory mapped I/O register.
+ * @reg: Memory mapped IOH GPIO register list.
+ * @dev: Pointer to device structure.
+ * @gpio: Data for GPIO infrastructure.
+ * @ioh_gpio_reg: Memory mapped Register data is saved here
+ * when suspend.
+ * @ch: Indicate GPIO channel
+ */
+struct ioh_gpio {
+ void __iomem *base;
+ struct ioh_regs __iomem *reg;
+ struct device *dev;
+ struct gpio_chip gpio;
+ struct ioh_gpio_reg_data ioh_gpio_reg;
+ struct mutex lock;
+ int ch;
+};
+
+static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
+
+static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ u32 reg_val;
+ struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+
+ mutex_lock(&chip->lock);
+ reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+ mutex_unlock(&chip->lock);
+}
+
+static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+
+ return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr);
+}
+
+static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+ u32 pm;
+ u32 reg_val;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+ ((1 << num_ports[chip->ch]) - 1);
+ pm |= (1 << nr);
+ iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+
+ reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+ u32 pm;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+ ((1 << num_ports[chip->ch]) - 1);
+ pm &= ~(1 << nr);
+ iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+/*
+ * Save register configuration and disable interrupts.
+ */
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+{
+ chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po);
+ chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+{
+ /* to store contents of PO register */
+ iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po);
+ /* to store contents of PM register */
+ iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
+}
+
+static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = ioh_gpio_direction_input;
+ gpio->get = ioh_gpio_get;
+ gpio->direction_output = ioh_gpio_direction_output;
+ gpio->set = ioh_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = -1;
+ gpio->ngpio = num_port;
+ gpio->can_sleep = 0;
+}
+
+static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ int i;
+ struct ioh_gpio *chip;
+ void __iomem *base;
+ void __iomem *chip_save;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__);
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions failed-%d", ret);
+ goto err_request_regions;
+ }
+
+ base = pci_iomap(pdev, 1, 0);
+ if (base == 0) {
+ dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL);
+ if (chip_save == NULL) {
+ dev_err(&pdev->dev, "%s : kzalloc failed", __func__);
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ chip = chip_save;
+ for (i = 0; i < 8; i++, chip++) {
+ chip->dev = &pdev->dev;
+ chip->base = base;
+ chip->reg = chip->base;
+ chip->ch = i;
+ mutex_init(&chip->lock);
+ ioh_gpio_setup(chip, num_ports[i]);
+ ret = gpiochip_add(&chip->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n");
+ goto err_gpiochip_add;
+ }
+ }
+
+ chip = chip_save;
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+
+err_gpiochip_add:
+ for (; i != 0; i--) {
+ chip--;
+ ret = gpiochip_remove(&chip->gpio);
+ if (ret)
+ dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
+ }
+ kfree(chip_save);
+
+err_kzalloc:
+ pci_iounmap(pdev, base);
+
+err_iomap:
+ pci_release_regions(pdev);
+
+err_request_regions:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+
+ dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
+{
+ int err;
+ int i;
+ struct ioh_gpio *chip = pci_get_drvdata(pdev);
+ void __iomem *chip_save;
+
+ chip_save = chip;
+ for (i = 0; i < 8; i++, chip++) {
+ err = gpiochip_remove(&chip->gpio);
+ if (err)
+ dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+ }
+
+ chip = chip_save;
+ pci_iounmap(pdev, chip->base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ s32 ret;
+ struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+ ioh_gpio_save_reg_conf(chip);
+ ioh_gpio_restore_reg_conf(chip);
+
+ ret = pci_save_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+ return ret;
+ }
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_wake(pdev, PCI_D0, 1);
+ if (ret)
+ dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+ return 0;
+}
+
+static int ioh_gpio_resume(struct pci_dev *pdev)
+{
+ s32 ret;
+ struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+ ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+ return ret;
+ }
+ pci_restore_state(pdev);
+
+ iowrite32(0x01, &chip->reg->srst);
+ iowrite32(0x00, &chip->reg->srst);
+ ioh_gpio_restore_reg_conf(chip);
+
+ return 0;
+}
+#else
+#define ioh_gpio_suspend NULL
+#define ioh_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
+ { 0, }
+};
+
+static struct pci_driver ioh_gpio_driver = {
+ .name = "ml_ioh_gpio",
+ .id_table = ioh_gpio_pcidev_id,
+ .probe = ioh_gpio_probe,
+ .remove = __devexit_p(ioh_gpio_remove),
+ .suspend = ioh_gpio_suspend,
+ .resume = ioh_gpio_resume
+};
+
+static int __init ioh_gpio_pci_init(void)
+{
+ return pci_register_driver(&ioh_gpio_driver);
+}
+module_init(ioh_gpio_pci_init);
+
+static void __exit ioh_gpio_pci_exit(void)
+{
+ pci_unregister_driver(&ioh_gpio_driver);
+}
+module_exit(ioh_gpio_pci_exit);
+
+MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
+MODULE_LICENSE("GPL");
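
ioh_gpio_probe() above registers eight gpio_chip banks that share one BAR mapping and one allocation, and unwinds in reverse order on failure. The goto-ladder pattern it relies on, condensed — the demo_* names are illustrative and the per-bank loop is reduced to a stub:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

/* Stand-in for the loop that calls ioh_gpio_setup()/gpiochip_add()
 * for each of the eight banks. */
static int demo_setup_banks(struct pci_dev *pdev, void __iomem *base)
{
	return 0;
}

static int demo_probe(struct pci_dev *pdev)
{
	void __iomem *base;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, "demo");
	if (ret)
		goto err_disable;

	base = pci_iomap(pdev, 1, 0);		/* registers live in BAR 1 */
	if (!base) {
		ret = -ENOMEM;
		goto err_release;
	}

	ret = demo_setup_banks(pdev, base);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	pci_iounmap(pdev, base);
err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}
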
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 501866662e05..b473429eee75 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@ struct pca953x_chip {
unsigned gpio_start;
uint16_t reg_output;
uint16_t reg_direction;
+ struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
+ mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
if (ret)
- return ret;
+ goto exit;
chip->reg_direction = reg_val;
- return 0;
+ ret = 0;
+exit:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
chip = container_of(gc, struct pca953x_chip, gpio_chip);
+ mutex_lock(&chip->i2c_lock);
/* set output level */
if (val)
reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
if (ret)
- return ret;
+ goto exit;
chip->reg_output = reg_val;
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
reg_val = chip->reg_direction & ~(1u << off);
ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
if (ret)
- return ret;
+ goto exit;
chip->reg_direction = reg_val;
- return 0;
+ ret = 0;
+exit:
+ mutex_unlock(&chip->i2c_lock);
+ return ret;
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
+ mutex_lock(&chip->i2c_lock);
ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
* do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
+ mutex_lock(&chip->i2c_lock);
if (val)
reg_val = chip->reg_output | (1u << off);
else
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
if (ret)
- return;
+ goto exit;
chip->reg_output = reg_val;
+exit:
+ mutex_unlock(&chip->i2c_lock);
}
static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -228,30 +242,30 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
return chip->irq_base + off;
}
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_data *d)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask &= ~(1 << (irq - chip->irq_base));
+ chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
}
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_data *d)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask |= 1 << (irq - chip->irq_base);
+ chip->irq_mask |= 1 << (d->irq - chip->irq_base);
}
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_data *d)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
mutex_lock(&chip->irq_lock);
}
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
uint16_t new_irqs;
uint16_t level;
@@ -268,15 +282,15 @@ static void pca953x_irq_bus_sync_unlock(unsigned int irq)
mutex_unlock(&chip->irq_lock);
}
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
- uint16_t level = irq - chip->irq_base;
+ struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+ uint16_t level = d->irq - chip->irq_base;
uint16_t mask = 1 << level;
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
- irq, type);
+ d->irq, type);
return -EINVAL;
}
@@ -295,11 +309,11 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
static struct irq_chip pca953x_irq_chip = {
.name = "pca953x",
- .mask = pca953x_irq_mask,
- .unmask = pca953x_irq_unmask,
- .bus_lock = pca953x_irq_bus_lock,
- .bus_sync_unlock = pca953x_irq_bus_sync_unlock,
- .set_type = pca953x_irq_set_type,
+ .irq_mask = pca953x_irq_mask,
+ .irq_unmask = pca953x_irq_unmask,
+ .irq_bus_lock = pca953x_irq_bus_lock,
+ .irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock,
+ .irq_set_type = pca953x_irq_set_type,
};
static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->names = pdata->names;
+ mutex_init(&chip->i2c_lock);
+
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
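
The new i2c_lock above closes a race: two callers performing the cached read-modify-write of reg_output/reg_direction concurrently could lose an update or let the cache drift from the hardware. The locking pattern in isolation, with hypothetical demo_* names and the I2C access reduced to a stub:

#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_expander {
	struct mutex lock;	/* serializes cache updates and bus writes */
	u16 reg_output;		/* software copy of the output register */
};

/* Stand-in for the real I2C register write. */
static int demo_write_output(struct demo_expander *chip, u16 val)
{
	return 0;
}

static int demo_set_bit(struct demo_expander *chip, unsigned int off, int val)
{
	u16 reg_val;
	int ret;

	mutex_lock(&chip->lock);
	reg_val = val ? chip->reg_output | BIT(off)
		      : chip->reg_output & ~BIT(off);
	ret = demo_write_output(chip, reg_val);
	if (!ret)
		chip->reg_output = reg_val;	/* cache only what hit the bus */
	mutex_unlock(&chip->lock);

	return ret;
}
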
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 5005990f751f..2975d22daffe 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -129,10 +129,10 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
/*
* PL061 GPIO IRQ
*/
-static void pl061_irq_disable(unsigned irq)
+static void pl061_irq_disable(struct irq_data *d)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
- int offset = irq - chip->irq_base;
+ struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
@@ -143,10 +143,10 @@ static void pl061_irq_disable(unsigned irq)
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
-static void pl061_irq_enable(unsigned irq)
+static void pl061_irq_enable(struct irq_data *d)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
- int offset = irq - chip->irq_base;
+ struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
@@ -157,10 +157,10 @@ static void pl061_irq_enable(unsigned irq)
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
-static int pl061_irq_type(unsigned irq, unsigned trigger)
+static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
- int offset = irq - chip->irq_base;
+ struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -203,9 +203,9 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
static struct irq_chip pl061_irqchip = {
.name = "GPIO",
- .enable = pl061_irq_enable,
- .disable = pl061_irq_disable,
- .set_type = pl061_irq_type,
+ .irq_enable = pl061_irq_enable,
+ .irq_disable = pl061_irq_disable,
+ .irq_set_type = pl061_irq_type,
};
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -214,7 +214,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
struct list_head *ptr;
struct pl061_gpio *chip;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
list_for_each(ptr, chip_list) {
unsigned long pending;
int offset;
@@ -229,7 +229,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
- desc->chip->unmask(irq);
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
static int pl061_probe(struct amba_device *dev, struct amba_id *id)
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index 7c9e6a052c45..eb2901f8ab5e 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -122,10 +122,10 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
- int offset = irq - stmpe_gpio->irq_base;
+ struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - stmpe_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -145,16 +145,16 @@ static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
return 0;
}
-static void stmpe_gpio_irq_lock(unsigned int irq)
+static void stmpe_gpio_irq_lock(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
mutex_lock(&stmpe_gpio->irq_lock);
}
-static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
+static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+ struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
struct stmpe *stmpe = stmpe_gpio->stmpe;
int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
static const u8 regmap[] = {
@@ -180,20 +180,20 @@ static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
mutex_unlock(&stmpe_gpio->irq_lock);
}
-static void stmpe_gpio_irq_mask(unsigned int irq)
+static void stmpe_gpio_irq_mask(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
- int offset = irq - stmpe_gpio->irq_base;
+ struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - stmpe_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
}
-static void stmpe_gpio_irq_unmask(unsigned int irq)
+static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
- int offset = irq - stmpe_gpio->irq_base;
+ struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - stmpe_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -202,11 +202,11 @@ static void stmpe_gpio_irq_unmask(unsigned int irq)
static struct irq_chip stmpe_gpio_irq_chip = {
.name = "stmpe-gpio",
- .bus_lock = stmpe_gpio_irq_lock,
- .bus_sync_unlock = stmpe_gpio_irq_sync_unlock,
- .mask = stmpe_gpio_irq_mask,
- .unmask = stmpe_gpio_irq_unmask,
- .set_type = stmpe_gpio_irq_set_type,
+ .irq_bus_lock = stmpe_gpio_irq_lock,
+ .irq_bus_sync_unlock = stmpe_gpio_irq_sync_unlock,
+ .irq_mask = stmpe_gpio_irq_mask,
+ .irq_unmask = stmpe_gpio_irq_unmask,
+ .irq_set_type = stmpe_gpio_irq_set_type,
};
static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index 823559ab0e24..e60be0015c9b 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -304,36 +304,36 @@ static int sx150x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return chip->irq_base + offset;
}
-static void sx150x_irq_mask(unsigned int irq)
+static void sx150x_irq_mask(struct irq_data *d)
{
- struct irq_chip *ic = get_irq_chip(irq);
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
struct sx150x_chip *chip;
unsigned n;
chip = container_of(ic, struct sx150x_chip, irq_chip);
- n = irq - chip->irq_base;
+ n = d->irq - chip->irq_base;
sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
}
-static void sx150x_irq_unmask(unsigned int irq)
+static void sx150x_irq_unmask(struct irq_data *d)
{
- struct irq_chip *ic = get_irq_chip(irq);
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
struct sx150x_chip *chip;
unsigned n;
chip = container_of(ic, struct sx150x_chip, irq_chip);
- n = irq - chip->irq_base;
+ n = d->irq - chip->irq_base;
sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
chip->irq_sense >> (n * 2));
}
-static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_chip *ic = get_irq_chip(irq);
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
struct sx150x_chip *chip;
unsigned n, val = 0;
@@ -341,7 +341,7 @@ static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
return -EINVAL;
chip = container_of(ic, struct sx150x_chip, irq_chip);
- n = irq - chip->irq_base;
+ n = d->irq - chip->irq_base;
if (flow_type & IRQ_TYPE_EDGE_RISING)
val |= 0x1;
@@ -386,9 +386,9 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
-static void sx150x_irq_bus_lock(unsigned int irq)
+static void sx150x_irq_bus_lock(struct irq_data *d)
{
- struct irq_chip *ic = get_irq_chip(irq);
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
struct sx150x_chip *chip;
chip = container_of(ic, struct sx150x_chip, irq_chip);
@@ -396,9 +396,9 @@ static void sx150x_irq_bus_lock(unsigned int irq)
mutex_lock(&chip->lock);
}
-static void sx150x_irq_bus_sync_unlock(unsigned int irq)
+static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
{
- struct irq_chip *ic = get_irq_chip(irq);
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
struct sx150x_chip *chip;
unsigned n;
@@ -437,16 +437,16 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
if (pdata->oscio_is_gpo)
++chip->gpio_chip.ngpio;
- chip->irq_chip.name = client->name;
- chip->irq_chip.mask = sx150x_irq_mask;
- chip->irq_chip.unmask = sx150x_irq_unmask;
- chip->irq_chip.set_type = sx150x_irq_set_type;
- chip->irq_chip.bus_lock = sx150x_irq_bus_lock;
- chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock;
- chip->irq_summary = -1;
- chip->irq_base = -1;
- chip->irq_sense = 0;
- chip->irq_set_type_pending = 0;
+ chip->irq_chip.name = client->name;
+ chip->irq_chip.irq_mask = sx150x_irq_mask;
+ chip->irq_chip.irq_unmask = sx150x_irq_unmask;
+ chip->irq_chip.irq_set_type = sx150x_irq_set_type;
+ chip->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
+ chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+ chip->irq_summary = -1;
+ chip->irq_base = -1;
+ chip->irq_sense = 0;
+ chip->irq_set_type_pending = 0;
}
static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c
index 180d584454fb..27200af1a595 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/tc3589x-gpio.c
@@ -110,10 +110,10 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
- int offset = irq - tc3589x_gpio->irq_base;
+ struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tc3589x_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -137,16 +137,16 @@ static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type)
return 0;
}
-static void tc3589x_gpio_irq_lock(unsigned int irq)
+static void tc3589x_gpio_irq_lock(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+ struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
mutex_lock(&tc3589x_gpio->irq_lock);
}
-static void tc3589x_gpio_irq_sync_unlock(unsigned int irq)
+static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+ struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
static const u8 regmap[] = {
[REG_IBE] = TC3589x_GPIOIBE0,
@@ -172,20 +172,20 @@ static void tc3589x_gpio_irq_sync_unlock(unsigned int irq)
mutex_unlock(&tc3589x_gpio->irq_lock);
}
-static void tc3589x_gpio_irq_mask(unsigned int irq)
+static void tc3589x_gpio_irq_mask(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
- int offset = irq - tc3589x_gpio->irq_base;
+ struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tc3589x_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
}
-static void tc3589x_gpio_irq_unmask(unsigned int irq)
+static void tc3589x_gpio_irq_unmask(struct irq_data *d)
{
- struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
- int offset = irq - tc3589x_gpio->irq_base;
+ struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tc3589x_gpio->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -194,11 +194,11 @@ static void tc3589x_gpio_irq_unmask(unsigned int irq)
static struct irq_chip tc3589x_gpio_irq_chip = {
.name = "tc3589x-gpio",
- .bus_lock = tc3589x_gpio_irq_lock,
- .bus_sync_unlock = tc3589x_gpio_irq_sync_unlock,
- .mask = tc3589x_gpio_irq_mask,
- .unmask = tc3589x_gpio_irq_unmask,
- .set_type = tc3589x_gpio_irq_set_type,
+ .irq_bus_lock = tc3589x_gpio_irq_lock,
+ .irq_bus_sync_unlock = tc3589x_gpio_irq_sync_unlock,
+ .irq_mask = tc3589x_gpio_irq_mask,
+ .irq_unmask = tc3589x_gpio_irq_unmask,
+ .irq_set_type = tc3589x_gpio_irq_set_type,
};
static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 45293662e950..58c8f30352dd 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -109,10 +109,10 @@ static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
/*
* GPIO IRQ
*/
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(struct irq_data *d)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
+ struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tgpio->irq_base;
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
@@ -121,10 +121,10 @@ static void timbgpio_irq_disable(unsigned irq)
spin_unlock_irqrestore(&tgpio->lock, flags);
}
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_data *d)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
+ struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tgpio->irq_base;
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
@@ -133,10 +133,10 @@ static void timbgpio_irq_enable(unsigned irq)
spin_unlock_irqrestore(&tgpio->lock, flags);
}
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
- int offset = irq - tgpio->irq_base;
+ struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+ int offset = d->irq - tgpio->irq_base;
unsigned long flags;
u32 lvr, flr, bflr = 0;
u32 ver;
@@ -199,7 +199,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
unsigned long ipr;
int offset;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
@@ -217,9 +217,9 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
static struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
- .enable = timbgpio_irq_enable,
- .disable = timbgpio_irq_disable,
- .set_type = timbgpio_irq_type,
+ .irq_enable = timbgpio_irq_enable,
+ .irq_disable = timbgpio_irq_disable,
+ .irq_set_type = timbgpio_irq_type,
};
static int __devinit timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index b16c9a8c03f5..cffa3bd7ad3b 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -111,69 +111,69 @@ static inline u16 giu_clear(u16 offset, u16 clear)
return data;
}
-static void ack_giuint_low(unsigned int irq)
+static void ack_giuint_low(struct irq_data *d)
{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
-static void mask_giuint_low(unsigned int irq)
+static void mask_giuint_low(struct irq_data *d)
{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
-static void mask_ack_giuint_low(unsigned int irq)
+static void mask_ack_giuint_low(struct irq_data *d)
{
unsigned int pin;
- pin = GPIO_PIN_OF_IRQ(irq);
+ pin = GPIO_PIN_OF_IRQ(d->irq);
giu_clear(GIUINTENL, 1 << pin);
giu_write(GIUINTSTATL, 1 << pin);
}
-static void unmask_giuint_low(unsigned int irq)
+static void unmask_giuint_low(struct irq_data *d)
{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
static struct irq_chip giuint_low_irq_chip = {
.name = "GIUINTL",
- .ack = ack_giuint_low,
- .mask = mask_giuint_low,
- .mask_ack = mask_ack_giuint_low,
- .unmask = unmask_giuint_low,
+ .irq_ack = ack_giuint_low,
+ .irq_mask = mask_giuint_low,
+ .irq_mask_ack = mask_ack_giuint_low,
+ .irq_unmask = unmask_giuint_low,
};
-static void ack_giuint_high(unsigned int irq)
+static void ack_giuint_high(struct irq_data *d)
{
giu_write(GIUINTSTATH,
- 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
-static void mask_giuint_high(unsigned int irq)
+static void mask_giuint_high(struct irq_data *d)
{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
-static void mask_ack_giuint_high(unsigned int irq)
+static void mask_ack_giuint_high(struct irq_data *d)
{
unsigned int pin;
- pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+ pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
giu_clear(GIUINTENH, 1 << pin);
giu_write(GIUINTSTATH, 1 << pin);
}
-static void unmask_giuint_high(unsigned int irq)
+static void unmask_giuint_high(struct irq_data *d)
{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
static struct irq_chip giuint_high_irq_chip = {
.name = "GIUINTH",
- .ack = ack_giuint_high,
- .mask = mask_giuint_high,
- .mask_ack = mask_ack_giuint_high,
- .unmask = unmask_giuint_high,
+ .irq_ack = ack_giuint_high,
+ .irq_mask = mask_giuint_high,
+ .irq_mask_ack = mask_ack_giuint_high,
+ .irq_unmask = unmask_giuint_high,
};
static int giu_get_irq(unsigned int irq)
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 618398e4ed8e..c822baacd8fc 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -35,6 +35,29 @@ static inline struct wm8994_gpio *to_wm8994_gpio(struct gpio_chip *chip)
return container_of(chip, struct wm8994_gpio, gpio_chip);
}
+static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+ switch (wm8994->type) {
+ case WM8958:
+ switch (offset) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -136,6 +159,7 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static struct gpio_chip template_chip = {
.label = "wm8994",
.owner = THIS_MODULE,
+ .request = wm8994_gpio_request,
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af443672626..0902d4460039 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@ config DRM_KMS_HELPER
tristate
depends on DRM
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
help
FB and CRTC helpers for KMS drivers.
@@ -100,14 +100,16 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
depends on AGP_INTEL
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
select SHMEM
+ select TMPFS
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73a..0cb2ba50af53 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa6708e44c..654faa803dcb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
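
drm_mode_config_reset() simply walks the CRTC, encoder and connector lists and invokes any ->reset hook the driver installed. A hedged usage sketch — the natural call site is a KMS driver bringing cached software state back in sync with hardware, for example on resume (demo_* name is illustrative):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int demo_kms_resume(struct drm_device *dev)
{
	/* Let the per-object ->reset hooks resynchronize cached state
	 * with whatever the hardware woke up with, before modes are
	 * re-applied. */
	drm_mode_config_reset(dev);

	/* ... then restore the saved mode configuration ... */
	return 0;
}
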
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a004db..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,20 +336,20 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
struct drm_encoder *encoder;
bool ret = true;
- adjusted_mode = drm_mode_duplicate(dev, mode);
-
crtc->enabled = drm_helper_crtc_in_use(crtc);
-
if (!crtc->enabled)
return true;
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+
+ saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -427,11 +427,20 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
- /* XXX free adjustedmode */
- drm_mode_destroy(dev, adjusted_mode);
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
/* FIXME: add subpixel order */
done:
+ drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
+ crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -486,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
crtc_funcs = set->crtc->helper_private;
+ if (!set->mode)
+ set->fb = NULL;
+
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
- DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
- set->crtc->base.id, (int)set->num_connectors,
- set->x, set->y);
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ set->mode = NULL;
+ set->num_connectors = 0;
}
dev = set->crtc->dev;
@@ -638,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
if (mode_changed) {
- set->crtc->enabled = (set->mode != NULL);
- if (set->mode != NULL) {
+ set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+ if (set->crtc->enabled) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
@@ -650,9 +662,16 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
old_fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
+ set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -664,14 +683,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
- if (ret != 0)
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
goto fail;
- }
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
}
kfree(save_connectors);
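
Storing the adjusted mode as crtc->hwmode and calling drm_calc_timestamping_constants() (added in the drm_irq.c hunk further below) reduces the true scanout timing to three durations. As a worked example with standard CEA 1080p60 timings — 148.5 MHz dot clock, htotal 2200, vtotal 1125, chosen purely for illustration:

    pixeldur_ns = 1,000,000,000 / 148,500,000        = 6 ns (truncated)
    linedur_ns  = 2200 * 1,000,000,000 / 148,500,000 = 14,814 ns
    framedur_ns = 1125 * 14,814                      = 16,665,750 ns

i.e. roughly the expected 16.67 ms (60 Hz) frame period, with a small error from the integer divisions.
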
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4ea4d0..6977a1ce9d98 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -985,6 +985,8 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
@@ -1005,6 +1007,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1533,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
+static int __init drm_fb_helper_modinit(void)
+{
+ const char *name = "fbcon";
+ struct module *fbcon;
+
+ mutex_lock(&module_mutex);
+ fbcon = find_module(name);
+ mutex_unlock(&module_mutex);
+
+ if (!fbcon)
+ request_module_nowait(name);
+ return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794bac04b..2ec7d48fc4a8 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..be9a9c07d152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+ seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
atomic_read(&dev->vma_count),
- high_memory, (u64)virt_to_phys(high_memory));
+ high_memory, (void *)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
- "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
- pt->pid, vma->vm_start, vma->vm_end,
+ "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+ pt->pid,
+ (void *)vma->vm_start, (void *)vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155edad1..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
/**
* Get interrupt from bus id.
*
@@ -77,6 +93,89 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ * Disable preemption, so vblank_time_lock is held as short as
+ * possible, even under a kernel with PREEMPT_RT patches.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+ atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ }
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ preempt_enable();
+}
+
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +190,7 @@ static void vblank_disable_fn(unsigned long arg)
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
+ vblank_disable_and_save(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -117,6 +213,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
kfree(dev->last_vblank);
kfree(dev->last_vblank_wait);
kfree(dev->vblank_inmodeset);
+ kfree(dev->_vblank_time);
dev->num_crtcs = 0;
}
@@ -129,6 +226,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->vblank_time_lock);
+
dev->num_crtcs = num_crtcs;
dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +260,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank_inmodeset)
goto err;
+ dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+ sizeof(struct timeval), GFP_KERNEL);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +391,7 @@ EXPORT_SYMBOL(drm_irq_install);
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
int irq_enabled, i;
@@ -335,7 +447,9 @@ int drm_control(struct drm_device *dev, void *data,
{
struct drm_control *ctl = data;
- /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+ /* if we don't have an irq we fall back for compatibility reasons -
+ * this used to be a separate function in drm_dma.h
+ */
switch (ctl->func) {
@@ -360,6 +474,285 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
+
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixel duration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+ linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+ 1000000000), dotclock);
+ framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
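
As a worked illustration (not part of the patch), here is the same nanosecond arithmetic as a small userspace C program for a hypothetical 1920x1080@60 mode with a 148500 kHz dotclock, crtc_htotal 2200 and crtc_vtotal 1125; the mode numbers are assumptions chosen only for this example.

/* Hedged example, not part of the patch: duration constants for an
 * assumed 1920x1080@60 mode (dotclock 148500 kHz, htotal 2200,
 * vtotal 1125), computed with the same integer math as above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dotclock = 148500ULL * 1000;	/* kHz -> Hz */
	uint64_t pixeldur_ns = 1000000000ULL / dotclock;
	uint64_t linedur_ns = (2200ULL * 1000000000ULL) / dotclock;
	uint64_t framedur_ns = 1125ULL * linedur_ns;

	/* Prints roughly: pixel 6 ns, line 14814 ns, frame 16665750 ns */
	printf("pixel %llu ns, line %llu ns, frame %llu ns\n",
	       (unsigned long long)pixeldur_ns,
	       (unsigned long long)linedur_ns,
	       (unsigned long long)framedur_ns);
	return 0;
}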
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on a PREEMPT_RT kernel.
+ */
+ preempt_disable();
+
+ /* Get system timestamp before query. */
+ do_gettimeofday(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ do_gettimeofday(&raw_time);
+
+ preempt_enable();
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+ crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+ raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+ (int) duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
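
For context, a minimal sketch (assumptions: a driver that already stores a drm_crtc per pipe, and a hypothetical my_crtc_from_pipe() lookup) of how a KMS driver's get_vblank_timestamp hook could forward to this helper:

/* Hedged sketch, not part of the patch: a driver-side
 * get_vblank_timestamp hook that forwards to the helper above.
 * my_crtc_from_pipe() is a hypothetical driver-internal lookup.
 */
static int my_driver_get_vblank_timestamp(struct drm_device *dev, int crtc,
					  int *max_error,
					  struct timeval *vblank_time,
					  unsigned flags)
{
	struct drm_crtc *refcrtc = my_crtc_from_pipe(dev, crtc);

	if (!refcrtc)
		return -EINVAL;

	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     refcrtc);
}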
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return gettimeofday timestamp as best estimate.
+ */
+ do_gettimeofday(tvblank);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
@@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns the system timestamp of the vblank
+ * interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ smp_rmb();
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
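
A short hedged sketch of the intended caller-side pattern: take the counter and its timestamp as one consistent pair rather than separate drm_vblank_count() and do_gettimeofday() calls (the hunks further below convert the event code in exactly this way); my_fill_vblank_event() is a hypothetical helper.

/* Hedged sketch: counter and timestamp fetched as one consistent pair.
 * The helper name is hypothetical; locking of the event is omitted.
 */
static void my_fill_vblank_event(struct drm_device *dev, int pipe,
				 struct drm_pending_vblank_event *e)
{
	struct timeval now;
	u32 seq = drm_vblank_count_and_time(dev, pipe, &now);

	e->event.sequence = seq;
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
}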
+
+/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+ * we get consistent results. This prevents races between the gpu
+ * updating its hardware counter and us retrieving the
+ * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
@@ -413,7 +851,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * be reinitialized at the next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ }
+
+ smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
}
/**
@@ -429,15 +878,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
- unsigned long irqflags;
+ unsigned long irqflags, irqflags2;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ /* Disable preemption while holding vblank_time_lock. Do
+ * it explicitly to guard against the PREEMPT_RT kernel.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+ * timestamps. Filter code in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
else {
@@ -445,6 +906,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
drm_update_vblank_count(dev, crtc);
}
}
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ preempt_enable();
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +926,17 @@ EXPORT_SYMBOL(drm_vblank_get);
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank_disable_timer,
+ jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
@@ -480,10 +945,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->driver->disable_vblank(dev, crtc);
+ vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -549,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -602,7 +1066,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
- do_gettimeofday(&now);
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1074,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
}
file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1 << 23)) {
vblwait->request.sequence = seq + 1;
@@ -626,15 +1090,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ vblwait->reply.sequence = seq;
trace_drm_vblank_event_delivered(current->pid, pipe,
vblwait->request.sequence);
} else {
list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1194,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (ret != -EINTR) {
struct timeval now;
- do_gettimeofday(&now);
-
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
@@ -750,8 +1216,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
unsigned long flags;
unsigned int seq;
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -787,13 +1252,68 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*/
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
if (!dev->num_crtcs)
- return;
+ return false;
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * already been processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
- atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
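
Since drm_handle_vblank() now reports whether the interrupt was actually accounted, a driver interrupt handler can key follow-up work off the return value; a hedged sketch with a hypothetical my_finish_pageflip() helper:

/* Hedged sketch, not part of the patch: using the new bool return in a
 * driver vblank interrupt handler. my_finish_pageflip() is hypothetical.
 */
static void my_driver_vblank_irq(struct drm_device *dev, int pipe)
{
	/* false means vblank irqs are off for this crtc or the
	 * interrupt was filtered as redundant; skip extra work then.
	 */
	if (drm_handle_vblank(dev, pipe))
		my_finish_pageflip(dev, pipe);
}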
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed90..c59515ba7e69 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
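
A hedged usage sketch of a range-restricted eviction scan, loosely modelled on how GEM eviction drives the scan API: feed LRU nodes into the scan, remember them on an unwind list, and drain that list again before any other drm_mm operation. struct my_node and the list layout are assumptions for the example.

/* Hedged sketch, not part of the patch: driving a range-restricted scan.
 * struct my_node is a hypothetical wrapper around a drm_mm_node.
 */
struct my_node {
	struct drm_mm_node *mm_node;
	struct list_head lru_link;	/* driver LRU list */
	struct list_head scan_link;	/* temporary unwind list */
};

static bool my_scan_range(struct drm_mm *mm, struct list_head *lru,
			  unsigned long size, unsigned alignment,
			  unsigned long start, unsigned long end)
{
	struct my_node *n;
	LIST_HEAD(unwind);
	bool found = false;

	drm_mm_init_scan_with_range(mm, size, alignment, start, end);

	/* Feed candidate nodes into the scan until it reports a hit. */
	list_for_each_entry(n, lru, lru_link) {
		list_add(&n->scan_link, &unwind);
		if (drm_mm_scan_add_block(n->mm_node)) {
			found = true;
			break;
		}
	}

	/* Every added block must be taken out of the scan again before
	 * any other drm_mm operation; eviction of the nodes that formed
	 * the hole is left to the caller.
	 */
	while (!list_empty(&unwind)) {
		n = list_first_entry(&unwind, struct my_node, scan_link);
		list_del(&n->scan_link);
		drm_mm_scan_remove_block(n->mm_node);
	}

	return found;
}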
+
+/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
+ unsigned long adj_start;
+ unsigned long adj_end;
mm->scanned_blocks++;
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_hole(node->start, node->start + node->size,
+ if (mm->scan_check_range) {
+ adj_start = node->start < mm->scan_start ?
+ mm->scan_start : node->start;
+ adj_end = node->start + node->size > mm->scan_end ?
+ mm->scan_end : node->start + node->size;
+ } else {
+ adj_start = node->start;
+ adj_end = node->start + node->size;
+ }
+
+ if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee042cc..d59edc18301f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7b..0ae6a7c5020f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
+ i915_gem_execbuffer.o \
+ i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c7..3601466c5502 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(is_broadwater);
B(is_crestline);
B(has_fbc);
- B(has_rc6);
B(has_pipe_cxsr);
B(has_hotplug);
B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj_priv->user_pin_count > 0)
+ if (obj->user_pin_count > 0)
return "P";
- else if (obj_priv->pin_count > 0)
+ else if (obj->pin_count > 0)
return "p";
else
return " ";
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
}
}
+static const char *agp_type_str(int type)
+{
+ switch (type) {
+ case 0: return " uncached";
+ case 1: return " snooped";
+ default: return "";
+ }
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -117,6 +126,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
+ obj->last_fenced_seqno,
+ agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -124,7 +135,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ seq_printf(m, " (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
}
@@ -136,7 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -171,12 +192,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj_priv, head, mm_list) {
+ list_for_each_entry(obj, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj_priv);
+ describe_obj(m, obj);
seq_printf(m, "\n");
- total_obj_size += obj_priv->base.size;
- total_gtt_size += obj_priv->gtt_space->size;
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,30 +207,116 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while(0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
- seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
- seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
- seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
- seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
- seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
- seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
mutex_unlock(&dev->struct_mutex);
return 0;
}
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ seq_printf(m, " ");
+ describe_obj(m, obj);
+ seq_printf(m, "\n");
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
+ count++;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+
+ return 0;
+}
+
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
@@ -243,14 +350,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "%d prepares\n", work->pending);
if (work->old_fb_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
- if(obj_priv)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+ if (obj)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
- if(obj_priv)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->pending_flip_obj;
+ if (obj)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +372,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
- int ret;
+ int ret, count;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ count = 0;
+ if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+ seq_printf(m, "Render requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[RCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+ seq_printf(m, "BSD requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[VCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+ seq_printf(m, "BLT requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[BCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
}
mutex_unlock(&dev->struct_mutex);
+ if (count == 0)
+ seq_printf(m, "No requests\n");
+
return 0;
}
+static void i915_ring_seqno_info(struct seq_file *m,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ seq_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring));
+ seq_printf(m, "Waiter sequence (%s): %d\n",
+ ring->name, ring->waiting_seqno);
+ seq_printf(m, "IRQ sequence (%s): %d\n",
+ ring->name, ring->irq_seqno);
+ }
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
@@ -315,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -354,16 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (IS_GEN6(dev)) {
+ seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+ dev_priv->ring[i].name,
+ I915_READ_IMR(&dev_priv->ring[i]));
+ }
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
}
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -383,29 +524,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- if (obj == NULL) {
- seq_printf(m, "Fenced object[%2d] = unused\n", i);
- } else {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(obj);
- seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- seq_printf(m, "\n");
- }
+ seq_printf(m, "Fenced object[%2d] = ", i);
+ if (obj == NULL)
+ seq_printf(m, "unused");
+ else
+ describe_obj(m, obj);
+ seq_printf(m, "\n");
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -414,10 +543,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ struct intel_ring_buffer *ring;
volatile u32 *hws;
+ int i;
- hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = (volatile u32 *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -431,14 +562,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
static void i915_dump_object(struct seq_file *m,
struct io_mapping *mapping,
- struct drm_i915_gem_object *obj_priv)
+ struct drm_i915_gem_object *obj)
{
int page, page_count, i;
- page_count = obj_priv->base.size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
- obj_priv->gtt_offset + page * PAGE_SIZE);
+ obj->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -450,25 +581,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- obj = &obj_priv->base;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n",
- obj_priv->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}
mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -477,19 +604,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (!dev_priv->render_ring.gem_object) {
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = dev_priv->render_ring.virtual_start;
+ u8 *virt = ring->virtual_start;
uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ for (off = 0; off < ring->size; off += 4) {
uint32_t *ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -504,19 +633,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail;
+ struct intel_ring_buffer *ring;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (ring->size == 0)
+ return 0;
- seq_printf(m, "RingHead : %08x\n", head);
- seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Ring %s:\n", ring->name);
+ seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+ seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+ seq_printf(m, " Size : %08x\n", ring->size);
+ seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+ seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+ if (IS_GEN6(dev)) {
+ seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+ seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+ }
+ seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+ seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
return 0;
}
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RING_RENDER: return " render";
+ case RING_BSD: return " bsd";
+ case RING_BLT: return " blt";
+ default: return "";
+ }
+}
+
static const char *pin_flag(int pinned)
{
if (pinned > 0)
@@ -547,6 +695,37 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
+static void print_error_buffers(struct seq_file *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ seq_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->seqno,
+ pin_flag(err->pinned),
+ tiling_flag(err->tiling),
+ dirty_flag(err->dirty),
+ purgeable_flag(err->purgeable),
+ ring_str(err->ring),
+ agp_type_str(err->agp_type));
+
+ if (err->name)
+ seq_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", err->fence_reg);
+
+ seq_printf(m, "\n");
+ err++;
+ }
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,47 +747,54 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, "ERROR: 0x%08x\n", error->error);
+ seq_printf(m, "Blitter command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+ seq_printf(m, "Video (BSD) command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+ }
+ seq_printf(m, "Render command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
}
- seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
- if (error->active_bo_count) {
- seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
- for (i = 0; i < error->active_bo_count; i++) {
- seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
- error->active_bo[i].gtt_offset,
- error->active_bo[i].size,
- error->active_bo[i].read_domains,
- error->active_bo[i].write_domain,
- error->active_bo[i].seqno,
- pin_flag(error->active_bo[i].pinned),
- tiling_flag(error->active_bo[i].tiling),
- dirty_flag(error->active_bo[i].dirty),
- purgeable_flag(error->active_bo[i].purgeable));
-
- if (error->active_bo[i].name)
- seq_printf(m, " (name: %d)", error->active_bo[i].name);
- if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
- seq_printf(m, "\n");
- }
- }
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+ for (i = 0; i < 16; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
if (error->batchbuffer[i]) {
struct drm_i915_error_object *obj = error->batchbuffer[i];
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -635,6 +821,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -658,15 +847,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_GEN6(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ __gen6_force_wake_get(dev_priv);
+
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = rp_state_cap & 0xff;
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * 100);
+
+ __gen6_force_wake_put(dev_priv);
+ } else {
+ seq_printf(m, "no P-state info available\n");
+ }
return 0;
}
@@ -715,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
- u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+ u32 rstdbyctl = I915_READ(RSTDBYCTL);
u16 crstandvid = I915_READ16(CRSTANDVID);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -738,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+ seq_printf(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_printf(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_printf(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_printf(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_printf(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_printf(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_printf(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_printf(m, "unknown\n");
+ break;
+ }
return 0;
}
@@ -794,7 +1043,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_GEN5(dev))
+ if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1135,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1147,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
@@ -943,7 +1192,6 @@ i915_wedged_write(struct file *filp,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
@@ -959,12 +1207,7 @@ i915_wedged_write(struct file *filp,
}
DRM_INFO("Manually setting wedged to %d\n", val);
-
- atomic_set(&dev_priv->mm.wedged, val);
- if (val) {
- wake_up_all(&dev_priv->irq_queue);
- queue_work(dev_priv->wq, &dev_priv->error_work);
- }
+ i915_handle_error(dev, val);
return cnt;
}
@@ -1018,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
+ {"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -1028,9 +1272,15 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+ {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+ {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc83d95..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->render_ring.status_page.gfx_addr) {
- dev_priv->render_ring.status_page.gfx_addr = 0;
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -98,7 +101,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -124,6 +127,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -132,9 +137,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret;
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -158,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->render_ring.gem_object != NULL) {
+ if (LP_RING(dev_priv)->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->render_ring.size = init->ring_size;
-
- dev_priv->render_ring.map.offset = init->ring_start;
- dev_priv->render_ring.map.size = init->ring_size;
- dev_priv->render_ring.map.type = 0;
- dev_priv->render_ring.map.flags = 0;
- dev_priv->render_ring.map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
-
- if (dev_priv->render_ring.map.handle == NULL) {
+ ret = intel_render_ring_init_dri(dev,
+ init->ring_start,
+ init->ring_size);
+ if (ret) {
i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
+ return ret;
}
}
- dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
-
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
@@ -202,12 +196,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- ring = &dev_priv->render_ring;
-
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -222,7 +214,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -264,7 +256,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
@@ -322,40 +314,27 @@ static int do_validate_cmd(int cmd)
return 0;
}
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
- if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- cmd = buffer[i];
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- OUT_RING(buffer[i]);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
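The hunk above restructures i915_emit_cmds so that the whole buffer is validated before any ring space is reserved, and the single BEGIN_LP_RING reservation is error-checked instead of assumed to succeed. A standalone sketch of that shape follows; the fake_ring type and the ring_begin/ring_emit helpers are stand-ins invented for illustration, not the driver's macros.

/* Sketch only: validate first, then reserve ring space exactly once. */
#include <errno.h>
#include <stddef.h>

struct fake_ring {
        unsigned int dwords[1024];
        size_t tail;
        size_t space;                   /* free dwords */
};

static int validate_cmd(unsigned int cmd)
{
        (void)cmd;
        return 1;                       /* stand-in: every command is one dword */
}

static int ring_begin(struct fake_ring *ring, size_t n)
{
        if (ring->space < n)
                return -EBUSY;          /* the driver would wait, then fail */
        ring->space -= n;
        return 0;
}

static void ring_emit(struct fake_ring *ring, unsigned int val)
{
        ring->dwords[ring->tail++ % 1024] = val;
}

static int emit_cmds(struct fake_ring *ring, const unsigned int *buf, size_t dwords)
{
        size_t i;
        int ret;

        /* pass 1: reject the batch before touching the ring */
        for (i = 0; i < dwords; ) {
                int sz = validate_cmd(buf[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        /* pass 2: one reservation for the whole batch, error checked */
        ret = ring_begin(ring, (dwords + 1) & ~1ul);
        if (ret)
                return ret;
        for (i = 0; i < dwords; i++)
                ring_emit(ring, buf[i]);
        if (dwords & 1)
                ring_emit(ring, 0);     /* keep the tail qword-aligned */
        return 0;
}

int main(void)
{
        struct fake_ring ring = { .space = 1024 };
        const unsigned int cmds[3] = { 0x1, 0x2, 0x3 };

        return emit_cmds(&ring, cmds, 3) ? 1 : 0;
}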
@@ -366,34 +345,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4)
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
{
- struct drm_clip_rect box = boxes[i];
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
+ box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
return 0;
}
@@ -413,12 +399,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +427,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cliprects, i,
+ ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
@@ -459,8 +446,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
- int i = 0, count;
+ int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -470,17 +458,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
-
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- batch->DR1, batch->DR4);
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
@@ -488,26 +478,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
}
if (IS_G4X(dev) || IS_GEN5(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
}
- i915_emit_breadcrumb(dev);
+ i915_emit_breadcrumb(dev);
return 0;
}
@@ -516,6 +509,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
+ int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -527,12 +521,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(6);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->current_page == 0) {
@@ -543,33 +538,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->current_page = 0;
}
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
+
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
- return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
- dev_priv->render_ring.size - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +762,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
case I915_PARAM_HAS_COHERENT_RINGS:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -826,7 +826,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -1005,73 +1005,47 @@ intel_teardown_mchbar(struct drm_device *dev)
#define PTE_VALID (1 << 0)
/**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
* @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
*
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
*/
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
- unsigned long *gtt;
- unsigned long entry, phys;
- int gtt_bar = IS_GEN2(dev) ? 1 : 0;
- int gtt_offset, gtt_size;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
- gtt_offset = 2*1024*1024;
- gtt_size = 2*1024*1024;
- } else {
- gtt_offset = 512*1024;
- gtt_size = 512*1024;
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
} else {
- gtt_bar = 3;
- gtt_offset = 0;
- gtt_size = pci_resource_len(dev->pdev, gtt_bar);
- }
-
- gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
- gtt_size);
- if (!gtt) {
- DRM_ERROR("ioremap of GTT failed\n");
- return 0;
- }
-
- entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
- /* Mask out these reserved bits on this hardware. */
- if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
- entry &= ~PTE_ADDRESS_MASK_HIGH;
-
- /* If it's not a mapping type we know, then bail. */
- if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
- (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
- iounmap(gtt);
- return 0;
- }
-
- if (!(entry & PTE_VALID)) {
- DRM_ERROR("bad GTT entry in stolen space\n");
- iounmap(gtt);
- return 0;
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
}
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
- iounmap(gtt);
-
- phys =(entry & PTE_ADDRESS_MASK) |
- ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
- return phys;
+ return base + offset;
}
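For the stolen-memory translation above, a small numeric illustration of the bit arithmetic on the gen > 3 / G33 path may help. The register value, stolen size and offset below are made up; the interpretation (a 1 MiB-granular top-of-low-DRAM value minus the stolen size) follows the comment and code in the hunk itself.

/* Illustration only, with invented values. */
#include <stdio.h>

int main(void)
{
        unsigned int val = 0x8000;                      /* hypothetical config word: top of low DRAM at 2 GiB */
        unsigned int top = (unsigned int)(val >> 4) << 20;  /* "val >> 4 << 20" = 0x80000000 */
        unsigned int stolen_size = 8u << 20;            /* assume an 8 MiB stolen region */
        unsigned int base = top - stolen_size;          /* 0x7f800000 */
        unsigned int offset = 0x1000;                   /* offset handed to i915_stolen_to_phys */

        printf("phys = 0x%08x\n", base + offset);       /* 0x7f801000 */
        return 0;
}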
static void i915_warn_stolen(struct drm_device *dev)
@@ -1087,54 +1061,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
- /* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
- if (!compressed_fb) {
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
- return;
- }
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- return;
- }
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
- cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
- if (!cfb_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- }
-
- if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
- 4096, 0);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
- ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
- if (!ll_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- drm_mm_put_block(compressed_llb);
- }
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
}
dev_priv->cfb_size = size;
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1099,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
- ll_base, size >> 20);
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
}
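i915_setup_compression now unwinds through goto labels instead of returning piecemeal. A minimal standalone sketch of that unwind shape, with invented grab_block/release_block/to_phys stand-ins rather than the drm_mm API:

/* Sketch only: release in reverse order of acquisition, record failure last. */
#include <stdio.h>
#include <stdlib.h>

struct block { int dummy; };

static struct block *grab_block(void)        { return malloc(sizeof(struct block)); }
static void release_block(struct block *b)   { free(b); }
static unsigned long to_phys(struct block *b) { return b ? 0x1000 : 0; } /* stand-in */

static void setup_compression(void)
{
        struct block *fb, *llb = NULL;

        fb = grab_block();
        if (!fb)
                goto err;
        if (!to_phys(fb))
                goto err_fb;

        llb = grab_block();
        if (!llb)
                goto err_fb;
        if (!to_phys(llb))
                goto err_llb;

        printf("compression enabled\n");
        return;

err_llb:
        release_block(llb);
err_fb:
        release_block(fb);
err:
        printf("stolen space too small, compression disabled\n");
}

int main(void)
{
        setup_compression();
        return 0;
}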
static void i915_cleanup_compression(struct drm_device *dev)
@@ -1176,12 +1140,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "i915: switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "i915: switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1196,17 +1164,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_size,
- unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size, gtt_size, mappable_size;
int ret = 0;
- /* Basic memrange allocator for stolen space (aka mm.vram) */
- drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+ prealloc_size = dev_priv->mm.gtt->stolen_size;
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
- /* Let GEM Manage from end of prealloc space to end of aperture.
+ /* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
@@ -1215,7 +1186,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
- i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
@@ -1227,31 +1198,39 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
- /* Try to get an 8M buffer... */
- if (prealloc_size > (9*1024*1024))
- cfb_size = 8*1024*1024;
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
- /* Allow hardware batchbuffers unless told otherwise.
- */
+ /* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
ret = intel_parse_bios(dev);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
- /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret)
+ if (ret && ret != -ENODEV)
goto cleanup_ringbuffer;
intel_register_dsm_handler();
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
+ NULL,
i915_switcheroo_can_switch);
if (ret)
goto cleanup_vga_client;
@@ -1426,152 +1405,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
}
-struct v_table {
- u8 vid;
- unsigned long vd; /* in .1 mil */
- unsigned long vm; /* in .1 mil */
- u8 pvid;
-};
-
-static struct v_table v_table[] = {
- { 0, 16125, 15000, 0x7f, },
- { 1, 16000, 14875, 0x7e, },
- { 2, 15875, 14750, 0x7d, },
- { 3, 15750, 14625, 0x7c, },
- { 4, 15625, 14500, 0x7b, },
- { 5, 15500, 14375, 0x7a, },
- { 6, 15375, 14250, 0x79, },
- { 7, 15250, 14125, 0x78, },
- { 8, 15125, 14000, 0x77, },
- { 9, 15000, 13875, 0x76, },
- { 10, 14875, 13750, 0x75, },
- { 11, 14750, 13625, 0x74, },
- { 12, 14625, 13500, 0x73, },
- { 13, 14500, 13375, 0x72, },
- { 14, 14375, 13250, 0x71, },
- { 15, 14250, 13125, 0x70, },
- { 16, 14125, 13000, 0x6f, },
- { 17, 14000, 12875, 0x6e, },
- { 18, 13875, 12750, 0x6d, },
- { 19, 13750, 12625, 0x6c, },
- { 20, 13625, 12500, 0x6b, },
- { 21, 13500, 12375, 0x6a, },
- { 22, 13375, 12250, 0x69, },
- { 23, 13250, 12125, 0x68, },
- { 24, 13125, 12000, 0x67, },
- { 25, 13000, 11875, 0x66, },
- { 26, 12875, 11750, 0x65, },
- { 27, 12750, 11625, 0x64, },
- { 28, 12625, 11500, 0x63, },
- { 29, 12500, 11375, 0x62, },
- { 30, 12375, 11250, 0x61, },
- { 31, 12250, 11125, 0x60, },
- { 32, 12125, 11000, 0x5f, },
- { 33, 12000, 10875, 0x5e, },
- { 34, 11875, 10750, 0x5d, },
- { 35, 11750, 10625, 0x5c, },
- { 36, 11625, 10500, 0x5b, },
- { 37, 11500, 10375, 0x5a, },
- { 38, 11375, 10250, 0x59, },
- { 39, 11250, 10125, 0x58, },
- { 40, 11125, 10000, 0x57, },
- { 41, 11000, 9875, 0x56, },
- { 42, 10875, 9750, 0x55, },
- { 43, 10750, 9625, 0x54, },
- { 44, 10625, 9500, 0x53, },
- { 45, 10500, 9375, 0x52, },
- { 46, 10375, 9250, 0x51, },
- { 47, 10250, 9125, 0x50, },
- { 48, 10125, 9000, 0x4f, },
- { 49, 10000, 8875, 0x4e, },
- { 50, 9875, 8750, 0x4d, },
- { 51, 9750, 8625, 0x4c, },
- { 52, 9625, 8500, 0x4b, },
- { 53, 9500, 8375, 0x4a, },
- { 54, 9375, 8250, 0x49, },
- { 55, 9250, 8125, 0x48, },
- { 56, 9125, 8000, 0x47, },
- { 57, 9000, 7875, 0x46, },
- { 58, 8875, 7750, 0x45, },
- { 59, 8750, 7625, 0x44, },
- { 60, 8625, 7500, 0x43, },
- { 61, 8500, 7375, 0x42, },
- { 62, 8375, 7250, 0x41, },
- { 63, 8250, 7125, 0x40, },
- { 64, 8125, 7000, 0x3f, },
- { 65, 8000, 6875, 0x3e, },
- { 66, 7875, 6750, 0x3d, },
- { 67, 7750, 6625, 0x3c, },
- { 68, 7625, 6500, 0x3b, },
- { 69, 7500, 6375, 0x3a, },
- { 70, 7375, 6250, 0x39, },
- { 71, 7250, 6125, 0x38, },
- { 72, 7125, 6000, 0x37, },
- { 73, 7000, 5875, 0x36, },
- { 74, 6875, 5750, 0x35, },
- { 75, 6750, 5625, 0x34, },
- { 76, 6625, 5500, 0x33, },
- { 77, 6500, 5375, 0x32, },
- { 78, 6375, 5250, 0x31, },
- { 79, 6250, 5125, 0x30, },
- { 80, 6125, 5000, 0x2f, },
- { 81, 6000, 4875, 0x2e, },
- { 82, 5875, 4750, 0x2d, },
- { 83, 5750, 4625, 0x2c, },
- { 84, 5625, 4500, 0x2b, },
- { 85, 5500, 4375, 0x2a, },
- { 86, 5375, 4250, 0x29, },
- { 87, 5250, 4125, 0x28, },
- { 88, 5125, 4000, 0x27, },
- { 89, 5000, 3875, 0x26, },
- { 90, 4875, 3750, 0x25, },
- { 91, 4750, 3625, 0x24, },
- { 92, 4625, 3500, 0x23, },
- { 93, 4500, 3375, 0x22, },
- { 94, 4375, 3250, 0x21, },
- { 95, 4250, 3125, 0x20, },
- { 96, 4125, 3000, 0x1f, },
- { 97, 4125, 3000, 0x1e, },
- { 98, 4125, 3000, 0x1d, },
- { 99, 4125, 3000, 0x1c, },
- { 100, 4125, 3000, 0x1b, },
- { 101, 4125, 3000, 0x1a, },
- { 102, 4125, 3000, 0x19, },
- { 103, 4125, 3000, 0x18, },
- { 104, 4125, 3000, 0x17, },
- { 105, 4125, 3000, 0x16, },
- { 106, 4125, 3000, 0x15, },
- { 107, 4125, 3000, 0x14, },
- { 108, 4125, 3000, 0x13, },
- { 109, 4125, 3000, 0x12, },
- { 110, 4125, 3000, 0x11, },
- { 111, 4125, 3000, 0x10, },
- { 112, 4125, 3000, 0x0f, },
- { 113, 4125, 3000, 0x0e, },
- { 114, 4125, 3000, 0x0d, },
- { 115, 4125, 3000, 0x0c, },
- { 116, 4125, 3000, 0x0b, },
- { 117, 4125, 3000, 0x0a, },
- { 118, 4125, 3000, 0x09, },
- { 119, 4125, 3000, 0x08, },
- { 120, 1125, 0, 0x07, },
- { 121, 1000, 0, 0x06, },
- { 122, 875, 0, 0x05, },
- { 123, 750, 0, 0x04, },
- { 124, 625, 0, 0x03, },
- { 125, 500, 0, 0x02, },
- { 126, 375, 0, 0x01, },
- { 127, 0, 0, 0x00, },
-};
-
-struct cparams {
- int i;
- int t;
- int m;
- int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
@@ -1637,21 +1476,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- unsigned long val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(v_table); i++) {
- if (v_table[i].pvid == pxvid) {
- if (IS_MOBILE(dev_priv->dev))
- val = v_table[i].vm;
- else
- val = v_table[i].vd;
- }
- }
-
- return val;
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
}
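The rewritten pvid_to_extvid drops the linear search over a keyed table and indexes the array directly with pxvid. A trivial sketch of that pattern, with illustrative values and on the assumption that the caller keeps the index within the table:

/* Sketch only: key-as-index lookup instead of a linear search. */
#include <stdint.h>
#include <stdio.h>

struct vidmap { uint16_t vd; uint16_t vm; };

static const struct vidmap table[4] = {
        { 0, 0 }, { 375, 0 }, { 500, 0 }, { 625, 0 },
};

static uint16_t lookup(uint8_t idx, int is_mobile)
{
        return is_mobile ? table[idx].vm : table[idx].vd;
}

int main(void)
{
        printf("%u\n", lookup(2, 0));   /* prints 500 */
        return 0;
}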
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1905,9 +1868,9 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
- resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1886,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
- /* Add register map (needed for suspend/resume) */
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- base = pci_resource_start(dev->pdev, mmio_bar);
- size = pci_resource_len(dev->pdev, mmio_bar);
-
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1937,16 +1895,36 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
- dev_priv->regs = ioremap(base, size);
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
+ io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
@@ -1958,24 +1936,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* MTRR if present. Even if a UC MTRR isn't present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024,
+ agp_size,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_iomapfree;
- }
-
- prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1950,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* bo.
*
* It is also used for periodic low-priority events, such as
- * idle-timers and hangcheck.
+ * idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
@@ -2001,22 +1968,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
- if (prealloc_size > agp_size * 3 / 4) {
- DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
- "memory stolen.\n",
- prealloc_size / 1024, agp_size / 1024);
- DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
- "updating the BIOS to fix).\n");
- dev_priv->has_gem = 0;
- }
-
- if (dev_priv->has_gem == 0 &&
- drm_core_check_feature(dev, DRIVER_MODESET)) {
- DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
- ret = -ENODEV;
- goto out_iomapfree;
- }
-
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2037,8 +1988,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
- if (ret != 0)
- goto out_workqueue_free;
+ if (ret)
+ goto out_gem_unload;
}
if (IS_PINEVIEW(dev))
@@ -2060,16 +2011,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->user_irq_lock);
+ spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
- }
+ if (ret)
+ goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
@@ -2077,10 +2025,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_workqueue_free;
+ goto out_gem_unload;
}
}
@@ -2100,12 +2048,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
-out_workqueue_free:
+out_gem_unload:
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -2122,6 +2075,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
@@ -2179,7 +2135,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.vram);
+ drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2188,7 +2144,7 @@ int i915_driver_unload(struct drm_device *dev)
}
if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e6..0ad533f06af9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,15 +46,24 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+unsigned int i915_panel_use_ssc = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
- .class_mask = 0xffff00, \
+ .class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
.subvendor = PCI_ANY_ID, \
@@ -111,7 +120,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
@@ -130,7 +139,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
@@ -150,7 +159,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
@@ -165,6 +174,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
@@ -244,10 +254,34 @@ void intel_detect_pch (struct drm_device *dev)
}
}
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+ udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+}
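__gen6_force_wake_get above polls the FORCEWAKE_ACK bit a bounded number of times with a short delay between reads. A generic user-space sketch of that bounded-poll pattern; ack_asserted and usleep are stand-ins, not the driver's register accessors:

/* Sketch only: bounded poll-for-ack instead of waiting forever. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool ack_asserted(void)
{
        static int calls;
        return ++calls > 3;             /* pretend the hardware acks on the 4th read */
}

static bool wait_for_ack(void)
{
        int count = 0;

        while (count++ < 50 && !ack_asserted())
                usleep(10);             /* stands in for udelay(10) */

        return count <= 50;             /* false means the poll timed out */
}

int main(void)
{
        printf("ack: %s\n", wait_for_ack() ? "ok" : "timeout");
        return 0;
}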
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_kms_helper_poll_disable(dev);
+
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +318,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
- drm_kms_helper_poll_disable(dev);
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
error = i915_drm_freeze(dev);
if (error)
@@ -304,6 +340,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -315,10 +357,14 @@ static int i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ drm_mode_config_reset(dev);
drm_irq_install(dev);
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
@@ -332,6 +378,9 @@ int i915_resume(struct drm_device *dev)
{
int ret;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -405,6 +454,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+ return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
@@ -431,7 +488,11 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
- mutex_lock(&dev->struct_mutex);
+ if (!i915_try_reset)
+ return 0;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
i915_gem_reset(dev);
@@ -439,6 +500,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
@@ -472,11 +536,17 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
- ring->init(dev, ring);
+
+ dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+ if (HAS_BSD(dev))
+ dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+ if (HAS_BLT(dev))
+ dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
+ drm_mode_config_reset(dev);
drm_irq_install(dev);
mutex_lock(&dev->struct_mutex);
}
@@ -501,6 +571,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This causes
+ * us confusion instead, especially on the systems where both
+ * functions have the same PCI-ID!
+ */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -523,6 +601,9 @@ static int i915_pm_suspend(struct device *dev)
return -ENODEV;
}
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
error = i915_drm_freeze(drm_dev);
if (error)
return error;
@@ -606,6 +687,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
+ .get_vblank_timestamp = i915_get_vblank_timestamp,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +744,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
- i915_gem_shrinker_init();
-
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -684,17 +765,14 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET)) {
- driver.suspend = i915_suspend;
- driver.resume = i915_resume;
- }
+ if (!(driver.driver_features & DRIVER_MODESET))
+ driver.get_vblank_timestamp = NULL;
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
- i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da3099..65dfe81d0035 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
- struct drm_gem_object *cur_obj;
+ struct drm_i915_gem_object *cur_obj;
};
struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1
struct drm_i915_fence_reg {
- struct drm_gem_object *obj;
struct list_head lru_list;
- bool gpu;
+ struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
};
struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
+struct intel_display_error_state;
+
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
@@ -148,32 +150,47 @@ struct drm_i915_error_state {
u32 ipehr;
u32 instdone;
u32 acthd;
+ u32 error; /* gen6+ */
+ u32 bcs_acthd; /* gen6+ blt engine */
+ u32 bcs_ipehr;
+ u32 bcs_ipeir;
+ u32 bcs_instdone;
+ u32 bcs_seqno;
+ u32 vcs_acthd; /* gen6+ bsd engine */
+ u32 vcs_ipehr;
+ u32 vcs_ipeir;
+ u32 vcs_instdone;
+ u32 vcs_seqno;
u32 instpm;
u32 instps;
u32 instdone1;
u32 seqno;
u64 bbaddr;
+ u64 fence[16];
struct timeval time;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer[2];
+ } *ringbuffer, *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
- size_t size;
+ u32 size;
u32 name;
u32 seqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
- u32 fence_reg;
+ s32 fence_reg:5;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- } *active_bo;
- u32 active_bo_count;
+ u32 ring:4;
+ u32 agp_type:1;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
};
struct drm_i915_display_funcs {
@@ -207,7 +224,6 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 has_fbc : 1;
- u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
@@ -243,6 +259,7 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
int has_gem;
+ int relative_constants_mode;
void __iomem *regs;
@@ -253,20 +270,15 @@ typedef struct drm_i915_private {
} *gmbus;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer render_ring;
- struct intel_ring_buffer bsd_ring;
- struct intel_ring_buffer blt_ring;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *seqno_obj;
- struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
struct resource mch_res;
@@ -275,25 +287,17 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
- unsigned long debug_flags;
- wait_queue_head_t irq_queue;
atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
u32 trace_irq_seqno;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on Ironlake,
- irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
- u32 pch_irq_mask_reg;
- u32 pch_irq_enable_reg;
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -306,7 +310,7 @@ typedef struct drm_i915_private {
int num_pipe;
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
@@ -329,6 +333,7 @@ typedef struct drm_i915_private {
/* LVDS info */
int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -530,23 +535,24 @@ typedef struct drm_i915_private {
struct {
/** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
+ const struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
- struct drm_mm vram;
+ struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head gtt_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
- /**
- * Membership on list of all loaded devices, used to evict
- * inactive buffers under memory pressure.
- *
- * Modifications should only be done whilst holding the
- * shrink_list_lock spinlock.
- */
- struct list_head shrink_list;
+ struct shrinker inactive_shrinker;
/**
* List of objects currently involved in rendering.
@@ -609,16 +615,6 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -645,16 +641,11 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- uint32_t flush_rings;
-
/* accounting, useful for userland debugging */
- size_t object_memory;
- size_t pin_memory;
- size_t gtt_memory;
size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
u32 object_count;
- u32 pin_count;
- u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +679,14 @@ typedef struct drm_i915_private {
u8 fmax;
u8 fstart;
- u64 last_count1;
- unsigned long last_time1;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -709,20 +700,20 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
} drm_i915_private_t;
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
+ /** This object's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
/**
* This is set if the object is on the active or flushing lists
@@ -738,6 +729,12 @@ struct drm_i915_gem_object {
unsigned int dirty : 1;
/**
+ * This is set if the object has been written to since the last
+ * GPU flush.
+ */
+ unsigned int pending_gpu_write : 1;
+
+ /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -747,29 +744,15 @@ struct drm_i915_gem_object {
signed int fence_reg : 5;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- unsigned int in_execbuffer : 1;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv : 2;
/**
- * Refcount for the pages array. With the current locking scheme, there
- * are at most two concurrent users: Binding a bo to the gtt and
- * pwrite/pread using physical addresses. So two bits for a maximum
- * of two users are enough.
- */
- unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
- /**
* Current tiling mode for the object.
*/
unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +766,55 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
- /** AGP memory structure for our GTT binding. */
- DRM_AGP_MEM *agp_mem;
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable : 1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+ * mappable by accident). Track pin and fault separate for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable : 1;
+ unsigned int pin_mappable : 1;
+
+ /*
+ * Is the GPU currently using a fence to access this buffer,
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
struct page **pages;
/**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
+ * DMAR support
*/
- uint32_t gtt_offset;
+ struct scatterlist *sg_list;
+ int num_sg;
- /* Which ring is refering to is this object */
- struct intel_ring_buffer *ring;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
/**
- * Fake offset for use by mmap(2)
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
*/
- uint64_t mmap_offset;
+ uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
+ struct intel_ring_buffer *ring;
+
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+ struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -880,11 +890,75 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
extern unsigned int i915_lvds_downclock;
+extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -907,8 +981,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4);
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +992,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -939,12 +1014,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
- u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
- u32 mask);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -953,6 +1022,13 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1093,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1035,73 +1124,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
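The signed-difference trick above keeps the comparison correct even after the 32-bit seqno counter wraps around. The following standalone sketch is not part of the patch; it is plain userspace C with an invented helper name that reproduces the same comparison and shows a wrapped case:

#include <stdint.h>
#include <stdio.h>

/* Same test as i915_seqno_passed(): cast the unsigned difference to a
 * signed value so the ordering survives 32-bit wraparound. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));                    /* 1: no wrap */
	printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu)); /* 1: seq1 was issued after the wrap */
	printf("%d\n", seqno_passed(0xfffffffeu, 0x00000002u)); /* 0: seq1 is older */
	return 0;
}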
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+ bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+ bool purgeable_only);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
- int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
- int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
@@ -1163,6 +1267,8 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1170,79 +1276,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
#endif
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
- == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = read##y(dev_priv->regs + reg); \
+ trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
+
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
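The __i915_read/__i915_write generators above stamp out one traced accessor per access width via token pasting, and the I915_READ*/I915_WRITE* macros then select the matching one. Below is a minimal userspace sketch of the same pattern, not part of the patch, assuming a plain in-memory register window and printf in place of readl/writel and the tracepoint; every name in it is invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t regs[256];	/* stand-in for the mapped register BAR */

/* Generate one read and one write helper per width, logging each access
 * the way trace_i915_reg_rw() would. */
#define DEFINE_REG_ACCESSORS(x)						\
static uint##x##_t reg_read##x(uint32_t reg)				\
{									\
	uint##x##_t val;						\
	memcpy(&val, regs + reg, sizeof(val));				\
	printf("R reg=0x%02x len=%zu val=0x%llx\n",			\
	       reg, sizeof(val), (unsigned long long)val);		\
	return val;							\
}									\
static void reg_write##x(uint32_t reg, uint##x##_t val)			\
{									\
	printf("W reg=0x%02x len=%zu val=0x%llx\n",			\
	       reg, sizeof(val), (unsigned long long)val);		\
	memcpy(regs + reg, &val, sizeof(val));				\
}
DEFINE_REG_ACCESSORS(8)
DEFINE_REG_ACCESSORS(16)
DEFINE_REG_ACCESSORS(32)
#undef DEFINE_REG_ACCESSORS

int main(void)
{
	reg_write32(0x10, 0xdeadbeef);
	return reg_read32(0x10) == 0xdeadbeef ? 0 : 1;
}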
+
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to keep the GT core from powering down and returning stale
+ * values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
- val = readl(dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_READ)
- printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ if (dev_priv->info->gen >= 6) {
+ __gen6_force_wake_get(dev_priv);
+ val = I915_READ(reg);
+ __gen6_force_wake_put(dev_priv);
+ } else
+ val = I915_READ(reg);
+
return val;
}
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
- u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
- writel(val, dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_WRITE)
- printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+ /* Trace the write before performing the actual register write */
+ trace_i915_reg_rw('W', reg, val, len);
+ switch (len) {
+ case 8:
+ writeq(val, dev_priv->regs + reg);
+ break;
+ case 4:
+ writel(val, dev_priv->regs + reg);
+ break;
+ case 2:
+ writew(val, dev_priv->regs + reg);
+ break;
+ case 1:
+ writeb(val, dev_priv->regs + reg);
+ break;
+ }
}
-#define I915_READ(reg) i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg) readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg) (void)I915_READ(reg)
-#define POSTING_READ16(reg) (void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
- I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
- I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
-} while (0)
-
-
-#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
-} while(0)
-
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1406,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (dev_priv->render_ring.status_page.page_addr))[reg])
+ (LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
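READ_HWSP()/READ_BREADCRUMB() above simply index into the hardware status page, where the GPU writes back the sequence number of the last completed request; comparing that value against a request's seqno tells whether the request has retired. A rough standalone sketch of that check follows, not part of the patch, using a fake status-page array in place of LP_RING(dev_priv)->status_page.page_addr and invented names throughout:

#include <stdint.h>
#include <stdio.h>

#define HWS_BREADCRUMB_INDEX 0x21	/* same dword slot as I915_BREADCRUMB_INDEX */

static volatile uint32_t status_page[1024];	/* one 4 KiB page of dwords */

static uint32_t read_breadcrumb(void)
{
	/* Mirrors READ_BREADCRUMB(): fetch the last seqno the GPU wrote back. */
	return status_page[HWS_BREADCRUMB_INDEX];
}

int main(void)
{
	uint32_t request_seqno = 40;

	status_page[HWS_BREADCRUMB_INDEX] = 42;	/* pretend the GPU finished seqno 42 */

	if ((int32_t)(read_breadcrumb() - request_seqno) >= 0)
		printf("request %u completed\n", request_seqno);
	else
		printf("request %u still pending\n", request_seqno);
	return 0;
}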
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6ed43ae..cf4f74c7c6fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#include <linux/intel-gtt.h>
-
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
- bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+ struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask);
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count++;
- dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count--;
- dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count++;
- dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count--;
- dev_priv->mm.pin_memory -= size;
-}
-
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
@@ -140,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
return -EIO;
}
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -163,75 +128,80 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
}
static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
+ return obj->gtt_space && !obj->active && obj->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (start >= end ||
- (start & (PAGE_SIZE - 1)) != 0 ||
- (end & (PAGE_SIZE - 1)) != 0) {
- return -EINVAL;
- }
-
- drm_mm_init(&dev_priv->mm.gtt_space, start,
- end - start);
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
- return 0;
+ /* Take over this portion of the GTT */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_init *args = data;
- int ret;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+ i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ pinned = 0;
mutex_lock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
return 0;
}
-
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
u32 handle;
@@ -242,45 +212,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOMEM;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj);
return ret;
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
args->handle = handle;
return 0;
}
-static inline int
-fast_shmem_read(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+ obj->tiling_mode != I915_TILING_NONE;
}
static inline void
@@ -356,38 +309,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
* fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
*/
static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -398,30 +364,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
- int ret;
-
- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
- /* If we've insufficient memory to map in the pages, attempt
- * to make some space by throwing out some old buffers.
- */
- if (ret == -ENOMEM) {
- struct drm_device *dev = obj->dev;
-
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
- if (ret)
- return ret;
-
- ret = i915_gem_object_get_pages(obj, 0);
- }
-
- return ret;
-}
-
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +371,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
* and not take page faults.
*/
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
@@ -479,19 +422,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +444,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
@@ -512,11 +459,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
} else {
slow_shmem_copy(user_pages[data_page_index],
data_page_offset,
- obj_priv->pages[shmem_page_index],
+ page,
shmem_page_offset,
page_length);
}
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -525,6 +475,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
+ mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
}
drm_free_large(user_pages);
@@ -539,11 +490,10 @@ out:
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (args->size == 0)
@@ -563,39 +513,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check source. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
-out_put:
- i915_gem_object_put_pages(obj);
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -645,32 +589,16 @@ slow_kernel_write(struct io_mapping *mapping,
io_mapping_unmap(dst_vaddr);
}
-static inline int
-fast_shmem_write(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -680,8 +608,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -721,11 +648,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
*/
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -762,12 +689,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto out_unpin_pages;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin_pages;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin_pages;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -813,39 +743,58 @@ out_unpin_pages:
* copy_from_user into the kmapped pages backing the object.
*/
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_write(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page, KM_USER0);
+ ret = __copy_from_user_inatomic(vaddr + page_offset,
+ user_data,
+ page_length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -864,17 +813,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* struct_mutex is held.
*/
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
+ int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
int ret;
@@ -912,20 +862,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +885,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length,
0);
} else {
- slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length);
}
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -974,8 +934,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
@@ -995,15 +954,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check destination. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
@@ -1014,16 +973,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- obj_priv->gtt_space &&
- obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0);
+ else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
@@ -1034,26 +996,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
- i915_gem_object_put_pages(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1065,12 +1020,10 @@ unlock:
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1095,28 +1048,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
-
- intel_mark_busy(dev, obj);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- /* Update the LRU on the fence for the CPU access that's
- * about to occur.
- */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
-
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1127,11 +1067,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1142,10 +1078,10 @@ unlock:
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1091,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
/* Pinned buffers may be scanout, so flush the cache */
- if (to_intel_bo(obj)->pin_count)
+ if (obj->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1180,8 +1116,9 @@ unlock:
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
loff_t offset;
@@ -1190,10 +1127,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ if (obj->size > dev_priv->mm.gtt_mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
offset = args->offset;
down_write(&current->mm->mmap_sem);
@@ -1228,10 +1170,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1243,27 +1184,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
- if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
-
- /* Need a new fence register? */
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, true);
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL, true);
+ if (ret)
+ goto unlock;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1272,11 +1221,12 @@ unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
- case -EAGAIN:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
@@ -1295,37 +1245,39 @@ unlock:
* This routine allocates and attaches a fake offset for @obj.
*/
static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
/* Set the object up for mmap'ing */
- list = &obj->map_list;
+ list = &obj->base.map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
- map->size = obj->size;
+ map->size = obj->base.size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->base.size / PAGE_SIZE,
+ 0, 0);
if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ DRM_ERROR("failed to allocate offset for bo %d\n",
+ obj->base.name);
ret = -ENOSPC;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
+ obj->base.size / PAGE_SIZE,
+ 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
@@ -1338,16 +1290,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
goto out_free_mm;
}
- /* By now we should be all set, any drm_mmap request on the offset
- * below will get to our mmap & fault handler */
- obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
+ list->map = NULL;
return ret;
}
@@ -1367,38 +1316,51 @@ out_free_list:
* fixup by i915_gem_fault().
*/
void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ if (!obj->fault_mappable)
+ return;
+
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+ obj->base.size, 1);
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping,
- obj_priv->mmap_offset, obj->size, 1);
+ obj->fault_mappable = false;
}
static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
+ struct drm_map_list *list = &obj->base.map_list;
- list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t size;
- if (list->map) {
- kfree(list->map);
- list->map = NULL;
- }
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return obj->base.size;
- obj_priv->mmap_offset = 0;
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ return size;
}
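On pre-gen4 parts a tiled object must live inside a power-of-two fence region whose minimum size is 1 MiB on gen3 and 512 KiB before that; the loop above doubles from that floor until the object fits. The following standalone sketch, not part of the patch and using an invented helper name, reproduces the rounding with a worked value:

#include <stdint.h>
#include <stdio.h>

/* Same rounding as i915_gem_get_gtt_size(): start at the minimum fence
 * size for the generation and double until the object fits. */
static uint32_t fence_region_size(uint32_t obj_size, int gen, int tiled)
{
	uint32_t size;

	if (gen >= 4 || !tiled)
		return obj_size;	/* newer chips need no power-of-two region */

	size = (gen == 3) ? 1024 * 1024 : 512 * 1024;
	while (size < obj_size)
		size <<= 1;

	return size;
}

int main(void)
{
	/* A 1.5 MiB tiled buffer on gen3 occupies a 2 MiB fence region... */
	printf("%u\n", fence_region_size(1536 * 1024, 3, 1));	/* 2097152 */
	/* ...while on gen4+ no rounding is required. */
	printf("%u\n", fence_region_size(1536 * 1024, 4, 1));	/* 1572864 */
	return 0;
}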
/**
@@ -1406,42 +1368,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int start, i;
+ struct drm_device *dev = obj->base.dev;
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (INTEL_INFO(dev)->gen == 3)
- start = 1024*1024;
- else
- start = 512*1024;
+ return i915_gem_get_gtt_size(obj);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ * unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ int tile_height;
- for (i = start; i < obj->size; i <<= 1)
- ;
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return 4096;
- return i;
+ /*
+ * Older chips need unfenced tiled buffers to be aligned to the left
+ * edge of an even tile row (where tile rows are counted as if the bo is
+ * placed in a fenced gtt region).
+ */
+ if (IS_GEN2(dev) ||
+ (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+
+ return tile_height * obj->stride * 2;
}
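For unfenced tiled buffers on these older chips the object must start on an even tile row, so the required alignment works out to two tile rows: tile_height (32 rows for gen2 or 128-byte Y tiles, 8 otherwise) times the object's stride. As a worked example, a Y-tiled surface with a 4 KiB stride on a part with 128-byte Y tiling needs 32 * 4096 * 2 = 256 KiB of alignment. A small standalone sketch of that arithmetic, not part of the patch and with an invented function name:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as i915_gem_get_unfenced_gtt_alignment() for pre-gen4
 * tiled objects: align to two tile rows. */
static uint32_t unfenced_alignment(int gen2, int y_tiled_128byte, uint32_t stride)
{
	int tile_height = (gen2 || y_tiled_128byte) ? 32 : 8;

	return tile_height * stride * 2;
}

int main(void)
{
	/* Y-tiled, 128-byte tiles, 4 KiB stride: 32 * 4096 * 2 = 256 KiB. */
	printf("%u\n", unfenced_alignment(0, 1, 4096));	/* 262144 */
	/* X-tiled on the same chip: 8 * 4096 * 2 = 64 KiB. */
	printf("%u\n", unfenced_alignment(0, 0, 4096));	/* 65536 */
	return 0;
}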
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1442,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1456,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ ret = -E2BIG;
+ goto unlock;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (!obj_priv->mmap_offset) {
+ if (!obj->base.map_list.map) {
ret = i915_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- args->offset = obj_priv->mmap_offset;
-
- /*
- * Pull it into the GTT so that we have a page list (makes the
- * initial fault faster and any subsequent flushing possible).
- */
- if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto out;
- }
+ args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask)
+{
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_RECLAIMABLE |
+ gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ obj->pages[i] = page;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+}
+
static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size / PAGE_SIZE;
+ int page_count = obj->base.size / PAGE_SIZE;
int i;
- BUG_ON(obj_priv->pages_refcount == 0);
- BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
-
- if (--obj_priv->pages_refcount != 0)
- return;
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- if (obj_priv->madv == I915_MADV_DONTNEED)
- obj_priv->dirty = 0;
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj->dirty)
+ set_page_dirty(obj->pages[i]);
- if (obj_priv->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj_priv->pages[i]);
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(obj->pages[i]);
- page_cache_release(obj_priv->pages[i]);
+ page_cache_release(obj->pages[i]);
}
- obj_priv->dirty = 0;
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
-}
-
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ obj->dirty = 0;
- ring->outstanding_lazy_request = true;
- return dev_priv->next_seqno;
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
}
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL);
- obj_priv->ring = ring;
+ obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
- list_move_tail(&obj_priv->ring_list, &ring->active_list);
- obj_priv->last_rendering_seqno = seqno;
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+ if (obj->fenced_gpu_access) {
+ struct drm_i915_fence_reg *reg;
+
+ BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ }
}
static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
- list_del_init(&obj_priv->ring_list);
- obj_priv->last_rendering_seqno = 0;
+ BUG_ON(!obj->active);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+ i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (obj->pin_count != 0)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ obj->pending_gpu_write = false;
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
}
/* Immediately discard the backing storage */
static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
/* Our goal here is to return as much of the memory as
@@ -1600,42 +1654,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
* backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store.
*/
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = obj->base.filp->f_path.dentry->d_inode;
truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
- obj_priv->madv = __I915_MADV_PURGED;
+ obj->madv = __I915_MADV_PURGED;
}
static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
- return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count != 0)
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- list_del_init(&obj_priv->ring_list);
-
- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
- obj_priv->last_rendering_seqno = 0;
- obj_priv->ring = NULL;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- WARN_ON(i915_verify_lists(dev));
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
@@ -1643,37 +1673,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv, *next;
+ struct drm_i915_gem_object *obj, *next;
- list_for_each_entry_safe(obj_priv, next,
+ list_for_each_entry_safe(obj, next,
&ring->gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (obj->write_domain & flush_domains) {
- uint32_t old_write_domain = obj->write_domain;
+ if (obj->base.write_domain & flush_domains) {
+ uint32_t old_write_domain = obj->base.write_domain;
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring);
-
- /* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_active(obj, ring,
+ i915_gem_next_request_seqno(dev, ring));
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
}
}
-uint32_t
+int
i915_add_request(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_request *request,
@@ -1683,17 +1703,17 @@ i915_add_request(struct drm_device *dev,
struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
+ int ret;
+
+ BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
- if (request == NULL) {
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
- }
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+ return ret;
- seqno = ring->add_request(dev, ring, 0);
ring->outstanding_lazy_request = false;
request->seqno = seqno;
@@ -1717,26 +1737,7 @@ i915_add_request(struct drm_device *dev,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
- uint32_t flush_domains = 0;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (INTEL_INFO(dev)->gen >= 4)
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
- ring->flush(dev, ring,
- I915_GEM_DOMAIN_COMMAND, flush_domains);
+ return 0;
}
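With this change i915_add_request() no longer allocates the request or hands back a seqno; the caller supplies a zeroed request and must check for failure, freeing the request itself if it was not queued. A minimal sketch of the new calling convention (the same pattern the retire worker and wait path below follow; assumes struct_mutex is held):

	struct drm_i915_gem_request *request;
	int ret;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	ret = i915_add_request(dev, NULL, request, ring);
	if (ret) {
		/* Not queued: the caller still owns the request. */
		kfree(request);
		return ret;
	}
	/* On success the assigned seqno is available in request->seqno. */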
static inline void
@@ -1769,62 +1770,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct drm_i915_gem_object *obj = reg->obj;
+
+ if (!obj)
+ continue;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
}
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int i;
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
}
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj_priv,
+ list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
- obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
/* The fence registers are invalidated so clear them out */
- for (i = 0; i < 16; i++) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- continue;
-
- i915_gem_clear_fence_reg(reg->obj);
- }
+ i915_gem_reset_fences(dev);
}
/**
@@ -1836,6 +1851,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
+ int i;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
@@ -1843,7 +1859,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
WARN_ON(i915_verify_lists(dev));
- seqno = ring->get_seqno(dev, ring);
+ seqno = ring->get_seqno(ring);
+
+ for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
+ if (seqno >= ring->sync_seqno[i])
+ ring->sync_seqno[i] = 0;
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -1865,18 +1886,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
break;
- obj = &obj_priv->base;
- if (obj->write_domain != 0)
+ if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1903,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- ring->user_irq_put(dev, ring);
+ ring->irq_put(ring);
dev_priv->trace_irq_seqno = 0;
}
@@ -1895,24 +1914,24 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj_priv, *tmp;
+ struct drm_i915_gem_object *obj, *next;
/* We must be careful that during unbind() we do not
* accidentally infinitely recurse into retire requests.
* Currently:
* retire -> free -> unbind -> wait -> retire_ring
*/
- list_for_each_entry_safe(obj_priv, tmp,
+ list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list,
mm_list)
- i915_gem_free_object_tail(&obj_priv->base);
+ i915_gem_free_object_tail(obj);
}
- i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
}
static void
@@ -1920,6 +1939,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
+ bool idle;
+ int i;
dev_priv = container_of(work, drm_i915_private_t,
mm.retire_work.work);
@@ -1933,11 +1954,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
i915_gem_retire_requests(dev);
- if (!dev_priv->mm.suspended &&
- (!list_empty(&dev_priv->render_ring.request_list) ||
- !list_empty(&dev_priv->bsd_ring.request_list) ||
- !list_empty(&dev_priv->blt_ring.request_list)))
+ /* Send a periodic flush down the ring so we don't hold onto GEM
+ * objects indefinitely.
+ */
+ idle = true;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+ if (!list_empty(&ring->gpu_write_list)) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ ret = i915_gem_flush_ring(dev, ring, 0,
+ I915_GEM_GPU_DOMAINS);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (ret || request == NULL ||
+ i915_add_request(dev, NULL, request, ring))
+ kfree(request);
+ }
+
+ idle &= list_empty(&ring->request_list);
+ }
+
+ if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
mutex_unlock(&dev->struct_mutex);
}
@@ -1954,14 +1995,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- if (ring->outstanding_lazy_request) {
- seqno = i915_add_request(dev, NULL, NULL, ring);
- if (seqno == 0)
+ if (seqno == ring->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
return -ENOMEM;
+
+ ret = i915_add_request(dev, NULL, request, ring);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
}
- BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1975,21 +2025,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_begin(dev, seqno);
- ring->waiting_gem_seqno = seqno;
- ring->user_irq_get(dev, ring);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
+ ring->waiting_seqno = seqno;
+ if (ring->irq_get(ring)) {
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
- ring->waiting_gem_seqno = 0;
+ ring->irq_put(ring);
+ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
+ ret = -EBUSY;
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
@@ -1998,7 +2050,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
@@ -2023,70 +2075,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
return i915_do_wait_request(dev, seqno, 1, ring);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- struct drm_file *file_priv,
- struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- ring->flush(dev, ring, invalidate_domains, flush_domains);
- i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- if (flush_rings & RING_RENDER)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->render_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BSD)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->bsd_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BLT)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->blt_ring,
- invalidate_domains, flush_domains);
- }
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
- BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
- if (obj_priv->active) {
+ if (obj->active) {
ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
+ obj->last_rendering_seqno,
interruptible,
- obj_priv->ring);
+ obj->ring);
if (ret)
return ret;
}
@@ -2098,17 +2110,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return 0;
- if (obj_priv->pin_count != 0) {
+ if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
@@ -2131,27 +2140,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/
if (ret) {
i915_gem_clflush_object(obj);
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
/* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTARTSYS)
+ return ret;
- i915_gem_object_put_pages(obj);
- BUG_ON(obj_priv->pages_refcount);
+ i915_gem_gtt_unbind_object(obj);
+ i915_gem_object_put_pages_gtt(obj);
- i915_gem_info_remove_gtt(dev_priv, obj->size);
- list_del_init(&obj_priv->mm_list);
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- obj_priv->gtt_offset = 0;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj_priv))
+ if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2168,37 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+int
+i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ int ret;
+
+ ret = ring->flush(ring, invalidate_domains, flush_domains);
+ if (ret)
+ return ret;
+
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+ return 0;
+}
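Note that the ring flush can now fail, so callers propagate the error instead of assuming the flush happened. A sketch of the pattern used repeatedly below when pending GPU writes must land before other access:

	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
		ret = i915_gem_flush_ring(dev, obj->ring,
					  0, obj->base.write_domain);
		if (ret)
			return ret;
	}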
+
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ int ret;
+
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
- i915_gem_flush_ring(dev, NULL, ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (!list_empty(&ring->gpu_write_list)) {
+ ret = i915_gem_flush_ring(dev, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+ }
+
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
@@ -2177,7 +2209,7 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- int ret;
+ int ret, i;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2217,305 @@ i915_gpu_idle(struct drm_device *dev)
return 0;
/* Flush everything onto the inactive list. */
- ret = i915_ring_idle(dev, &dev_priv->render_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->blt_ring);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
-
- BUG_ON(obj_priv->pages_refcount
- == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
- if (obj_priv->pages_refcount++ != 0)
- return 0;
-
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
- if (obj_priv->pages == NULL) {
- obj_priv->pages_refcount--;
- return -ENOMEM;
- }
-
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj_priv->pages[i] = page;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ if (ret)
+ return ret;
}
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_do_bit_17_swizzle(obj);
-
return 0;
-
-err_pages:
- while (i--)
- page_cache_release(obj_priv->pages[i]);
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
- obj_priv->pages_refcount--;
- return PTR_ERR(page);
}
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+ return 0;
}
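In the pipelined case the fence update is emitted from the ring rather than written through MMIO: MI_LOAD_REGISTER_IMM takes (register, value) pairs, so the 64-bit fence register is programmed as two 32-bit halves. The same shape repeats in the i965/i915/i830 variants below; a condensed sketch using a hypothetical helper name:

/* Hypothetical helper, not part of the patch: emit a 64-bit register write
 * from the ring as two dword (offset, value) pairs. */
static int emit_fence_lri64(struct intel_ring_buffer *ring, u32 reg, u64 val)
{
	int ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));	/* two pairs follow */
	intel_ring_emit(ring, reg);
	intel_ring_emit(ring, (u32)val);		/* low dword */
	intel_ring_emit(ring, reg + 4);
	intel_ring_emit(ring, (u32)(val >> 32));	/* high dword */
	intel_ring_advance(ring);
	return 0;
}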
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+ return 0;
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ u32 fence_reg, val, pitch_val;
int tile_width;
- uint32_t fence_reg, val;
- uint32_t pitch_val;
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
- __func__, obj_priv->gtt_offset, obj->size);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size))
+ return -EINVAL;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
- pitch_val = obj_priv->stride / tile_width;
+ pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- else
- WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
+ val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
+ fence_reg = obj->fence_reg;
+ if (fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, fence_reg);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(fence_reg, val);
+
+ return 0;
}
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint32_t val;
uint32_t pitch_val;
- uint32_t fence_size_bits;
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj_priv->gtt_offset);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size))
+ return -EINVAL;
- pitch_val = obj_priv->stride / 128;
+ pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
- WARN_ON(fence_size_bits & ~0x00000f00);
- val |= fence_size_bits;
+ val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+ return 0;
}
-static int i915_find_fence_reg(struct drm_device *dev,
- bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+ mb();
+
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL, true);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *obj_priv = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = NULL;
- int i, avail, ret;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
/* First try to find a free reg */
- avail = 0;
+ avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
- return i;
+ return reg;
- obj_priv = to_intel_bo(reg->obj);
- if (!obj_priv->pin_count)
- avail++;
+ if (!reg->obj->pin_count)
+ avail = reg;
}
- if (avail == 0)
- return -ENOSPC;
+ if (avail == NULL)
+ return NULL;
/* None available, try to steal one or wait for a user to finish */
- i = I915_FENCE_REG_NONE;
- list_for_each_entry(reg, &dev_priv->mm.fence_list,
- lru_list) {
- obj = reg->obj;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count)
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->obj->pin_count)
continue;
- /* found one! */
- i = obj_priv->fence_reg;
- break;
+ if (first == NULL)
+ first = reg;
+
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
}
- BUG_ON(i == I915_FENCE_REG_NONE);
+ if (avail == NULL)
+ avail = first;
- /* We only have a reference on obj from the active list. put_fence_reg
- * might drop that one, causing a use-after-free in it. So hold a
- * private reference to obj like the other callers of put_fence_reg
- * (set_tiling ioctl) do. */
- drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- drm_gem_object_unreference(obj);
- if (ret != 0)
- return ret;
-
- return i;
+ return avail;
}
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: whether a wait for the register to retire may be interrupted
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2527,141 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_fence_reg *reg;
int ret;
- /* Just update our place in the LRU if our fence is getting used. */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ /* XXX disable pipelining. There are bugs. Shocking. */
+ pipelined = NULL;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ reg->setup_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj,
+ pipelined,
+ interruptible);
+ if (ret)
+ return ret;
+ } else if (obj->tiling_changed) {
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
+
+ obj->fenced_gpu_access = false;
+ }
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ BUG_ON(!pipelined && reg->setup_seqno);
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(dev, pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
return 0;
}
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj_priv->gtt_offset);
- break;
- }
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -ENOSPC;
- ret = i915_find_fence_reg(dev, interruptible);
- if (ret < 0)
+ ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ if (ret)
return ret;
- obj_priv->fence_reg = ret;
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ ret = i915_gem_object_flush_fence(old,
+ pipelined,
+ interruptible);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+ obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
- sandybridge_write_fence_reg(reg);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
break;
case 5:
case 4:
- i965_write_fence_reg(reg);
+ ret = i965_write_fence_reg(obj, pipelined);
break;
case 3:
- i915_write_fence_reg(reg);
+ ret = i915_write_fence_reg(obj, pipelined);
break;
case 2:
- i830_write_fence_reg(reg);
+ ret = i830_write_fence_reg(obj, pipelined);
break;
}
- trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
- obj_priv->tiling_mode);
-
- return 0;
+ return ret;
}
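A hedged sketch of the typical caller: a tiled object about to be accessed through a GTT mapping takes a fence with pipelined set to NULL (the pipelined path is disabled above anyway), waiting interruptibly on user-triggered paths. This assumes the object has already been bound into the GTT by its caller:

	/* Sketch only: obj is already bound/pinned in the GTT. */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, NULL, true);
		if (ret)
			return ret;
	}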
/**
@@ -2521,154 +2669,125 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
*/
static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg)
{
- struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- uint32_t fence_reg;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
- (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
- if (obj_priv->fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
- reg->obj = NULL;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg;
-
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->gpu) {
- int ret;
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj, interruptible);
- if (ret)
- return ret;
-
- reg->gpu = false;
- }
-
- i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg(obj);
-
- return 0;
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
int ret;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
+ fence_size = i915_gem_get_gtt_size(obj);
+ fence_alignment = i915_gem_get_gtt_alignment(obj);
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
}
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev_priv->mm.gtt_total) {
+ if (obj->base.size >
+ (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL)
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space == NULL) {
+ if (map_and_fenceable)
+ free_space =
+ drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ size, alignment, 0);
+
+ if (free_space != NULL) {
+ if (map_and_fenceable)
+ obj->gtt_space =
+ drm_mm_get_block_range_generic(free_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ obj->gtt_space =
+ drm_mm_get_block(free_space, size, alignment);
+ }
+ if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, size, alignment,
+ map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- ret = i915_gem_object_get_pages(obj, gfpmask);
+ ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
if (ret == -ENOMEM) {
- /* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ /* first try to reclaim some memory by clearing the GTT */
+ ret = i915_gem_evict_everything(dev, false);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2676,7 +2795,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
- return ret;
+ return -ENOMEM;
}
goto search_free;
@@ -2685,122 +2804,116 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return ret;
}
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->pages,
- obj->size >> PAGE_SHIFT,
- obj_priv->gtt_space->start,
- obj_priv->agp_type);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_put_pages(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- ret = i915_gem_evict_something(dev, obj->size, alignment);
- if (ret)
+ ret = i915_gem_gtt_bind_object(obj);
+ if (ret) {
+ i915_gem_object_put_pages_gtt(obj);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+
+ if (i915_gem_evict_everything(dev, false))
return ret;
goto search_free;
}
- /* keep track of bounds object by adding it to the inactive list */
- list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- i915_gem_info_add_gtt(dev_priv, obj->size);
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+ obj->gtt_offset = obj->gtt_space->start;
+
+ fenceable =
+ obj->gtt_space->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0;
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->map_and_fenceable = mappable && fenceable;
+
+ trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
return 0;
}
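The map_and_fenceable flag decides both the size/alignment used for the reservation and where in the GTT it may land; an illustrative wrapper, assuming struct_mutex is held and the object is currently unbound (the helper name is hypothetical, and bind_to_gtt is static to this file):

/* Illustrative only: choose placement constraints when binding. */
static int bind_for_cpu_access(struct drm_i915_gem_object *obj, bool cpu_access)
{
	/* true  => reserve fence_size at fence_alignment and stay below
	 *          dev_priv->mm.gtt_mappable_end (GTT mmap, pwrite, scanout);
	 * false => reserve obj->base.size at the unfenced alignment,
	 *          anywhere in the GTT (GPU-only buffers).
	 */
	return i915_gem_object_bind_to_gtt(obj, 0, cpu_access);
}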
void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->pages == NULL)
+ if (obj->pages == NULL)
return;
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- uint32_t old_write_domain;
+ struct drm_device *dev = obj->base.dev;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev, NULL,
- to_intel_bo(obj)->ring,
- 0, obj->write_domain);
- BUG_ON(obj->write_domain);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
-
- return 0;
+ return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
- /* No actual flushing is required for the GTT write domain. Writes
+ /* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
*/
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ wmb();
+
+ i915_gem_release_mmap(obj);
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ intel_gtt_chipset_flush();
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
@@ -2811,37 +2924,39 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
+ if (obj->pending_gpu_write || write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
+
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
@@ -2856,23 +2971,23 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
+
/* Currently, we are always called from a non-interruptible context. */
- if (!pipelined) {
+ if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -2880,12 +2995,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
i915_gem_object_flush_cpu_write_domain(obj);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
+ obj->base.write_domain);
return 0;
}
@@ -2894,14 +3009,19 @@ int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
bool interruptible)
{
+ int ret;
+
if (!obj->active)
return 0;
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
- 0, obj->base.write_domain);
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
- return i915_gem_object_wait_rendering(&obj->base, interruptible);
+ return i915_gem_object_wait_rendering(obj, interruptible);
}
/**
@@ -2911,14 +3031,15 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
* flushes to occur.
*/
static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
+ if (ret)
return ret;
+
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -2930,27 +3051,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3081,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
return 0;
}
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped to GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- uint32_t old_read_domains;
-
- intel_mark_busy(dev, obj);
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- (obj->write_domain != obj->pending_read_domains ||
- obj_priv->ring != ring)) {
- flush_domains |= obj->write_domain;
- invalidate_domains |=
- obj->pending_read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- old_read_domains = obj->read_domains;
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0)
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= obj_priv->ring->id;
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= ring->id;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
-}
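The removed i915_gem_object_set_to_gpu_domain() above is, at heart, mask arithmetic on the (read_domains, write_domain) pair: flush the old write domain when the new readers differ from it, and invalidate any read domain that is newly added. A minimal stand-alone sketch of just that arithmetic follows; toy_obj and the DOMAIN_* values are illustrative stand-ins, and the ring tracking and deferred write-domain update are deliberately omitted.

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU     (1u << 0)
#define DOMAIN_COMMAND (1u << 2)

struct toy_obj {
    uint32_t read_domains;         /* caches that currently hold valid data */
    uint32_t write_domain;         /* at most one cache holds dirty data */
    uint32_t pending_read_domains; /* requested by the next execbuffer */
    uint32_t pending_write_domain;
};

/* Mirror of the removed mask arithmetic: flush the old write domain if the
 * new readers differ from it, and invalidate every newly added read domain. */
static void compute_domain_transition(struct toy_obj *obj,
                                      uint32_t *flush, uint32_t *invalidate)
{
    *flush = *invalidate = 0;

    /* No new writer: let the object stay readable in its old domains too. */
    if (obj->pending_write_domain == 0)
        obj->pending_read_domains |= obj->read_domains;

    if (obj->write_domain &&
        obj->write_domain != obj->pending_read_domains) {
        *flush |= obj->write_domain;
        *invalidate |= obj->pending_read_domains & ~obj->write_domain;
    }
    *invalidate |= obj->pending_read_domains & ~obj->read_domains;

    obj->read_domains = obj->pending_read_domains;
}

int main(void)
{
    /* "Case 1" from the comment above: a batch written by the CPU and then
     * read by the command streamer -> flush CPU, invalidate COMMAND. */
    struct toy_obj batch = {
        .read_domains = DOMAIN_CPU,
        .write_domain = DOMAIN_CPU,
        .pending_read_domains = DOMAIN_COMMAND,
    };
    uint32_t flush, invalidate;

    compute_domain_transition(&batch, &flush, &invalidate);
    printf("flush %#x invalidate %#x\n", flush, invalidate);
    return 0;
}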
-
/**
* Moves the object from a partially CPU read to a full one.
*
@@ -3145,30 +3088,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (!obj_priv->page_cpu_valid)
+ if (!obj->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
int i;
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
+ for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
}
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
- kfree(obj_priv->page_cpu_valid);
- obj_priv->page_cpu_valid = NULL;
+ kfree(obj->page_cpu_valid);
+ obj->page_cpu_valid = NULL;
}
/**
@@ -3184,19 +3125,19 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
- if (offset == 0 && size == obj->size)
+ if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
+ if (ret)
return ret;
+
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -3204,457 +3145,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ if (obj->page_cpu_valid == NULL &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj_priv->page_cpu_valid == NULL)
+ if (obj->page_cpu_valid == NULL) {
+ obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+ GFP_KERNEL);
+ if (obj->page_cpu_valid == NULL)
return -ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
- if (obj_priv->page_cpu_valid[i])
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
- obj_priv->page_cpu_valid[i] = 1;
+ obj->page_cpu_valid[i] = 1;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *reloc)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_gem_object *target_obj;
- uint32_t target_offset;
- int ret = -EINVAL;
-
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL)
- return -ENOENT;
-
- target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- goto err;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- goto err;
- }
-
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc->presumed_offset)
- goto out;
-
- /* Check that the relocation address is valid... */
- if (reloc->offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- (int) obj->base.size);
- goto err;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- goto err;
- }
-
- /* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- goto err;
- }
-
- reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
- char *vaddr;
-
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
- kunmap_atomic(vaddr);
- } else {
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- goto err;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
- iowrite32(reloc->delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
-
- /* and update the user's relocation entry */
- reloc->presumed_offset = target_offset;
-
-out:
- ret = 0;
-err:
- drm_gem_object_unreference(target_obj);
- return ret;
-}
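Stripped of the kernel plumbing, the removed relocation helper writes target_offset + delta as a 32-bit value at reloc->offset inside the batch object and records the new presumed_offset so userspace can skip the write next time. A hedged user-space sketch of that patching step, using a plain byte buffer in place of a pinned GEM object (toy_reloc and apply_reloc are illustrative names, not driver API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_reloc {
    uint64_t offset;          /* where in the batch to patch */
    uint32_t delta;           /* offset within the target object */
    uint64_t presumed_offset; /* what userspace believed the target address was */
};

/* Patch one relocation: bounds/alignment checks, then write the target's
 * real offset plus delta, and remember it as the new presumed offset. */
static int apply_reloc(uint8_t *batch, size_t batch_size,
                       struct toy_reloc *reloc, uint32_t target_offset)
{
    uint32_t value = target_offset + reloc->delta;

    if (reloc->offset + 4 > batch_size || (reloc->offset & 3))
        return -1;                       /* beyond bounds or not 4-byte aligned */
    if (target_offset == reloc->presumed_offset)
        return 0;                        /* already correct, nothing to write */

    memcpy(batch + reloc->offset, &value, sizeof(value));
    reloc->presumed_offset = target_offset;
    return 0;
}

int main(void)
{
    uint8_t batch[64] = {0};
    struct toy_reloc reloc = { .offset = 8, .delta = 0x100, .presumed_offset = ~0ull };
    uint32_t readback;

    if (apply_reloc(batch, sizeof(batch), &reloc, 0x10000) == 0) {
        memcpy(&readback, batch + reloc.offset, sizeof(readback));
        printf("wrote %#x at +%llu\n", readback,
               (unsigned long long)reloc.offset);
    }
    return 0;
}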
-
-static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
-{
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int i, ret;
-
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
-
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
- return -EFAULT;
-
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
- if (ret)
- return ret;
-
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- int i, ret;
-
- for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- int i, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object(obj, file,
- &exec_list[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i, retry;
-
- /* attempt to pin all of the buffers into the GTT */
- for (retry = 0; retry < 2; retry++) {
- ret = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
- struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
- bool need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
-
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(&obj->base,
- obj->tiling_mode)) {
- ret = i915_gem_object_unbind(&obj->base);
- if (ret)
- break;
- }
-
- ret = i915_gem_object_pin(&obj->base, entry->alignment);
- if (ret)
- break;
-
- /*
- * Pre-965 chips need a fence register set up in order
- * to properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(&obj->base, true);
- if (ret) {
- i915_gem_object_unpin(&obj->base);
- break;
- }
-
- dev_priv->fence_regs[obj->fence_reg].gpu = true;
- }
-
- entry->offset = obj->gtt_offset;
- }
-
- while (i--)
- i915_gem_object_unpin(object_list[i]);
-
- if (ret == 0)
- break;
-
- if (ret != -ENOSPC || retry)
- return ret;
-
- ret = i915_gem_evict_everything(dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_gem_relocation_entry *reloc;
- int i, total, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->in_execbuffer = false;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- total = 0;
- for (i = 0; i < count; i++)
- total += exec_list[i].relocation_count;
-
- reloc = drm_malloc_ab(total, sizeof(*reloc));
- if (reloc == NULL) {
- mutex_lock(&dev->struct_mutex);
- return -ENOMEM;
- }
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
-
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
- if (copy_from_user(reloc+total, user_relocs,
- exec_list[i].relocation_count *
- sizeof(*reloc))) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- total += exec_list[i].relocation_count;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- count);
- if (ret)
- goto err;
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
- &exec_list[i],
- reloc + total);
- if (ret)
- goto err;
-
- total += exec_list[i].relocation_count;
- }
-
- /* Leave the user relocations as they are; this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- drm_free_large(reloc);
- return ret;
-}
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
- struct drm_file *file,
- struct intel_ring_buffer *ring,
- struct drm_gem_object **objects,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
- for (i = 0; i < count; i++)
- i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
- /* XXX replace with semaphores */
- if (obj->ring && ring != obj->ring) {
- ret = i915_gem_object_wait_rendering(&obj->base, true);
- if (ret)
- return ret;
- }
- }
+ obj->base.write_domain);
return 0;
}
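The per-page bookkeeping above boils down to: keep one byte per page, allocate the map lazily on the first partial read, and clflush only pages not yet marked CPU-valid. A small stand-alone sketch of that idea; TOY_PAGE_SIZE and toy_obj are illustrative stand-ins for the driver's structures, and a counter stands in for the actual cache flush.

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096u

struct toy_obj {
    size_t size;                   /* object size in bytes, page aligned */
    unsigned char *page_cpu_valid; /* one byte per page, non-zero = CPU cache valid */
};

/* Make [offset, offset + size) readable by the CPU; returns how many pages
 * needed a flush (the driver would call drm_clflush_pages() for each). */
static size_t make_range_cpu_readable(struct toy_obj *obj,
                                      size_t offset, size_t size)
{
    size_t i, flushed = 0;

    if (obj->page_cpu_valid == NULL) {
        obj->page_cpu_valid = calloc(obj->size / TOY_PAGE_SIZE, 1);
        if (obj->page_cpu_valid == NULL)
            return (size_t)-1;
    }

    for (i = offset / TOY_PAGE_SIZE;
         i <= (offset + size - 1) / TOY_PAGE_SIZE; i++) {
        if (obj->page_cpu_valid[i])
            continue;
        obj->page_cpu_valid[i] = 1;
        flushed++;
    }
    return flushed;
}

int main(void)
{
    struct toy_obj obj = { .size = 16 * TOY_PAGE_SIZE };

    printf("first pass flushed %zu pages\n",
           make_range_cpu_readable(&obj, TOY_PAGE_SIZE, 3 * TOY_PAGE_SIZE));
    printf("second pass flushed %zu pages\n",
           make_range_cpu_readable(&obj, TOY_PAGE_SIZE, 3 * TOY_PAGE_SIZE));
    free(obj.page_cpu_valid);
    return 0;
}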
@@ -3694,599 +3223,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
/* And wait for the seqno passing without holding any locks and
* causing extra latency for others. This is safe as the irq
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->user_irq_get(dev, ring);
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- }
-
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- return ret;
-}
-
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
-{
- uint32_t exec_start, exec_len;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- if ((exec_start | exec_len) & 0x7)
- return -EINVAL;
-
- if (!exec_start)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- int length; /* limited by fault_in_pages_readable() */
-
- /* First check for malicious input causing overflow */
- if (exec[i].relocation_count >
- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
- return -EINVAL;
-
- length = exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
- /* we may also need to update the presumed offsets */
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
- if (fault_in_pages_readable(ptr, length))
- return -EFAULT;
- }
-
- return 0;
-}
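The decisive check in the removed validate_exec_list() is the overflow guard: reject any relocation_count whose byte length would overflow an int before that length is handed to access_ok() and fault_in_pages_readable(). A minimal sketch of the same guard, using a 32-byte stand-in for the relocation entry and a plain error code:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct toy_reloc_entry { uint32_t data[8]; }; /* 32 bytes, stand-in for the real entry */

struct toy_exec_object { uint32_t relocation_count; };

/* Reject counts whose total byte length cannot be represented as an int;
 * without this, a huge count wraps the multiplication and slips past the
 * later user-access checks. */
static int validate_exec_list(const struct toy_exec_object *exec, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        if (exec[i].relocation_count >
            INT_MAX / sizeof(struct toy_reloc_entry))
            return -1; /* the driver returns -EINVAL here */
    }
    return 0;
}

int main(void)
{
    struct toy_exec_object ok   = { .relocation_count = 1024 };
    struct toy_exec_object evil = { .relocation_count = UINT32_MAX };

    printf("ok: %d  evil: %d\n",
           validate_exec_list(&ok, 1), validate_exec_list(&evil, 1));
    return 0;
}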
-
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec_list)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_request *request = NULL;
- int ret, i, flips;
- uint64_t exec_offset;
-
- struct intel_ring_buffer *ring = NULL;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
-
- ret = validate_exec_list(exec_list, args->buffer_count);
- if (ret)
- return ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->render_ring;
- break;
- case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
- ring = &dev_priv->bsd_ring;
- break;
- case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
- ring = &dev_priv->blt_ring;
- break;
- default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
- object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (object_list == NULL) {
- DRM_ERROR("Failed to allocate object list for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (args->num_cliprects != 0) {
- cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = copy_from_user(cliprects,
- (struct drm_clip_rect __user *)
- (uintptr_t) args->cliprects_ptr,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0) {
- DRM_ERROR("copy %d cliprects failed: %d\n",
- args->num_cliprects, ret);
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- }
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto pre_mutex_err;
-
- if (dev_priv->mm.suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
- /* Look up object handles */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -ENOENT;
- goto err;
- }
-
- obj_priv = to_intel_bo(object_list[i]);
- if (obj_priv->in_execbuffer) {
- DRM_ERROR("Object %p appears more than once in object list\n",
- object_list[i]);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -EINVAL;
- goto err;
- }
- obj_priv->in_execbuffer = true;
- }
-
- /* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret)
- goto err;
-
- /* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret) {
- if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file,
- object_list,
- exec_list,
- args->buffer_count);
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- }
- if (ret)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- if (batch_obj->pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- /* Sanity check the batch buffer */
- exec_offset = to_intel_bo(batch_obj)->gtt_offset;
- ret = i915_gem_check_execbuffer(args, exec_offset);
- if (ret != 0) {
- DRM_ERROR("execbuf with invalid offset/length\n");
- goto err;
- }
-
- ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
- object_list, args->buffer_count);
- if (ret)
- goto err;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- uint32_t old_write_domain = obj->write_domain;
- obj->write_domain = obj->pending_write_domain;
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
-#if WATCH_EXEC
- i915_gem_dump_object(batch_obj,
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]->write_domain)
- flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
- }
- if (flips) {
- int plane, flip_mask;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- }
-
- /* Exec the batchbuffer */
- ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- i915_retire_commands(dev, ring);
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- i915_gem_object_move_to_active(obj, ring);
- if (obj->write_domain)
- list_move_tail(&to_intel_bo(obj)->gpu_write_list,
- &ring->gpu_write_list);
- }
-
- i915_add_request(dev, file, request, ring);
- request = NULL;
-
-err:
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = to_intel_bo(object_list[i]);
- obj_priv->in_execbuffer = false;
- }
- drm_gem_object_unreference(object_list[i]);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
- drm_free_large(object_list);
- kfree(cliprects);
- kfree(request);
-
- return ret;
-}
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret, i;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -ENOMEM;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
+ if (ring->irq_get(ring)) {
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->irq_put(ring);
- exec2.buffers_ptr = args->buffers_ptr;
- exec2.buffer_count = args->buffer_count;
- exec2.batch_start_offset = args->batch_start_offset;
- exec2.batch_len = args->batch_len;
- exec2.DR1 = args->DR1;
- exec2.DR4 = args->DR4;
- exec2.num_cliprects = args->num_cliprects;
- exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = I915_EXEC_RENDER;
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
}
}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- drm_free_large(exec2_list);
return ret;
}
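The throttle path above waits until the ring's current seqno has passed a target value. Hardware seqnos are 32-bit and wrap, so the driver compares them with signed subtraction rather than a plain '>='. A tiny sketch of that idiom; seqno_passed is an illustrative name for what the driver's i915_seqno_passed() helper does, and it stays correct as long as the two values are within 2^31 of each other.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has seq1 reached seq2 yet?" for monotonically increasing
 * 32-bit sequence numbers. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(10, 5));              /* 1: already passed */
    printf("%d\n", seqno_passed(5, 10));              /* 0: still pending */
    printf("%d\n", seqno_passed(3, UINT32_MAX - 2));  /* 1: passed across the wrap */
    return 0;
}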
int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
- if (obj_priv->gtt_space != NULL) {
- if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
if (ret)
return ret;
}
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- i915_gem_info_add_pin(dev_priv, obj->size);
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (obj->pin_count++ == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
+ obj->pin_mappable |= map_and_fenceable;
WARN_ON(i915_verify_lists(dev));
return 0;
}
void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
WARN_ON(i915_verify_lists(dev));
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
- i915_gem_info_remove_pin(dev_priv, obj->size);
+ obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}
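The pin/unpin pair above is reference-count bookkeeping: the first pin moves an inactive object onto the pinned list, and the last unpin returns it to the inactive list. A toy sketch of that pattern, with a string standing in for the driver's LRU lists (toy_obj, toy_pin and toy_unpin are illustrative names):

#include <assert.h>
#include <stdio.h>

struct toy_obj {
    int pin_count;
    int active;       /* non-zero while the GPU still references the object */
    const char *list; /* which LRU list the object currently sits on */
};

/* First pin moves an inactive object to the pinned list. */
static void toy_pin(struct toy_obj *obj)
{
    if (obj->pin_count++ == 0 && !obj->active)
        obj->list = "pinned";
}

/* Last unpin returns an inactive object to the inactive list. */
static void toy_unpin(struct toy_obj *obj)
{
    assert(obj->pin_count > 0);
    if (--obj->pin_count == 0 && !obj->active)
        obj->list = "inactive";
}

int main(void)
{
    struct toy_obj obj = { .list = "inactive" };

    toy_pin(&obj);
    toy_pin(&obj);   /* nested pin: no list movement */
    printf("%s (pin_count=%d)\n", obj.list, obj.pin_count);
    toy_unpin(&obj);
    toy_unpin(&obj);
    printf("%s (pin_count=%d)\n", obj.list, obj.pin_count);
    return 0;
}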
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count++;
- obj_priv->pin_filp = file_priv;
- if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret)
goto out;
}
@@ -4295,9 +3354,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj_priv->gtt_offset;
+ args->offset = obj->gtt_offset;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4305,38 +3364,36 @@ unlock:
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4344,48 +3401,50 @@ unlock:
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj_priv->active;
+ args->busy = obj->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer sooner rather than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
- i915_gem_flush_ring(dev, file_priv,
- obj_priv->ring,
- 0, obj->write_domain);
- } else if (obj_priv->ring->outstanding_lazy_request) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(dev, obj->ring,
+ 0, obj->base.write_domain);
+ } else if (obj->ring->outstanding_lazy_request ==
+ obj->last_rendering_seqno) {
+ struct drm_i915_gem_request *request;
+
/* This ring is not being cleared by active usage,
* so emit a request to do so.
*/
- u32 seqno = i915_add_request(dev,
- NULL, NULL,
- obj_priv->ring);
- if (seqno == 0)
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request)
+ ret = i915_add_request(dev,
+ NULL, request,
+ obj->ring);
+ else
ret = -ENOMEM;
}
@@ -4394,12 +3453,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests_ring(dev, obj->ring);
- args->busy = obj_priv->active;
+ args->busy = obj->active;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4417,8 +3476,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
switch (args->madv) {
@@ -4433,37 +3491,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_count) {
+ if (obj->pin_count) {
ret = -EINVAL;
goto out;
}
- if (obj_priv->madv != __I915_MADV_PURGED)
- obj_priv->madv = args->madv;
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
/* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj_priv) &&
- obj_priv->gtt_space == NULL)
+ if (i915_gem_object_is_purgeable(obj) &&
+ obj->gtt_space == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ args->retained = obj->madv != __I915_MADV_PURGED;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -4486,11 +3543,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
- return &obj->base;
+ return obj;
}
int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3561,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->mm_list,
+ list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
- if (obj_priv->mmap_offset)
+ if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev_priv, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj_priv->page_cpu_valid);
- kfree(obj_priv->bit_17);
- kfree(obj_priv);
+ kfree(obj->page_cpu_valid);
+ kfree(obj->bit_17);
+ kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj);
- while (obj_priv->pin_count > 0)
+ while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj);
@@ -4562,13 +3622,15 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_inactive(dev, false);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
+ i915_gem_reset_fences(dev);
+
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
@@ -4587,82 +3649,15 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret)
- goto err_unref;
-
- dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
- dev_priv->seqno_page = kmap(obj_priv->pages[0]);
- if (dev_priv->seqno_page == NULL)
- goto err_unpin;
-
- dev_priv->seqno_obj = obj;
- memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = dev_priv->seqno_obj;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->seqno_obj = NULL;
-
- dev_priv->seqno_page = NULL;
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- return ret;
- }
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
- goto cleanup_pipe_control;
+ return ret;
if (HAS_BSD(dev)) {
ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3676,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
return 0;
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
return ret;
}
@@ -4694,12 +3686,10 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}
int
@@ -4707,7 +3697,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -4727,14 +3717,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+ BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+ }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4796,17 +3784,14 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- init_ring_lists(&dev_priv->render_ring);
- init_ring_lists(&dev_priv->bsd_ring);
- init_ring_lists(&dev_priv->blt_ring);
+ INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_completion(&dev_priv->error_completion);
- spin_lock(&shrink_list_lock);
- list_add(&dev_priv->mm.shrink_list, &shrink_list);
- spin_unlock(&shrink_list_lock);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4818,6 +3803,8 @@ i915_gem_load(struct drm_device *dev)
}
}
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
@@ -4849,6 +3836,10 @@ i915_gem_load(struct drm_device *dev)
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+ dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.inactive_shrinker);
}
/*
@@ -4918,47 +3909,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
}
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv;
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ char *vaddr;
int i;
- int ret;
int page_count;
- obj_priv = to_intel_bo(obj);
- if (!obj_priv->phys_obj)
+ if (!obj->phys_obj)
return;
+ vaddr = obj->phys_obj->handle->vaddr;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret)
- goto out;
-
- page_count = obj->size / PAGE_SIZE;
-
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i]);
- char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ struct page *page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ drm_clflush_pages(&page, 1);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
}
- drm_clflush_pages(obj_priv->pages, page_count);
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
- i915_gem_object_put_pages(obj);
-out:
- obj_priv->phys_obj->cur_obj = NULL;
- obj_priv->phys_obj = NULL;
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align)
{
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
int page_count;
int i;
@@ -4966,10 +3957,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->phys_obj) {
- if (obj_priv->phys_obj->id == id)
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
@@ -4977,51 +3966,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->base.size, align);
if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
- goto out;
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return ret;
}
}
/* bind to the object */
- obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj_priv->phys_obj->cur_obj = obj;
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("failed to get page list\n");
- goto out;
- }
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
- page_count = obj->size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i]);
- char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ struct page *page;
+ char *dst, *src;
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
- }
- i915_gem_object_put_pages(obj);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
return 0;
-out:
- return ret;
}
static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -5036,7 +4024,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return -EFAULT;
}
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return 0;
}
@@ -5074,144 +4062,68 @@ i915_gpu_is_active(struct drm_device *dev)
}
static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
-{
- drm_i915_private_t *dev_priv, *next_dev;
- struct drm_i915_gem_object *obj_priv, *next_obj;
- int cnt = 0;
- int would_deadlock = 1;
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj, *next;
+ int cnt;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ return 0;
/* "fast-path" to count number of available objects */
if (nr_to_scan == 0) {
- spin_lock(&shrink_list_lock);
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (mutex_trylock(&dev->struct_mutex)) {
- list_for_each_entry(obj_priv,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- }
- }
- spin_unlock(&shrink_list_lock);
-
- return (cnt / 100) * sysctl_vfs_cache_pressure;
+ cnt = 0;
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
- spin_lock(&shrink_list_lock);
-
rescan:
/* first scan for clean buffers */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev);
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(&obj_priv->base);
- if (--nr_to_scan <= 0)
- break;
- }
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj)) {
+ if (i915_gem_object_unbind(obj) == 0 &&
+ --nr_to_scan == 0)
+ break;
}
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
-
- if (nr_to_scan <= 0)
- break;
}
/* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan > 0) {
- i915_gem_object_unbind(&obj_priv->base);
- nr_to_scan--;
- } else
- cnt++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
+ cnt = 0;
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (nr_to_scan &&
+ i915_gem_object_unbind(obj) == 0)
+ nr_to_scan--;
+ else
+ cnt++;
}
- if (nr_to_scan) {
- int active = 0;
-
+ if (nr_to_scan && i915_gpu_is_active(dev)) {
/*
* We are desperate for pages, so as a last resort, wait
* for the GPU to finish and discard whatever we can.
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (!mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- if (i915_gpu_is_active(dev)) {
- i915_gpu_idle(dev);
- active++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (active)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
-
- spin_unlock(&shrink_list_lock);
-
- if (would_deadlock)
- return -1;
- else if (cnt > 0)
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- else
- return 0;
-}
-
-static struct shrinker shrinker = {
- .shrink = i915_gem_shrink,
- .seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
- register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
- unregister_shrinker(&shrinker);
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
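The rewritten shrinker is embedded directly in dev_priv and recovered with container_of(), which is what lets the old global shrink_list and its spinlock disappear. As a rough user-space sketch of just that recovery pattern (the struct names below are made up for illustration, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the embedded-shrinker pattern. */
struct my_shrinker { int seeks; };
struct my_private {
	int inactive_count;
	struct my_shrinker shrinker;	/* embedded, like mm.inactive_shrinker */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int count_objects(struct my_shrinker *s)
{
	/* recover the enclosing private struct from the embedded member */
	struct my_private *priv = container_of(s, struct my_private, shrinker);
	return priv->inactive_count;
}

int main(void)
{
	struct my_private priv = { .inactive_count = 42 };
	printf("%d\n", count_objects(&priv.shrinker));	/* prints 42 */
	return 0;
}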
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8d..29d014c48ca2 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->pages[page],
+ i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
+ obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
+ __func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
+ gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+ backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
+ (int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc6..3d39005540aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
- list_add(&obj_priv->evict_list, unwind);
- drm_gem_object_reference(&obj_priv->base);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
+ list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
+ return drm_mm_scan_add_block(obj->gtt_space);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,56 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj_priv, &unwind_list))
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
+ if (obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- if (obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (! obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
- drm_gem_object_unreference(&obj_priv->base);
+
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +150,34 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj_priv = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
- evict_list);
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- list_move(&obj_priv->evict_list, &eviction_list);
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
continue;
}
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj_priv = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
- evict_list);
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(&obj_priv->base);
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj);
+
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
return ret;
}
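The unwind list above relies on list_add() prepending and list_first_entry() popping from the head, so scan candidates are rolled back in the reverse order they were added. A minimal stand-alone sketch of that LIFO property (plain malloc'd nodes here, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *push(struct node *head, int id)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		exit(1);
	n->id = id;
	n->next = head;		/* prepend, as list_add() does */
	return n;
}

int main(void)
{
	struct node *head = NULL, *n;
	int i;

	for (i = 1; i <= 3; i++)
		head = push(head, i);		/* scan order: 1, 2, 3 */

	while ((n = head) != NULL) {		/* pop from the head */
		printf("unwind %d\n", n->id);	/* prints 3, 2, 1 */
		head = n->next;
		free(n);
	}
	return 0;
}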
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -176,36 +196,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- BUG_ON(!lists_empty);
-
- return 0;
+ return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- mm_list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 000000000000..d2f445e825f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1377 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+ uint32_t invalidate_domains;
+ uint32_t flush_domains;
+ uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invaliding though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped by GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ struct change_domains *cd)
+{
+ uint32_t invalidate_domains = 0, flush_domains = 0;
+
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (obj->base.pending_write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->base.write_domain &&
+ (((obj->base.write_domain != obj->base.pending_read_domains ||
+ obj->ring != ring)) ||
+ (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+ flush_domains |= obj->base.write_domain;
+ invalidate_domains |=
+ obj->base.pending_read_domains & ~obj->base.write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ /* blow away mappings if mapped through GTT */
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+ i915_gem_release_mmap(obj);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+ * of our domain changes in execbuffers (which clears objects'
+ * write_domains). So if we have a current write domain that we
+ * aren't changing, set pending_write_domain to that.
+ */
+ if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+ obj->base.pending_write_domain = obj->base.write_domain;
+
+ cd->invalidate_domains |= invalidate_domains;
+ cd->flush_domains |= flush_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= obj->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= ring->id;
+}
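Stripped of rings and fences, the accumulation rule above is: flush the old write domain when the new readers differ from it, and invalidate any reader domains the object did not already have. A deliberately simplified user-space model of just that rule (the domain bits and function name are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU	(1u << 0)
#define DOMAIN_RENDER	(1u << 1)

/* Flush the stale write domain, invalidate the newly-added read domains. */
static void track(uint32_t old_read, uint32_t old_write, uint32_t new_read,
		  uint32_t *invalidate, uint32_t *flush)
{
	if (old_write && old_write != new_read)
		*flush |= old_write;
	*invalidate |= new_read & ~old_read;
}

int main(void)
{
	uint32_t inv = 0, fl = 0;

	/* a CPU-written buffer about to be read by the render engine */
	track(DOMAIN_CPU, DOMAIN_CPU, DOMAIN_RENDER, &inv, &fl);
	printf("invalidate=%#x flush=%#x\n", (unsigned)inv, (unsigned)fl);
	return 0;	/* prints invalidate=0x2 flush=0x1 */
}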
+
+struct eb_objects {
+ int and;
+ struct hlist_head buckets[0];
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+ struct eb_objects *eb;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ while (count > size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_KERNEL);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct drm_i915_gem_object *obj;
+
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+ kfree(eb);
+}
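eb_create() sizes the bucket array to a power of two so that eb_add_object() and eb_get_object() can pick a bucket with handle & eb->and instead of a modulo. A small sketch of that masking trick with illustrative numbers (the real count is derived from PAGE_SIZE and the object count, not fixed at 1024):

#include <stdio.h>

int main(void)
{
	unsigned int count = 1024;		/* must be a power of two */
	unsigned int mask = count - 1;
	unsigned long handles[] = { 3, 1027, 70000 };
	unsigned int i;

	for (i = 0; i < sizeof(handles) / sizeof(handles[0]); i++)
		printf("handle %lu -> bucket %lu\n",
		       handles[i], handles[i] & mask);
	return 0;
}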
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+ /* we already hold a reference to all valid objects */
+ target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+ if (unlikely(target_obj == NULL))
+ return -ENOENT;
+
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain)) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ return ret;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset > obj->base.size - 4)) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return ret;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return ret;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (unlikely(reloc->delta >= target_obj->size)) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ return ret;
+ }
+
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
+
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
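Ignoring the CPU-domain versus GTT-mapping split, a single relocation rewrites one dword of the batch to the target object's GTT offset plus the supplied delta, then records that offset as presumed_offset so an unmoved target can be skipped next time. A hedged user-space sketch of the write itself (names invented, no hardware mapping involved):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Rewrite the dword at 'offset' inside 'batch' to point at the target. */
static void apply_reloc(void *batch, uint32_t offset,
			uint32_t target_gtt_offset, uint32_t delta)
{
	uint32_t value = target_gtt_offset + delta;

	memcpy((char *)batch + offset, &value, sizeof(value));
}

int main(void)
{
	uint32_t batch[4] = { 0 };

	apply_reloc(batch, 8, 0x10000000, 0x40);
	printf("dword 2 = 0x%08x\n", (unsigned)batch[2]);	/* 0x10000040 */
	return 0;
}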
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
+ if (ret)
+ return ret;
+
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int ret, retry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ struct list_head ordered_objects;
+
+ INIT_LIST_HEAD(&ordered_objects);
+ while (!list_empty(objects)) {
+ struct drm_i915_gem_exec_object2 *entry;
+ bool need_fence, need_mappable;
+
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ entry = obj->exec_entry;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if (need_mappable)
+ list_move(&obj->exec_list, &ordered_objects);
+ else
+ list_move_tail(&obj->exec_list, &ordered_objects);
+
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ }
+ list_splice(&ordered_objects, objects);
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+ * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ ret = 0;
+
+ /* Unbind any ill-fitting objects, otherwise pin them. */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool need_fence, need_mappable;
+ if (!obj->gtt_space)
+ continue;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj);
+ else
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ goto err;
+
+ entry++;
+ }
+
+ /* Bind fresh objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool need_fence;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ if (!obj->gtt_space) {
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ break;
+ }
+
+ if (has_fenced_gpu_access) {
+ if (need_fence) {
+ ret = i915_gem_object_get_fence(obj, ring, 1);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ break;
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ }
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+ }
+
+ if (ret != -ENOSPC || retry > 1)
+ return ret;
+
+ /* First attempt, just clear anything that is purgeable.
+ * Second attempt, clear the entire GTT.
+ */
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ if (ret)
+ return ret;
+
+ retry++;
+ } while (1);
+
+err:
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ while (objects != &obj->exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ }
+
+ return ret;
+}
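The reservation above sits inside an escalation loop: on -ENOSPC it first evicts only purgeable objects, then the whole GTT, and only then gives up. A stand-alone sketch of that control flow with stubbed-out reserve_all()/evict() helpers (both hypothetical):

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for the reservation and eviction paths; reserve_all()
 * pretends to succeed only on the third attempt. */
static int reserve_all(int attempt) { return attempt < 2 ? -ENOSPC : 0; }
static int evict(int purgeable_only)
{
	printf("evict %s\n", purgeable_only ? "purgeable only" : "everything");
	return 0;
}

int main(void)
{
	int retry = 0, ret;

	do {
		ret = reserve_all(retry);
		if (ret != -ENOSPC || retry > 1)
			break;
		/* first retry drops purgeable objects, second clears the GTT */
		ret = evict(retry == 0);
		if (ret)
			break;
		retry++;
	} while (1);

	printf("final ret = %d\n", ret);	/* 0 after two evictions */
	return 0;
}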
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct list_head *objects,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ int *reloc_offset;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL || reloc_offset == NULL) {
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+ }
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects);
+ if (ret)
+ goto err;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ int offset = obj->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ reloc + reloc_offset[offset]);
+ if (ret)
+ goto err;
+ }
+
+ /* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ drm_free_large(reloc_offset);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ uint32_t flush_rings)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ wmb();
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (flush_rings & (1 << i)) {
+ ret = i915_gem_flush_ring(dev,
+ &dev_priv->ring[i],
+ invalidate_domains,
+ flush_domains);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj, true);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ if (seqno == from->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ret = i915_add_request(obj->base.dev, NULL, request, from);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
+ }
+
+ from->sync_seqno[idx] = seqno;
+ return intel_ring_sync(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct change_domains cd;
+ int ret;
+
+ cd.invalidate_domains = 0;
+ cd.flush_domains = 0;
+ cd.flush_rings = 0;
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+ if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ cd.invalidate_domains,
+ cd.flush_domains);
+#endif
+ ret = i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains,
+ cd.flush_rings);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
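The key point of validate_exec_list() is to bound relocation_count against INT_MAX divided by the entry size before multiplying, so a hostile count cannot overflow the length handed to access_ok() and fault_in_pages_readable(). A small user-space sketch of the same guard (the struct below is a 32-byte stand-in, not the real UAPI type):

#include <limits.h>
#include <stdio.h>

struct reloc_entry { unsigned long long a, b, c, d; };	/* 32-byte stand-in */

/* Return the byte length for 'count' entries, or -1 if it would overflow. */
static int check_length(unsigned int count)
{
	if (count > INT_MAX / sizeof(struct reloc_entry))
		return -1;
	return (int)(count * sizeof(struct reloc_entry));
}

int main(void)
{
	printf("%d\n", check_length(10));		/* 320 */
	printf("%d\n", check_length(0x10000000));	/* -1, rejected */
	return 0;
}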
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int flips;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->base.write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask, ret;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+ }
+
+ return 0;
+}
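The flip handling walks the accumulated mask one bit per plane, emitting a wait only for planes whose bit is set and stopping as soon as no higher bits remain. The loop shape in isolation, with an illustrative mask value:

#include <stdio.h>

int main(void)
{
	unsigned int flips = 0x5;	/* planes 0 and 2 have pending flips */
	int plane;

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		printf("wait for plane %d flip\n", plane);
	}
	return 0;
}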
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring, seqno);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->pending_gpu_write = true;
+ list_move_tail(&obj->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(ring->dev, obj);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ obj->base.write_domain);
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 invalidate;
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires.
+ *
+ * The sampler always gets flushed on i965 (sigh).
+ */
+ invalidate = I915_GEM_DOMAIN_COMMAND;
+ if (INTEL_INFO(dev)->gen >= 4)
+ invalidate |= I915_GEM_DOMAIN_SAMPLER;
+ if (ring->flush(ring, invalidate, 0)) {
+ i915_gem_next_request_seqno(dev, ring);
+ return;
+ }
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL || i915_add_request(dev, file, request, ring)) {
+ i915_gem_next_request_seqno(dev, ring);
+ kfree(request);
+ }
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ u32 exec_start, exec_len;
+ u32 seqno;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->ring[RCS];
+ break;
+ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[BCS];
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring,
+ I915_EXEC_CONSTANTS_MASK << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)(uintptr_t)
+ args->cliprects_ptr,
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+ }
+
+ /* take note of the batch buffer before we might reorder the lists */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+ &objects, eb,
+ exec,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ if (batch_obj->base.pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+ if (ret)
+ goto err;
+
+ seqno = i915_gem_next_request_seqno(dev, ring);
+ for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
+ if (seqno < ring->sync_seqno[i]) {
+ /* The GPU can not handle its semaphore value wrapping,
+ * so every billion or so execbuffers, we need to stall
+ * the GPU in order to reset the counters.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto err;
+
+ BUG_ON(ring->sync_seqno[i]);
+ }
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ kfree(cliprects);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 000000000000..b0abdc64aa9f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* First fill our portion of the GTT with scratch pages */
+ intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+ }
+
+ intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ret = intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+
+ return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ if (obj->sg_list) {
+ intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+ obj->sg_list = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be1..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,10 +181,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
 /* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
- int tile_width;
+ int tile_width, tile_height;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
}
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+ /* i8xx is strange: it has 2 interleaved rows of tiles, so it needs an even
+ * number of tile rows. */
+ if (IS_GEN2(dev))
+ tile_height *= 2;
+
+ /* Size needs to be aligned to a full tile row */
+ if (size & (tile_height * stride - 1))
+ return false;
+
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
@@ -232,32 +246,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
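The new size check rejects any buffer that is not a whole number of tile rows; because stride and tile_height are both powers of two on the affected parts, the test reduces to masking with tile_height * stride - 1. A worked example with illustrative Y-tiling numbers:

#include <stdio.h>

int main(void)
{
	unsigned int stride = 512;			/* bytes per row, power of two */
	unsigned int tile_height = 32;			/* rows per Y tile */
	unsigned int row = tile_height * stride;	/* 16384 bytes per tile row */
	unsigned int sizes[] = { 3 * row, 3 * row + 4096 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("size %u -> %s\n", sizes[i],
		       (sizes[i] & (row - 1)) ? "rejected" : "ok");
	return 0;
}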
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->gtt_space == NULL)
- return true;
+ u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
-
- if (IS_GEN3(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
+ /*
+ * On previous chips the object needs to be aligned to the size of
+ * the smallest fence register that can contain it.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ if (obj->gtt_space->size != size)
+ return false;
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
return true;
}
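The fence check doubles a minimum fence size (1MB on gen3, 512KB on earlier parts) until it covers the object, then requires the GTT node to have exactly that size and alignment. A worked example of the rounding step alone (sizes are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int obj_size = 1536 * 1024;	/* a 1.5MB object */
	unsigned int size = 1024 * 1024;	/* gen3 minimum fence size */

	while (size < obj_size)
		size <<= 1;

	printf("fence size = %uKB\n", size / 1024);	/* 2048KB */
	return 0;
}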
@@ -267,30 +293,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- if (obj_priv->pin_count) {
- drm_gem_object_unreference_unlocked(obj);
+ if (obj->pin_count) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -324,34 +349,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
- if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj, true);
- else
- i915_gem_release_mmap(obj);
+ i915_gem_release_mmap(obj);
- if (ret != 0) {
- args->tiling_mode = obj_priv->tiling_mode;
- args->stride = obj_priv->stride;
- goto err;
- }
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
}
-err:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
@@ -359,22 +378,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -394,7 +411,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -424,46 +441,44 @@ i915_gem_swizzle_page(struct page *page)
}
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL)
+ if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- i915_gem_swizzle_page(obj_priv->pages[i]);
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->pages[i]);
+ set_page_dirty(obj->pages[i]);
}
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -471,9 +486,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}
for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ if (page_to_phys(obj->pages[i]) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7b..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,64 +64,24 @@
#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
DRM_I915_VBLANK_PIPE_B)
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
- }
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
- }
-}
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
- }
-}
-
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
- }
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
@@ -144,7 +104,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
@@ -156,16 +116,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
*/
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +139,8 @@ void intel_enable_asle (struct drm_device *dev)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -243,6 +208,103 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %d\n", pipe);
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(pipe));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
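On pre-gen4 hardware the only scanout information is a single frame-relative pixel counter, which the new helper splits into a line number and an intra-line offset using the horizontal total, then tests against the vblank window. A self-contained sketch of that arithmetic (names are illustrative only, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the pre-gen4 decode in i915_get_crtc_scanoutpos(): split a
 * raw pixel count since start of frame into vertical and horizontal
 * positions, then report whether the beam sits inside the vblank
 * region read from the VBLANK register.
 */
static bool decode_scanoutpos(uint32_t pixel_count, uint32_t htotal,
			      uint32_t vbl_start, uint32_t vbl_end,
			      int *vpos, int *hpos)
{
	*vpos = pixel_count / htotal;
	*hpos = pixel_count - (*vpos * htotal);

	return *vpos >= (int)vbl_start && *vpos <= (int)vbl_end;
}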
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ return -EBUSY;
+ }
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc);
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -254,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -297,20 +361,109 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno = ring->get_seqno(dev, ring);
- ring->irq_gem_seqno = seqno;
+ u32 seqno;
+
+ if (ring->obj == NULL)
+ return;
+
+ seqno = ring->get_seqno(ring);
trace_i915_gem_request_complete(dev, seqno);
+
+ ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
+
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u8 new_delay = dev_priv->cur_delay;
+ u32 pm_iir;
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (!pm_iir)
+ return;
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->min_delay) {
+ new_delay = dev_priv->min_delay;
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+ ((new_delay << 16) & 0x3f0000));
+ } else {
+ /* Make sure we continue to get down interrupts
+ * until we hit the minimum frequency */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+ }
+
+ }
+
+ gen6_set_rps(dev, new_delay);
+ dev_priv->cur_delay = new_delay;
+
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+}
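gen6_pm_irq_handler() steps the render frequency delay one notch per interrupt and clamps it to the [min_delay, max_delay] window before calling gen6_set_rps(). A small self-contained sketch of just that stepping logic, with hypothetical parameter names:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration of the stepping in gen6_pm_irq_handler(): one step up on
 * an "up threshold" interrupt, one step down on a "down threshold" or
 * timeout interrupt, always clamped to [min_delay, max_delay].
 */
static uint8_t rps_step(uint8_t cur, uint8_t min_delay, uint8_t max_delay,
			bool up_event)
{
	uint8_t new_delay = cur;

	if (up_event) {
		if (cur != max_delay)
			new_delay = cur + 1;
		if (new_delay > max_delay)
			new_delay = max_delay;
	} else {
		if (cur != min_delay)
			new_delay = cur - 1;
		if (new_delay < min_delay)
			new_delay = min_delay;
	}

	return new_delay;
}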
+
+static void pch_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 pch_iir;
+
+ pch_iir = I915_READ(SDEIIR);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK) >>
+ SDE_AUDIO_POWER_SHIFT);
+
+ if (pch_iir & SDE_GMBUS)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_HDCP_MASK)
+ DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_TRANS_MASK)
+ DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+ if (pch_iir & SDE_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK) {
+ u32 fdia, fdib;
+
+ fdia = I915_READ(FDI_RXA_IIR);
+ fdib = I915_READ(FDI_RXB_IIR);
+ DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+ }
+
+ if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+ if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+ DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+ if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+ DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +474,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
@@ -344,12 +499,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY)
- notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->bsd_ring);
- if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->blt_ring);
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -371,14 +526,20 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
drm_handle_vblank(dev, 1);
/* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ pch_irq_handler(dev);
+ }
if (de_iir & DE_PCU_EVENT) {
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
i915_handle_rps_change(dev);
}
+ if (IS_GEN6(dev))
+ gen6_pm_irq_handler(dev);
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +547,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
done:
I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
return ret;
}
@@ -422,29 +583,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
- struct drm_gem_object *src)
+i915_error_object_create(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
- struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;
- if (src == NULL)
+ if (src == NULL || src->pages == NULL)
return NULL;
- src_priv = to_intel_bo(src);
- if (src_priv->pages == NULL)
- return NULL;
-
- page_count = src->size / PAGE_SIZE;
+ page_count = src->base.size / PAGE_SIZE;
dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
+ reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -466,7 +621,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
- dst->gtt_offset = src_priv->gtt_offset;
+ dst->gtt_offset = src->gtt_offset;
return dst;
@@ -503,53 +658,98 @@ i915_error_state_free(struct drm_device *dev,
kfree(error);
}
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+ int count,
+ struct list_head *head)
{
- u32 cmd;
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : 0;
+ err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
+
+ if (++i == count)
+ break;
- if (IS_I830(dev) || IS_845G(dev))
- cmd = MI_BATCH_BUFFER;
- else if (INTEL_INFO(dev)->gen >= 4)
- cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- else
- cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+ err++;
+ }
- return ring[0] == cmd ? ring[1] : 0;
+ return i;
}
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 head, bbaddr;
- u32 *ring;
-
- /* Locate the current position in the ringbuffer and walk back
- * to find the most recently dispatched batch buffer.
- */
- bbaddr = 0;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
- if (bbaddr)
- break;
}
+}
- if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->render_ring.virtual_start
- + dev_priv->render_ring.size);
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
- if (bbaddr)
- break;
- }
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ seqno = ring->get_seqno(ring);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
}
- return bbaddr;
+ return NULL;
}
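i915_error_first_batchbuffer() relies on i915_seqno_passed() to skip objects whose rendering has already completed. That helper is conventionally the wraparound-safe signed-difference comparison sketched below; the sketch is only to make the filtering easier to follow, and the driver's own definition is authoritative.

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "seq1 has passed seq2" test (signed difference). */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}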
/**
@@ -564,12 +764,10 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
- struct drm_gem_object *batchbuffer[2];
unsigned long flags;
- u32 bbaddr;
- int count;
+ int i;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
@@ -585,20 +783,33 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_DEBUG_DRIVER("generating error event\n");
- error->seqno =
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (INTEL_INFO(dev)->gen < 4) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- } else {
+ error->error = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+
+ error->bcs_acthd = I915_READ(BCS_ACTHD);
+ error->bcs_ipehr = I915_READ(BCS_IPEHR);
+ error->bcs_ipeir = I915_READ(BCS_IPEIR);
+ error->bcs_instdone = I915_READ(BCS_INSTDONE);
+ error->bcs_seqno = 0;
+ if (dev_priv->ring[BCS].get_seqno)
+ error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+ error->vcs_acthd = I915_READ(VCS_ACTHD);
+ error->vcs_ipehr = I915_READ(VCS_IPEHR);
+ error->vcs_ipeir = I915_READ(VCS_IPEIR);
+ error->vcs_instdone = I915_READ(VCS_INSTDONE);
+ error->vcs_seqno = 0;
+ if (dev_priv->ring[VCS].get_seqno)
+ error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
error->instdone = I915_READ(INSTDONE_I965);
@@ -606,118 +817,63 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
}
+ i915_gem_record_fences(dev, error);
- bbaddr = i915_ringbuffer_last_batch(dev);
-
- /* Grab the current batchbuffer, most likely to have crashed. */
- batchbuffer[0] = NULL;
- batchbuffer[1] = NULL;
- count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- count++;
- }
- /* Scan the other lists for completeness for those bizarre errors. */
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- if (batchbuffer[1] != batchbuffer[0])
- error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
- else
- error->batchbuffer[1] = NULL;
+ /* Record the active batchbuffers */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ error->batchbuffer[i] =
+ i915_error_first_batchbuffer(dev_priv,
+ &dev_priv->ring[i]);
/* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev,
- dev_priv->render_ring.gem_object);
+ error->ringbuffer = i915_error_object_create(dev_priv,
+ dev_priv->ring[RCS].obj);
- /* Record buffers on the active list. */
+ /* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
- error->active_bo_count = 0;
+ error->pinned_bo = NULL;
- if (count)
- error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
- GFP_ATOMIC);
+ i = 0;
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+ i++;
+ error->active_bo_count = i;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ i++;
+ error->pinned_bo_count = i - error->active_bo_count;
- if (error->active_bo) {
- int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- error->active_bo[i].size = obj->size;
- error->active_bo[i].name = obj->name;
- error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
- error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
- error->active_bo[i].read_domains = obj->read_domains;
- error->active_bo[i].write_domain = obj->write_domain;
- error->active_bo[i].fence_reg = obj_priv->fence_reg;
- error->active_bo[i].pinned = 0;
- if (obj_priv->pin_count > 0)
- error->active_bo[i].pinned = 1;
- if (obj_priv->user_pin_count > 0)
- error->active_bo[i].pinned = -1;
- error->active_bo[i].tiling = obj_priv->tiling_mode;
- error->active_bo[i].dirty = obj_priv->dirty;
- error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
- if (++i == count)
- break;
- }
- error->active_bo_count = i;
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+ if (i) {
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+ GFP_ATOMIC);
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
}
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_bo_list(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_bo_list(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.pinned_list);
+
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
@@ -775,7 +931,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +939,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -794,7 +950,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -825,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
+ POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -842,12 +998,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
+ POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
@@ -870,7 +1026,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -884,11 +1040,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- wake_up_all(&dev_priv->blt_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[BCS].irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1055,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
@@ -918,13 +1074,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- obj_priv = to_intel_bo(work->pending_flip_obj);
+ obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -970,7 +1126,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
pipeb_stats = I915_READ(PIPEBSTAT);
@@ -993,7 +1149,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
@@ -1026,9 +1182,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- notify_ring(dev, &dev_priv->bsd_ring);
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1042,18 +1198,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (pipea_stats & vblank_status) {
+ if (pipea_stats & vblank_status &&
+ drm_handle_vblank(dev, 0)) {
vblank++;
- drm_handle_vblank(dev, 0);
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
}
}
- if (pipeb_stats & vblank_status) {
+ if (pipeb_stats & vblank_status &&
+ drm_handle_vblank(dev, 1)) {
vblank++;
- drm_handle_vblank(dev, 1);
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
@@ -1101,12 +1257,13 @@ static int i915_emit_irq(struct drm_device * dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
return dev_priv->counter;
}
@@ -1114,12 +1271,11 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-
- if (dev_priv->trace_irq_seqno == 0)
- render_ring->user_irq_get(dev, render_ring);
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- dev_priv->trace_irq_seqno = seqno;
+ if (dev_priv->trace_irq_seqno == 0 &&
+ ring->irq_get(ring))
+ dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1283,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1297,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- render_ring->user_irq_get(dev, render_ring);
- DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- render_ring->user_irq_put(dev, render_ring);
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1321,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1209,9 +1367,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1377,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1231,15 +1389,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1464,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->render_ring.request_list.prev,
- struct drm_i915_gem_request, list);
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+ ring->name,
+ ring->waiting_seqno,
+ ring->get_seqno(ring));
+ wake_up_all(&ring->irq_queue);
+ *err = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ if (IS_GEN6(dev) &&
+ (tmp & RING_WAIT_SEMAPHORE)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
}
/**
@@ -1325,6 +1521,17 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
+ bool err = false;
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+ dev_priv->hangcheck_count = 0;
+ if (err)
+ goto repeat;
+ return;
+ }
if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
@@ -1336,38 +1543,6 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone1 = I915_READ(INSTDONE1);
}
- /* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
- bool missed_wakeup = false;
-
- dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- wake_up_all(&dev_priv->render_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->bsd_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->blt_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
- wake_up_all(&dev_priv->blt_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (missed_wakeup)
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- return;
- }
-
if (dev_priv->last_acthd == acthd &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1555,17 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- u32 tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- goto out;
- }
+
+ if (kick_ring(&dev_priv->ring[RCS]))
+ goto repeat;
+
+ if (HAS_BSD(dev) &&
+ kick_ring(&dev_priv->ring[VCS]))
+ goto repeat;
+
+ if (HAS_BLT(dev) &&
+ kick_ring(&dev_priv->ring[BCS]))
+ goto repeat;
}
i915_handle_error(dev, true);
@@ -1399,7 +1579,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
-out:
+repeat:
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1597,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
+ POSTING_READ(DEIER);
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
+ POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
- (void) I915_READ(SDEIER);
+ POSTING_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1616,34 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+ u32 render_irqs;
u32 hotplug_mask;
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+ dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
-
- if (IS_GEN6(dev)) {
- render_mask =
- GT_PIPE_NOTIFY |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- }
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
- dev_priv->gt_irq_mask_reg = ~render_mask;
- dev_priv->gt_irq_enable_reg = render_mask;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev)) {
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
- I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
- I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
- }
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1475,15 +1651,15 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ hotplug_mask |= SDE_AUX_MASK;
}
- dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- dev_priv->pch_irq_enable_reg = hotplug_mask;
+ dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
- (void) I915_READ(SDEIER);
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1519,7 +1695,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
}
/*
@@ -1532,11 +1708,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1544,7 +1720,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1553,7 +1729,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@@ -1571,9 +1747,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f43429279..729d4233b763 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -139,6 +145,8 @@
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
@@ -153,17 +161,31 @@
#define MI_MM_SPACE_PHYSICAL (0<<8)
#define MI_SAVE_EXT_STATE_EN (1<<3)
#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
-#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
+/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: Simply issue x
+ * address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_INVALIDATE_BSD (1<<7)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
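The comment above MI_LOAD_REGISTER_IMM spells out the usage rules: a leading MI_NOOP and up to 16 address/value pairs. The fragment below is purely an illustration of a single-register load following those rules, written with the legacy BEGIN_LP_RING()/OUT_RING() helpers that appear elsewhere in this patch; "reg" and "val" are placeholders, and this is not code from this series.

/*
 * Illustrative only: one register write via MI_LOAD_REGISTER_IMM.
 * Four dwords total: MI_NOOP, the LRI header, then one address/value
 * pair.
 */
if (BEGIN_LP_RING(4) == 0) {
	OUT_RING(MI_NOOP);
	OUT_RING(MI_LOAD_REGISTER_IMM(1));
	OUT_RING(reg);
	OUT_RING(val);
	ADVANCE_LP_RING();
}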
/*
* 3D instructions used by the kernel
*/
@@ -256,10 +278,6 @@
* Instruction and interrupt control regs
*/
#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +286,14 @@
#define RING_HEAD(base) ((base)+0x34)
#define RING_START(base) ((base)+0x38)
#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
+#define RING_IMR(base) ((base)+0xa8)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +308,17 @@
#define RING_INVALID 0x00000000
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
@@ -305,11 +335,42 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define VCS_INSTDONE 0x1206C
+#define VCS_IPEIR 0x12064
+#define VCS_IPEHR 0x12068
+#define VCS_ACTHD 0x12074
+#define BCS_INSTDONE 0x2206C
+#define BCS_IPEIR 0x22064
+#define BCS_IPEHR 0x22068
+#define BCS_ACTHD 0x22074
+
+#define ERROR_GEN6 0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 11)
+#define GFX_MODE 0x02520
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -454,6 +515,10 @@
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -461,7 +526,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_IMR 0x120a8
-#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+#define GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GEN6_BSD_RNCID 0x12198
@@ -541,6 +606,18 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
/*
* GPIO regs
@@ -900,6 +977,8 @@
*/
#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1061,9 +1140,50 @@
#define RCBMINAVG 0x111a0
#define RCUPEI 0x111b0
#define RCDNEI 0x111b4
-#define MCHBAR_RENDER_STANDBY 0x111b8
-#define RCX_SW_EXIT (1<<23)
-#define RSX_STATUS_MASK 0x00700000
+#define RSTDBYCTL 0x111b8
+#define RS1EN (1<<31)
+#define RS2EN (1<<30)
+#define RS3EN (1<<29)
+#define D3RS3EN (1<<28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
+#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7<<20)
+#define RSX_STATUS_ON (0<<20)
+#define RSX_STATUS_RC1 (1<<20)
+#define RSX_STATUS_RC1E (2<<20)
+#define RSX_STATUS_RS1 (3<<20)
+#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7<<20)
+#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
+#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1<<17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3<<14)
+#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1<<14)
+#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3<<12)
+#define SLOW_RS123 (0<<12)
+#define SLOW_RS23 (1<<12)
+#define SLOW_RS3 (2<<12)
+#define NORMAL_RS123 (3<<12)
+#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
+#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3<<4)
+#define RS_CSTATE_C367_RS1 (0<<4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define RS_CSTATE_RSVD (2<<4)
+#define RS_CSTATE_C367_RS2 (3<<4)
+#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
#define VIDCTL 0x111c0
#define VIDSTS 0x111c8
#define VIDSTART 0x111cc /* 8 bits */
@@ -1119,6 +1239,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
/*
* Logical Context regs
*/
@@ -1168,7 +1292,6 @@
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
/* VGA port control */
@@ -1430,17 +1553,7 @@
/* Backlight control */
#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -2182,8 +2295,10 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2271,8 +2386,13 @@
/* Memory latency timer register */
#define MLTR_ILK 0x11222
+#define MLTR_WM1_SHIFT 0
+#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
+#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
@@ -2291,6 +2411,40 @@
#define ILK_FIFO_LINE_SIZE 64
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the address where we get all kinds of latency value */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2505,10 @@
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
/* Display A control */
#define DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2464,6 +2622,8 @@
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
#define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
@@ -2589,6 +2749,8 @@
#define GTIER 0x4401c
#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
@@ -2600,6 +2762,8 @@
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
+
/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
@@ -2612,12 +2776,41 @@
/* PCH */
/* south display engine interrupt */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
#define SDE_CRT_HOTPLUG (1 << 11)
#define SDE_PORTD_HOTPLUG (1 << 10)
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
/* CPT */
#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
@@ -2679,6 +2872,7 @@
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
@@ -3057,10 +3251,74 @@
#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
/* SNB B-stepping */
-#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
-#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
-#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_ACK 0x130090
+
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
+#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_READ_OC_PARAMS 0xc
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_DATA 0x138128
+
#endif /* _I915_REG_H_ */
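The SSKPD_WM*_SHIFT and SSKPD_WM_MASK definitions above pack four per-level latency fields into a single 32-bit register; SNB_LATENCY() simply shifts and masks the requested field out of an MMIO read. Below is a minimal stand-alone sketch of that unpacking, assuming only the field layout shown in the hunk; the raw register value, the loop, and the program itself are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define SSKPD_WM_MASK   0x3f
#define SSKPD_WM0_SHIFT 0
#define SSKPD_WM1_SHIFT 8
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24

int main(void)
{
	/* hypothetical raw value as read from MCHBAR_MIRROR_BASE_SNB + SSKPD */
	uint32_t sskpd = 0x0c0a0604;
	const unsigned int shift[] = { SSKPD_WM0_SHIFT, SSKPD_WM1_SHIFT,
				       SSKPD_WM2_SHIFT, SSKPD_WM3_SHIFT };

	for (int i = 0; i < 4; i++) {
		/* same shift-and-mask that the SNB_READ_WMn_LATENCY() macros perform */
		unsigned int field = (sskpd >> shift[i]) & SSKPD_WM_MASK;
		printf("WM%d latency field: %u\n", i, field);
	}
	return 0;
}

The watermark code added to intel_display.c later in this diff scales these raw fields (e.g. SNB_READ_WM1_LATENCY() * 500) into nanoseconds before handing them to the compute helpers.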
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d25da58..0521ecf26017 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
return;
}
@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ }
+
+
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
@@ -694,7 +740,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_WRITE(RSTDBYCTL,
dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -765,14 +811,16 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
- I915_READ(MCHBAR_RENDER_STANDBY);
+ I915_READ(RSTDBYCTL);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +836,6 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- }
-
return 0;
}
@@ -823,27 +849,6 @@ int i915_restore_state(struct drm_device *dev)
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- break;
- }
-
i915_restore_display(dev);
/* Interrupt state */
@@ -860,13 +865,16 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
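One subtlety in the fence save/restore blocks moved above: in the save path the gen-3 case intentionally falls through into the gen-2 case, because 945G/945GM/G33 carry eight extra fence registers on top of the eight that every gen-2/3 part has. A stand-alone sketch of that control flow, assuming only the structure of the hunk (register accesses replaced by printfs, helper name invented):

#include <stdio.h>

static void save_fences(int gen, int is_945_or_g33)
{
	switch (gen) {
	case 6:
		printf("save 16 fences from the Sandybridge register block\n");
		break;
	case 5:
	case 4:
		printf("save 16 fences from the 965 register block\n");
		break;
	case 3:
		if (is_945_or_g33)
			printf("save 8 extra fences (FENCE_REG_945_8 block)\n");
		/* deliberate fall-through: gen 3 also has the base gen-2 set */
	case 2:
		printf("save 8 fences from the 830 register block\n");
		break;
	}
}

int main(void)
{
	save_fences(3, 1);	/* e.g. a G33 part: both gen-3 and gen-2 lines print */
	return 0;
}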
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc14..7f0fc3ed61aa 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>
#include <drm/drmP.h>
+#include "i915_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
TRACE_EVENT(i915_gem_object_create,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->size = obj->size;
+ __entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
- TP_ARGS(obj, gtt_offset),
+ TP_ARGS(obj, gtt_offset, mappable),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
+ __field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
+ __entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x",
- __entry->obj, __entry->gtt_offset)
+ TP_printk("obj=%p, gtt_offset=%08x%s",
+ __entry->obj, __entry->gtt_offset,
+ __entry->mappable ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_object_change_domain,
- TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
__entry->read_domains, __entry->write_domain)
);
-TRACE_EVENT(i915_gem_object_get_fence,
-
- TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
- TP_ARGS(obj, fence, tiling_mode),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(int, fence)
- __field(int, tiling_mode)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- __entry->fence = fence;
- __entry->tiling_mode = tiling_mode;
- ),
-
- TP_printk("obj=%p, fence=%d, tiling=%d",
- __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
DECLARE_EVENT_CLASS(i915_gem_object,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);
TRACE_EVENT(i915_flip_request,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
);
TRACE_EVENT(i915_flip_complete,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
+TRACE_EVENT(i915_reg_rw,
+ TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+ TP_ARGS(cmd, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(int, cmd)
+ __field(uint32_t, reg)
+ __field(uint64_t, val)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->reg = reg;
+ __entry->val = (uint64_t)val;
+ __entry->len = len;
+ ),
+
+ TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+ __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
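The new i915_reg_rw event above only defines the tracepoint; TRACE_EVENT() expands it into a trace_i915_reg_rw() inline that register-access paths elsewhere in the driver can call, and those call sites are not part of this hunk. The stand-alone mock below just shows the argument convention the event expects (a command character, the MMIO offset, the value widened to 64 bits, and the access width), with the print format copied from TP_printk(); everything else is invented for the demo.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the trace_i915_reg_rw() hook that TRACE_EVENT() generates */
static void trace_i915_reg_rw(int cmd, uint32_t reg, uint64_t val, int len)
{
	/* mirrors the TP_printk() format string in the event above */
	printf("cmd=%c, reg=0x%x, val=0x%llx, len=%d\n",
	       cmd, reg, (unsigned long long)val, len);
}

int main(void)
{
	uint32_t reg = 0x42020;		/* arbitrary MMIO offset for the demo */
	uint32_t val = 0x00000180;

	/* a register-write wrapper would log before touching the hardware */
	trace_i915_reg_rw('W', reg, val, sizeof(val));
	return 0;
}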
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200ed650..0b44956c336b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -264,17 +264,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
- if (dev_priv->lvds_use_ssc) {
- if (IS_I85X(dev))
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 66 : 48;
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 100 : 120;
- else
- dev_priv->lvds_ssc_freq =
- general->ssc_freq ? 100 : 96;
- }
+ if (IS_I85X(dev))
+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
+ else
+ dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
}
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8df574316063..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -30,6 +30,7 @@
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
}
-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
/* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
}
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
- DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
- return true;
+ struct edid *edid;
+ bool is_digital = false;
+
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ */
+ if (edid != NULL) {
+ is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
}
return false;
@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
}
}
- if (intel_crt_detect_ddc(crt))
+ if (intel_crt_detect_ddc(connector))
return connector_status_connected;
if (!force)
@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
- if (intel_crt_detect_ddc(crt))
+ if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crtc, crt);
@@ -515,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
return 0;
}
+static void intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (HAS_PCH_SPLIT(dev))
+ crt->force_hotplug_required = 1;
+}
+
/*
* Routines for controlling stuff on the analog port
*/
@@ -528,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .reset = intel_crt_reset,
.dpms = drm_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca523288aca..e79b25bbee6c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- int refclk = 120;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
- refclk = 100;
-
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc);
+ limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
{
- const intel_limit_t *limit = intel_limit (crtc);
- struct drm_device *dev = crtc->dev;
-
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
- this_err = abs(clock.dot - target) ;
+
+ this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch &&
- obj_priv->fence_reg == dev_priv->cfb_fence &&
+ obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1215,13 +1213,33 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ __gen6_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ __gen6_force_wake_put(dev_priv);
+}
+
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1230,9 +1248,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1242,14 +1260,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1260,10 +1278,17 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1345,7 +1370,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n");
@@ -1384,9 +1409,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
+ obj = intel_fb->obj;
- if (intel_fb->obj->size > dev_priv->cfb_size) {
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1435,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj_priv->tiling_mode != I915_TILING_X) {
+ if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
@@ -1433,14 +1458,13 @@ out_disable:
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined)
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
@@ -1461,7 +1485,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
return ret;
@@ -1474,9 +1498,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence(obj, pipelined, false);
if (ret)
goto err_unpin;
}
@@ -1497,8 +1520,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
@@ -1515,7 +1537,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
@@ -1540,7 +1561,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1573,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj_priv->gtt_offset;
+ Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1619,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(crtc->fb)->obj,
- false);
+ NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1606,23 +1627,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&dev_priv->mm.wedged) ||
+ atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj_priv, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ ret = i915_gem_object_flush_gpu(obj, false);
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1633,8 +1653,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ }
mutex_unlock(&dev->struct_mutex);
@@ -1996,31 +2018,56 @@ static void intel_flush_display_plane(struct drm_device *dev,
static void intel_clear_scanline_wait(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
u32 tmp;
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
- tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- }
+ ring = LP_RING(dev_priv);
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT)
+ I915_WRITE_CTL(ring, tmp);
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
if (crtc->fb == NULL)
return;
- obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
+}
+
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2031,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2044,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2078,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2162,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -2850,6 +2951,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -3383,12 +3517,17 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
static bool ironlake_compute_wm0(struct drm_device *dev,
int pipe,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
int *plane_wm,
int *cursor_wm)
{
struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size = 0;
- int line_time_us, line_count, entries;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc->fb == NULL || !crtc->enabled)
@@ -3400,37 +3539,141 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
- entries = DIV_ROUND_UP(entries,
- ironlake_display_wm_info.cacheline_size);
- *plane_wm = entries + ironlake_display_wm_info.guard_size;
- if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
- *plane_wm = ironlake_display_wm_info.max_wm;
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
- entries = DIV_ROUND_UP(entries,
- ironlake_cursor_wm_info.cacheline_size);
- *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
- if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
- *cursor_wm = ironlake_cursor_wm_info.max_wm;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* fbc has its own way to disable FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
return true;
}
+/*
+ * Compute watermark values for WM[1-3].
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level,
+ int hdisplay, int htotal,
+ int pixel_size, int clock, int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
+
static void ironlake_update_wm(struct drm_device *dev,
int planea_clock, int planeb_clock,
- int sr_hdisplay, int sr_htotal,
+ int hdisplay, int htotal,
int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int plane_wm, cursor_wm, enabled;
- int tmp;
+ int fbc_wm, plane_wm, cursor_wm, enabled;
+ int clock;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3682,12 @@ static void ironlake_update_wm(struct drm_device *dev,
enabled++;
}
- if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3452,57 +3700,151 @@ static void ironlake_update_wm(struct drm_device *dev,
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
- tmp = 0;
- if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
- unsigned long line_time_us;
- int small, large, plane_fbc;
- int sr_clock, entries;
- int line_count, line_size;
- /* Read the self-refresh latency. The unit is 0.5us */
- int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = (sr_htotal * 1000) / sr_clock;
+ if (enabled != 1)
+ return;
- /* Use ns/us then divide to preserve precision */
- line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
- / 1000;
- line_size = sr_hdisplay * pixel_size;
+ clock = planea_clock ? planea_clock : planeb_clock;
- /* Use the minimum of the small and large buffer method for primary */
- small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
- large = line_count * line_size;
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+ clock, ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
- entries = DIV_ROUND_UP(min(small, large),
- ironlake_display_srwm_info.cacheline_size);
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
+ clock, ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
- plane_fbc = entries * 64;
- plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
- plane_wm = entries + ironlake_display_srwm_info.guard_size;
- if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
- plane_wm = ironlake_display_srwm_info.max_wm;
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries,
- ironlake_cursor_srwm_info.cacheline_size);
+static void sandybridge_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int hdisplay, int htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int fbc_wm, plane_wm, cursor_wm, enabled;
+ int clock;
- cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
- if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
- cursor_wm = ironlake_cursor_srwm_info.max_wm;
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
- /* configure watermark and enable self-refresh */
- tmp = (WM1_LP_SR_EN |
- (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
- (plane_fbc << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
- " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+ if (ironlake_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
}
- I915_WRITE(WM1_LP_ILK, tmp);
- /* XXX setup WM2 and WM3 */
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 watermark levels.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (enabled != 1)
+ return;
+
+ clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
}
/**
@@ -3580,6 +3922,11 @@ static void intel_update_watermarks(struct drm_device *dev)
sr_hdisplay, sr_htotal, pixel_size);
}
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+}
+
static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -3642,7 +3989,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
@@ -3660,7 +4007,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc);
+ limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3714,7 +4061,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int lane = 0, link_bw, bpp;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
@@ -3817,7 +4164,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(200);
if (has_edp_encoder) {
- if (dev_priv->lvds_use_ssc) {
+ if (intel_panel_use_ssc(dev_priv)) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3828,13 +4175,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Enable CPU source on CPU attached eDP */
if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- if (dev_priv->lvds_use_ssc)
+ if (intel_panel_use_ssc(dev_priv))
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
else
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
/* Enable SSC on PCH eDP if needed */
- if (dev_priv->lvds_use_ssc) {
+ if (intel_panel_use_ssc(dev_priv)) {
DRM_ERROR("enabling SSC on PCH\n");
temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
}
@@ -3857,6 +4204,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ /* Enable autotuning of the PLL clock (if permissible) */
+ if (HAS_PCH_SPLIT(dev)) {
+ int factor = 21;
+
+ if (is_lvds) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock.m1 < factor * clock.n)
+ fp |= FP_CB_TUNE;
+ }
+
dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
@@ -3925,7 +4288,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
- else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
+ else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -4071,7 +4434,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -4089,13 +4451,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
- /* write it again -- the BIOS does, after all */
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
I915_WRITE(dpll_reg, dpll);
}
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(dpll_reg);
- udelay(150);
}
intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4693,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
+ struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
@@ -4349,7 +4710,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
- bo = NULL;
+ obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
@@ -4360,13 +4721,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- bo = drm_gem_object_lookup(dev, file_priv, handle);
- if (!bo)
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (!obj)
return -ENOENT;
- obj_priv = to_intel_bo(bo);
-
- if (bo->size < width * height * 4) {
+ if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
@@ -4375,29 +4734,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
- ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
- addr = obj_priv->gtt_offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, bo,
+ ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj_priv->phys_obj->handle->busaddr;
+ addr = obj->phys_obj->handle->busaddr;
}
if (IS_GEN2(dev))
@@ -4406,17 +4777,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
+ if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
@@ -4424,11 +4795,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(bo);
+ i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -4739,8 +5110,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- dev_priv->busy = false;
+ if (!list_empty(&dev_priv->mm.active_list)) {
+ /* Still processing requests, so just re-arm the timer. */
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ return;
+ }
+ dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4751,9 +5128,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ struct intel_framebuffer *intel_fb;
- intel_crtc->busy = false;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb && intel_fb->obj->active) {
+ /* The framebuffer is still being accessed by the GPU. */
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ return;
+ }
+ intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4763,8 +5148,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll = I915_READ(dpll_reg);
+ int dpll_reg = DPLL(pipe);
+ int dpll;
if (HAS_PCH_SPLIT(dev))
return;
@@ -4772,17 +5157,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!dev_priv->lvds_downclock_avail)
return;
+ dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
- PANEL_UNLOCK_REGS);
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
- dpll = I915_READ(dpll_reg);
+ POSTING_READ(dpll_reg);
intel_wait_for_vblank(dev, pipe);
+
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4888,7 +5275,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
@@ -4969,8 +5356,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
- drm_gem_object_unreference(work->pending_flip_obj);
- drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4981,15 +5369,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
- struct timeval now;
+ struct timeval tnow, tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
+ do_gettimeofday(&tnow);
+
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
@@ -4998,26 +5388,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
}
intel_crtc->unpin_work = NULL;
- drm_vblank_put(dev, intel_crtc->pipe);
if (work->event) {
e = work->event;
- do_gettimeofday(&now);
- e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+ /* Called before vblank count and timestamps have
+ * been updated for the vblank interval of flip
+ * completion? Need to increment vblank count and
+ * add one videorefresh duration to returned timestamp
+ * to account for this. We assume this happened if we
+ * get called over 0.9 frame durations after the last
+ * timestamped vblank.
+ *
+ * This calculation cannot be used with vrefresh rates
+ * below 5Hz (10Hz to be on the safe side) without
+ * promoting to 64-bit integers.
+ */
+ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+ 9 * crtc->framedur_ns) {
+ e->event.sequence++;
+ tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+ crtc->framedur_ns);
+ }
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
+ drm_vblank_put(dev, intel_crtc->pipe);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->old_fb_obj);
+ obj = work->old_fb_obj;
+
atomic_clear_mask(1 << intel_crtc->plane,
- &obj_priv->pending_flip.counter);
- if (atomic_read(&obj_priv->pending_flip) == 0)
+ &obj->pending_flip.counter);
+ if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
+
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
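The flip-completion path above now timestamps the event with drm_vblank_count_and_time() and then applies a fixup: if completion is observed more than roughly 0.9 frame durations after the last timestamped vblank, the event is attributed to the next vblank, so the sequence is incremented and one frame duration is added to the timestamp. A standalone sketch of that fixup follows; it is not part of the patch and uses 64-bit nanosecond values instead of the driver's timeval plumbing.

#include <stdio.h>

struct flip_event { unsigned int sequence; long long ts_ns; };

static void fixup_flip_event(struct flip_event *e,
			     long long tnow_ns, long long tvbl_ns,
			     long long framedur_ns)
{
	/* "completed over 0.9 frames after the vblank timestamp?" done
	 * without floating point: 10 * delta > 9 * frame duration. */
	if (10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns) {
		e->sequence++;			/* belongs to the next vblank */
		e->ts_ns = tvbl_ns + framedur_ns;
	} else {
		e->ts_ns = tvbl_ns;
	}
}

int main(void)
{
	struct flip_event e = { .sequence = 1000 };
	long long frame = 16666667;		/* ~60 Hz frame time in ns */

	/* completion seen 0.95 frames after the timestamped vblank */
	fixup_flip_event(&e, 15833333, 0, frame);
	printf("seq=%u ts=%lldns\n", e.sequence, e.ts_ns);
	return 0;
}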
@@ -5063,8 +5476,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
@@ -5098,13 +5510,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto cleanup_work;
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(work->old_fb_obj);
- drm_gem_object_reference(obj);
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
crtc->fb = fb;
@@ -5112,22 +5524,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane,
- &to_intel_bo(work->old_fb_obj)->pending_flip);
-
- work->pending_flip_obj = obj;
- obj_priv = to_intel_bo(obj);
-
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ goto cleanup_objs;
+
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
@@ -5137,18 +5543,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ADVANCE_LP_RING();
}
+ work->pending_flip_obj = obj;
+
work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
- BEGIN_LP_RING(4);
- switch(INTEL_INFO(dev)->gen) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto cleanup_objs;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ switch (INTEL_INFO(dev)->gen) {
case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5156,7 +5572,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5169,7 +5585,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -5183,8 +5599,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj_priv->tiling_mode);
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5616,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_objs:
- drm_gem_object_unreference(work->old_fb_obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
cleanup_work:
mutex_unlock(&dev->struct_mutex);
@@ -5214,6 +5630,16 @@ cleanup_work:
return ret;
}
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+}
+
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
@@ -5225,6 +5651,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
@@ -5315,8 +5742,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = -1;
+ intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
if (HAS_PCH_SPLIT(dev)) {
@@ -5338,7 +5764,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5498,6 +5924,8 @@ static void intel_setup_outputs(struct drm_device *dev)
encoder->base.possible_clones =
intel_encoder_clones(dev, encoder->clone_mask);
}
+
+ intel_panel_setup_backlight(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -5505,19 +5933,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(intel_fb->obj);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
+ struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_gem_object *object = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb->obj;
- return drm_gem_handle_create(file_priv, object, handle);
+ return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5956,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
if (mode_cmd->pitch & 63)
@@ -5565,11 +5992,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj)
return ERR_PTR(-ENOENT);
@@ -5577,10 +6004,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(dev, intel_fb,
- mode_cmd, obj);
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
@@ -5593,10 +6019,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_i915_gem_object *ctx;
int ret;
ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +6032,7 @@ intel_alloc_context_page(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -5624,7 +6050,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5736,6 +6162,25 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5822,7 +6267,123 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 pcu_mbox;
+ int cur_freq, min_freq, max_freq;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ __gen6_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_RC6p_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_MAX |
+ GEN6_RP_DOWN_BUSY_MIN);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ min_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = rp_state_cap & 0xff;
+ cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ max_freq = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
+ }
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = max_freq;
+ dev_priv->min_delay = min_freq;
+ dev_priv->cur_delay = cur_freq;
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIMR, 0);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ __gen6_force_wake_put(dev_priv);
+}
+
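gen6_enable_rps() above derives its frequency limits from two status registers: the minimum ratio from bits 23:16 of RP_STATE_CAP, the maximum ratio from bits 7:0, and the current ratio from bits 15:8 of GT_PERF_STATUS, each in units of 100MHz per the in-line comment. The following standalone sketch of that decode is not part of the patch; the register values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static void decode_rps_caps(uint32_t rp_state_cap, uint32_t gt_perf_status)
{
	int min_freq = (rp_state_cap & 0xff0000) >> 16;	/* minimum ratio */
	int max_freq = rp_state_cap & 0xff;		/* maximum ratio */
	int cur_freq = (gt_perf_status & 0xff00) >> 8;	/* current ratio */

	printf("min %dMHz, max %dMHz, cur %dMHz\n",
	       min_freq * 100, max_freq * 100, cur_freq * 100);
}

int main(void)
{
	decode_rps_caps(0x0006000b, 0x00000800);	/* illustrative values */
	return 0;
}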
+void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5835,7 +6396,9 @@ void intel_init_clock_gating(struct drm_device *dev)
if (IS_GEN5(dev)) {
/* Required for FBC */
- dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
@@ -5872,9 +6435,9 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5896,7 +6459,49 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- return;
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ if (IS_GEN5(dev)) {
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+ }
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * Bits 21 and 22 of 0x42000
+ * Bits 21 and 22 of 0x42004
+ * Bits 5 and 7 of 0x42020
+ * Bit 14 of 0x70180
+ * Bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ I915_WRITE(DSPACNTR,
+ I915_READ(DSPACNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ I915_WRITE(DSPBCNTR,
+ I915_READ(DSPBCNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ }
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5934,57 +6539,106 @@ void intel_init_clock_gating(struct drm_device *dev)
} else if (IS_I830(dev)) {
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
+}
+
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!i915_enable_rc6)
+ return;
+
+ ret = ironlake_setup_rc6(dev);
+ if (ret)
+ return;
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
- }
- } else
- DRM_DEBUG_KMS("Failed to allocate render context."
- "Disable RC6\n");
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ return;
}
- if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_i915_gem_object *obj_priv = NULL;
-
- if (dev_priv->pwrctx) {
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- } else {
- struct drm_gem_object *pwrctx;
-
- pwrctx = intel_alloc_context_page(dev);
- if (pwrctx) {
- dev_priv->pwrctx = pwrctx;
- obj_priv = to_intel_bo(pwrctx);
- }
- }
+ OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ OUT_RING(MI_SET_CONTEXT);
+ OUT_RING(dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ OUT_RING(MI_SUSPEND_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_FLUSH);
+ ADVANCE_LP_RING();
- if (obj_priv) {
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
- }
- }
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
+
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -5997,7 +6651,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6700,14 @@ static void intel_init_display(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -6191,12 +6853,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
-
- /* set memory base */
- if (IS_GEN2(dev))
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
- else
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+ dev->mode_config.fb_base = dev->agp->base;
if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
@@ -6211,7 +6868,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
@@ -6221,6 +6878,12 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_rc6(dev);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -6252,28 +6915,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
- if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
- I915_READ(PWRCTXA);
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(dev_priv->pwrctx);
- }
-
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_rc6(dev);
mutex_unlock(&dev->struct_mutex);
@@ -6325,3 +6973,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[2];
+
+ struct intel_pipe_error_state {
+ u32 conf;
+ u32 source;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } pipe[2];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int i;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ for (i = 0; i < 2; i++) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
+ }
+
+ return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, "Pipe [%d]:\n", i);
+ seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ seq_printf(m, "Plane [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ seq_printf(m, "Cursor [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ seq_printf(m, " POS: %08x\n", error->cursor[i].position);
+ seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417cffe9a..51cb4e36997f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
- switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
- return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
}
}
@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
- int tries;
+ int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
/* channel equalization */
tries = 0;
+ cr_tries = 0;
channel_eq = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ intel_dp_link_down(intel_dp);
+ break;
+ }
+
if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (!intel_dp_get_link_status(intel_dp))
break;
+ /* Make sure clock is still ok */
+ if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ intel_dp_start_link_train(intel_dp);
+ cr_tries++;
+ continue;
+ }
+
if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ intel_dp_link_down(intel_dp);
+ intel_dp_start_link_train(intel_dp);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
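The channel-equalization loop above gains two counters: tries bounds the number of equalization attempts per clock-recovery pass, and cr_tries bounds how often training restarts from clock recovery before the link is abandoned. The standalone sketch below restates that retry policy; it is not part of the patch, the link probes are stand-ins, and the drive-setting adjustment is elided.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in link probes: pretend CR holds and EQ succeeds on the 3rd try. */
static int eq_attempts;
static bool clock_recovery_ok(void) { return true; }
static bool channel_eq_ok(void) { return ++eq_attempts >= 3; }

static bool train_channel_eq(void)
{
	int tries = 0, cr_tries = 0;

	for (;;) {
		if (cr_tries > 5)
			return false;		/* give up on the link */

		if (!clock_recovery_ok()) {
			cr_tries++;		/* fall back to clock recovery */
			tries = 0;
			continue;
		}

		if (channel_eq_ok())
			return true;		/* link trained */

		if (++tries > 5) {
			cr_tries++;		/* EQ is stuck: retrain from CR */
			tries = 0;
		}
		/* otherwise adjust the drive settings and try EQ again */
	}
}

int main(void)
{
	printf("trained: %d after %d EQ attempts\n",
	       train_channel_eq(), eq_attempts);
	return 0;
}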
@@ -1442,8 +1470,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- intel_wait_for_vblank(intel_dp->base.base.dev,
- intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
@@ -1612,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ return has_audio;
+}
+
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1625,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
return ret;
if (property == intel_dp->force_audio_property) {
- if (val == intel_dp->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
return 0;
- intel_dp->force_audio = val;
+ intel_dp->force_audio = i;
- if (val > 0 && intel_dp->has_audio)
- return 0;
- if (val < 0 && !intel_dp->has_audio)
+ if (i == 0)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_dp->has_audio)
return 0;
- intel_dp->has_audio = val > 0;
+ intel_dp->has_audio = has_audio;
goto done;
}
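The force_audio property handling above treats the value as a tri-state: negative forces audio off, positive forces it on, and zero falls back to probing the sink's EDID via intel_dp_detect_audio(). A standalone sketch of that resolution follows; it is not part of the patch and the EDID probe is a stand-in.

#include <stdbool.h>
#include <stdio.h>

static bool edid_reports_audio(void)
{
	return true;	/* pretend the monitor advertises audio support */
}

static bool resolve_has_audio(int force_audio)
{
	if (force_audio == 0)
		return edid_reports_audio();	/* automatic: ask the sink */

	return force_audio > 0;			/* -1 -> off, +1 -> on */
}

int main(void)
{
	printf("force=-1 -> %d\n", resolve_has_audio(-1));
	printf("force= 0 -> %d\n", resolve_has_audio(0));
	printf("force=+1 -> %d\n", resolve_has_audio(1));
	return 0;
}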
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c6125bb1f..2c431049963c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
- struct drm_gem_object *cursor_bo;
+ struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
bool enable_stall_check;
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
@@ -256,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -293,19 +297,21 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined);
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dddc28e..512782728e51 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -62,13 +62,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = ifbdev->helper.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
- struct drm_gem_object *fbo = NULL;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ int size, ret;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -78,23 +78,22 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.height = sizes->surface_height;
mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = i915_gem_alloc_object(dev, size);
- if (!fbo) {
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
- obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
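The pitch calculation in the hunk above now rounds bits-per-pixel up to whole bytes with (bpp + 7) / 8 before aligning the stride to 64 bytes; the previous (bpp + 1) / 8 only matches a true round-up for the common 8/16/24/32 bpp depths. A small standalone illustration (not part of the patch; ALIGN_UP is a local helper, not the kernel macro):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int fb_pitch(unsigned int width, unsigned int bpp)
{
	unsigned int cpp = (bpp + 7) / 8;	/* bytes per pixel, rounded up */

	return ALIGN_UP(width * cpp, 64);	/* 64-byte aligned stride */
}

int main(void)
{
	/* at 30 bpp the old expression under-counts the bytes per pixel */
	printf("1024x30bpp: old cpp=%d new cpp=%d pitch=%u\n",
	       (30 + 1) / 8, (30 + 7) / 8, fb_pitch(1024, 30));
	return 0;
}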
@@ -108,7 +107,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
@@ -122,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
@@ -129,26 +133,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- if (!IS_GEN2(dev))
- info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
- else
- info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
- size);
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
}
-
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unpin;
- }
info->screen_size = size;
// memset(info->screen_base, 0, size);
@@ -156,10 +151,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
- info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -168,7 +159,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
- obj_priv->gtt_offset, fbo);
+ obj->gtt_offset, obj);
mutex_unlock(&dev->struct_mutex);
@@ -176,9 +167,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0;
out_unpin:
- i915_gem_object_unpin(fbo);
+ i915_gem_object_unpin(obj);
out_unref:
- drm_gem_object_unreference(fbo);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
@@ -225,7 +216,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..c635c9e357b9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
}
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ return has_audio;
+}
+
static int
intel_hdmi_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == intel_hdmi->force_audio_property) {
- if (val == intel_hdmi->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
return 0;
- intel_hdmi->force_audio = val;
+ intel_hdmi->force_audio = i;
- if (val > 0 && intel_hdmi->has_audio)
- return 0;
- if (val < 0 && !intel_hdmi->has_audio)
+ if (i == 0)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_hdmi->has_audio)
return 0;
- intel_hdmi->has_audio = val > 0;
+ intel_hdmi->has_audio = has_audio;
goto done;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7eea..58040f68ed7a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = I915_READ_NOTRACE(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -96,9 +97,9 @@ static int get_clock(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | clock_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
}
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | data_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf386fd..bcdba7bd5cfa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
+ intel_panel_enable_backlight(dev);
}
static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
lvds_reg = LVDS;
}
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
- intel_panel_set_backlight(dev, 0);
+ intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
@@ -262,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return true;
}
- /* Make sure pre-965s set dither correctly */
- if (INTEL_INFO(dev)->gen < 4) {
- if (dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
-
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
@@ -304,14 +297,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
- pfit_control |= PFIT_SCALING_PILLAR;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
- pfit_control |= PFIT_SCALING_LETTER;
- else
- pfit_control |= PFIT_SCALING_AUTO;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
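The panel-fitter selection above compares adjusted_mode->hdisplay * mode->vdisplay with mode->hdisplay * adjusted_mode->vdisplay, i.e. the panel and mode aspect ratios are compared by cross-multiplication so no integer division or rounding error is involved. A standalone sketch of that comparison follows; it is not part of the patch and the mode sizes are made up.

#include <stdio.h>

static const char *fit_mode(unsigned int panel_w, unsigned int panel_h,
			    unsigned int mode_w, unsigned int mode_h)
{
	/* panel_w/panel_h vs. mode_w/mode_h, compared without dividing */
	unsigned long scaled_width = (unsigned long)panel_w * mode_h;
	unsigned long scaled_height = (unsigned long)mode_w * panel_h;

	if (scaled_width > scaled_height)
		return "pillarbox (panel wider than mode)";
	if (scaled_width < scaled_height)
		return "letterbox (panel taller than mode)";
	return mode_w != panel_w ? "scale, same aspect" : "native, no fitting";
}

int main(void)
{
	/* 16:10 panel showing a 4:3 mode -> bars at the sides */
	printf("%s\n", fit_mode(1920, 1200, 1024, 768));
	return 0;
}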
@@ -358,13 +350,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
break;
default:
@@ -372,6 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
+ /* If not enabling scaling, be consistent and always use 0. */
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 chips set dither correctly */
+ if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
@@ -395,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
/* We try to do the minimum that is necessary in order to unlock
* the registers for mode setting.
*
@@ -427,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- if (dev_priv->backlight_level == 0)
- dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
/* Undo any unlocking done in prepare to prevent accidental
* adjustment of the registers.
*/
@@ -703,6 +704,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@ -914,6 +923,8 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (INTEL_INFO(dev)->gen >= 5)
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1030,18 @@ bool intel_lvds_init(struct drm_device *dev)
out:
if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
- /* make sure PWM is enabled */
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+ /* make sure PWM is enabled and locked to the LVDS pipe */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
- pwm |= (PWM_ENABLE | PWM_PIPE_B);
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+ if (pipe == 0 && (pwm & PWM_PIPE_B))
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+ if (pipe)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867aea..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
*/
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <acpi/video.h>
#include "drmP.h"
@@ -273,14 +274,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- if (IS_MOBILE(dev)) {
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_MOBILE(dev))
intel_enable_asle(dev);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock,
- irqflags);
- }
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
@@ -482,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
return -ENOTSUPP;
}
- base = ioremap(asls, OPREGION_SIZE);
+ base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f47..3fbb98b948d6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_do_wait_request(dev,
overlay->last_flip_req, true,
- &dev_priv->render_ring);
+ LP_RING(dev_priv));
if (ret)
return ret;
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret) {
+ kfree(request);
+ goto out;
+ }
+
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
+ int ret;
BUG_ON(!overlay->active);
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ overlay->last_flip_req = request->seqno;
return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
}
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
bool interruptible)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
+ int ret;
BUG_ON(!overlay->active);
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
+ interruptible, LP_RING(dev_priv));
if (ret)
return ret;
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
}
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
+ struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
goto out_unpin;
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
if (!overlay->active) {
regs = intel_overlay_map_regs(overlay);
if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
- regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = to_intel_bo(new_bo);
+ overlay->vid_bo = new_bo;
return 0;
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
- struct drm_gem_object *new_bo)
+ struct drm_i915_gem_object *new_bo)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
break;
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->size ||
- rec->offset_V + tmp > new_bo->size)
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
return -EINVAL;
break;
}
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- struct drm_gem_object *new_bo;
+ struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = intel_overlay_recover_from_interrupt(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference_unlocked(new_bo);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_gem_object *reg_bo;
+ struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
int ret;
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = to_intel_bo(reg_bo);
+ overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
- drm_gem_object_unreference(reg_bo);
+ drm_gem_object_unreference(&reg_bo->base);
out_free:
kfree(overlay);
return;
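
Throughout the overlay changes above, ring submission switches from a fire-and-forget style to one where BEGIN_LP_RING and i915_add_request return errors and the preallocated request is freed on every failure path, with the sequence number taken from request->seqno on success. The standalone sketch below shows only that error-propagation shape; the helper names are stand-ins, not the driver's API, and in the real driver the ring's request list keeps the request on success.

#include <stdlib.h>
#include <errno.h>

struct request { unsigned int seqno; };

/* stand-ins for ring operations that can fail */
static int  ring_begin(int dwords)          { return dwords > 0 ? 0 : -EINVAL; }
static void ring_emit(unsigned int dword)   { (void)dword; }
static void ring_advance(void)              { }
static int  add_request(struct request *rq) { rq->seqno = 1; return 0; }

int overlay_continue(unsigned int flip_addr, unsigned int *last_flip_seqno)
{
	struct request *rq = calloc(1, sizeof(*rq));
	int ret;

	if (!rq)
		return -ENOMEM;

	ret = ring_begin(2);
	if (ret) {                 /* every failure path frees the request */
		free(rq);
		return ret;
	}
	ring_emit(0x1);            /* flip command */
	ring_emit(flip_addr);
	ring_advance();

	ret = add_request(rq);
	if (ret) {
		free(rq);
		return ret;
	}

	*last_flip_seqno = rq->seqno;
	free(rq);                  /* in the driver the ring's request list owns it */
	return 0;
}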
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f385278..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
#include "intel_drv.h"
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -112,17 +110,36 @@ done:
dev_priv->pch_pf_size = (width << 16) | height;
}
-static int is_backlight_combination_mode(struct drm_device *dev)
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
- if (INTEL_INFO(dev)->gen >= 4)
- return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+ /* Restore the CTL value if it was lost, e.g. after a GPU reset */
- if (IS_GEN2(dev))
- return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_PCH_CTL2,
+ dev_priv->saveBLC_PWM_CTL);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_CTL,
+ dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ }
- return 0;
+ return val;
}
u32 intel_panel_get_max_backlight(struct drm_device *dev)
@@ -130,10 +147,18 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
+ max = i915_read_blc_pwm_ctl(dev_priv);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ return 1;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
- max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ max >>= 16;
} else {
- max = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
max >>= 17;
} else {
@@ -141,17 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
-
- if (is_backlight_combination_mode(dev))
- max *= 0xff;
- }
-
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- DRM_ERROR("fixme: max PWM is zero.\n");
- max = 1;
}
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -169,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
val >>= 1;
-
- if (is_backlight_combination_mode(dev)){
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
- val *= lbpc;
- val >>= 1;
- }
}
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -200,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
-
- if (is_backlight_combination_mode(dev)){
- u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
-
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
- }
-
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
@@ -218,3 +213,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight_enabled) {
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ dev_priv->backlight_enabled = false;
+ }
+
+ intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+ dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
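
The new i915_read_blc_pwm_ctl() above caches the backlight control register the first time it reads a non-zero value and writes the cached value back if the register later reads as zero (for example after a GPU reset). A minimal sketch of that save/restore idea, with an invented register layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_blc_ctl = 0x12345678;  /* stands in for BLC_PWM_CTL */
static uint32_t saved_blc_ctl;            /* stands in for dev_priv->saveBLC_PWM_CTL */

static uint32_t read_blc_ctl(void)
{
	uint32_t val = hw_blc_ctl;

	if (saved_blc_ctl == 0) {
		saved_blc_ctl = val;          /* first read: remember a good value */
	} else if (val == 0) {
		hw_blc_ctl = saved_blc_ctl;   /* register was lost: restore it */
		val = saved_blc_ctl;
	}
	return val;
}

int main(void)
{
	read_blc_ctl();          /* caches the value */
	hw_blc_ctl = 0;          /* simulate losing the register */
	printf("restored = %#x\n", read_blc_ctl());
	return 0;
}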
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e820..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
+
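
The ring_space() helper added above centralizes the free-space computation used in several places later in this patch. Sketched standalone below: free space in a circular buffer given head and tail, with an 8-byte gap reserved so that head == tail still unambiguously means "empty" rather than "full". Sizes in main() are made up.

#include <assert.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	assert(ring_space(0, 0, 4096) == 4096 - 8);  /* empty ring */
	assert(ring_space(128, 120, 4096) == 0);     /* completely full */
	return 0;
}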
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -48,14 +56,15 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
return seqno;
}
-static void
-render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
+ int ret;
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -109,49 +118,54 @@ render_ring_flush(struct drm_device *dev,
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, cmd);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
}
+
+ return 0;
}
-static void ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
u32 head;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
- ring->write_tail(dev, ring, 0);
+ ring->write_tail(ring, 0);
/* Initialize the ring. */
- I915_WRITE_START(ring, obj_priv->gtt_offset);
+ I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +192,13 @@ static int init_ring_common(struct drm_device *dev,
}
I915_WRITE_CTL(ring,
- ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
- if (head != 0) {
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+ I915_READ_START(ring) != obj->gtt_offset ||
+ (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -194,344 +209,569 @@ static int init_ring_common(struct drm_device *dev,
return -EIO;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->space = ring_space(ring);
}
+
return 0;
}
-static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret = init_ring_common(dev, ring);
- int mode;
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = kmap(obj->pages[0]);
+ if (pc->cpu_page == NULL)
+ goto err_unpin;
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ kfree(pc);
+ return ret;
+}
+
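
init_pipe_control() above follows the usual kernel goto-unwind style: each acquired resource gets one label that releases it, so every failure path undoes exactly what was already set up. A minimal userspace analogue under that assumption (all helpers here are stand-ins, not GEM calls):

#include <stdlib.h>
#include <errno.h>

struct page_obj { void *mem; };

static struct page_obj *alloc_obj(void)        { return calloc(1, sizeof(struct page_obj)); }
static void free_obj(struct page_obj *o)       { free(o); }
static int  pin_obj(struct page_obj *o)        { o->mem = malloc(4096); return o->mem ? 0 : -ENOMEM; }
static void unpin_obj(struct page_obj *o)      { free(o->mem); }
static void *map_obj(struct page_obj *o)       { return o->mem; }   /* pretend kmap */

struct pipe_control { struct page_obj *obj; void *cpu_page; };

int init_pipe_control(struct pipe_control **out)
{
	struct pipe_control *pc;
	struct page_obj *obj;
	int ret;

	pc = malloc(sizeof(*pc));
	if (!pc)
		return -ENOMEM;

	obj = alloc_obj();
	if (!obj) {
		ret = -ENOMEM;
		goto err;
	}

	ret = pin_obj(obj);
	if (ret)
		goto err_unref;

	pc->cpu_page = map_obj(obj);
	if (!pc->cpu_page) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	*out = pc;
	return 0;

err_unpin:
	unpin_obj(obj);
err_unref:
	free_obj(obj);
err:
	free(pc);
	return ret;
}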
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+ kunmap(obj->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ kfree(pc);
+ ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_GEN5(dev)) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int id;
+
+ /*
+ * cs -> 1 = vcs, 0 = bcs
+ * vcs -> 1 = bcs, 0 = cs,
+ * bcs -> 1 = cs, 0 = vcs.
+ */
+ id = ring - dev_priv->ring;
+ id += 2 - i;
+ id %= 3;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring,
+ RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ seqno = i915_gem_get_seqno(ring->dev);
+ update_semaphore(ring, 0, seqno);
+ update_semaphore(ring, 1, seqno);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ intel_ring_sync_index(ring, to) << 17 |
+ MI_SEMAPHORE_COMPARE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL | 2); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
} while (0)
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
-
- if (IS_GEN6(dev)) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_PIPE_CONTROL | 3);
- OUT_RING(PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
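
The six PIPE_CONTROL_FLUSH emissions in pc_render_add_request() step the scratch address in 128-byte increments so each dummy write lands in its own cacheline before the interrupting write is issued. Written as a loop it would look like the sketch below, where flush_to() merely stands in for the macro:

static void flush_to(unsigned int addr) { (void)addr; }

void flush_scratch_cachelines(unsigned int gtt_offset)
{
	unsigned int addr = gtt_offset + 128;
	int i;

	for (i = 0; i < 6; i++) {
		flush_to(addr);        /* one qword write per cacheline */
		addr += 128;
	}
}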
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ int ret;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
- return seqno;
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
}
static u32
-render_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
}
static void
-render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ dev_priv->gt_irq_mask &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ dev_priv->gt_irq_mask |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock(&ring->irq_lock);
+ if (ring->irq_refcount++ == 0) {
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_enable_irq(dev_priv,
+ GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock(&ring->irq_lock);
+
+ return true;
}
static void
-render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ spin_lock(&ring->irq_lock);
+ if (--ring->irq_refcount == 0) {
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_disable_irq(dev_priv,
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock(&ring->irq_lock);
}
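
The irq_get/irq_put pair above replaces the old per-device user_irq refcount with a per-ring refcount guarded by the ring's own spinlock: the first getter unmasks the interrupt source, the last putter masks it again. A standalone analogue of that pattern, using a pthread mutex in place of the spinlock and plain booleans in place of the interrupt mask registers:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq_refcount;
static bool irq_enabled_in_hw;

static void hw_enable_irq(void)  { irq_enabled_in_hw = true;  }
static void hw_disable_irq(void) { irq_enabled_in_hw = false; }

bool ring_irq_get(void)
{
	pthread_mutex_lock(&irq_lock);
	if (irq_refcount++ == 0)
		hw_enable_irq();        /* only the first user touches the hardware */
	pthread_mutex_unlock(&irq_lock);
	return true;
}

void ring_irq_put(void)
{
	pthread_mutex_lock(&irq_lock);
	if (--irq_refcount == 0)
		hw_disable_irq();       /* the last user turns the source back off */
	pthread_mutex_unlock(&irq_lock);
}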
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_GEN6(dev)) {
- I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
- } else {
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
- }
-
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = IS_GEN6(ring->dev) ?
+ RING_HWS_PGA_GEN6(ring->mmio_base) :
+ RING_HWS_PGA(ring->mmio_base);
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
}
-static void
-bsd_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
-}
+ int ret;
-static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return init_ring_common(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
}
-static u32
-ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
u32 seqno;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ seqno = i915_gem_get_seqno(ring->dev);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ *result = seqno;
+ return 0;
+}
- return seqno;
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock(&ring->irq_lock);
+ if (ring->irq_refcount++ == 0)
+ ironlake_enable_irq(dev_priv, flag);
+ spin_unlock(&ring->irq_lock);
+
+ return true;
}
static void
-bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ spin_lock(&ring->irq_lock);
+ if (--ring->irq_refcount == 0)
+ ironlake_disable_irq(dev_priv, flag);
+ spin_unlock(&ring->irq_lock);
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock(&ring->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ ring->irq_mask &= ~rflag;
+ I915_WRITE_IMR(ring, ring->irq_mask);
+ ironlake_enable_irq(dev_priv, gflag);
+ }
+ spin_unlock(&ring->irq_lock);
+
+ return true;
}
+
static void
-bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ spin_lock(&ring->irq_lock);
+ if (--ring->irq_refcount == 0) {
+ ring->irq_mask |= rflag;
+ I915_WRITE_IMR(ring, ring->irq_mask);
+ ironlake_disable_irq(dev_priv, gflag);
+ }
+ spin_unlock(&ring->irq_lock);
}
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- uint32_t exec_start;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
- (2 << 6) | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
return 0;
}
static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ int ret;
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
- count = nbox ? nbox : 1;
+ if (IS_I830(dev) || IS_845G(dev)) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
+ } else {
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
- intel_ring_emit(dev, ring,
- exec_start | MI_BATCH_NON_SECURE);
- intel_ring_emit(dev, ring, exec_start + exec_len - 4);
- intel_ring_emit(dev, ring, 0);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
} else {
- intel_ring_begin(dev, ring, 2);
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | (2 << 6)
- | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- } else {
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
- | (2 << 6));
- intel_ring_emit(dev, ring, exec_start |
- MI_BATCH_NON_SECURE);
- }
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
}
- intel_ring_advance(dev, ring);
}
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- /* XXX breadcrumb */
+ intel_ring_advance(ring);
return 0;
}
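
render_ring_dispatch_execbuffer() above keeps the old generation split: i830/845G take an explicit MI_BATCH_BUFFER with start and end addresses, while newer parts take MI_BATCH_BUFFER_START with just the start (and the non-secure bit encoded differently per generation). A rough sketch of that selection, with placeholder command values rather than real MI encodings:

#include <stdint.h>

enum { CMD_BATCH_BUFFER = 0x30, CMD_BATCH_BUFFER_START = 0x31 };

struct batch { uint32_t words[4]; int n; };

static void emit(struct batch *b, uint32_t w) { b->words[b->n++] = w; }

void dispatch_batch(struct batch *b, int is_i8xx, int gen,
		    uint32_t offset, uint32_t len)
{
	if (is_i8xx) {
		emit(b, CMD_BATCH_BUFFER);
		emit(b, offset | 1);         /* start, non-secure */
		emit(b, offset + len - 8);   /* end address */
		emit(b, 0);
	} else if (gen >= 4) {
		emit(b, CMD_BATCH_BUFFER_START);
		emit(b, offset);
	} else {
		emit(b, CMD_BATCH_BUFFER_START);
		emit(b, offset | 1);         /* non-secure bit on older gens */
	}
}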
-static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
if (obj == NULL)
return;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
+ kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
-static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +780,15 @@ static int init_status_page(struct drm_device *dev,
ret = -ENOMEM;
goto err;
}
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj_priv->gtt_offset;
- ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
@@ -557,7 +796,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -566,7 +805,7 @@ static int init_status_page(struct drm_device *dev,
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
err:
return ret;
}
@@ -574,9 +813,7 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
ring->dev = dev;
@@ -584,8 +821,11 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
+ spin_lock_init(&ring->irq_lock);
+ ring->irq_mask = ~0;
+
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(dev, ring);
+ ret = init_status_page(ring);
if (ret)
return ret;
}
@@ -597,15 +837,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws;
}
- ring->gem_object = obj;
+ ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;
- obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@@ -618,60 +857,64 @@ int intel_init_ring_buffer(struct drm_device *dev,
}
ring->virtual_start = ring->map.handle;
- ret = ring->init(dev, ring);
+ ret = ring->init(ring);
if (ret)
goto err_unmap;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- }
- return ret;
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
err_unmap:
drm_core_ioremapfree(&ring->map, dev);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
err_hws:
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
return ret;
}
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
- if (ring->gem_object == NULL)
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
return;
- drm_core_ioremapfree(&ring->map, dev);
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ I915_WRITE_CTL(ring, 0);
+
+ drm_core_ioremapfree(&ring->map, ring->dev);
- i915_gem_object_unpin(ring->gem_object);
- drm_gem_object_unreference(ring->gem_object);
- ring->gem_object = NULL;
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
unsigned int *virt;
- int rem;
- rem = ring->size - ring->tail;
+ int rem = ring->size - ring->tail;
if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(dev, ring, rem);
+ int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}
@@ -684,34 +927,36 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
}
ring->tail = 0;
- ring->space = ring->head - 8;
+ ring->space = ring_space(ring);
return 0;
}
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fallback to the slow and accurate path.
+ */
+ head = intel_read_status_page(ring, 4);
+ if (head > ring->head) {
+ ring->head = head;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- /* If the reported head position has wrapped or hasn't advanced,
- * fallback to the slow and accurate path.
- */
- head = intel_read_status_page(ring, 4);
- if (head < ring->actual_head)
- head = I915_READ_HEAD(ring);
- ring->actual_head = head;
- ring->head = head & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(dev);
return 0;
}
@@ -722,29 +967,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
msleep(1);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EAGAIN;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
- if (unlikely(ring->tail + n > ring->size))
- intel_wrap_ring_buffer(dev, ring);
- if (unlikely(ring->space < n))
- intel_wait_ring_buffer(dev, ring, n);
+ int ret;
+
+ if (unlikely(ring->tail + n > ring->effective_size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = intel_wait_ring_buffer(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
ring->space -= n;
+ return 0;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->write_tail(dev, ring, ring->tail);
+ ring->write_tail(ring, ring->tail);
}
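
intel_ring_begin() now reports failure instead of silently busy-waiting: wrap when the request would run past the usable end of the buffer (effective_size, shortened on i830), wait until enough space is free, then reserve it. The toy below mirrors that flow under stated simplifications; wait_for_space() just recomputes once where the driver polls the hardware head with a timeout, and no NOOP padding is actually written.

#include <errno.h>

struct ring { int head, tail, size, effective_size, space; };

static int ring_space(const struct ring *r)
{
	int space = r->head - (r->tail + 8);
	if (space < 0)
		space += r->size;
	return space;
}

static int wait_for_space(struct ring *r, int bytes)
{
	r->space = ring_space(r);
	return r->space >= bytes ? 0 : -EBUSY;
}

static int wrap_ring(struct ring *r)
{
	int rem = r->size - r->tail;
	int ret;

	if (r->space < rem) {
		ret = wait_for_space(r, rem);
		if (ret)
			return ret;
	}
	/* the driver pads the remainder with MI_NOOP before restarting */
	r->tail = 0;
	r->space = ring_space(r);
	return 0;
}

int ring_begin(struct ring *r, int num_dwords)
{
	int n = 4 * num_dwords;          /* dwords -> bytes */
	int ret;

	if (r->tail + n > r->effective_size) {
		ret = wrap_ring(r);
		if (ret)
			return ret;
	}
	if (r->space < n) {
		ret = wait_for_space(r, n);
		if (ret)
			return ret;
	}
	r->space -= n;
	return 0;
}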
static const struct intel_ring_buffer render_ring = {
@@ -756,10 +1011,11 @@ static const struct intel_ring_buffer render_ring = {
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_seqno = render_ring_get_seqno,
- .user_irq_get = render_ring_get_user_irq,
- .user_irq_put = render_ring_put_user_irq,
- .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = render_ring_get_irq,
+ .irq_put = render_ring_put_irq,
+ .dispatch_execbuffer = render_ring_dispatch_execbuffer,
+ .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
@@ -769,22 +1025,21 @@ static const struct intel_ring_buffer bsd_ring = {
.id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = bsd_ring_get_irq,
+ .irq_put = bsd_ring_put_irq,
+ .dispatch_execbuffer = ring_dispatch_execbuffer,
};
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +1058,112 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
-static void gen6_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate, u32 flush)
{
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
+ uint32_t cmd;
+ int ret;
+
+ if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
}
static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
- uint32_t exec_start;
+ int ret;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_get_irq(ring,
+ GT_USER_INTERRUPT,
+ GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_put_irq(ring,
+ GT_USER_INTERRUPT,
+ GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_get_irq(ring,
+ GT_GEN6_BSD_USER_INTERRUPT,
+ GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ return gen6_ring_put_irq(ring,
+ GT_GEN6_BSD_USER_INTERRUPT,
+ GEN6_BSD_USER_INTERRUPT);
+}
+
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = RING_BSD,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};
/* Blitter support (SandyBridge+) */
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ return gen6_ring_get_irq(ring,
+ GT_BLT_USER_INTERRUPT,
+ GEN6_BLITTER_USER_INTERRUPT);
}
+
static void
-blt_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ gen6_ring_put_irq(ring,
+ GT_BLT_USER_INTERRUPT,
+ GEN6_BLITTER_USER_INTERRUPT);
}
@@ -883,32 +1181,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
return ring->private;
}
-static int blt_ring_init(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
{
- if (NEED_BLT_WORKAROUND(dev)) {
+ if (NEED_BLT_WORKAROUND(ring->dev)) {
struct drm_i915_gem_object *obj;
- u32 __iomem *ptr;
+ u32 *ptr;
int ret;
- obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;
- ret = i915_gem_object_pin(&obj->base, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
ptr = kmap(obj->pages[0]);
- iowrite32(MI_BATCH_BUFFER_END, ptr);
- iowrite32(MI_NOOP, ptr+1);
+ *ptr++ = MI_BATCH_BUFFER_END;
+ *ptr++ = MI_NOOP;
kunmap(obj->pages[0]);
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
- i915_gem_object_unpin(&obj->base);
+ i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
@@ -916,51 +1213,47 @@ static int blt_ring_init(struct drm_device *dev,
ring->private = obj;
}
- return init_ring_common(dev, ring);
+ return init_ring_common(ring);
}
-static void blt_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
if (ring->private) {
- intel_ring_begin(dev, ring, num_dwords+2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
- } else
- intel_ring_begin(dev, ring, 4);
-}
+ int ret = intel_ring_begin(ring, num_dwords+2);
+ if (ret)
+ return ret;
-static void blt_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
-{
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+ return 0;
+ } else
+ return intel_ring_begin(ring, 4);
}
-static u32
-blt_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate, u32 flush)
{
- u32 seqno = i915_gem_get_seqno(dev);
+ uint32_t cmd;
+ int ret;
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
+ return 0;
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
- return seqno;
+ ret = blt_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
}
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1274,98 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
- .add_request = blt_ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = blt_ring_get_user_irq,
- .user_irq_put = blt_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- dev_priv->render_ring = render_ring;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev))
+ ring->effective_size -= 128;
+
+ ring->map.offset = start;
+ ring->map.size = size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
}
- return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ ring->virtual_start = (void __force __iomem *)ring->map.handle;
+ return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
if (IS_GEN6(dev))
- dev_priv->bsd_ring = gen6_bsd_ring;
+ *ring = gen6_bsd_ring;
else
- dev_priv->bsd_ring = bsd_ring;
+ *ring = bsd_ring;
- return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- dev_priv->blt_ring = gen6_blt_ring;
+ *ring = gen6_blt_ring;
- return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+ return intel_init_ring_buffer(dev, ring);
}
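
Editor's note: the three intel_init_*_ring_buffer() paths above all follow the same shape — copy a const ring template into the per-device slot picked by the RCS/VCS/BCS index, override generation-specific hooks, then hand the slot to the common initialiser. A minimal standalone C sketch of that pattern (the struct, hook names and gen check are illustrative stand-ins, not the driver's real types):

    #include <stdio.h>
    #include <string.h>

    struct ring {
            const char *name;
            int (*add_request)(struct ring *r);   /* per-generation hook */
    };

    static int legacy_add_request(struct ring *r) { printf("%s: legacy\n", r->name); return 0; }
    static int gen6_add_request(struct ring *r)   { printf("%s: gen6\n", r->name);   return 0; }

    /* const template shared by every device instance */
    static const struct ring render_template = {
            .name = "render",
            .add_request = legacy_add_request,
    };

    enum { RCS, VCS, BCS, NUM_RINGS };

    static int init_render_ring(struct ring rings[NUM_RINGS], int gen)
    {
            struct ring *ring = &rings[RCS];

            *ring = render_template;                   /* copy the template into the slot */
            if (gen >= 6)
                    ring->add_request = gen6_add_request;  /* per-generation override */

            return ring->add_request(ring);            /* stand-in for the common init call */
    }

    int main(void)
    {
            struct ring rings[NUM_RINGS];
            memset(rings, 0, sizeof(rings));
            return init_render_ring(rings, 6);
    }
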
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1efeed..6d6fde85a636 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,40 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+enum {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
+};
+
struct intel_hw_status_page {
- void *page_addr;
+ u32 __iomem *page_addr;
unsigned int gfx_addr;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
-struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@@ -25,45 +43,38 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- unsigned long size;
void *virtual_start;
struct drm_device *dev;
- struct drm_gem_object *gem_object;
+ struct drm_i915_gem_object *obj;
- u32 actual_head;
u32 head;
u32 tail;
int space;
+ int size;
+ int effective_size;
struct intel_hw_status_page status_page;
- u32 irq_gem_seqno; /* last seq seem at irq time */
- u32 waiting_gem_seqno;
- int user_irq_refcount;
- void (*user_irq_get)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*user_irq_put)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ spinlock_t irq_lock;
+ u32 irq_refcount;
+ u32 irq_mask;
+ u32 irq_seqno; /* last seq seen at irq time */
+ u32 waiting_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
- int (*init)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ int (*init)(struct intel_ring_buffer *ring);
- void (*write_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
- void (*flush)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains);
- u32 (*add_request)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains);
- u32 (*get_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- int (*dispatch_gem_execbuffer)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset);
+ int __must_check (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring,
+ u32 *seqno);
+ u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
/**
@@ -96,7 +107,7 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- bool outstanding_lazy_request;
+ u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
drm_local_map_t map;
@@ -105,44 +116,57 @@ struct intel_ring_buffer {
};
static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs -> 0 = vcs, 1 = bcs
+ * vcs -> 0 = bcs, 1 = cs,
+ * bcs -> 0 = cs, 1 = vcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
+
+static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
- int reg)
+ int reg)
{
- u32 *regs = ring->status_page.page_addr;
- return regs[reg];
+ return ioread32(ring->status_page.page_addr + reg);
}
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
+ iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
#endif /* _INTEL_RINGBUFFER_H_ */
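
Editor's note: the intel_ring_sync_index() helper introduced above relies on the rings living in one contiguous array, so the other ring's semaphore slot is just a wrap-around pointer difference. A small userspace sketch of the same arithmetic (the three-ring layout mirrors the RCS/VCS/BCS enum; everything else is a stand-in):

    #include <assert.h>
    #include <stdio.h>

    #define NUM_RINGS 3

    struct ring { int id; };

    /* cs -> 0 = vcs, 1 = bcs; vcs -> 0 = bcs, 1 = cs; bcs -> 0 = cs, 1 = vcs */
    static int sync_index(const struct ring *ring, const struct ring *other)
    {
            int idx = (int)(other - ring) - 1;   /* pointer difference within the array */

            if (idx < 0)
                    idx += NUM_RINGS;            /* wrap around past the end */
            return idx;
    }

    int main(void)
    {
            struct ring rings[NUM_RINGS] = { {0}, {1}, {2} };

            assert(sync_index(&rings[0], &rings[1]) == 0); /* cs  -> vcs */
            assert(sync_index(&rings[0], &rings[2]) == 1); /* cs  -> bcs */
            assert(sync_index(&rings[2], &rings[0]) == 0); /* bcs -> cs  */
            printf("sync indices ok\n");
            return 0;
    }
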
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa2a6ec..7c50cdce84f0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
return false;
}
- i = 3;
- while (status == SDVO_CMD_STATUS_PENDING && i--) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_CMD_STATUS,
- &status))
- return false;
- }
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("command returns response %s [%d]\n",
- status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
- status);
- return false;
- }
-
return true;
}
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
u8 status;
int i;
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
/*
* The documentation states that all commands will be
* processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
*
* Check 5 times in case the hardware failed to read the docs.
*/
- do {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ udelay(15);
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
- return false;
- } while (status == SDVO_CMD_STATUS_PENDING && --retry);
+ goto log_fail;
+ }
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
DRM_LOG_KMS("(%s)", cmd_status_names[status]);
else
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
return true;
log_fail:
- DRM_LOG_KMS("\n");
+ DRM_LOG_KMS("... failed\n");
return false;
}
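
Editor's note: the reworked intel_sdvo_read_response() above reads the status once, then re-polls only while the device reports PENDING, sleeping roughly 15µs between attempts and giving up after a fixed retry budget. A hedged userspace sketch of that bounded-poll shape (read_status() and the status codes are stand-ins, not the driver's I2C helpers):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    enum { STATUS_PENDING = 0, STATUS_SUCCESS = 1, STATUS_ERROR = 2 };

    /* stand-in for the I2C status read; flips to SUCCESS after a few polls */
    static bool read_status(int *status)
    {
            static int calls;
            *status = (++calls < 3) ? STATUS_PENDING : STATUS_SUCCESS;
            return true;                  /* false would mean the bus read itself failed */
    }

    static bool wait_for_response(int retries)
    {
            int status;

            if (!read_status(&status))
                    return false;

            while (status == STATUS_PENDING && retries--) {
                    usleep(15);           /* spec: commands complete within ~15us */
                    if (!read_status(&status))
                            return false;
            }
            return status == STATUS_SUCCESS;
    }

    int main(void)
    {
            printf("response %s\n", wait_for_response(5) ? "ok" : "failed");
            return 0;
    }
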
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
u8 ddc_bus)
{
+ /* This must be the immediately preceding write before the i2c xfer */
return intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&ddc_bus, 1);
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
}
static bool
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
intel_dip_infoframe_csum(&avi_if);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
for (i = 0; i < sizeof(avi_if); i += 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
data, 8))
return false;
data++;
}
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
&tx_rate, 1);
}
@@ -1024,9 +1025,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->has_hdmi_monitor &&
- !intel_sdvo_set_avi_infoframe(intel_sdvo))
- return;
+ if (intel_sdvo->has_hdmi_monitor) {
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ intel_sdvo_set_avi_infoframe(intel_sdvo);
+ } else
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
if (intel_sdvo->is_tv &&
!intel_sdvo_set_tv_format(intel_sdvo))
@@ -1045,7 +1050,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
- sdvox = SDVO_BORDER_ENABLE;
+ sdvox = 0;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1082,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1352,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
}
- }
+ } else
+ status = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1395,12 +1404,30 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->attached_output = response;
+ intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
- else if (response & SDVO_TMDS_MASK)
+ else if (IS_TMDS(intel_sdvo_connector))
ret = intel_sdvo_hdmi_sink_detect(connector);
- else
- ret = connector_status_connected;
+ else {
+ struct edid *edid;
+
+ /* if we have an EDID, check that it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
/* May update encoder flags, e.g. the clock, for SDVO TV, etc. */
if (ret == connector_status_connected) {
@@ -1436,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
+
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1658,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
kfree(connector);
}
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!intel_sdvo->is_hdmi)
+ return false;
+
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
static int
intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1674,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
return ret;
if (property == intel_sdvo_connector->force_audio_property) {
- if (val == intel_sdvo_connector->force_audio)
+ int i = val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
return 0;
- intel_sdvo_connector->force_audio = val;
+ intel_sdvo_connector->force_audio = i;
- if (val > 0 && intel_sdvo->has_hdmi_audio)
- return 0;
- if (val < 0 && !intel_sdvo->has_hdmi_audio)
+ if (i == 0)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_hdmi_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = has_audio;
goto done;
}
@@ -1919,20 +1973,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
static bool
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
- int is_hdmi;
-
- if (!intel_sdvo_check_supp_encode(intel_sdvo))
- return false;
-
- if (!intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
- return false;
-
- is_hdmi = 0;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
- return false;
-
- return !!is_hdmi;
+ return intel_sdvo_check_supp_encode(intel_sdvo);
}
static u8
@@ -2034,12 +2075,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_sdvo,
- SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f7681989316..fe4a53a50b83 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+ struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
@@ -1245,10 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
return type;
}
@@ -1354,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(intel_tv, connector);
} else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
@@ -1362,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(intel_tv, connector);
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
@@ -1656,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
+ /* The documentation, for the older chipsets at least, recommends
+ * using a polling method rather than hotplug detection for TVs.
+ * This is because, in order to perform hotplug detection, the PLLs
+ * for the TV must be kept alive, increasing power drain and starving
+ * bandwidth from other encoders. For instance, it causes
+ * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ connector->polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9ca06c..de70959b9ed5 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,9 +8,9 @@ config DRM_NOUVEAU
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB
- select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d6..e12c97fd8db8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
ccflags-y := -Iinclude/drm
nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+ nouveau_mm.o nouveau_vm.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv84_crypt.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o nv50_fbcon.o \
+ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o \
+ nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
- nv04_pm.o nv50_pm.o nva3_pm.o
+ nv04_pm.o nv50_pm.o nva3_pm.o \
+ nv50_vram.o nvc0_vram.o \
+ nv50_vm.o nvc0_vm.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 119152606e4c..a54238058dc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void)
static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
- if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ /* easy option one - an Intel vendor ID means integrated */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
+
+ /* is this device on Bus 0? - this may need improving */
+ if (pdev->bus->number == 0)
+ return VGA_SWITCHEROO_IGD;
+
+ return VGA_SWITCHEROO_DIS;
}
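
Editor's note: the new nouveau_dsm_get_client_id() above classifies a PCI device with two cheap heuristics instead of comparing ACPI handles — an Intel vendor ID, or a device sitting on bus 0, is treated as the integrated GPU; everything else is assumed discrete. A compact sketch of that decision (types and constants simplified, not the kernel's):

    #include <stdio.h>

    #define PCI_VENDOR_ID_INTEL 0x8086

    enum client { CLIENT_IGD, CLIENT_DIS };

    struct pci_dev_info { unsigned short vendor; int bus_number; };

    static enum client classify(const struct pci_dev_info *pdev)
    {
            if (pdev->vendor == PCI_VENDOR_ID_INTEL)  /* Intel vendor ID => integrated */
                    return CLIENT_IGD;
            if (pdev->bus_number == 0)                /* bus 0 is usually the IGD too */
                    return CLIENT_IGD;
            return CLIENT_DIS;                        /* otherwise assume discrete */
    }

    int main(void)
    {
            struct pci_dev_info nv = { .vendor = 0x10de, .bus_number = 1 };
            printf("%s\n", classify(&nv) == CLIENT_DIS ? "discrete" : "integrated");
            return 0;
    }
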
static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index b14c81110575..d3a9c6e02477 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -59,7 +59,7 @@ static int nv40_set_intensity(struct backlight_device *bd)
return 0;
}
-static struct backlight_ops nv40_bl_ops = {
+static const struct backlight_ops nv40_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = nv40_get_intensity,
.update_status = nv40_set_intensity,
@@ -82,7 +82,7 @@ static int nv50_set_intensity(struct backlight_device *bd)
return 0;
}
-static struct backlight_ops nv50_bl_ops = {
+static const struct backlight_ops nv50_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = nv50_get_intensity,
.update_status = nv50_set_intensity,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f278..6bdab891c64e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1927,7 +1927,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* offset (8 bit): opcode
* offset + 1 (16 bit): time
*
- * Sleep for "time" miliseconds.
+ * Sleep for "time" milliseconds.
*/
unsigned time = ROM16(bios->data[offset + 1]);
@@ -1935,7 +1935,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!iexec->execute)
return 3;
- BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+ BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
msleep(time);
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
return entry;
}
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+ int heads, int or)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
- entry->type = 0;
+ entry->type = type;
entry->i2c_index = i2c;
entry->heads = heads;
- entry->location = DCB_LOC_ON_CHIP;
- entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 2;
- entry->i2c_index = LEGACY_I2C_PANEL;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
- entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
- entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
- /*
- * For dvi-a either crtc probably works, but my card appears to only
- * support dvi-d. "nvidia" still attempts to program it for dvi-a,
- * doing the full fp output setup (program 0x6808.. fp dimension regs,
- * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
- * the monitor picks up the mode res ok and lights up, but no pixel
- * data appears, so the board manufacturer probably connected up the
- * sync lines, but missed the video traces / components
- *
- * with this introduction, dvi-a left as an exercise for the reader.
- */
- fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 1;
- entry->i2c_index = LEGACY_I2C_TV;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ if (type != OUTPUT_ANALOG)
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ entry->or = or;
}
static bool
@@ -6263,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tvconf.has_component_output = false;
break;
case OUTPUT_LVDS:
- if ((conn & 0x00003f00) != 0x10)
+ if ((conn & 0x00003f00) >> 8 != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
entry->lvdsconf.use_power_scripts = true;
break;
@@ -6345,6 +6310,9 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
static bool
apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+
/* Dell Precision M6300
* DCB entry 2: 02025312 00000010
* DCB entry 3: 02026312 00000020
@@ -6362,11 +6330,51 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
return false;
}
+ /* GeForce3 Ti 200
+ *
+ * DCB reports an LVDS output that should be TMDS:
+ * DCB entry 1: f2005014 ffffffff
+ */
+ if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+ if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+ return false;
+ }
+ }
+
return true;
}
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+ /* Apple iMac G4 NV17 */
+ if (of_machine_is_compatible("PowerMac4,5")) {
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+ return;
+ }
+#endif
+
+ /* Make up some sane defaults */
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ all_heads, 0);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ all_heads, 1);
+}
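
Editor's note: fabricate_dcb_encoder_table() above folds the old per-type fabricate_*_output() helpers into one parameterised call plus a small default policy — always add a CRT, add a TV output if one is detected, otherwise add a TMDS output when the BIOS carries TMDS scripts. A standalone sketch of the same policy (the detection predicates and indices are stubs, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    enum output_type { OUTPUT_ANALOG, OUTPUT_TV, OUTPUT_TMDS };

    struct dcb_entry { enum output_type type; int i2c; int heads; int or_mask; };

    static struct dcb_entry table[16];
    static int table_len;

    static void fabricate_output(enum output_type type, int i2c, int heads, int or_mask)
    {
            table[table_len++] = (struct dcb_entry){ type, i2c, heads, or_mask };
    }

    /* stand-ins for nv04_tv_identify() and the TMDS script pointers */
    static bool tv_present(void)        { return false; }
    static bool have_tmds_scripts(void) { return true; }

    static void fabricate_defaults(bool two_heads)
    {
            int all_heads = two_heads ? 3 : 1;

            fabricate_output(OUTPUT_ANALOG, /*i2c=*/0, 1, 1);     /* always assume a CRT */
            if (tv_present())
                    fabricate_output(OUTPUT_TV, 1, all_heads, 0);
            else if (have_tmds_scripts())
                    fabricate_output(OUTPUT_TMDS, 2, all_heads, 1);
    }

    int main(void)
    {
            fabricate_defaults(true);
            printf("fabricated %d DCB entries\n", table_len);
            return 0;
    }
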
+
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6394,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
/* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_INFO(dev, "Assuming a CRT output exists\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6451,21 +6454,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
*/
NV_TRACEWARN(dev, "No useful information in BIOS output table; "
"adding all possible outputs\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- /*
- * Attempt to detect TV before DVI because the test
- * for the former is more accurate and it rules the
- * latter out.
- */
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
- else if (bios->tmds.output0_script_ptr ||
- bios->tmds.output1_script_ptr)
- fabricate_dvi_i_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6859,7 +6848,7 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+ ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef5..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -46,82 +48,54 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
- if (nvbo->tile)
- nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+ nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
static void
-nouveau_bo_fixup_align(struct drm_device *dev,
- uint32_t tile_mode, uint32_t tile_flags,
- int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+ int *page_shift)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Align the size to the
- * appropriate boundaries. This does imply that sizes are rounded up
- * 3-7 pages, so be aware of this and do not waste memory by allocating
- * many small buffers.
- */
- if (dev_priv->card_type == NV_50) {
- uint32_t block_size = dev_priv->vram_size >> 15;
- int i;
-
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (is_power_of_2(block_size)) {
- for (i = 1; i < 10; i++) {
- *align = 12 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- } else {
- for (i = 1; i < 10; i++) {
- *align = 8 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- }
- *size = roundup(*size, *align);
- break;
- default:
- break;
- }
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- } else {
- if (tile_mode) {
+ if (dev_priv->card_type < NV_50) {
+ if (nvbo->tile_mode) {
if (dev_priv->chipset >= 0x40) {
*align = 65536;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x30) {
*align = 32768;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x20) {
*align = 16384;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x10) {
*align = 16384;
- *size = roundup(*size, 32 * tile_mode);
+ *size = roundup(*size, 32 * nvbo->tile_mode);
}
}
+ } else {
+ if (likely(dev_priv->chan_vm)) {
+ if (*size > 256 * 1024)
+ *page_shift = dev_priv->chan_vm->lpg_shift;
+ else
+ *page_shift = dev_priv->chan_vm->spg_shift;
+ } else {
+ *page_shift = 12;
+ }
+
+ *size = roundup(*size, (1 << *page_shift));
+ *align = max((1 << *page_shift), *align);
}
- /* ALIGN works only on powers of two. */
*size = roundup(*size, PAGE_SIZE);
-
- if (dev_priv->card_type == NV_50) {
- *size = roundup(*size, 65536);
- *align = max(65536, *align);
- }
}
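
Editor's note: the rewritten nouveau_bo_fixup_align() above picks a page shift from the channel VM (large pages for buffers over 256 KiB, small pages otherwise, plain 4 KiB when no VM exists) and then rounds both size and alignment up to that granularity. A small arithmetic sketch of that step (the shift values below are chosen purely for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long roundup_pow2(unsigned long v, unsigned long align)
    {
            return (v + align - 1) & ~(align - 1);   /* align must be a power of two */
    }

    static void fixup_align(unsigned long *size, unsigned long *align,
                            int has_vm, int lpg_shift, int spg_shift)
    {
            int page_shift = PAGE_SHIFT;             /* no VM: plain 4 KiB pages */

            if (has_vm)
                    page_shift = (*size > 256 * 1024) ? lpg_shift : spg_shift;

            *size = roundup_pow2(*size, 1UL << page_shift);
            if (*align < (1UL << page_shift))
                    *align = 1UL << page_shift;
            *size = roundup_pow2(*size, 1UL << PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long size = 300 * 1024, align = 4096;

            fixup_align(&size, &align, 1, /*lpg_shift=*/16, /*spg_shift=*/12);
            printf("size=%lu align=%lu\n", size, align);   /* 320 KiB, 64 KiB */
            return 0;
    }
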
int
@@ -132,7 +106,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret = 0;
+ int ret = 0, page_shift = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -145,10 +119,19 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
- &align, &size);
+ nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
+ if (!nvbo->no_vm && dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+ NV_MEM_ACCESS_RW, &nvbo->vma);
+ if (ret) {
+ kfree(nvbo);
+ return ret;
+ }
+ }
+
+ nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -161,6 +144,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
*pnvbo = nvbo;
return 0;
}
@@ -182,17 +170,17 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
if (dev_priv->card_type == NV_10 &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->bo.mem.num_pages < vram_pages / 2) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
nvbo->placement.fpfn = vram_pages / 2;
nvbo->placement.lpfn = ~0;
@@ -244,7 +232,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -280,7 +268,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -319,6 +307,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
ttm_bo_kunmap(&nvbo->kmap);
}
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
+{
+ int ret;
+
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+ no_wait_reserve, no_wait_gpu);
+ if (ret)
+ return ret;
+
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
+ return 0;
+}
+
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
@@ -410,37 +417,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50) {
+ man->func = &nouveau_vram_manager;
+ man->io_reserve_fastpath = false;
+ man->use_io_reserve_lru = true;
+ } else {
+ man->func = &ttm_bo_manager_func;
+ }
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- if (dev_priv->card_type == NV_50)
- man->gpu_offset = 0x40000000;
- else
- man->gpu_offset = 0;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
break;
case NOUVEAU_GART_SGDMA:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
+ man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
dev_priv->gart_info.type);
return -EINVAL;
}
- man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +495,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- if (nvbo->channel) {
- ret = nouveau_fence_sync(fence, nvbo->channel);
- if (ret)
- goto out;
- }
-
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
no_wait_reserve, no_wait_gpu, new_mem);
-out:
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return ret;
}
@@ -516,6 +519,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
}
static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 src_offset = old_mem->start << PAGE_SHIFT;
+ u64 dst_offset = new_mem->start << PAGE_SHIFT;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ if (!nvbo->no_vm) {
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ src_offset = nvbo->vma.offset;
+ else
+ src_offset += dev_priv->gart_info.aper_base;
+
+ if (new_mem->mem_type == TTM_PL_VRAM)
+ dst_offset = nvbo->vma.offset;
+ else
+ dst_offset += dev_priv->gart_info.aper_base;
+ }
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* line_length */
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ OUT_RING (chan, 0x00100110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
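
Editor's note: nvc0_bo_move_m2mf() above splits the blit into batches of at most 2047 page-lines, advancing both offsets per batch until the whole buffer is covered. The chunking pattern itself is generic; a hedged sketch with the ring submission replaced by a printout:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define MAX_LINES 2047u

    /* stand-in for the RING_SPACE/OUT_RING submission of one copy batch */
    static void submit_copy(uint64_t src, uint64_t dst, unsigned lines)
    {
            printf("copy %u pages: src=0x%llx dst=0x%llx\n",
                   lines, (unsigned long long)src, (unsigned long long)dst);
    }

    static void chunked_copy(uint64_t src, uint64_t dst, unsigned page_count)
    {
            while (page_count) {
                    unsigned lines = page_count > MAX_LINES ? MAX_LINES : page_count;

                    submit_copy(src, dst, lines);
                    page_count -= lines;
                    src += (uint64_t)PAGE_SIZE * lines;   /* advance both offsets */
                    dst += (uint64_t)PAGE_SIZE * lines;
            }
    }

    int main(void)
    {
            chunked_copy(0x100000, 0x800000, 5000);       /* 3 batches: 2047+2047+906 */
            return 0;
    }
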
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -529,14 +584,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
dst_offset = new_mem->start << PAGE_SHIFT;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset += dev_priv->vm_vram_base;
+ src_offset = nvbo->vma.offset;
else
- src_offset += dev_priv->vm_gart_base;
+ src_offset += dev_priv->gart_info.aper_base;
if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset += dev_priv->vm_vram_base;
+ dst_offset = nvbo->vma.offset;
else
- dst_offset += dev_priv->vm_gart_base;
+ dst_offset += dev_priv->gart_info.aper_base;
}
ret = RING_SPACE(chan, 3);
@@ -683,17 +738,27 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm)
+ if (!chan || nvbo->no_vm) {
chan = dev_priv->channel;
+ mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+ }
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
+ if (dev_priv->card_type < NV_C0)
ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- if (ret)
- return ret;
+ else
+ ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+ no_wait_reserve,
+ no_wait_gpu, new_mem);
+ }
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ if (chan == dev_priv->channel)
+ mutex_unlock(&chan->mutex);
+ return ret;
}
static int
@@ -724,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -750,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -771,7 +836,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
- int ret;
if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */
@@ -781,18 +845,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->card_type == NV_50) {
- ret = nv50_mem_vm_bind_linear(dev,
- offset + dev_priv->vm_vram_base,
- new_mem->size,
- nouveau_bo_tile_layout(nvbo),
- offset);
- if (ret)
- return ret;
-
+ if (dev_priv->chan_vm) {
+ nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
- nvbo->tile_mode);
+ nvbo->tile_mode,
+ nvbo->tile_flags);
}
return 0;
@@ -808,9 +866,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
if (dev_priv->card_type >= NV_10 &&
dev_priv->card_type < NV_50) {
- if (*old_tile)
- nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
}
@@ -879,6 +935,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
+ int ret;
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -901,9 +958,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ {
+ struct nouveau_vram *vram = mem->mm_node;
+ u8 page_shift;
+
+ if (!dev_priv->bar1_vm) {
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(dev->pdev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ }
+
+ if (dev_priv->card_type == NV_C0)
+ page_shift = vram->page_shift;
+ else
+ page_shift = 12;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+ page_shift, NV_MEM_ACCESS_RW,
+ &vram->bar_vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(&vram->bar_vma, vram);
+ if (ret) {
+ nouveau_vm_put(&vram->bar_vma);
+ return ret;
+ }
+
+ mem->bus.offset = vram->bar_vma.offset;
+ if (dev_priv->card_type == NV_50) /*XXX*/
+ mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
+ }
break;
default:
return -EINVAL;
@@ -914,6 +1002,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct nouveau_vram *vram = mem->mm_node;
+
+ if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+ return;
+
+ if (!vram->bar_vma.node)
+ return;
+
+ nouveau_vm_unmap(&vram->bar_vma);
+ nouveau_vm_put(&vram->bar_vma);
}
static int
@@ -939,7 +1038,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
- return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+ return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
}
struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1064,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = nouveau_fence_signalled,
- .sync_obj_wait = nouveau_fence_wait,
- .sync_obj_flush = nouveau_fence_flush,
- .sync_obj_unref = nouveau_fence_unref,
- .sync_obj_ref = nouveau_fence_ref,
+ .sync_obj_signaled = __nouveau_fence_signalled,
+ .sync_obj_wait = __nouveau_fence_wait,
+ .sync_obj_flush = __nouveau_fence_flush,
+ .sync_obj_unref = __nouveau_fence_unref,
+ .sync_obj_ref = __nouveau_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e34814..3960d66d7aba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
int ret;
if (dev_priv->card_type >= NV_50) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
- dev_priv->vm_end, NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_AGP, &pushbuf);
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_dma_new(chan,
+ NV_CLASS_DMA_IN_MEMORY, 0,
+ (1ULL << 40),
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VM,
+ &pushbuf);
+ }
chan->pushbuf_base = pb->bo.offset;
} else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RO, &pushbuf,
- NULL);
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_GART, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_VIDMEM, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VRAM, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from the original DDX... not sure of its
@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- pci_resource_start(dev->pdev,
- 1),
+ pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_PCI, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_PCI, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}
nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &pushbuf);
- return 0;
+ return ret;
}
static struct nouveau_bo *
@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
return NULL;
}
+ ret = nouveau_bo_map(pushbuf);
+ if (ret) {
+ nouveau_bo_unpin(pushbuf);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
return pushbuf;
}
@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
- uint32_t vram_handle, uint32_t tt_handle)
+ uint32_t vram_handle, uint32_t gart_handle)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
- int channel, user;
+ unsigned long flags;
int ret;
- /*
- * Alright, here is the full story
- * Nvidia cards have multiple hw fifo contexts (praise them for that,
- * no complicated crash-prone context switches)
- * We allocate a new context for each app and let it write to it
- * directly (woo, full userspace command submission !)
- * When there are no more contexts, you lost
- */
- for (channel = 0; channel < pfifo->channels; channel++) {
- if (dev_priv->fifos[channel] == NULL)
+ /* allocate and lock channel structure */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+ chan->file_priv = file_priv;
+ chan->vram_handle = vram_handle;
+ chan->gart_handle = gart_handle;
+
+ kref_init(&chan->ref);
+ atomic_set(&chan->users, 1);
+ mutex_init(&chan->mutex);
+ mutex_lock(&chan->mutex);
+
+ /* allocate hw channel id */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+ if (!dev_priv->channels.ptr[chan->id]) {
+ nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
+ }
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* no more fifos. you lost. */
- if (channel == pfifo->channels)
- return -EINVAL;
+ if (chan->id == pfifo->channels) {
+ mutex_unlock(&chan->mutex);
+ kfree(chan);
+ return -ENODEV;
+ }
- dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
- GFP_KERNEL);
- if (!dev_priv->fifos[channel])
- return -ENOMEM;
- chan = dev_priv->fifos[channel];
+ NV_DEBUG(dev, "initialising channel %d\n", chan->id);
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
- chan->dev = dev;
- chan->id = channel;
- chan->file_priv = file_priv;
- chan->vram_handle = vram_handle;
- chan->gart_handle = tt_handle;
-
- NV_INFO(dev, "Allocating FIFO number %d\n", channel);
/* Allocate DMA push buffer */
chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
if (!chan->pushbuf_bo) {
ret = -ENOMEM;
NV_ERROR(dev, "pushbuf %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_dma_pre_init(chan);
-
- /* Locate channel's user control regs */
- if (dev_priv->card_type < NV_40)
- user = NV03_USER(channel);
- else
- if (dev_priv->card_type < NV_50)
- user = NV40_USER(channel);
- else
- user = NV50_USER(channel);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
- PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "ioremap of regs failed.\n");
- nouveau_channel_free(chan);
- return -ENOMEM;
- }
chan->user_put = 0x40;
chan->user_get = 0x44;
@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_notifier_init_channel(chan);
if (ret) {
NV_ERROR(dev, "ntfy %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* Setup channel's default objects */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
if (ret) {
NV_ERROR(dev, "gpuobj %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_channel_pushbuf_ctxdma_init(chan);
if (ret) {
NV_ERROR(dev, "pbctxdma %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* disable the fifo caches */
pfifo->reassign(dev, false);
- /* Create a graphics context for new channel */
- ret = pgraph->create_context(chan);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
-
/* Construct initial RAMFC for new channel */
ret = pfifo->create_context(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
if (!ret)
ret = nouveau_fence_channel_init(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_debugfs_channel_init(chan);
- NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+ NV_DEBUG(dev, "channel %d initialised\n", chan->id);
*chan_ret = chan;
return 0;
}
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
+{
+ struct nouveau_channel *chan = NULL;
+
+ if (likely(ref && atomic_inc_not_zero(&ref->users)))
+ nouveau_channel_ref(ref, &chan);
+
+ return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+
+ if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+ if (unlikely(!chan))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(file_priv && chan->file_priv != file_priv)) {
+ nouveau_channel_put_unlocked(&chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&chan->mutex);
+ return chan;
+}
+
void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
+ struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
unsigned long flags;
- int ret;
- NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+ /* decrement the refcount, and we're done if there's still refs */
+ if (likely(!atomic_dec_and_test(&chan->users))) {
+ nouveau_channel_ref(NULL, pchan);
+ return;
+ }
+ /* no one wants the channel anymore */
+ NV_DEBUG(dev, "freeing channel %d\n", chan->id);
nouveau_debugfs_channel_fini(chan);
- /* Give outstanding push buffers a chance to complete */
- nouveau_fence_update(chan);
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- struct nouveau_fence *fence = NULL;
+ /* give it chance to idle */
+ nouveau_channel_idle(chan);
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
- }
-
- /* Ensure all outstanding fences are signaled. They should be if the
+ /* ensure all outstanding fences are signaled. they should be if the
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
nouveau_fence_channel_fini(chan);
- /* This will prevent pfifo from switching channels. */
+ /* boot it off the hardware */
pfifo->reassign(dev, false);
- /* We want to give pgraph a chance to idle and get rid of all potential
- * errors. We need to do this before the lock, otherwise the irq handler
- * is unable to process them.
+ /* We want to give pgraph a chance to idle and get rid of all
+ * potential errors. We need to do this without the context
+ * switch lock held, otherwise the irq handler is unable to
+ * process them.
*/
if (pgraph->channel(dev) == chan)
nouveau_wait_for_idle(dev);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- pgraph->fifo_access(dev, false);
- if (pgraph->channel(dev) == chan)
- pgraph->unload_context(dev);
- pgraph->destroy_context(chan);
- pgraph->fifo_access(dev, true);
-
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
+ /* destroy the engine specific contexts */
pfifo->destroy_context(chan);
+ pgraph->destroy_context(chan);
+ if (pcrypt->destroy_context)
+ pcrypt->destroy_context(chan);
pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ /* aside from its resources, the channel should now be dead,
+ * remove it from the channel list
+ */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* Release the channel's resources */
+ /* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
}
nouveau_gpuobj_channel_takedown(chan);
nouveau_notifier_takedown_channel(chan);
- if (chan->user)
- iounmap(chan->user);
- dev_priv->fifos[chan->id] = NULL;
+ nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+ mutex_unlock(&(*pchan)->mutex);
+ nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+ struct nouveau_channel *chan =
+ container_of(ref, struct nouveau_channel, ref);
+
kfree(chan);
}
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan)
+{
+ if (chan)
+ kref_get(&chan->ref);
+
+ if (*pchan)
+ kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+ *pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ nouveau_fence_update(chan);
+
+ if (chan->fence.sequence != chan->fence.sequence_ack) {
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
+ }
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ }
+}
+
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_channel *chan;
int i;
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ chan = nouveau_channel_get(dev, file_priv, i);
+ if (IS_ERR(chan))
+ continue;
- if (chan && chan->file_priv == file_priv)
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
}
}
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
- int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
-
- if (channel >= engine->fifo.channels)
- return 0;
- if (dev_priv->fifos[channel] == NULL)
- return 0;
-
- return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
/***********************************
* ioctls wrapping the functions
@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- init->subchan[0].handle = NvM2MF;
- if (dev_priv->card_type < NV_50)
- init->subchan[0].grclass = 0x0039;
- else
- init->subchan[0].grclass = 0x5039;
- init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
- init->nr_subchan = 2;
+ if (dev_priv->card_type < NV_C0) {
+ init->subchan[0].handle = NvM2MF;
+ if (dev_priv->card_type < NV_50)
+ init->subchan[0].grclass = 0x0039;
+ else
+ init->subchan[0].grclass = 0x5039;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
+ } else {
+ init->subchan[0].handle = 0x9039;
+ init->subchan[0].grclass = 0x9039;
+ init->nr_subchan = 1;
+ }
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
&init->notifier_handle);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
- return 0;
+ if (ret == 0)
+ atomic_inc(&chan->users); /* userspace reference */
+ nouveau_channel_put(&chan);
+ return ret;
}
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_channel_free *cfree = data;
+ struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
return 0;
}
@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
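The nouveau_channel_get()/nouveau_channel_put() pair above replaces the old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro and the direct nouveau_channel_free() calls. A minimal caller sketch, built only from the API as declared in this patch (the function itself is illustrative and not part of the series):

static int example_touch_channel(struct drm_device *dev,
				 struct drm_file *file_priv, int id)
{
	struct nouveau_channel *chan;

	/* validates ownership, takes a reference and chan->mutex */
	chan = nouveau_channel_get(dev, file_priv, id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... work that needs the channel goes here ... */

	/* drops chan->mutex and the reference, NULLs the pointer */
	nouveau_channel_put(&chan);
	return 0;
}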
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e9a3d1..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"
+static void nouveau_connector_hotplug(void *, int);
+
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
}
static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
{
- struct nouveau_connector *nv_connector =
- nouveau_connector(drm_connector);
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
return;
dev = nv_connector->base.dev;
+ dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
+ pgpio = &dev_priv->engine.gpio;
+ if (pgpio->irq_unregister) {
+ pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
kfree(nv_connector->edid);
- drm_sysfs_connector_remove(drm_connector);
- drm_connector_cleanup(drm_connector);
- kfree(drm_connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
}
static struct nouveau_i2c_chan *
@@ -497,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
@@ -760,6 +771,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_connector *nv_connector = NULL;
struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
@@ -876,6 +888,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
+ if (pgpio->irq_register) {
+ pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;
@@ -886,3 +903,29 @@ fail:
return ERR_PTR(ret);
}
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+ struct drm_connector *connector = data;
+ struct drm_device *dev = connector->dev;
+
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (connector->encoder && connector->encoder->crtc &&
+ connector->encoder->crtc->enabled) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+ struct drm_encoder_helper_funcs *helper =
+ connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+ }
+
+ drm_helper_hpd_irq_event(dev);
+}
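The new irq_register()/irq_unregister() hooks take a callback of the form void (*)(void *data, int plugged) plus an opaque pointer, as used for the connector above. A hedged sketch of another consumer of the same contract (the handler and function names below are illustrative only):

static void example_hpd_notify(void *data, int plugged)
{
	struct drm_connector *connector = data;

	DRM_INFO("%s is now %splugged\n",
		 drm_get_connector_name(connector), plugged ? "" : "un");
}

/* registration mirrors nouveau_connector_create() above */
static int example_hpd_setup(struct drm_device *dev,
			     struct nouveau_connector *nv_connector)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	if (!pgpio->irq_register)
		return 0;	/* chipset without GPIO IRQ support */

	return pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
				   example_hpd_notify, &nv_connector->base);
}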
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd65b4dd..505c6bfb4d75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+ NV_PCRTC_INTR_0_VBLANK);
+
+ return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo)
+{
+ int ret;
+
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_unreserve;
+
+ return 0;
+
+fail_unreserve:
+ ttm_bo_unreserve(&new_bo->bo);
+fail:
+ nouveau_bo_unpin(new_bo);
+ return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_fence *fence)
+{
+ nouveau_bo_fence(new_bo, fence);
+ ttm_bo_unreserve(&new_bo->bo);
+
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+
+ nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+ struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_page_flip_state *s,
+ struct nouveau_fence **pfence)
+{
+ struct drm_device *dev = chan->dev;
+ unsigned long flags;
+ int ret;
+
+ /* Queue it to the pending list */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&s->head, &chan->nvsw.flip);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* Synchronize with the old framebuffer */
+ ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+ if (ret)
+ goto fail;
+
+ /* Emit the pageflip */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ goto fail;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING(chan, 0);
+ FIRE_RING(chan);
+
+ ret = nouveau_fence_new(chan, pfence, true);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_del(&s->head);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+ struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct nouveau_page_flip_state *s;
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
+ int ret;
+
+ if (dev_priv->engine.graph.accel_blocked)
+ return -ENODEV;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ /* Don't let the buffers go away while we flip */
+ ret = nouveau_page_flip_reserve(old_bo, new_bo);
+ if (ret)
+ goto fail_free;
+
+ /* Initialize a page flip struct */
+ *s = (struct nouveau_page_flip_state)
+ { { }, event, nouveau_crtc(crtc)->index,
+ fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ new_bo->bo.offset };
+
+ /* Choose the channel the flip will be handled in */
+ chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ if (!chan)
+ chan = nouveau_channel_get_unlocked(dev_priv->channel);
+ mutex_lock(&chan->mutex);
+
+ /* Emit a page flip */
+ ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+ nouveau_channel_put(&chan);
+ if (ret)
+ goto fail_unreserve;
+
+ /* Update the crtc struct and cleanup */
+ crtc->fb = fb;
+
+ nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+ nouveau_fence_unref(&fence);
+ return 0;
+
+fail_unreserve:
+ nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+ kfree(s);
+ return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+ struct nouveau_page_flip_state *ps)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state *s;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (list_empty(&chan->nvsw.flip)) {
+ NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return -EINVAL;
+ }
+
+ s = list_first_entry(&chan->nvsw.flip,
+ struct nouveau_page_flip_state, head);
+ if (s->event) {
+ struct drm_pending_vblank_event *e = s->event;
+ struct timeval now;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ list_del(&s->head);
+ *ps = *s;
+ kfree(s);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return 0;
+}
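nouveau_crtc_page_flip() queues a nouveau_page_flip_state and emits an NV_SW_PAGE_FLIP method; nouveau_finish_page_flip() is meant to run from the software-method/interrupt path to complete the pending event. How the entry point is wired into a CRTC is not shown in this hunk; a hedged sketch of what the funcs table could look like (names are illustrative, the real tables live in the per-generation CRTC code):

static const struct drm_crtc_funcs example_crtc_funcs = {
	.set_config	= drm_crtc_helper_set_config,
	.page_flip	= nouveau_crtc_page_flip,
	/* .cursor_set, .cursor_move, .gamma_set, .destroy as before */
};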
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e600dcd..b368ed74aad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_bo *pushbuf = chan->pushbuf_bo;
- if (dev_priv->card_type == NV_50) {
+ if (dev_priv->card_type >= NV_50) {
const int ib_size = pushbuf->bo.mem.size / 2;
chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,27 +59,32 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
int ret, i;
- /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
- ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
- 0x0039 : 0x5039, &obj);
- if (ret)
- return ret;
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+ if (ret)
+ return ret;
- ret = nouveau_ramht_insert(chan, NvM2MF, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
- /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+ OUT_RING (chan, 0x00009039);
+ FIRE_RING (chan);
+ return 0;
+ }
+
+ /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+ 0x0039 : 0x5039);
if (ret)
return ret;
- /* Map push buffer */
- ret = nouveau_bo_map(chan->pushbuf_bo);
+ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ &chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21d3c8d..c36f1763feaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@ enum {
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
- NvEvoFB32 = 0x01000002
+ NvEvoFB32 = 0x01000002,
+ NvEvoVRAM_LP = 0x01000003
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -125,6 +126,12 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+ OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
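BEGIN_NVC0() packs the Fermi push-buffer header differently from the pre-Fermi BEGIN_RING() header directly above. A worked example of the two encodings, reusing the call nouveau_dma_init() makes earlier in this patch (the values in the comments follow directly from the shift expressions):

/* pre-Fermi: (subc << 13) | (size << 18) | mthd
 *   BEGIN_RING(chan, 0, 0x0184, 1)            ->  0x00040184 */

/* Fermi: (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2)
 *   BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1) ->  0x20010000
 * (header type 2 as used by nouveau_dma_init() above; NvSubM2MF is
 *  subchannel 0) */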
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f309ae3d..38d599554bce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
struct bit_displayport_encoder_table *dpe;
int dpe_headerlen;
uint8_t config[4], status[3];
- bool cr_done, cr_max_vs, eq_done;
+ bool cr_done, cr_max_vs, eq_done, hpd_state;
int ret = 0, i, tries, voltage;
NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
/* disable hotplug detect, this flips around on some panels during
* link training.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
if (dpe->script0) {
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ stop:
}
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
return eq_done;
}
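pgpio->irq_enable() now returns the previous enable state, so callers such as the link-training path above can mask hotplug detection temporarily and restore whatever state was in effect rather than forcing it back on. A small sketch of that save/restore idiom (the wrapper function is illustrative):

static void example_with_hpd_masked(struct drm_device *dev,
				    enum dcb_gpio_tag tag,
				    void (*body)(struct drm_device *))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	bool was_on;

	was_on = pgpio->irq_enable(dev, tag, false);	/* mask and remember */
	body(dev);
	pgpio->irq_enable(dev, tag, was_on);		/* restore, don't force on */
}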
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65a..f658a04eecf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
NV_INFO(dev, "Disabling fbcon acceleration...\n");
nouveau_fbcon_save_disable_accel(dev);
@@ -193,23 +200,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
NV_INFO(dev, "Idling channels...\n");
for (i = 0; i < pfifo->channels; i++) {
- struct nouveau_fence *fence = NULL;
-
- chan = dev_priv->fifos[i];
- if (!chan || (dev_priv->card_type >= NV_50 &&
- chan == dev_priv->fifos[0]))
- continue;
-
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
+ chan = dev_priv->channels.ptr[i];
- if (ret) {
- NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
- chan->id);
- }
+ if (chan && chan->pushbuf_bo)
+ nouveau_channel_idle(chan);
}
pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
pgraph->unload_context(dev);
- NV_INFO(dev, "Suspending GPU objects...\n");
- ret = nouveau_gpuobj_suspend(dev);
+ ret = pinstmem->suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
goto out_abort;
}
- ret = pinstmem->suspend(dev);
+ NV_INFO(dev, "Suspending GPU objects...\n");
+ ret = nouveau_gpuobj_suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
- nouveau_gpuobj_suspend_cleanup(dev);
+ pinstmem->resume(dev);
goto out_abort;
}
@@ -240,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pci_set_power_state(pdev, PCI_D3hot);
}
- acquire_console_sem();
+ console_lock();
nouveau_fbcon_set_suspend(dev, 1);
- release_console_sem();
+ console_unlock();
nouveau_fbcon_restore_accel(dev);
return 0;
@@ -263,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ NV_INFO(dev, "Restoring GPU objects...\n");
+ nouveau_gpuobj_resume(dev);
+
NV_INFO(dev, "Reinitialising engines...\n");
engine->instmem.resume(dev);
engine->mc.init(dev);
engine->timer.init(dev);
engine->fb.init(dev);
engine->graph.init(dev);
+ engine->crypt.init(dev);
engine->fifo.init(dev);
- NV_INFO(dev, "Restoring GPU objects...\n");
- nouveau_gpuobj_resume(dev);
-
nouveau_irq_postinstall(dev);
/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
int j;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- chan = dev_priv->fifos[i];
+ chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -347,13 +345,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
- nv_crtc->cursor.set_offset(nv_crtc,
- nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
-
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
+ nv_crtc->cursor_saved_y);
}
/* Force CLUT to get re-loaded during modeset */
@@ -363,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
nv_crtc->lut.depth = 0;
}
- acquire_console_sem();
+ console_lock();
nouveau_fbcon_set_suspend(dev, 0);
- release_console_sem();
+ console_unlock();
nouveau_fbcon_zfill_all(dev);
@@ -393,6 +389,9 @@ static struct drm_driver driver = {
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = nouveau_vblank_enable,
+ .disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = {
@@ -403,6 +402,7 @@ static struct drm_driver driver = {
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64c03bf..982d70b12722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
+#include "nouveau_util.h"
+
struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+ struct drm_device *dev;
+
+ struct nouveau_vma bar_vma;
+ u8 page_shift;
+
+ struct list_head regions;
+ u32 memtype;
+ u64 offset;
+ u64 size;
+};
struct nouveau_tile_reg {
- struct nouveau_fence *fence;
- uint32_t addr;
- uint32_t size;
bool used;
+ uint32_t addr;
+ uint32_t limit;
+ uint32_t pitch;
+ uint32_t zcomp;
+ struct drm_mm_node *tag_mem;
+ struct nouveau_fence *fence;
};
struct nouveau_bo {
@@ -88,6 +103,7 @@ struct nouveau_bo {
struct nouveau_channel *channel;
+ struct nouveau_vma vma;
bool mappable;
bool no_vm;
@@ -96,7 +112,6 @@ struct nouveau_bo {
struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
- struct drm_file *cpu_filp;
int pin_refcnt;
};
@@ -133,20 +148,29 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
-#define NVOBJ_ENGINE_DISPLAY 2
+#define NVOBJ_ENGINE_PPP 2
+#define NVOBJ_ENGINE_COPY 3
+#define NVOBJ_ENGINE_VP 4
+#define NVOBJ_ENGINE_CRYPT 5
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
#define NVOBJ_ENGINE_INT 0xdeadbeef
+#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_VM (1 << 3)
+#define NVOBJ_FLAG_VM_USER (1 << 4)
+
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
struct nouveau_gpuobj {
struct drm_device *dev;
struct kref refcount;
struct list_head list;
- struct drm_mm_node *im_pramin;
- struct nouveau_bo *im_backing;
- uint32_t *im_backing_suspend;
- int im_bound;
+ void *node;
+ u32 *suspend;
uint32_t flags;
@@ -162,10 +186,29 @@ struct nouveau_gpuobj {
void *priv;
};
+struct nouveau_page_flip_state {
+ struct list_head head;
+ struct drm_pending_vblank_event *event;
+ int crtc, bpp, pitch, x, y;
+ uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+ NOUVEAU_UCHANNEL_MUTEX,
+ NOUVEAU_KCHANNEL_MUTEX
+};
+
struct nouveau_channel {
struct drm_device *dev;
int id;
+ /* references to the channel data structure */
+ struct kref ref;
+ /* users of the hardware channel resources, the hardware
+ * context will be kicked off when it reaches zero. */
+ atomic_t users;
+ struct mutex mutex;
+
/* owner of this fifo */
struct drm_file *file_priv;
/* mapping of the fifo itself */
@@ -198,16 +241,17 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;
+ void *fifo_priv;
/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
struct nouveau_gpuobj *ramin_grctx;
+ struct nouveau_gpuobj *crypt_ctx;
void *pgraph_ctx;
/* NV50 VM */
+ struct nouveau_vm *vm;
struct nouveau_gpuobj *vm_pd;
- struct nouveau_gpuobj *vm_gart_pt;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +282,11 @@ struct nouveau_channel {
struct {
struct nouveau_gpuobj *vblsem;
+ uint32_t vblsem_head;
uint32_t vblsem_offset;
uint32_t vblsem_rval;
struct list_head vbl_wait;
+ struct list_head flip;
} nvsw;
struct {
@@ -258,11 +304,11 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
- void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
- int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
- int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+ void (*put)(struct nouveau_gpuobj *);
+ int (*map)(struct nouveau_gpuobj *);
+ void (*unmap)(struct nouveau_gpuobj *);
+
void (*flush)(struct drm_device *);
};
@@ -279,15 +325,21 @@ struct nouveau_timer_engine {
struct nouveau_fb_engine {
int num_tiles;
+ struct drm_mm tag_heap;
+ void *priv;
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*init_tile_region)(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+ void (*set_tile_region)(struct drm_device *dev, int i);
+ void (*free_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_fifo_engine {
+ void *priv;
int channels;
struct nouveau_gpuobj *playlist[2];
@@ -310,22 +362,11 @@ struct nouveau_fifo_engine {
void (*tlb_flush)(struct drm_device *dev);
};
-struct nouveau_pgraph_object_method {
- int id;
- int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
- uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
- int id;
- bool software;
- struct nouveau_pgraph_object_method *methods;
-};
-
struct nouveau_pgraph_engine {
- struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
+ bool registered;
int grctx_size;
+ void *priv;
/* NV2x/NV3x context table (0x400780) */
struct nouveau_gpuobj *ctx_table;
@@ -342,8 +383,7 @@ struct nouveau_pgraph_engine {
int (*unload_context)(struct drm_device *);
void (*tlb_flush)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*set_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_display_engine {
@@ -355,13 +395,19 @@ struct nouveau_display_engine {
};
struct nouveau_gpio_engine {
+ void *priv;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
int (*get)(struct drm_device *, enum dcb_gpio_tag);
int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
- void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
struct nouveau_pm_voltage_level {
@@ -437,6 +483,7 @@ struct nouveau_pm_engine {
struct nouveau_pm_level *cur;
struct device *hwmon;
+ struct notifier_block acpi_nb;
int (*clock_get)(struct drm_device *, u32 id);
void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +496,25 @@ struct nouveau_pm_engine {
int (*temp_get)(struct drm_device *);
};
+struct nouveau_crypt_engine {
+ bool registered;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+ int (*init)(struct drm_device *);
+ int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **);
+ void (*put)(struct drm_device *, struct nouveau_vram **);
+
+ bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -459,6 +525,8 @@ struct nouveau_engine {
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
+ struct nouveau_crypt_engine crypt;
+ struct nouveau_vram_engine vram;
};
struct nouveau_pll_vals {
@@ -577,18 +645,15 @@ struct drm_nouveau_private {
bool ramin_available;
struct drm_mm ramin_heap;
struct list_head gpuobj_list;
+ struct list_head classes;
struct nouveau_bo *vga_ram;
+ /* interrupt handling */
+ void (*irq_handler[32])(struct drm_device *);
+ bool msi_enabled;
struct workqueue_struct *wq;
struct work_struct irq_work;
- struct work_struct hpd_work;
-
- struct {
- spinlock_t lock;
- uint32_t hpd0_bits;
- uint32_t hpd1_bits;
- } hpd_state;
struct list_head vbl_waiting;
@@ -605,8 +670,10 @@ struct drm_nouveau_private {
struct nouveau_bo *bo;
} fence;
- int fifo_alloc_count;
- struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+ struct {
+ spinlock_t lock;
+ struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+ } channels;
struct nouveau_engine engine;
struct nouveau_channel *channel;
@@ -632,12 +699,14 @@ struct drm_nouveau_private {
uint64_t aper_free;
struct nouveau_gpuobj *sg_ctxdma;
- struct page *sg_dummy_page;
- dma_addr_t sg_dummy_bus;
+ struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
- struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
/* VRAM/fb configuration */
uint64_t vram_size;
@@ -650,14 +719,12 @@ struct drm_nouveau_private {
uint64_t fb_aper_free;
int fb_mtrr;
+ /* BAR control (NV50-) */
+ struct nouveau_vm *bar1_vm;
+ struct nouveau_vm *bar3_vm;
+
/* G8x/G9x virtual address space */
- uint64_t vm_gart_base;
- uint64_t vm_gart_size;
- uint64_t vm_vram_base;
- uint64_t vm_vram_size;
- uint64_t vm_end;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
- int vm_vram_pt_nr;
+ struct nouveau_vm *chan_vm;
struct nvbios vbios;
@@ -674,6 +741,7 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
+ u32 evo_alloc;
struct {
struct dcb_entry *dcb;
u16 script;
@@ -686,6 +754,8 @@ struct drm_nouveau_private {
struct nouveau_fbdev *nfbdev;
struct apertures_struct *apertures;
+
+ bool powered_down;
};
static inline struct drm_nouveau_private *
@@ -719,16 +789,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
- struct drm_nouveau_private *nv = dev->dev_private; \
- if (!nouveau_channel_owner(dev, (cl), (id))) { \
- NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
- DRM_CURRENTPID, (id)); \
- return -EPERM; \
- } \
- (ch) = nv->fifos[(id)]; \
-} while (0)
-
/* nouveau_drv.c */
extern int nouveau_agpmode;
extern int nouveau_duallink;
@@ -748,6 +808,7 @@ extern int nouveau_force_post;
extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +823,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
@@ -775,24 +838,22 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
- uint32_t addr,
- uint32_t size,
- uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
- struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence);
-extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
- uint32_t size, uint32_t flags,
- uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
- uint32_t size);
+extern int nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+ struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t *offset);
+ int cout, uint32_t start, uint32_t end,
+ uint32_t *offset);
extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
struct drm_file *);
@@ -803,21 +864,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
- int channel);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do { \
+ int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
+ if (ret) \
+ return ret; \
+} while(0)
+
+#define NVOBJ_MTHD(d,c,m,e) do { \
+ int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
+ if (ret) \
+ return ret; \
+} while(0)
+
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *,
+ u32 class, u32 mthd, u32 data));
+extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
@@ -832,21 +916,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
- uint64_t offset, uint64_t size,
- int access, struct nouveau_gpuobj **,
- uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+ u64 size, int target, int access, u32 type,
+ u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+ int class, u64 base, u64 size, int target,
+ int access, u32 type, u32 comp);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_irq.c */
+extern int nouveau_irq_init(struct drm_device *);
+extern void nouveau_irq_fini(struct drm_device *);
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_register(struct drm_device *, int status_bit,
+ void (*)(struct drm_device *));
+extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +942,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
- uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+ uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
/* nouveau_debugfs.c */
@@ -966,18 +1054,25 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1084,7 @@ extern void nvc0_fb_takedown(struct drm_device *);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1094,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
extern int nv04_fifo_load_context(struct nouveau_channel *);
extern int nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
/* nv10_fifo.c */
extern int nv10_fifo_init(struct drm_device *);
extern int nv10_fifo_channel_id(struct drm_device *);
extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
extern int nv10_fifo_load_context(struct nouveau_channel *);
extern int nv10_fifo_unload_context(struct drm_device *);
/* nv40_fifo.c */
extern int nv40_fifo_init(struct drm_device *);
extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
extern int nv40_fifo_load_context(struct nouveau_channel *);
extern int nv40_fifo_unload_context(struct drm_device *);
@@ -1038,7 +1133,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
extern void nv04_graph_takedown(struct drm_device *);
extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1141,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
/* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
extern int nv10_graph_init(struct drm_device *);
extern void nv10_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1153,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
/* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
extern int nv20_graph_create_context(struct nouveau_channel *);
extern void nv20_graph_destroy_context(struct nouveau_channel *);
extern int nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1165,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
/* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
extern int nv40_graph_init(struct drm_device *);
extern void nv40_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1176,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
/* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
extern int nv50_graph_init(struct drm_device *);
extern void nv50_graph_takedown(struct drm_device *);
extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1187,10 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1202,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);
+/* nv84_crypt.c */
+extern int nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
extern void nv04_instmem_flush(struct drm_device *);
/* nv50_instmem.c */
@@ -1130,26 +1225,18 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
extern void nv50_instmem_flush(struct drm_device *);
extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
/* nvc0_instmem.c */
extern int nvc0_instmem_init(struct drm_device *);
extern void nvc0_instmem_takedown(struct drm_device *);
extern int nvc0_instmem_suspend(struct drm_device *);
extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);
/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);
@@ -1219,6 +1306,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1234,12 +1324,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+ return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+ __nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_ref(obj);
+}
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1372,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+ struct nouveau_page_flip_state *);
+
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
/* nv50_calc. */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1460,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
}
#define nv_wait(dev, reg, mask, val) \
- nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+ nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+ nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1575,37 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if ((dev_priv->chipset & 0xf0) == 0x60)
+ return 1;
+
+ return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM 8
+
+#define NV_MEM_TARGET_VRAM 0
+#define NV_MEM_TARGET_PCI 1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM 3
+#define NV_MEM_TARGET_GART 4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
@@ -1457,5 +1616,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV_SW_VBLSEM_RELEASE 0x00000408
+#define NV_SW_PAGE_FLIP 0x00000500
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd4845..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_fillrect(info, rect);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_fillrect(info, rect);
+ else
+ ret = nvc0_fbcon_fillrect(info, rect);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_copyarea(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_copyarea(info, image);
+ else
+ ret = nvc0_fbcon_copyarea(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_imageblit(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_imageblit(info, image);
+ else
+ ret = nvc0_fbcon_imageblit(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_imageblit(info, image);
+}
+
static int
nouveau_fbcon_sync(struct fb_info *info)
{
@@ -58,22 +154,36 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
- if (!chan || !chan->accel_done ||
+ if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
- if (RING_SPACE(chan, 4)) {
+ if (!mutex_trylock(&chan->mutex))
+ return 0;
+
+ ret = RING_SPACE(chan, 4);
+ if (ret) {
+ mutex_unlock(&chan->mutex);
nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING(chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING(chan, 0);
+ if (dev_priv->card_type >= NV_C0) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+ OUT_RING (chan, 0);
+ } else {
+ BEGIN_RING(chan, 0, 0x0104, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, 0, 0x0100, 1);
+ OUT_RING (chan, 0);
+ }
+
nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
FIRE_RING(chan);
+ mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
@@ -97,9 +207,9 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+ .fb_copyarea = nouveau_fbcon_copyarea,
+ .fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
@@ -108,29 +218,13 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static struct fb_ops nv04_fbcon_ops = {
+static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv04_fbcon_fillrect,
- .fb_copyarea = nv04_fbcon_copyarea,
- .fb_imageblit = nv04_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv50_fbcon_fillrect,
- .fb_copyarea = nv50_fbcon_copyarea,
- .fb_imageblit = nv50_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,9 +351,9 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
- info->fbops = &nouveau_fbcon_ops;
- info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
- dev_priv->vm_vram_base;
+ info->fbops = &nouveau_fbcon_sw_ops;
+ info->fix.smem_start = nvbo->bo.mem.bus.base +
+ nvbo->bo.mem.bus.offset;
info->fix.smem_len = size;
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -268,10 +362,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(pdev, 1);
- info->fix.mmio_len = pci_resource_len(pdev, 1);
-
/* Set aperture base/size for vesafb takeover */
info->apertures = dev_priv->apertures;
if (!info->apertures) {
@@ -285,19 +375,20 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+ mutex_unlock(&dev->struct_mutex);
+
if (dev_priv->channel && !nouveau_nofbaccel) {
- switch (dev_priv->card_type) {
- case NV_C0:
- break;
- case NV_50:
- nv50_fbcon_accel_init(info);
- info->fbops = &nv50_fbcon_ops;
- break;
- default:
- nv04_fbcon_accel_init(info);
- info->fbops = &nv04_fbcon_ops;
- break;
- };
+ ret = -ENODEV;
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
}
nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +399,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
nouveau_fb->base.height,
nvbo->bo.offset, nvbo);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37e..b73c29f87fc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@ struct nouveau_fbdev {
void nouveau_fbcon_restore(void);
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266e..221b8462ea37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+ nouveau_private(dev)->card_type < NV_C0)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -64,6 +65,7 @@ nouveau_fence_del(struct kref *ref)
struct nouveau_fence *fence =
container_of(ref, struct nouveau_fence, refcount);
+ nouveau_channel_ref(NULL, &fence->channel);
kfree(fence);
}
@@ -76,14 +78,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
spin_lock(&chan->fence.lock);
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
+ /* Fetch the last sequence if the channel is still up and running */
+ if (likely(!list_empty(&chan->fence.pending))) {
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+ sequence = atomic_read(&chan->fence.last_sequence_irq);
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+ chan->fence.sequence_ack = sequence;
+ }
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
@@ -113,13 +118,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
if (!fence)
return -ENOMEM;
kref_init(&fence->refcount);
- fence->channel = chan;
+ nouveau_channel_ref(chan, &fence->channel);
if (emit)
ret = nouveau_fence_emit(fence);
if (ret)
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
}
@@ -127,7 +132,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
- return fence ? fence->channel : NULL;
+ return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}
int
@@ -135,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
- BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
- OUT_RING(chan, fence->sequence);
+ if (USE_REFCNT(dev)) {
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+ } else {
+ BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+ }
+ OUT_RING (chan, fence->sequence);
FIRE_RING(chan);
return 0;
@@ -182,7 +195,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
}
void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -192,7 +205,7 @@ nouveau_fence_unref(void **sync_obj)
}
void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
@@ -201,7 +214,7 @@ nouveau_fence_ref(void *sync_obj)
}
bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
}
int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
+ unsigned long sleep_time = jiffies + 1;
int ret = 0;
while (1) {
- if (nouveau_fence_signalled(sync_obj, sync_arg))
+ if (__nouveau_fence_signalled(sync_obj, sync_arg))
break;
if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy)
+ if (lazy && time_after_eq(jiffies, sleep_time))
schedule_timeout(1);
if (intr && signal_pending(current)) {
@@ -368,7 +382,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return 0;
}
@@ -380,33 +394,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
- int ret;
+ int ret = 0;
- if (likely(!fence || chan == wchan ||
- nouveau_fence_signalled(fence, NULL)))
- return 0;
+ if (likely(!chan || chan == wchan ||
+ nouveau_fence_signalled(fence)))
+ goto out;
sema = alloc_semaphore(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
- return nouveau_fence_wait(fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out;
+ }
+
+ /* try to take chan's mutex; if we can't take it right away
+ * we have to fall back to software sync to prevent locking
+ * order issues
+ */
+ if (!mutex_trylock(&chan->mutex)) {
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
if (ret)
- goto out;
+ goto out_unlock;
/* Signal the semaphore from chan */
ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+ mutex_unlock(&chan->mutex);
+out_unref:
kref_put(&sema->ref, free_semaphore);
+out:
+ if (chan)
+ nouveau_channel_put_unlocked(&chan);
return ret;
}
int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}
@@ -420,30 +450,27 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
int ret;
/* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
- ret = nouveau_ramht_insert(chan, NvSw, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
+ /* we leave subchannel empty for nvc0 */
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+ }
/* Create a DMA object for the shared cross-channel sync area. */
if (USE_SEMA(dev)) {
- struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+ struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
- mem->size << PAGE_SHIFT,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &obj);
+ mem->size, NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;
@@ -473,6 +500,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct nouveau_fence *tmp, *fence;
+ spin_lock(&chan->fence.lock);
+
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -482,6 +511,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
+
+ spin_unlock(&chan->fence.lock);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..506c508b7eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;
- if (unlikely(nvbo->cpu_filp))
- ttm_bo_synccpu_write_release(bo);
-
if (unlikely(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
return 0;
}
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->card_type >= NV_50) {
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7000:
- case 0x7400:
- case 0x7a00:
- case 0xe000:
- return true;
- }
- } else {
- if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
- return true;
- }
-
- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
- return false;
-}
-
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->channel_hint) {
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
- file_priv, chan);
- }
-
if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+ if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
+ }
+
+ if (req->channel_hint) {
+ chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ }
ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
+ if (chan)
+ nouveau_channel_put(&chan);
if (ret)
return ret;
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
- if (likely(fence)) {
- struct nouveau_fence *prev_fence;
-
- spin_lock(&nvbo->bo.lock);
- prev_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = nouveau_fence_ref(fence);
- spin_unlock(&nvbo->bo.lock);
- nouveau_fence_unref((void *)&prev_fence);
- }
+
+ nouveau_bo_fence(nvbo, fence);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ retry:
return -EINVAL;
}
- ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+ ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (ret == -EAGAIN)
- ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+ if (unlikely(ret == -EAGAIN))
+ ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
drm_gem_object_unreference_unlocked(gem);
- if (ret) {
- NV_ERROR(dev, "fail reserve\n");
+ if (unlikely(ret)) {
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail reserve\n");
return ret;
}
goto retry;
@@ -331,25 +301,6 @@ retry:
validate_fini(op, NULL);
return -EINVAL;
}
-
- if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
- validate_fini(op, NULL);
-
- if (nvbo->cpu_filp == file_priv) {
- NV_ERROR(dev, "bo %p mapped by process trying "
- "to validate it!\n", nvbo);
- return -EINVAL;
- }
-
- mutex_unlock(&drm_global_mutex);
- ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- mutex_lock(&drm_global_mutex);
- if (ret) {
- NV_ERROR(dev, "fail wait_cpu\n");
- return ret;
- }
- goto retry;
- }
}
return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
}
nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
- NV_ERROR(dev, "fail ttm_validate\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
}
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
- NV_ERROR(dev, "validate_init\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate_init\n");
return ret;
}
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate vram_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate gart_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate both_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
data |= r->vor;
}
- spin_lock(&nvbo->bo.lock);
+ spin_lock(&nvbo->bo.bdev->fence_lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
break;
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
req->vram_available = dev_priv->fb_aper_free;
req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
- if (IS_ERR(push))
+ if (IS_ERR(push)) {
+ nouveau_channel_put(&chan);
return PTR_ERR(push);
+ }
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
+ nouveau_channel_put(&chan);
return PTR_ERR(bo);
}
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
- NV_ERROR(dev, "validate: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate: %d\n", ret);
goto out;
}
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
+ nouveau_fence_unref(&fence);
kfree(bo);
kfree(push);
@@ -750,6 +714,7 @@ out_next:
req->suffix1 = 0x00000000;
}
+ nouveau_channel_put(&chan);
return ret;
}
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (nvbo->cpu_filp) {
- if (nvbo->cpu_filp == file_priv)
- goto out;
-
- ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
- if (ret)
- goto out;
- }
-
- if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
- spin_unlock(&nvbo->bo.lock);
- } else {
- ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
- if (ret == 0)
- nvbo->cpu_filp = file_priv;
- }
-
-out:
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -809,26 +757,7 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_gem_cpu_prep *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- if (nvbo->cpu_filp != file_priv)
- goto out;
- nvbo->cpu_filp = NULL;
-
- ttm_bo_synccpu_write_release(&nvbo->bo);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
+ return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c411..053edf9d2f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
- if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+ if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
else
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (dev_priv->card_type == NV_10) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
- /* Setting 1 on this value gives you interrupts for every vblank period. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+ /* Enable vblank interrupts. */
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+ (dev->vblank_enabled[head] ? 1 : 0));
NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d67..2ba7265bc967 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev)
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (dev_priv->card_type >= NV_50) {
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
- INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
- spin_lock_init(&dev_priv->hpd_state.lock);
- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
- }
+ INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
nouveau_irq_postinstall(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
/* Master enable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+
return 0;
}
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_pgraph_object_method *grm;
- struct nouveau_pgraph_object_class *grc;
-
- grc = dev_priv->engine.graph.grclass;
- while (grc->id) {
- if (grc->id == class)
- break;
- grc++;
- }
-
- if (grc->id != class || !grc->methods)
- return -ENOENT;
-
- grm = grc->methods;
- while (grm->id) {
- if (grm->id == mthd)
- return grm->exec(chan, class, mthd, data);
- grm++;
- }
-
- return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
- struct drm_device *dev = chan->dev;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
-
- if (mthd == 0x0000) {
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return false;
-
- if (gpuobj->engine != NVOBJ_ENGINE_SW)
- return false;
-
- chan->sw_subchannel[subc] = gpuobj->class;
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
- NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
- return true;
- }
-
- /* hw object */
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
- return false;
-
- if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
- return false;
-
- return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t status, reassign;
- int cnt = 0;
-
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- struct nouveau_channel *chan = NULL;
- uint32_t chid, get;
-
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- chid = engine->fifo.channel_id(dev);
- if (chid >= 0 && chid < engine->fifo.channels)
- chan = dev_priv->fifos[chid];
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
- if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(dev,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
-
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
- }
- } else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
-
- if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
- }
-
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
-
- if (status & NV_PFIFO_INTR_SEMAPHORE) {
- uint32_t sem;
-
- status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
-
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- if (dev_priv->card_type == NV_50) {
- if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
- status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
- }
- }
-
- if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
- status = 0;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
- }
-
- if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
- uint32_t mask;
- const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
- { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
- { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
- { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
- { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
- { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
- { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
- { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
- { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
- { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
- { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
- { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
- { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
- const struct nouveau_bitfield_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- uint32_t mask = namelist[i].mask;
- if (value & mask) {
- printk(" %s", namelist[i].name);
- value &= ~mask;
- }
- }
- if (value)
- printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
- nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
- uint32_t value;
- const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
- const struct nouveau_enum_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- if (value == namelist[i].value) {
- printk("%s", namelist[i].name);
- return;
- }
- }
- printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
- nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
+ unsigned long flags;
+ u32 stat;
int i;
- if (dev_priv->card_type < NV_40)
- return dev_priv->engine.fifo.channels;
- else
- if (dev_priv->card_type < NV_50) {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin_grctx)
- continue;
-
- if (inst == chan->ramin_grctx->pinst)
- break;
- }
- } else {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- }
-
-
- return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- int channel;
-
- if (dev_priv->card_type < NV_10)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
- else
- if (dev_priv->card_type < NV_40)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- else
- channel = nouveau_graph_chid_from_grctx(dev);
-
- if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
- NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
- return -EINVAL;
- }
-
- *channel_ret = channel;
- return 0;
-}
-
-struct nouveau_pgraph_trap {
- int channel;
- int class;
- int subc, mthd, size;
- uint32_t data, data2;
- uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t address;
-
- trap->nsource = trap->nstatus = 0;
- if (dev_priv->card_type < NV_50) {
- trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- }
-
- if (nouveau_graph_trapped_channel(dev, &trap->channel))
- trap->channel = -1;
- address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
- trap->mthd = address & 0x1FFC;
- trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- if (dev_priv->card_type < NV_10) {
- trap->subc = (address >> 13) & 0x7;
- } else {
- trap->subc = (address >> 16) & 0x7;
- trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
- }
-
- if (dev_priv->card_type < NV_10)
- trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
- else if (dev_priv->card_type < NV_40)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
- else if (dev_priv->card_type < NV_50)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
- else
- trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
- if (dev_priv->card_type < NV_50) {
- NV_INFO(dev, "%s - nSource:", id);
- nouveau_print_bitfield_names(nsource, nsource_names);
- printk(", nStatus:");
- if (dev_priv->card_type < NV_10)
- nouveau_print_bitfield_names(nstatus, nstatus_names);
- else
- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
- printk("\n");
- }
-
- NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
- "Data 0x%08x:0x%08x\n",
- id, trap->channel, trap->subc,
- trap->class, trap->mthd,
- trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (trap->channel < 0 ||
- trap->channel >= dev_priv->engine.fifo.channels ||
- !dev_priv->fifos[trap->channel])
- return -ENODEV;
-
- return nouveau_call_method(dev_priv->fifos[trap->channel],
- trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
+ stat = nv_rd32(dev, NV03_PMC_INTR_0);
+ if (!stat)
+ return IRQ_NONE;
- nouveau_graph_trap_info(dev, &trap);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ for (i = 0; i < 32 && stat; i++) {
+ if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+ continue;
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else {
- unhandled = 1;
+ dev_priv->irq_handler[i](dev);
+ stat &= ~(1 << i);
}
- if (unhandled)
- nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
-
- nouveau_graph_trap_info(dev, &trap);
- trap.nsource = nsource;
-
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- uint32_t v = nv_rd32(dev, 0x402000);
- nv_wr32(dev, 0x402000, v);
-
- /* dump the error anyway for now: it's useful for
- Gallium development */
- unhandled = 1;
- } else {
- unhandled = 1;
- }
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+ if (stat && nouveau_ratelimit())
+ NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
+ return IRQ_HANDLED;
}
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+int
+nouveau_irq_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t chid;
-
- chid = engine->fifo.channel_id(dev);
- NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_graph_context_switch(dev);
- break;
- case NV_10:
- nv10_graph_context_switch(dev);
- break;
- default:
- NV_ERROR(dev, "Context switch not implemented\n");
- break;
- }
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
- if (status & NV_PGRAPH_INTR_NOTIFY) {
- nouveau_pgraph_intr_notify(dev, nsource);
-
- status &= ~NV_PGRAPH_INTR_NOTIFY;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
- }
-
- if (status & NV_PGRAPH_INTR_ERROR) {
- nouveau_pgraph_intr_error(dev, nsource);
+ int ret;
- status &= ~NV_PGRAPH_INTR_ERROR;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+ ret = pci_enable_msi(dev->pdev);
+ if (ret == 0) {
+ NV_INFO(dev, "enabled MSI\n");
+ dev_priv->msi_enabled = true;
}
-
- if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
- nouveau_pgraph_intr_context_switch(dev);
- }
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
- nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
}
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
- uint32_t addr, mp10, status, pc, oplow, ophigh;
- int i;
- int mps = 0;
- for (i = 0; i < 4; i++) {
- if (!(units & 1 << (i+24)))
- continue;
- if (dev_priv->chipset < 0xa0)
- addr = 0x408200 + (tpid << 12) + (i << 7);
- else
- addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(dev, addr + 0x10);
- status = nv_rd32(dev, addr + 0x14);
- if (!status)
- continue;
- if (display) {
- nv_rd32(dev, addr + 0x20);
- pc = nv_rd32(dev, addr + 0x24);
- oplow = nv_rd32(dev, addr + 0x70);
- ophigh= nv_rd32(dev, addr + 0x74);
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_print_enum_names(status,
- nv50_mp_exec_error_names);
- printk(" at %06x warp %d, opcode %08x %08x\n",
- pc&0xffffff, pc >> 24,
- oplow, ophigh);
- }
- nv_wr32(dev, addr + 0x10, mp10);
- nv_wr32(dev, addr + 0x14, 0);
- mps++;
- }
- if (!mps && display)
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
- "No MPs claiming errors?\n", tpid);
+ return drm_irq_install(dev);
}
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
- uint32_t ustatus_new, int display, const char *name)
+void
+nouveau_irq_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int tps = 0;
- uint32_t units = nv_rd32(dev, 0x1540);
- int i, r;
- uint32_t ustatus_addr, ustatus;
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
- if (dev_priv->chipset < 0xa0)
- ustatus_addr = ustatus_old + (i << 12);
- else
- ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
- if (!ustatus)
- continue;
- tps++;
- switch (type) {
- case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
- if (display) {
- NV_ERROR(dev, "magic set %d:\n", i);
- for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- }
- break;
- case 7: /* MP error */
- if (ustatus & 0x00010000) {
- nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
- }
- break;
- case 8: /* TPDMA error */
- {
- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
- }
- break;
- }
- if (ustatus) {
- if (display)
- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
- }
- nv_wr32(dev, ustatus_addr, 0xc0000000);
- }
-
- if (!tps && display)
- NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- uint32_t status = nv_rd32(dev, 0x400108);
- uint32_t ustatus;
- int display = nouveau_ratelimit();
-
-
- if (!status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
- NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
- }
-
- /* DISPATCH: Relays commands to other units and handles NOTIFY,
- * COND, QUERY. If you get a trap from it, the command is still stuck
- * in DISPATCH and you need to do something about it. */
- if (status & 0x001) {
- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
- }
-
- /* Known to be triggered by screwed up NOTIFY and COND... */
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x400808) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40080c);
- trap.data2 = nv_rd32(dev, 0x400810);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
- }
- nv_wr32(dev, 0x400808, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
- }
- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
- nv_wr32(dev, 0x400848, 0);
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x40084c) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40085c);
- trap.data2 = 0;
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
- }
- nv_wr32(dev, 0x40084c, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
- }
- ustatus &= ~0x00000002;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x001);
- status &= ~0x001;
- }
-
- /* TRAPs other than dispatch use the "normal" trap regs. */
- if (status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP", &trap);
- }
-
- /* M2MF: Memory to memory copy engine. */
- if (status & 0x002) {
- ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
- ustatus &= ~0x00000002;
- }
- if (ustatus & 0x00000004) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
- ustatus &= ~0x00000004;
- }
- NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x406804),
- nv_rd32(dev, 0x406808),
- nv_rd32(dev, 0x40680c),
- nv_rd32(dev, 0x406810));
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 2);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x002);
- status &= ~0x002;
- }
-
- /* VFETCH: Fetches data from vertex buffers. */
- if (status & 0x004) {
- ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x400c00),
- nv_rd32(dev, 0x400c08),
- nv_rd32(dev, 0x400c0c),
- nv_rd32(dev, 0x400c10));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x004);
- status &= ~0x004;
- }
-
- /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
- if (status & 0x008) {
- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x401804),
- nv_rd32(dev, 0x401808),
- nv_rd32(dev, 0x40180c),
- nv_rd32(dev, 0x401810));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 0x80);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x008);
- status &= ~0x008;
- }
-
- /* CCACHE: Handles code and c[] caches and fills them. */
- if (status & 0x010) {
- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x405800),
- nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808),
- nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810),
- nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x010);
- status &= ~0x010;
- }
-
- /* Unknown, not seen yet... 0x402000 is the only trap status reg
- * remaining, so try to handle it anyway. Perhaps related to that
- * unknown DMA slot on tesla? */
- if (status & 0x20) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modification on purpose */
- }
-
- /* TEXTURE: CUDA texturing units */
- if (status & 0x040) {
- nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
- "PGRAPH_TRAP_TEXTURE");
- nv_wr32(dev, 0x400108, 0x040);
- status &= ~0x040;
- }
-
- /* MP: CUDA execution engines. */
- if (status & 0x080) {
- nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
- "PGRAPH_TRAP_MP");
- nv_wr32(dev, 0x400108, 0x080);
- status &= ~0x080;
- }
-
- /* TPDMA: Handles TP-initiated uncached memory accesses:
- * l[], g[], stack, 2d surfaces, render targets. */
- if (status & 0x100) {
- nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
- "PGRAPH_TRAP_TPDMA");
- nv_wr32(dev, 0x400108, 0x100);
- status &= ~0x100;
- }
-
- if (status) {
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
- status);
- nv_wr32(dev, 0x400108, status);
- }
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
- { 4, "INVALID_VALUE" },
- { 5, "INVALID_ENUM" },
- { 8, "INVALID_OBJECT" },
- { 0xc, "INVALID_BITFIELD" },
- { 0x28, "MP_NO_REG_SPACE" },
- { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
- if (status & 0x00000001) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_NOTIFY", &trap);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
-
- /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
- * when you write 0x200 to 0x50c0 method 0x31c. */
- if (status & 0x00000002) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_COMPUTE_QUERY", &trap);
- status &= ~0x00000002;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
- }
-
- /* Unknown, never seen: 0x4 */
-
- /* ILLEGAL_MTHD: You used a wrong method for this class. */
- if (status & 0x00000010) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_MTHD", &trap);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
-
- /* ILLEGAL_CLASS: You used a wrong class. */
- if (status & 0x00000020) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_CLASS", &trap);
- status &= ~0x00000020;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
- }
-
- /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
- if (status & 0x00000040) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DOUBLE_NOTIFY", &trap);
- status &= ~0x00000040;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
- }
-
- /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) &
- ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
-
- nv50_graph_context_switch(dev);
-
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
-
- /* BUFFER_NOTIFY: Your m2mf transfer finished */
- if (status & 0x00010000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_BUFFER_NOTIFY", &trap);
- status &= ~0x00010000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
- }
-
- /* DATA_ERROR: Invalid value for this method, or invalid
- * state in current PGRAPH context for this operation */
- if (status & 0x00100000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit()) {
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DATA_ERROR", &trap);
- NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
- nouveau_print_enum_names(nv_rd32(dev, 0x400110),
- nv50_data_error_names);
- printk("\n");
- }
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
- /* TRAP: Something bad happened in the middle of command
- * execution. Has a billion types, subtypes, and even
- * subsubtypes. */
- if (status & 0x00200000) {
- nv50_pgraph_trap_handler(dev);
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
-
- /* Unknown, never seen: 0x00400000 */
-
- /* SINGLE_STEP: Happens on every method if you turned on
- * single stepping in 40008c */
- if (status & 0x01000000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_SINGLE_STEP", &trap);
- status &= ~0x01000000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
- }
-
- /* 0x02000000 happens when you pause a ctxprog...
- * but the only way this can happen that I know is by
- * poking the relevant MMIO register, and we don't
- * do that. */
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
- status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- {
- const int isb = (1 << 16) | (1 << 0);
-
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500,
- nv_rd32(dev, 0x400500) | isb);
- }
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
- if (nv_rd32(dev, 0x400824) & (1 << 31))
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ drm_irq_uninstall(dev);
+ if (dev_priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
}
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+ void (*handler)(struct drm_device *))
{
- if (crtc & 1)
- nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- if (crtc & 2)
- nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ dev_priv->irq_handler[status_bit] = handler;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
- struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status;
unsigned long flags;
- status = nv_rd32(dev, NV03_PMC_INTR_0);
- if (!status)
- return IRQ_NONE;
-
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
- nouveau_fifo_irq_handler(dev);
- status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
- if (dev_priv->card_type >= NV_50)
- nv50_pgraph_irq_handler(dev);
- else
- nouveau_pgraph_irq_handler(dev);
-
- status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
- nouveau_crtc_irq_handler(dev, (status>>24)&3);
- status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
- }
-
- if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING)) {
- nv50_display_irq_handler(dev);
- status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING);
- }
-
- if (status)
- NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+ dev_priv->irq_handler[status_bit] = NULL;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- return IRQ_HANDLED;
}
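
Illustrative sketch, not part of the patch: the nouveau_irq_register()/nouveau_irq_unregister() pair added above lets engine code attach a handler to a single PMC interrupt status bit. The engine name and the bit index below are hypothetical, chosen only to show the calling pattern implied by the signatures in this hunk.

/* Hypothetical engine-side usage of the per-bit IRQ handler table. */
static void
example_engine_isr(struct drm_device *dev)
{
	/* acknowledge and service this engine's interrupt source here */
}

static int
example_engine_init(struct drm_device *dev)
{
	/* bit 12 is an assumed PMC status bit, for illustration only */
	nouveau_irq_register(dev, 12, example_engine_isr);
	return 0;
}

static void
example_engine_fini(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 12);
}
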
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b42..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
/*
* NV10-NV40 tiling helpers
*/
static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+ int i = tile - dev_priv->tile.reg;
+ unsigned long save;
- tile->addr = addr;
- tile->size = size;
- tile->used = !!pitch;
- nouveau_fence_unref((void **)&tile->fence);
+ nouveau_fence_unref(&tile->fence);
+ if (tile->pitch)
+ pfb->free_tile_region(dev, i);
+
+ if (pitch)
+ pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, save);
pfifo->reassign(dev, false);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
- pgraph->set_region_tiling(dev, i, addr, size, pitch);
- pfb->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_tile_region(dev, i);
+ pgraph->set_tile_region(dev, i);
pfifo->cache_pull(dev, true);
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
- uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *found = NULL;
- unsigned long i, flags;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ spin_lock(&dev_priv->tile.lock);
- for (i = 0; i < pfb->num_tiles; i++) {
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
- if (tile->used)
- /* Tile region in use. */
- continue;
-
- if (tile->fence &&
- !nouveau_fence_signalled(tile->fence, NULL))
- /* Pending tile region. */
- continue;
-
- if (max(tile->addr, addr) <
- min(tile->addr + tile->size, addr + size))
- /* Kill an intersecting tile region. */
- nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
- if (pitch && !found) {
- /* Free tile region. */
- nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = tile;
- }
- }
-
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ if (!tile->used &&
+ (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ tile->used = true;
+ else
+ tile = NULL;
- return found;
+ spin_unlock(&dev_priv->tile.lock);
+ return tile;
}
void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence)
-{
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
- tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
- uint32_t flags, uint64_t phys)
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned block;
- int i;
-
- virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
- size = (size >> 16) << 1;
- phys |= ((uint64_t)flags << 32);
- phys |= 1;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- phys |= 0x30;
- }
-
- while (size) {
- unsigned offset_h = upper_32_bits(phys);
- unsigned offset_l = lower_32_bits(phys);
- unsigned pte, end;
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 1);
- if (size >= block && !(virt & (block - 1)))
- break;
- }
- offset_l |= (i << 7);
-
- phys += block << 15;
- size -= block;
-
- while (block) {
- pgt = dev_priv->vm_vram_pt[virt >> 14];
- pte = virt & 0x3ffe;
-
- end = pte + block;
- if (end > 16384)
- end = 16384;
- block -= (end - pte);
- virt += (end - pte);
-
- while (pte < end) {
- nv_wo32(pgt, (pte * 4) + 0, offset_l);
- nv_wo32(pgt, (pte * 4) + 4, offset_h);
- pte += 2;
- }
+ if (tile) {
+ spin_lock(&dev_priv->tile.lock);
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
}
- }
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
- return 0;
+ tile->used = false;
+ spin_unlock(&dev_priv->tile.lock);
+ }
}
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned pages, pte, end;
-
- virt -= dev_priv->vm_vram_base;
- pages = (size >> 16) << 1;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile, *found = NULL;
+ int i;
- while (pages) {
- pgt = dev_priv->vm_vram_pt[virt >> 29];
- pte = (virt & 0x1ffe0000ULL) >> 15;
+ for (i = 0; i < pfb->num_tiles; i++) {
+ tile = nv10_mem_get_tile_region(dev, i);
- end = pte + pages;
- if (end > 16384)
- end = 16384;
- pages -= (end - pte);
- virt += (end - pte) << 15;
+ if (pitch && !found) {
+ found = tile;
+ continue;
- while (pte < end) {
- nv_wo32(pgt, (pte * 4), 0);
- pte++;
+ } else if (tile && tile->pitch) {
+ /* Kill an unused tile region. */
+ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
}
+
+ nv10_mem_put_tile_region(dev, tile, NULL);
}
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
+ if (found)
+ nv10_mem_update_tile_region(dev, found, addr, size,
+ pitch, flags);
+ return found;
}
/*
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru;
-
- r0 = nv_rd32(dev, 0x100200);
- r4 = nv_rd32(dev, 0x100204);
- rt = nv_rd32(dev, 0x100250);
- ru = nv_rd32(dev, 0x001540);
- NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = ((r4 & 0x01000000) ? 8 : 4);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != dev_priv->vram_size) {
- NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
- (u32)(dev_priv->vram_size >> 20));
- NV_WARN(dev, "we calculated %dMiB VRAM\n",
- (u32)(predicted >> 20));
- }
-
- dev_priv->vram_rblock_size = rowsize >> 12;
- if (rt & 1)
- dev_priv->vram_rblock_size *= 3;
-
- NV_DEBUG(dev, "rblock %lld bytes\n",
- (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* To our knowledge, there's no large scale reordering of pages
- * that occurs on IGP chipsets.
- */
- dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- } else
- if (dev_priv->card_type < NV_C0) {
- dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
- dev_priv->vram_size &= 0xffffffff00ll;
-
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
- dev_priv->vram_sys_base <<= 12;
- nvaa_vram_preinit(dev);
- break;
- default:
- nv50_vram_preinit(dev);
- break;
- }
- } else {
- dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
- dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- }
-
- NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
- if (dev_priv->vram_sys_base) {
- NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
- dev_priv->vram_sys_base);
}
if (dev_priv->vram_size)
@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
+
+ return false;
+}
+
#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (ret)
return ret;
- ret = nouveau_mem_detect(dev);
- if (ret)
- return ret;
-
dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_available_size = dev_priv->vram_size;
- dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
- dev_priv->fb_mappable_pages =
- pci_resource_len(dev->pdev, 1);
- dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
/* reserve space at end of VRAM for PRAMIN */
if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
else
dev_priv->ramin_rsvd_vram = (512 * 1024);
+ ret = dev_priv->engine.vram.init(dev);
+ if (ret)
+ return ret;
+
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ if (dev_priv->vram_sys_base) {
+ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ dev_priv->vram_sys_base);
+ }
+
+ dev_priv->fb_available_size = dev_priv->vram_size;
+ dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+ if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+ dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+ dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -799,3 +660,112 @@ nouveau_mem_timing_fini(struct drm_device *dev)
kfree(mem->timing);
}
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_mm *mm;
+ u32 b_size;
+ int ret;
+
+ p_size = (p_size << PAGE_SHIFT) >> 12;
+ b_size = dev_priv->vram_rblock_size >> 12;
+
+ ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ if (ret)
+ return ret;
+
+ man->priv = mm;
+ return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+ struct nouveau_mm *mm = man->priv;
+ int ret;
+
+ ret = nouveau_mm_fini(&mm);
+ if (ret)
+ return ret;
+
+ man->priv = NULL;
+ return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+
+ vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vram *node;
+ u32 size_nc = 0;
+ int ret;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+ size_nc = 1 << nvbo->vma.node->type;
+
+ ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0xff, &node);
+ if (ret)
+ return ret;
+
+ node->page_shift = 12;
+ if (nvbo->vma.node)
+ node->page_shift = nvbo->vma.node->type;
+
+ mem->mm_node = node;
+ mem->start = node->offset >> PAGE_SHIFT;
+ return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ u32 total = 0, free = 0;
+
+ mutex_lock(&mm->mutex);
+ list_for_each_entry(r, &mm->nodes, nl_entry) {
+ printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+ prefix, r->type, ((u64)r->offset << 12),
+ (((u64)r->offset + r->length) << 12));
+
+ total += r->length;
+ if (!r->type)
+ free += r->length;
+ }
+ mutex_unlock(&mm->mutex);
+
+ printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+ prefix, (u64)total << 12, (u64)free << 12);
+ printk(KERN_DEBUG "%s block: 0x%08x\n",
+ prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+ nouveau_vram_manager_init,
+ nouveau_vram_manager_fini,
+ nouveau_vram_manager_new,
+ nouveau_vram_manager_del,
+ nouveau_vram_manager_debug
+};
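
Sketch only, under the assumption that a ttm_mem_type_manager_func table such as nouveau_vram_manager is plugged in from the driver's TTM init_mem_type() callback; the function below is hypothetical and only shows where the table above would be assigned.

/* Assumed hookup point: not taken from the patch itself. */
static int
example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* route VRAM allocations through the manager defined above */
		man->func = &nouveau_vram_manager;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
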
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 000000000000..8844b50c3e54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+ list_del(&a->nl_entry);
+ list_del(&a->fl_entry);
+ kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ b->offset = a->offset;
+ b->length = size;
+ b->type = a->type;
+ a->offset += size;
+ a->length -= size;
+ list_add_tail(&b->nl_entry, &a->nl_entry);
+ if (b->type == 0)
+ list_add_tail(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+ struct nouveau_mm_node *prev = node(this, prev);
+ struct nouveau_mm_node *next = node(this, next);
+
+ list_add(&this->fl_entry, &rmm->free);
+ this->type = 0;
+
+ if (prev && prev->type == 0) {
+ prev->length += this->length;
+ region_put(rmm, this);
+ this = prev;
+ }
+
+ if (next && next->type == 0) {
+ next->offset = this->offset;
+ next->length += this->length;
+ region_put(rmm, this);
+ }
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 min = size_nc ? size_nc : size;
+ u32 align_mask = align - 1;
+ u32 splitoff;
+ u32 s, e;
+
+ list_for_each_entry(this, &rmm->free, fl_entry) {
+ e = this->offset + this->length;
+ s = this->offset;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, rmm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type)
+ e = rounddown(e, rmm->block_size);
+
+ s = (s + align_mask) & ~align_mask;
+ e &= ~align_mask;
+ if (s > e || e - s < min)
+ continue;
+
+ splitoff = s - this->offset;
+ if (splitoff && !region_split(rmm, this, splitoff))
+ return -ENOMEM;
+
+ this = region_split(rmm, this, min(size, e - s));
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+ struct nouveau_mm *rmm;
+ struct nouveau_mm_node *heap;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return -ENOMEM;
+ heap->offset = roundup(offset, block);
+ heap->length = rounddown(offset + length, block) - heap->offset;
+
+ rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+ if (!rmm) {
+ kfree(heap);
+ return -ENOMEM;
+ }
+ rmm->block_size = block;
+ mutex_init(&rmm->mutex);
+ INIT_LIST_HEAD(&rmm->nodes);
+ INIT_LIST_HEAD(&rmm->free);
+ list_add(&heap->nl_entry, &rmm->nodes);
+ list_add(&heap->fl_entry, &rmm->free);
+
+ *prmm = rmm;
+ return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+ struct nouveau_mm *rmm = *prmm;
+ struct nouveau_mm_node *heap =
+ list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+ if (!list_is_singular(&rmm->nodes))
+ return -EBUSY;
+
+ kfree(heap);
+ kfree(rmm);
+ *prmm = NULL;
+ return 0;
+}
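
Illustrative only: a minimal sketch of driving the nouveau_mm allocator introduced in this file, using made-up offsets and sizes. It assumes the declarations from nouveau_mm.h, and that serializing nouveau_mm_get()/nouveau_mm_put() via mm->mutex is the caller's responsibility (as the debug helper earlier in this series does for its list walk).

/* Not part of the patch: example driver of the allocator above. */
static int
example_mm_usage(void)
{
	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	/* manage a 0x10000-unit range starting at 0, block size 0x100 */
	ret = nouveau_mm_init(&mm, 0, 0x10000, 0x100);
	if (ret)
		return ret;

	mutex_lock(&mm->mutex);
	/* type-1 allocation: 0x400 units, no non-contig minimum, 0x100-aligned */
	ret = nouveau_mm_get(mm, 1, 0x400, 0, 0x100, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);
	mutex_unlock(&mm->mutex);

	/* fini returns -EBUSY unless the heap is back to a single free node */
	return nouveau_mm_fini(&mm);
}
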
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 000000000000..798eaf39691c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+ struct list_head nl_entry;
+ struct list_head fl_entry;
+ struct list_head rl_entry;
+
+ u8 type;
+ u32 offset;
+ u32 length;
+};
+
+struct nouveau_mm {
+ struct list_head nodes;
+ struct list_head free;
+
+ struct mutex mutex;
+
+ u32 block_size;
+};
+
+int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_pre(struct nouveau_mm *);
+int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int nv50_vram_init(struct drm_device *);
+int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+ u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int nvc0_vram_init(struct drm_device *);
+int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658b..5ea167623a82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,48 +96,33 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t *b_offset)
+ int size, uint32_t start, uint32_t end,
+ uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
int target, ret;
- mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+ start, end, 0);
if (mem)
- mem = drm_mm_get_block(mem, size, 0);
+ mem = drm_mm_get_block_range(mem, size, 0, start, end);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
}
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
- target = NV_DMA_TARGET_VIDMEM;
- } else
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
- dev_priv->card_type < NV_50) {
- ret = nouveau_sgdma_get_page(dev, offset, &offset);
- if (ret)
- return ret;
- target = NV_DMA_TARGET_PCI;
- } else {
- target = NV_DMA_TARGET_AGP;
- if (dev_priv->card_type >= NV_50)
- offset += dev_priv->vm_gart_base;
- }
- } else {
- NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
- chan->notifier_bo->bo.mem.mem_type);
- return -EINVAL;
- }
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
- mem->size, NV_DMA_ACCESS_RW, target,
+ mem->size, NV_MEM_ACCESS_RW, target,
&nobj);
if (ret) {
drm_mm_put_block(mem);
@@ -181,15 +166,21 @@ int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_notifierobj_alloc *na = data;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+ /* completely unnecessary for these chipsets... */
+ if (unlikely(dev_priv->card_type >= NV_C0))
+ return -EINVAL;
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
- if (ret)
- return ret;
+ chan = nouveau_channel_get(dev, file_priv, na->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- return 0;
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
+ nouveau_channel_put(&chan);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02a..30b6544467ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+ struct list_head head;
+ u32 mthd;
+ int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+ struct list_head head;
+ struct list_head methods;
+ u32 id;
+ u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+
+ oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+ if (!oc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&oc->methods);
+ oc->id = class;
+ oc->engine = engine;
+ list_add(&oc->head, &dev_priv->classes);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ return -EINVAL;
+
+found:
+ om = kzalloc(sizeof(*om), GFP_KERNEL);
+ if (!om)
+ return -ENOMEM;
+
+ om->mthd = mthd;
+ om->exec = exec;
+ list_add(&om->head, &oc->methods);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id != class)
+ continue;
+
+ list_for_each_entry(om, &oc->methods, head) {
+ if (om->mthd == mthd)
+ return om->exec(chan, class, mthd, data);
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+ chan = dev_priv->channels.ptr[chid];
+ if (chan)
+ ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
/* NVidia uses context objects to drive drawing operations.
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct nouveau_gpuobj **gpuobj_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *gpuobj;
struct drm_mm_node *ramin = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
chan ? chan->id : -1, size, align, flags);
- if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
-
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
spin_unlock(&dev_priv->ramin_lock);
if (chan) {
- NV_DEBUG(dev, "channel heap\n");
-
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
-
if (!ramin) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
- } else {
- NV_DEBUG(dev, "global heap\n");
-
- /* allocate backing pages, sets vinst */
- ret = engine->instmem.populate(dev, gpuobj, &size);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
- }
-
- /* try and get aperture space */
- do {
- if (drm_mm_pre_get(&dev_priv->ramin_heap))
- return -ENOMEM;
- spin_lock(&dev_priv->ramin_lock);
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
- align, 0);
- if (ramin == NULL) {
- spin_unlock(&dev_priv->ramin_lock);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
-
- ramin = drm_mm_get_block_atomic(ramin, size, align);
- spin_unlock(&dev_priv->ramin_lock);
- } while (ramin == NULL);
-
- /* on nv50 it's ok to fail, we have a fallback path */
- if (!ramin && dev_priv->card_type < NV_50) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- }
+ gpuobj->pinst = chan->ramin->pinst;
+ if (gpuobj->pinst != ~0)
+ gpuobj->pinst += ramin->start;
- /* if we got a chunk of the aperture, map pages into it */
- gpuobj->im_pramin = ramin;
- if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
- ret = engine->instmem.bind(dev, gpuobj);
+ gpuobj->cinst = ramin->start;
+ gpuobj->vinst = ramin->start + chan->ramin->vinst;
+ gpuobj->node = ramin;
+ } else {
+ ret = instmem->get(gpuobj, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
- }
-
- /* calculate the various different addresses for the object */
- if (chan) {
- gpuobj->pinst = chan->ramin->pinst;
- if (gpuobj->pinst != ~0)
- gpuobj->pinst += gpuobj->im_pramin->start;
- if (dev_priv->card_type < NV_50) {
- gpuobj->cinst = gpuobj->pinst;
- } else {
- gpuobj->cinst = gpuobj->im_pramin->start;
- gpuobj->vinst = gpuobj->im_pramin->start +
- chan->ramin->vinst;
- }
- } else {
- if (gpuobj->im_pramin)
- gpuobj->pinst = gpuobj->im_pramin->start;
- else
+ ret = -ENOSYS;
+ if (!(flags & NVOBJ_FLAG_DONT_MAP))
+ ret = instmem->map(gpuobj);
+ if (ret)
gpuobj->pinst = ~0;
- gpuobj->cinst = 0xdeadbeef;
+
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
}
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- int i;
-
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev)
NV_DEBUG(dev, "\n");
INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ INIT_LIST_HEAD(&dev_priv->classes);
spin_lock_init(&dev_priv->ramin_lock);
dev_priv->ramin_base = ~0;
@@ -205,9 +252,20 @@ void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om, *tm;
+ struct nouveau_gpuobj_class *oc, *tc;
NV_DEBUG(dev, "\n");
+ list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+ list_for_each_entry_safe(om, tm, &oc->methods, head) {
+ list_del(&om->head);
+ kfree(om);
+ }
+ list_del(&oc->head);
+ kfree(oc);
+ }
+
BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref)
container_of(ref, struct nouveau_gpuobj, refcount);
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int i;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
- if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+ if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
- if (gpuobj->im_backing)
- engine->instmem.clear(dev, gpuobj);
+ if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+ if (gpuobj->node) {
+ instmem->unmap(gpuobj);
+ instmem->put(gpuobj);
+ }
+ } else {
+ if (gpuobj->node) {
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ spin_unlock(&dev_priv->ramin_lock);
+ }
+ }
spin_lock(&dev_priv->ramin_lock);
- if (gpuobj->im_pramin)
- drm_mm_put_block(gpuobj->im_pramin);
list_del(&gpuobj->list);
spin_unlock(&dev_priv->ramin_lock);
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
kref_init(&gpuobj->refcount);
gpuobj->size = size;
gpuobj->pinst = pinst;
- gpuobj->cinst = 0xdeadbeef;
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
gpuobj->vinst = vinst;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
The method below creates a DMA object in instance RAM and returns a handle
to it that can be used to set up context objects.
*/
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
- uint64_t offset, uint64_t size, int access,
- int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+ u64 base, u64 size, int target, int access,
+ u32 type, u32 comp)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- int ret;
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ u32 flags0;
- NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
- chan->id, class, offset, size);
- NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+ flags0 = (comp << 29) | (type << 22) | class;
+ flags0 |= 0x00100000;
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+ case NV_MEM_ACCESS_RW:
+ case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+ default:
+ break;
+ }
switch (target) {
- case NV_DMA_TARGET_AGP:
- offset += dev_priv->gart_info.aper_base;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
break;
+ case NV_MEM_TARGET_GART:
+ base += dev_priv->gart_info.aper_base;
default:
+ flags0 &= ~0x00100000;
break;
}
- ret = nouveau_gpuobj_new(dev, chan,
- nouveau_gpuobj_class_instmem_size(dev, class),
- 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
- return ret;
- }
+ /* convert to base + limit */
+ size = (base + size) - 1;
- if (dev_priv->card_type < NV_50) {
- uint32_t frame, adjust, pte_flags = 0;
-
- if (access != NV_DMA_ACCESS_RO)
- pte_flags |= (1<<1);
- adjust = offset & 0x00000fff;
- frame = offset & ~0x00000fff;
-
- nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
- (access << 14) | (target << 16) |
- class));
- nv_wo32(*gpuobj, 4, size - 1);
- nv_wo32(*gpuobj, 8, frame | pte_flags);
- nv_wo32(*gpuobj, 12, frame | pte_flags);
- } else {
- uint64_t limit = offset + size - 1;
- uint32_t flags0, flags5;
+ nv_wo32(obj, offset + 0x00, flags0);
+ nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+ nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+ nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+ upper_32_bits(base));
+ nv_wo32(obj, offset + 0x10, 0x00000000);
+ nv_wo32(obj, offset + 0x14, 0x00000000);
- if (target == NV_DMA_TARGET_VIDMEM) {
- flags0 = 0x00190000;
- flags5 = 0x00010000;
- } else {
- flags0 = 0x7fc00000;
- flags5 = 0x00080000;
- }
+ pinstmem->flush(obj->dev);
+}
- nv_wo32(*gpuobj, 0, flags0 | class);
- nv_wo32(*gpuobj, 4, lower_32_bits(limit));
- nv_wo32(*gpuobj, 8, lower_32_bits(offset));
- nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
- (upper_32_bits(offset) & 0xff));
- nv_wo32(*gpuobj, 20, flags5);
- }
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+ int target, int access, u32 type, u32 comp,
+ struct nouveau_gpuobj **pobj)
+{
+ struct drm_device *dev = chan->dev;
+ int ret;
- instmem->flush(dev);
+ ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+ if (ret)
+ return ret;
- (*gpuobj)->engine = NVOBJ_ENGINE_SW;
- (*gpuobj)->class = class;
+ nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+ access, type, comp);
return 0;
}
int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
- uint64_t offset, uint64_t size, int access,
- struct nouveau_gpuobj **gpuobj,
- uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+ u64 size, int access, int target,
+ struct nouveau_gpuobj **pobj)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj;
+ u32 flags0, flags2;
int ret;
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
- (dev_priv->card_type >= NV_50 &&
- dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- offset + dev_priv->vm_gart_base,
- size, access, NV_DMA_TARGET_AGP,
- gpuobj);
- if (o_ret)
- *o_ret = 0;
- } else
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
- if (offset & ~0xffffffffULL) {
- NV_ERROR(dev, "obj offset exceeds 32-bits\n");
- return -EINVAL;
+ if (dev_priv->card_type >= NV_50) {
+ u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+ u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+ return nv50_gpuobj_dma_new(chan, class, base, size,
+ target, access, type, comp, pobj);
+ }
+
+ if (target == NV_MEM_TARGET_GART) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ base += dev_priv->gart_info.aper_base;
+ } else
+ if (base != 0) {
+ base = nouveau_sgdma_get_physical(dev, base);
+ target = NV_MEM_TARGET_PCI;
+ } else {
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+ return 0;
}
- if (o_ret)
- *o_ret = (uint32_t)offset;
- ret = (*gpuobj != NULL) ? 0 : -EINVAL;
- } else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- return -EINVAL;
}
- return ret;
+ flags0 = class;
+ flags0 |= 0x00003000; /* PT present, PT linear */
+ flags2 = 0;
+
+ switch (target) {
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
+ break;
+ }
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ flags0 |= 0x00008000;
+ default:
+ flags2 |= 0x00000002;
+ break;
+ }
+
+ flags0 |= (base & 0x00000fff) << 20;
+ flags2 |= (base & 0xfffff000);
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, flags0);
+ nv_wo32(obj, 0x04, size - 1);
+ nv_wo32(obj, 0x08, flags2);
+ nv_wo32(obj, 0x0c, flags2);
+
+ obj->engine = NVOBJ_ENGINE_SW;
+ obj->class = class;
+ *pobj = obj;
+ return 0;
}
/* Context objects in the instance RAM have the following structure.
@@ -495,82 +598,130 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
entry[5]:
set to 0?
*/
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ gpuobj->dev = chan->dev;
+ gpuobj->engine = NVOBJ_ENGINE_SW;
+ gpuobj->class = class;
+ kref_init(&gpuobj->refcount);
+ gpuobj->cinst = 0x40;
+
+ spin_lock(&dev_priv->ramin_lock);
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj)
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+ struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+ return -EINVAL;
+
+found:
+ switch (oc->engine) {
+ case NVOBJ_ENGINE_SW:
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+ if (ret)
+ return ret;
+ goto insert;
+ }
+ break;
+ case NVOBJ_ENGINE_GR:
+ if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
+ (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
+ struct nouveau_pgraph_engine *pgraph =
+ &dev_priv->engine.graph;
+
+ ret = pgraph->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ case NVOBJ_ENGINE_CRYPT:
+ if (!chan->crypt_ctx) {
+ struct nouveau_crypt_engine *pcrypt =
+ &dev_priv->engine.crypt;
+
+ ret = pcrypt->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ }
+
+ /* we're done if this is fermi */
+ if (dev_priv->card_type >= NV_C0)
+ return 0;
+
ret = nouveau_gpuobj_new(dev, chan,
nouveau_gpuobj_class_instmem_size(dev, class),
16,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
- gpuobj);
+ &gpuobj);
if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
return ret;
}
if (dev_priv->card_type >= NV_50) {
- nv_wo32(*gpuobj, 0, class);
- nv_wo32(*gpuobj, 20, 0x00010000);
+ nv_wo32(gpuobj, 0, class);
+ nv_wo32(gpuobj, 20, 0x00010000);
} else {
switch (class) {
case NV_CLASS_NULL:
- nv_wo32(*gpuobj, 0, 0x00001030);
- nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
+ nv_wo32(gpuobj, 0, 0x00001030);
+ nv_wo32(gpuobj, 4, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 8, 0x01000000);
+ nv_wo32(gpuobj, 8, 0x01000000);
#endif
} else {
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 0, class | 0x00080000);
+ nv_wo32(gpuobj, 0, class | 0x00080000);
#else
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#endif
}
}
}
dev_priv->engine.instmem.flush(dev);
- (*gpuobj)->engine = NVOBJ_ENGINE_GR;
- (*gpuobj)->class = class;
- return 0;
-}
+ gpuobj->engine = oc->engine;
+ gpuobj->class = oc->id;
-int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj_ret)
-{
- struct drm_nouveau_private *dev_priv;
- struct nouveau_gpuobj *gpuobj;
-
- if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
- dev_priv = chan->dev->dev_private;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
- *gpuobj_ret = gpuobj;
- return 0;
+insert:
+ ret = nouveau_ramht_insert(chan, handle, gpuobj);
+ if (ret)
+ NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return ret;
}
static int
@@ -585,7 +736,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
/* Base amount for object storage (4KiB enough?) */
- size = 0x1000;
+ size = 0x2000;
base = 0;
/* PGRAPH context */
@@ -624,12 +775,30 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+ if (dev_priv->card_type == NV_C0) {
+ struct nouveau_vm *vm = dev_priv->chan_vm;
+ struct nouveau_vm_pgd *vpgd;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+ &chan->ramin);
+ if (ret)
+ return ret;
+
+ nouveau_vm_ref(vm, &chan->vm, NULL);
+
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+ return 0;
+ }
+
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
if (ret) {
@@ -639,14 +808,12 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* NV50 VM
* - Allocate per-channel page-directory
- * - Map GART and VRAM into the channel's address space at the
- * locations determined during init.
+ * - Link with shared channel VM
*/
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->chan_vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
- u32 pde;
if (vm_pinst != ~0)
vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
0, &chan->vm_pd);
if (ret)
return ret;
- for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(chan->vm_pd, i + 0, 0x00000000);
- nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
- }
-
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
- &chan->vm_gart_pt);
- pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
- nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
- pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
- &chan->vm_vram_pt[i]);
-
- nv_wo32(chan->vm_pd, pde + 0,
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
- pde += 8;
- }
- instmem->flush(dev);
+ nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -700,9 +846,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* VRAM ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &vram);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -710,8 +855,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &vram);
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -728,21 +873,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &tt);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
- return ret;
- }
- } else
- if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &tt, NULL);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &tt);
} else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- ret = -EINVAL;
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_GART, &tt);
}
if (ret) {
@@ -763,21 +900,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramht)
- return;
-
nouveau_ramht_ref(NULL, &chan->ramht, chan);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
- if (!dev_priv->susres.ramin_copy)
- return -ENOMEM;
-
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
- return 0;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing)
+ if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
continue;
- gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
- if (!gpuobj->im_backing_suspend) {
+ gpuobj->suspend = vmalloc(gpuobj->size);
+ if (!gpuobj->suspend) {
nouveau_gpuobj_resume(dev);
return -ENOMEM;
}
for (i = 0; i < gpuobj->size; i += 4)
- gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+ gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
}
return 0;
}
void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
-
- if (dev_priv->card_type < NV_50) {
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
- return;
- }
-
- list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
- continue;
-
- vfree(gpuobj->im_backing_suspend);
- gpuobj->im_backing_suspend = NULL;
- }
-}
-
-void
nouveau_gpuobj_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
- nouveau_gpuobj_suspend_cleanup(dev);
- return;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
+ if (!gpuobj->suspend)
continue;
for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
- dev_priv->engine.instmem.flush(dev);
+ nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+ vfree(gpuobj->suspend);
+ gpuobj->suspend = NULL;
}
- nouveau_gpuobj_suspend_cleanup(dev);
+ dev_priv->engine.instmem.flush(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_pgraph_object_class *grc;
- struct nouveau_gpuobj *gr = NULL;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
if (init->handle == ~0)
return -EINVAL;
- grc = pgraph->grclass;
- while (grc->id) {
- if (grc->id == init->class)
- break;
- grc++;
- }
+ chan = nouveau_channel_get(dev, file_priv, init->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- if (!grc->id) {
- NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
- return -EPERM;
+ if (nouveau_ramht_find(chan, init->handle)) {
+ ret = -EEXIST;
+ goto out;
}
- if (nouveau_ramht_find(chan, init->handle))
- return -EEXIST;
-
- if (!grc->software)
- ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
- else
- ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+ ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
- return ret;
}
- ret = nouveau_ramht_insert(chan, init->handle, gr);
- nouveau_gpuobj_ref(NULL, &gr);
- if (ret) {
- NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- return ret;
- }
-
- return 0;
+out:
+ nouveau_channel_put(&chan);
+ return ret;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
+ int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- gpuobj = nouveau_ramht_find(chan, objfree->handle);
- if (!gpuobj)
- return -ENOENT;
+ /* Synchronize with the user channel */
+ nouveau_channel_idle(chan);
- nouveau_ramht_remove(chan, objfree->handle);
- return 0;
+ ret = nouveau_ramht_remove(chan, objfree->handle);
+ nouveau_channel_put(&chan);
+ return ret;
}
u32
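The two ioctls above drop the old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro in favour of explicit channel references. A minimal sketch of the resulting pattern, using only the nouveau_channel_get()/nouveau_channel_put() and nouveau_ramht_find() calls visible in the hunks above; the ioctl name and arguments here are hypothetical:

static int example_object_ioctl(struct drm_device *dev,
				struct drm_file *file_priv,
				u32 chid, u32 handle)
{
	struct nouveau_channel *chan;
	int ret = 0;

	/* takes a reference, or returns an ERR_PTR with no reference held */
	chan = nouveau_channel_get(dev, file_priv, chid);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, handle)) {
		ret = -EEXIST;		/* handle already in this channel's RAMHT */
		goto out;
	}

	/* ... create and hash the object here ... */
out:
	nouveau_channel_put(&chan);	/* every exit path drops the reference */
	return ret;
}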
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158f5825..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -418,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
- ret = sysfs_create_group(&hwmon_dev->kobj,
- &hwmon_attrgroup);
+ ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
NV_ERROR(dev,
"Unable to create hwmon sysfs file: %d\n", ret);
@@ -440,12 +443,31 @@ nouveau_hwmon_fini(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
if (pm->hwmon) {
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
hwmon_device_unregister(pm->hwmon);
}
#endif
}
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct drm_nouveau_private *dev_priv =
+ container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+ struct drm_device *dev = dev_priv->dev;
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, "ac_adapter") == 0) {
+ bool ac = power_supply_is_system_supplied();
+
+ NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+ }
+
+ return NOTIFY_OK;
+}
+#endif
+
int
nouveau_pm_init(struct drm_device *dev)
{
@@ -485,6 +507,10 @@ nouveau_pm_init(struct drm_device *dev)
nouveau_sysfs_init(dev);
nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+ pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+ register_acpi_notifier(&pm->acpi_nb);
+#endif
return 0;
}
@@ -503,6 +529,9 @@ nouveau_pm_fini(struct drm_device *dev)
nouveau_perf_fini(dev);
nouveau_volt_fini(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&pm->acpi_nb);
+#endif
nouveau_hwmon_fini(dev);
nouveau_sysfs_fini(dev);
}
@@ -514,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_level *perflvl;
- if (pm->cur == &pm->boot)
+ if (!pm->cur || pm->cur == &pm->boot)
return;
perflvl = pm->cur;
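The nouveau_pm.c hunks above wire a notifier into the ACPI bus so the driver can observe AC/DC transitions. A stripped-down sketch of the same registration pattern, assuming CONFIG_ACPI and the <linux/acpi.h>/<linux/power_supply.h> interfaces already used in the patch; the structure and callback names here are illustrative, not the driver's:

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>

static int example_acpi_event(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct acpi_bus_event *entry = data;

	/* only events from the ac_adapter class are interesting here */
	if (strcmp(entry->device_class, "ac_adapter") == 0)
		pr_debug("power source: %s\n",
			 power_supply_is_system_supplied() ? "AC" : "DC");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_acpi_event,
};

/* register_acpi_notifier(&example_nb) at init,
 * unregister_acpi_notifier(&example_nb) at teardown */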
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d8580927ca4..bef3e6910418 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
- ctx = (gpuobj->cinst >> 4) |
+ ctx = (gpuobj->pinst >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | 2;
+ ctx = (gpuobj->cinst << 10) | chan->id;
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
@@ -214,18 +214,19 @@ out:
spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
-void
+int
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht_entry *entry;
entry = nouveau_ramht_remove_entry(chan, handle);
if (!entry)
- return;
+ return -ENOENT;
nouveau_ramht_remove_hash(chan, entry->handle);
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
kfree(entry);
+ return 0;
}
struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e1a8f1..c82de98fee0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541ca9e5..04e8fb795269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
#define NV04_PFB_PRE 0x001002d4
# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i))
+# define NV20_PFB_ZCOMP_MODE_32 (4 << 24)
+# define NV20_PFB_ZCOMP_EN (1 << 31)
+# define NV25_PFB_ZCOMP_MODE_16 (1 << 20)
+# define NV25_PFB_ZCOMP_MODE_32 (2 << 20)
#define NV10_PFB_CLOSE_PAGE2 0x0010033c
#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI 2
-#define NV_DMA_TARGET_AGP 3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
#define NV_CLASS_DMA_TO_MEMORY 0x00000003
@@ -332,6 +326,7 @@
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
+# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
@@ -378,6 +373,7 @@
#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
-#define NV50_PDISPLAY_INTR_EN 0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_INTR_EN_0 0x00610028
+#define NV50_PDISPLAY_INTR_EN_1 0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040
#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c)
#define NV50_PDISPLAY_CURSOR 0x00610270
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
-#define NV50_PDISPLAY_CTRL_STATE 0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
-#define NV50_PDISPLAY_CTRL_VAL 0x00610304
-#define NV50_PDISPLAY_UNK_380 0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
-#define NV50_PDISPLAY_UNK_388 0x00610388
-#define NV50_PDISPLAY_UNK_38C 0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL 0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001
+#define NV50_PDISPLAY_PIO_DATA 0x00610304
#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
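The renamed EVO channel macros above are plain strided offsets. A quick worked expansion for EVO channel 2 and trap slot 1 (values shown for illustration only):

static void example_evo_offsets(void)
{
	u32 ctrl = NV50_PDISPLAY_EVO_CTRL(2);     /* 2 * 0x10 + 0x00610200 = 0x00610220 */
	u32 cb   = NV50_PDISPLAY_EVO_DMA_CB(2);   /* 2 * 0x10 + 0x00610204 = 0x00610224 */
	u32 trap = NV50_PDISPLAY_TRAPPED_ADDR(1); /* 1 * 0x08 + 0x00610080 = 0x00610088 */

	(void)ctrl; (void)cb; (void)trap;         /* not written anywhere; example only */
}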
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac97007038..9a250eb53098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
dma_addr_t *pages;
unsigned nr_pages;
- unsigned pte_start;
+ u64 offset;
bool bound;
};
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
}
}
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
- if (dev_priv->card_type < NV_50)
- return pte + 2;
-
- return pte << 1;
-}
-
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
- nvbe->pte_start = pte;
+ nvbe->offset = mem->start << PAGE_SHIFT;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
uint32_t offset_l = lower_32_bits(dma_offset);
- uint32_t offset_h = upper_32_bits(dma_offset);
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
- nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
- pte += 2;
- }
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- }
nvbe->bound = true;
return 0;
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
if (!nvbe->bound)
return 0;
- pte = nvbe->pte_start;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
- nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
- pte += 2;
- }
-
- dma_offset += NV_CTXDMA_PAGE_SIZE;
- }
- }
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
}
}
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ nvbe->offset = mem->start << PAGE_SHIFT;
+
+ nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ if (!nvbe->bound)
+ return 0;
+
+ nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT);
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nouveau_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
.destroy = nouveau_sgdma_destroy
};
+static struct ttm_backend_func nv50_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv50_sgdma_bind,
+ .unbind = nv50_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_sgdma_be *nvbe;
- if (!dev_priv->gart_info.sg_ctxdma)
- return NULL;
-
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
nvbe->dev = dev;
- nvbe->backend.func = &nouveau_sgdma_backend;
-
+ if (dev_priv->card_type < NV_50)
+ nvbe->backend.func = &nouveau_sgdma_backend;
+ else
+ nvbe->backend.func = &nv50_sgdma_backend;
return &nvbe->backend;
}
@@ -218,7 +209,6 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
- } else {
- /* 1 entire VM page table */
- aper_size = (512 * 1024 * 1024);
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
- return ret;
- }
-
- dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
- if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
- dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -EFAULT;
- }
+ ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- if (dev_priv->card_type < NV_50) {
- /* special case, allocated from global instmem heap so
- * cinst is invalid, we use it on all channels though so
- * cinst needs to be valid, set it the same as pinst
- */
- gpuobj->cinst = gpuobj->pinst;
-
- /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
- * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
- * on those cards? */
nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
(1 << 12) /* PT present */ |
(0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
+ (0 << 14) /* RW */ |
+ (2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(gpuobj, i * 4,
- dev_priv->gart_info.sg_dummy_bus | 3);
- }
- } else {
- for (i = 0; i < obj_size; i += 8) {
- nv_wo32(gpuobj, i + 0, 0x00000000);
- nv_wo32(gpuobj, i + 4, 0x00000000);
- }
+ for (i = 2; i < 2 + (aper_size >> 12); i++)
+ nv_wo32(gpuobj, i * 4, 0x00000000);
+
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ } else
+ if (dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+ 12, NV_MEM_ACCESS_RW,
+ &dev_priv->gart_info.vma);
+ if (ret)
+ return ret;
+
+ dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+ dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
}
- dev_priv->engine.instmem.flush(dev);
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
- dev_priv->gart_info.aper_base = 0;
- dev_priv->gart_info.aper_size = aper_size;
- dev_priv->gart_info.sg_ctxdma = gpuobj;
return 0;
}
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->gart_info.sg_dummy_page) {
- pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
- NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- unlock_page(dev_priv->gart_info.sg_dummy_page);
- __free_page(dev_priv->gart_info.sg_dummy_page);
- dev_priv->gart_info.sg_dummy_page = NULL;
- dev_priv->gart_info.sg_dummy_bus = 0;
- }
-
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_vm_put(&dev_priv->gart_info.vma);
}
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- int pte;
+ int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
- if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
- return 0;
- }
+ BUG_ON(dev_priv->card_type >= NV_50);
- NV_ERROR(dev, "Unimplemented on NV50\n");
- return -EINVAL;
+ return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+ (offset & NV_CTXDMA_PAGE_MASK);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e5..a54fc431fe98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->graph.grclass = nv04_graph_grclass;
engine->graph.init = nv04_graph_init;
engine->graph.takedown = nv04_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.unload_context = nv04_graph_unload_context;
engine->fifo.channels = 16;
engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv10_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
- engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv10_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv20_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv30_fb_init;
engine->fb.takedown = nv30_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv30_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_set = nv04_pm_clock_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
case 0x60:
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
- engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
- engine->graph.grclass = nv40_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv40_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
- engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv40_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
engine->instmem.resume = nv50_instmem_resume;
- engine->instmem.populate = nv50_instmem_populate;
- engine->instmem.clear = nv50_instmem_clear;
- engine->instmem.bind = nv50_instmem_bind;
- engine->instmem.unbind = nv50_instmem_unbind;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
if (dev_priv->chipset == 0x50)
engine->instmem.flush = nv50_instmem_flush;
else
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
engine->graph.fifo_access = nv50_graph_fifo_access;
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.takedown = nv50_gpio_fini;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaf:
- engine->pm.clock_get = nva3_pm_clock_get;
- engine->pm.clock_pre = nva3_pm_clock_pre;
- engine->pm.clock_set = nva3_pm_clock_set;
- break;
- default:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ case 0x50:
engine->pm.clock_get = nv50_pm_clock_get;
engine->pm.clock_pre = nv50_pm_clock_pre;
engine->pm.clock_set = nv50_pm_clock_set;
break;
+ default:
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
+ break;
}
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ engine->crypt.init = nv84_crypt_init;
+ engine->crypt.takedown = nv84_crypt_fini;
+ engine->crypt.create_context = nv84_crypt_create_context;
+ engine->crypt.destroy_context = nv84_crypt_destroy_context;
+ engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
+ break;
+ default:
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ break;
+ }
+ engine->vram.init = nv50_vram_init;
+ engine->vram.get = nv50_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
- engine->instmem.populate = nvc0_instmem_populate;
- engine->instmem.clear = nvc0_instmem_clear;
- engine->instmem.bind = nvc0_instmem_bind;
- engine->instmem.unbind = nvc0_instmem_unbind;
- engine->instmem.flush = nvc0_instmem_flush;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
+ engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->graph.grclass = NULL; //nvc0_graph_grclass;
engine->graph.init = nvc0_graph_init;
engine->graph.takedown = nvc0_graph_takedown;
engine->graph.fifo_access = nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.takedown = nouveau_stub_takedown;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nvc0_vram_init;
+ engine->vram.get = nvc0_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nvc0_vram_flags_valid;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
+ /* no dma objects on fermi... */
+ if (dev_priv->card_type >= NV_C0)
+ goto out_done;
+
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
- NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
&gpuobj);
if (ret)
goto out_err;
@@ -505,9 +568,10 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
- ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+ &gpuobj);
if (ret)
goto out_err;
@@ -516,11 +580,12 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
+out_done:
+ mutex_unlock(&dev_priv->channel->mutex);
return 0;
out_err:
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put(&dev_priv->channel);
return ret;
}
@@ -531,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pci_resume(pdev);
drm_kms_helper_poll_enable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
+static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ nouveau_fbcon_output_poll_changed(dev);
+}
+
static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@ nouveau_card_init(struct drm_device *dev)
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_reprobe,
nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
+ spin_lock_init(&dev_priv->channels.lock);
+ spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fb;
+ /* PCRYPT */
+ ret = engine->crypt.init(dev);
+ if (ret)
+ goto out_graph;
+
/* PFIFO */
ret = engine->fifo.init(dev);
if (ret)
- goto out_graph;
+ goto out_crypt;
}
ret = engine->display.create(dev);
if (ret)
goto out_fifo;
- /* this call irq_preinstall, register irq handler and
- * call irq_postinstall
- */
- ret = drm_irq_install(dev);
+ ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
if (ret)
- goto out_display;
+ goto out_vblank;
- ret = drm_vblank_init(dev, 0);
+ ret = nouveau_irq_init(dev);
if (ret)
- goto out_irq;
+ goto out_vblank;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
@@ -669,12 +749,16 @@ nouveau_card_init(struct drm_device *dev)
out_fence:
nouveau_fence_fini(dev);
out_irq:
- drm_irq_uninstall(dev);
-out_display:
+ nouveau_irq_fini(dev);
+out_vblank:
+ drm_vblank_cleanup(dev);
engine->display.destroy(dev);
out_fifo:
if (!nouveau_noaccel)
engine->fifo.takedown(dev);
+out_crypt:
+ if (!nouveau_noaccel)
+ engine->crypt.takedown(dev);
out_graph:
if (!nouveau_noaccel)
engine->graph.takedown(dev);
@@ -713,12 +797,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
if (!engine->graph.accel_blocked) {
nouveau_fence_fini(dev);
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put_unlocked(&dev_priv->channel);
}
if (!nouveau_noaccel) {
engine->fifo.takedown(dev);
+ engine->crypt.takedown(dev);
engine->graph.takedown(dev);
}
engine->fb.takedown(dev);
@@ -737,7 +821,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_gpuobj_takedown(dev);
nouveau_mem_vram_fini(dev);
- drm_irq_uninstall(dev);
+ nouveau_irq_fini(dev);
+ drm_vblank_cleanup(dev);
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@ err_out:
void nouveau_lastclose(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
else
getparam->value = NV_PCI;
break;
- case NOUVEAU_GETPARAM_FB_PHYSICAL:
- getparam->value = dev_priv->fb_phys;
- break;
- case NOUVEAU_GETPARAM_AGP_PHYSICAL:
- getparam->value = dev_priv->gart_info.aper_base;
- break;
- case NOUVEAU_GETPARAM_PCI_PHYSICAL:
- if (dev->sg) {
- getparam->value = (unsigned long)dev->sg->virtual;
- } else {
- NV_ERROR(dev, "Requested PCIGART address, "
- "while no PCIGART was created\n");
- return -EINVAL;
- }
- break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = dev_priv->fb_available_size;
break;
@@ -1046,7 +1117,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev_priv->gart_info.aper_size;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
- getparam->value = dev_priv->vm_vram_base;
+ getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
break;
+ case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+ getparam->value = (dev_priv->card_type < NV_50);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
}
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
return false;
}
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if ((nv_rd32(dev, reg) & mask) != val)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
+
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t mask = ~0;
+
+ if (dev_priv->card_type == NV_40)
+ mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
nv_rd32(dev, NV04_PGRAPH_STATUS));
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 7ecc4adc1e45..8d9968e1cba8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -265,8 +265,8 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
{ I2C_BOARD_INFO("w83781d", 0x2d) },
- { I2C_BOARD_INFO("f75375", 0x2e) },
{ I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
{ I2C_BOARD_INFO("lm99", 0x4c) },
{ }
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 000000000000..fbe0fb13bc1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+ while (bf->name) {
+ if (value & bf->mask) {
+ printk(" %s", bf->name);
+ value &= ~bf->mask;
+ }
+
+ bf++;
+ }
+
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ while (en->name) {
+ if (value == en->value) {
+ printk("%s", en->name);
+ return;
+ }
+
+ en++;
+ }
+
+ printk("(unknown enum 0x%08x)", value);
+}
+
+int
+nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
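The helpers in the new nouveau_util.c are meant to decode interrupt/status words in (rate-limited) error paths. A usage sketch with a caller-defined table; the table contents, register value and function name are made up for illustration, while the struct layout matches the header added below:

/* Hypothetical decode table: { mask, name }, terminated by an empty entry. */
static const struct nouveau_bitfield example_intr_bits[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00100000, "ERROR" },
	{}
};

void example_report_intr(struct drm_device *dev, u32 stat)
{
	if (nouveau_ratelimit()) {	/* at most ~20 messages per 3 * HZ window */
		printk(KERN_ERR "example intr:");
		nouveau_bitfield_print(example_intr_bits, stat);
		printk("\n");
	}
}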
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 000000000000..d9ceaea26f4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+ u32 mask;
+ const char *name;
+};
+
+struct nouveau_enum {
+ u32 value;
+ const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 000000000000..97d82aedf86b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mm_node *r;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ list_for_each_entry(r, &vram->regions, rl_entry) {
+ u64 phys = (u64)r->offset << 12;
+ u32 num = r->length >> bits;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map(vma, pgt, vram, pte, len, phys);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+ nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+ dma_addr_t *list)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map_sg(vma, pgt, pte, list, len);
+
+ num -= len;
+ pte += len;
+ list += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->unmap(pgt, pte, len);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+ nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
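+/* Drop one reference on each page table in [fpde, lpde].  When a table's
+ * refcount reaches zero, its PDE is rewritten in every attached page
+ * directory and the backing gpuobj is released (with the mm mutex
+ * dropped around the release).
+ */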
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_vm_pgt *vpgt;
+ struct nouveau_gpuobj *pgt;
+ u32 pde;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ vpgt = &vm->pgt[pde - vm->fpde];
+ if (--vpgt->refcount[big])
+ continue;
+
+ pgt = vpgt->obj[big];
+ vpgt->obj[big] = NULL;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ }
+}
+
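+/* Allocate and hook up the page table backing a single PDE.  The mm
+ * mutex is dropped around the gpuobj allocation, so another thread may
+ * populate the PDE in the meantime; if so, the freshly allocated table
+ * is released again and only the refcount is bumped.
+ */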
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_gpuobj *pgt;
+ int big = (type != vm->spg_shift);
+ u32 pgt_size;
+ int ret;
+
+ pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+ pgt_size *= 8;
+
+ mutex_unlock(&vm->mm->mutex);
+ ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ if (unlikely(ret))
+ return ret;
+
+ /* someone beat us to filling the PDE while we didn't have the lock */
+ if (unlikely(vpgt->refcount[big]++)) {
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ return 0;
+ }
+
+ vpgt->obj[big] = pgt;
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ return 0;
+}
+
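+/* Allocate a range of GPU virtual address space for 'vma' and take a
+ * reference on (creating if necessary) every page table the range
+ * touches.
+ */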
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *vma)
+{
+ u32 align = (1 << page_shift) >> 12;
+ u32 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&vm->mm->mutex);
+ ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&vm->mm->mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != vm->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nouveau_mm_put(vm->mm, vma->node);
+ mutex_unlock(&vm->mm->mutex);
+ vma->node = NULL;
+ return ret;
+ }
+ }
+ mutex_unlock(&vm->mm->mutex);
+
+ vma->vm = vm;
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+ struct nouveau_vm *vm = vma->vm;
+ u32 fpde, lpde;
+
+ if (unlikely(vma->node == NULL))
+ return;
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+ mutex_lock(&vm->mm->mutex);
+ nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+ nouveau_mm_put(vm->mm, vma->node);
+ vma->node = NULL;
+ mutex_unlock(&vm->mm->mutex);
+}
+
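+/* Create a VM spanning [offset, offset + length); only the range from
+ * mm_offset upwards is handed to the address-space allocator used by
+ * nouveau_vm_get().
+ */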
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **pvm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vm *vm;
+ u64 mm_length = (offset + length) - mm_offset;
+ u32 block, pgt_bits;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ if (dev_priv->card_type == NV_50) {
+ vm->map_pgt = nv50_vm_map_pgt;
+ vm->map = nv50_vm_map;
+ vm->map_sg = nv50_vm_map_sg;
+ vm->unmap = nv50_vm_unmap;
+ vm->flush = nv50_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 16;
+
+ pgt_bits = 29;
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+
+ } else
+ if (dev_priv->card_type == NV_C0) {
+ vm->map_pgt = nvc0_vm_map_pgt;
+ vm->map = nvc0_vm_map;
+ vm->map_sg = nvc0_vm_map_sg;
+ vm->unmap = nvc0_vm_unmap;
+ vm->flush = nvc0_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 17;
+ pgt_bits = 27;
+
+		/* Should be 4096 everywhere; this is a hack that's
+		 * currently necessary to avoid an elusive bug that
+		 * causes corruption when mixing small/large pages.
+		 */
+ if (length < (1ULL << 40))
+ block = 4096;
+ else {
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+ }
+ } else {
+ kfree(vm);
+ return -ENOSYS;
+ }
+
+ vm->fpde = offset >> pgt_bits;
+ vm->lpde = (offset + length - 1) >> pgt_bits;
+ vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+ if (!vm->pgt) {
+ kfree(vm);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vm->pgd_list);
+ vm->dev = dev;
+ vm->refcount = 1;
+ vm->pgt_bits = pgt_bits - 12;
+
+ ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+ block >> 12);
+ if (ret) {
+ kfree(vm);
+ return ret;
+ }
+
+ *pvm = vm;
+ return 0;
+}
+
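+/* Attach a page directory to the VM and populate it with the VM's
+ * existing page tables.
+ */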
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd;
+ int i;
+
+ if (!pgd)
+ return 0;
+
+ vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+ if (!vpgd)
+ return -ENOMEM;
+
+ nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+ mutex_lock(&vm->mm->mutex);
+ for (i = vm->fpde; i <= vm->lpde; i++)
+ vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ list_add(&vpgd->head, &vm->pgd_list);
+ mutex_unlock(&vm->mm->mutex);
+ return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ if (!pgd)
+ return;
+
+ mutex_lock(&vm->mm->mutex);
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ if (vpgd->obj != pgd)
+ continue;
+
+ list_del(&vpgd->head);
+ nouveau_gpuobj_ref(NULL, &vpgd->obj);
+ kfree(vpgd);
+ }
+ mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ nouveau_vm_unlink(vm, vpgd->obj);
+ }
+ WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+ kfree(vm->pgt);
+ kfree(vm);
+}
+
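+/* Update the VM reference held in *ptr: take a reference on 'ref'
+ * (linking 'pgd' into it if given), then drop the reference on the old
+ * VM, destroying it when its refcount reaches zero.
+ */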
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+ struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm *vm;
+ int ret;
+
+ vm = ref;
+ if (vm) {
+ ret = nouveau_vm_link(vm, pgd);
+ if (ret)
+ return ret;
+
+ vm->refcount++;
+ }
+
+ vm = *ptr;
+ *ptr = ref;
+
+ if (vm) {
+ nouveau_vm_unlink(vm, pgd);
+
+ if (--vm->refcount == 0)
+ nouveau_vm_del(vm);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 000000000000..e1193515771b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+ struct nouveau_gpuobj *obj[2];
+ u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+ struct list_head head;
+ struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+ struct nouveau_vm *vm;
+ struct nouveau_mm_node *node;
+ u64 offset;
+ u32 access;
+};
+
+struct nouveau_vm {
+ struct drm_device *dev;
+ struct nouveau_mm *mm;
+ int refcount;
+
+ struct list_head pgd_list;
+ atomic_t pgraph_refs;
+ atomic_t pcrypt_refs;
+
+ struct nouveau_vm_pgt *pgt;
+ u32 fpde;
+ u32 lpde;
+
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+ void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+ void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **);
+int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+ struct nouveau_gpuobj *pgd);
+int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+ dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e180741629..297505eb98d5 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
if (dev_priv->card_type >= NV_30)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ if (dev_priv->card_type >= NV_10)
+ regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ else
+ regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f2ffcc..e000455e06d0 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000001))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
bool duallink, dummy;
- nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
- clock, &duallink, &dummy);
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
return;
if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
- struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
/* when removing an output, crtc may not be set, but PANEL_OFF
* must still be run
*/
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
if (mode == DRM_MODE_DPMS_ON) {
- if (!nv_connector->native_mode) {
- NV_ERROR(dev, "Not turning on LVDS without native mode\n");
- return;
- }
call_lvds_script(dev, nv_encoder->dcb, head,
- LVDS_PANEL_ON, nv_connector->native_mode->clock);
+ LVDS_PANEL_ON, nv_encoder->mode.clock);
} else
/* pxclk of 0 is fine for PANEL_OFF, and for a
* disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf772e3c..1715e1464b7d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
func->save(encoder);
}
+ nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+ nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
return 0;
}
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
+ nouveau_irq_unregister(dev, 24);
+ nouveau_irq_unregister(dev, 25);
+
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c9388bc1..7a1189371096 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
-void
+int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, 7);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t dsize;
uint32_t width;
uint32_t *data = (uint32_t *)image->data;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- nouveau_fbcon_gpu_lockup(info);
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 8);
dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
- if (RING_SPACE(chan, iter_len + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, iter_len + 1);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ return 0;
}
int
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
- 0x0062 : 0x0042, NvCtxSurf2D);
+ ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+ dev_priv->card_type >= NV_10 ?
+ 0x0062 : 0x0042);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+ ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+ ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
- 0x009f : 0x005f, NvImageBlit);
+ ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+ dev_priv->chipset >= 0x11 ?
+ 0x009f : 0x005f);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddcd..f89d104698df 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_util.h"
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
@@ -151,10 +157,31 @@ void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ unsigned long flags;
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
+ /* Keep it from being rescheduled */
+ nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
return 0;
}
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+}
+
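+/* Try to handle a method trapped by PFIFO in software: method 0 binds a
+ * software object to a subchannel, anything else is forwarded to the
+ * software class bound to that subchannel (if it is a software
+ * subchannel).  Returns true if the method was handled.
+ */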
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_gpuobj *obj;
+ unsigned long flags;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ u32 engine;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ chan = dev_priv->channels.ptr[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000: /* bind object to subchannel */
+ obj = nouveau_ramht_find(chan, data);
+ if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+ break;
+
+ chan->sw_subchannel[subc] = obj->class;
+ engine = 0x0000000f << (subc * 4);
+
+ nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+ handled = true;
+ break;
+ default:
+ engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ mthd, data))
+ handled = true;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return handled;
+}
+
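+/* PFIFO interrupt handler: services CACHE_ERROR (possibly as a software
+ * method), DMA_PUSHER and SEMAPHORE interrupts, giving up after 100
+ * iterations if the status refuses to clear.
+ */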
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800; no clue
+			 * as to why.
+			 */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x Push 0x%08x\n",
+ chid, dma_get, dma_put, state, push);
+
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
+ }
+
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b635..af75015068d6 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
static uint32_t nv04_graph_ctx_regs[] = {
0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
-void
+static void
nv04_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = dev_priv->engine.fifo.channel_id(dev);
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
void nv04_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ int ret;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv04_graph_register(dev);
+ if (ret)
+ return ret;
+
/* Enable PGRAPH interrupts */
+ nouveau_irq_register(dev, 12, nv04_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
void nv04_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
}
static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
atomic_set(&chan->fence.last_sequence_irq, data);
return 0;
}
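+/* Software method for class 0x506e: finish a queued page flip and point
+ * the CRTC at the new framebuffer offset.
+ */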
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state s;
+
+ if (!nouveau_finish_page_flip(chan, &s))
+ nv_set_crtc_base(dev, s.crtc,
+ s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+ return 0;
+}
+
/*
* Software methods, why they are needed, and how they all work:
*
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
*/
static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
- uint32_t tmp;
+ u32 tmp;
tmp = nv_ri32(dev, instance);
tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
}
static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- uint32_t tmp, ctx1;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 tmp, ctx1;
int class, op, valid = 1;
ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
}
static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (data > 5)
return 1;
/* Old versions of the objects only accept first three operations. */
- if (data > 2 && grclass < 0x40)
+ if (data > 2 && class < 0x40)
return 1;
nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
return 1;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
- { 0x0150, nv04_graph_mthd_set_ref },
- {}
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
- { 0x0184, nv04_graph_mthd_bind_nv01_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv04_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
- { 0x0188, nv04_graph_mthd_bind_chroma },
- { 0x018c, nv04_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_nv04_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
+ if (dev_priv->engine.graph.registered)
+ return 0;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
+ /* dvd subpicture */
+ NVOBJ_CLASS(dev, 0x0038, GR);
+
+ /* m2mf */
+ NVOBJ_CLASS(dev, 0x0039, GR);
+
+ /* nv03 gdirect */
+ NVOBJ_CLASS(dev, 0x004b, GR);
+ NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 gdirect */
+ NVOBJ_CLASS(dev, 0x004a, GR);
+ NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 imageblit */
+ NVOBJ_CLASS(dev, 0x001f, GR);
+ NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+ NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 imageblit */
+ NVOBJ_CLASS(dev, 0x005f, GR);
+ NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 iifc */
+ NVOBJ_CLASS(dev, 0x0060, GR);
+ NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+ /* nv05 iifc */
+ NVOBJ_CLASS(dev, 0x0064, GR);
+
+ /* nv01 ifc */
+ NVOBJ_CLASS(dev, 0x0021, GR);
+ NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 ifc */
+ NVOBJ_CLASS(dev, 0x0061, GR);
+ NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 ifc */
+ NVOBJ_CLASS(dev, 0x0065, GR);
+
+ /* nv03 sifc */
+ NVOBJ_CLASS(dev, 0x0036, GR);
+ NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifc */
+ NVOBJ_CLASS(dev, 0x0076, GR);
+ NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 sifc */
+ NVOBJ_CLASS(dev, 0x0066, GR);
+
+ /* nv03 sifm */
+ NVOBJ_CLASS(dev, 0x0037, GR);
+ NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifm */
+ NVOBJ_CLASS(dev, 0x0077, GR);
+ NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* null */
+ NVOBJ_CLASS(dev, 0x0030, GR);
+
+ /* surf2d */
+ NVOBJ_CLASS(dev, 0x0042, GR);
+
+ /* rop */
+ NVOBJ_CLASS(dev, 0x0043, GR);
+
+ /* beta1 */
+ NVOBJ_CLASS(dev, 0x0012, GR);
+
+ /* beta4 */
+ NVOBJ_CLASS(dev, 0x0072, GR);
+
+ /* cliprect */
+ NVOBJ_CLASS(dev, 0x0019, GR);
+
+ /* nv01 pattern */
+ NVOBJ_CLASS(dev, 0x0018, GR);
+
+ /* nv04 pattern */
+ NVOBJ_CLASS(dev, 0x0044, GR);
+
+ /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0052, GR);
+
+ /* surf3d */
+ NVOBJ_CLASS(dev, 0x0053, GR);
+ NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+ NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+ /* nv03 tex_tri */
+ NVOBJ_CLASS(dev, 0x0048, GR);
+ NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+ NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+ /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0054, GR);
+
+ /* multitex_tri */
+ NVOBJ_CLASS(dev, 0x0055, GR);
+
+ /* nv01 chroma */
+ NVOBJ_CLASS(dev, 0x0017, GR);
+
+ /* nv04 chroma */
+ NVOBJ_CLASS(dev, 0x0057, GR);
+
+ /* surf_dst */
+ NVOBJ_CLASS(dev, 0x0058, GR);
+
+ /* surf_src */
+ NVOBJ_CLASS(dev, 0x0059, GR);
+
+ /* surf_color */
+ NVOBJ_CLASS(dev, 0x005a, GR);
+
+ /* surf_zeta */
+ NVOBJ_CLASS(dev, 0x005b, GR);
+
+ /* nv01 line */
+ NVOBJ_CLASS(dev, 0x001c, GR);
+ NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 line */
+ NVOBJ_CLASS(dev, 0x005c, GR);
+ NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 tri */
+ NVOBJ_CLASS(dev, 0x001d, GR);
+ NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 tri */
+ NVOBJ_CLASS(dev, 0x005d, GR);
+ NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 rect */
+ NVOBJ_CLASS(dev, 0x001e, GR);
+ NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 rect */
+ NVOBJ_CLASS(dev, 0x005e, GR);
+ NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
- {},
+static struct nouveau_bitfield nv04_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ {}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
- {},
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+ {}
};
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0038, false, NULL }, /* dvd subpicture */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
- { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
- { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
- { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
- { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
- { 0x0064, false, NULL }, /* nv05 iifc */
- { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
- { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
- { 0x0065, false, NULL }, /* nv05 ifc */
- { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
- { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
- { 0x0066, false, NULL }, /* nv05 sifc */
- { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
- { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
- { 0x0030, false, NULL }, /* null */
- { 0x0042, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0018, false, NULL }, /* nv01 pattern */
- { 0x0044, false, NULL }, /* nv04 pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
- { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
- { 0x0054, false, NULL }, /* tex_tri */
- { 0x0055, false, NULL }, /* multitex_tri */
- { 0x0017, false, NULL }, /* nv01 chroma */
- { 0x0057, false, NULL }, /* nv04 chroma */
- { 0x0058, false, NULL }, /* surf_dst */
- { 0x0059, false, NULL }, /* surf_src */
- { 0x005a, false, NULL }, /* surf_color */
- { 0x005b, false, NULL }, /* surf_zeta */
- { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
- { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
- { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
- { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
- { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
- { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
- { 0x506e, true, nv04_graph_mthds_sw },
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
{}
};
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x0f000000) >> 24;
+ u32 subc = (addr & 0x0000e000) >> 13;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_NOTIFY) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_NOTIFY;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv04_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv04_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
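The new nv04_graph_isr() decodes the trapped channel, subchannel and method out of NV04_PGRAPH_TRAPPED_ADDR with fixed masks and reports the pending interrupt bits through the { mask, name } tables via nouveau_bitfield_print(). For readers unfamiliar with that idiom, here is a minimal stand-alone sketch of the same decode-table pattern (plain user-space C; the names and mask values are illustrative only, not the real nouveau_reg.h definitions):

	/* Decode-table sketch: same shape as nouveau_bitfield and the
	 * nv04_graph_isr() printout, but with made-up mask values and
	 * no hardware access.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct bitfield {
		uint32_t mask;
		const char *name;
	};

	/* Example masks only; the real values live in nouveau_reg.h. */
	static const struct bitfield demo_intr[] = {
		{ 0x00000001, "NOTIFY" },
		{ 0x00001000, "CONTEXT_SWITCH" },
		{ 0, NULL }
	};

	static void bitfield_print(const struct bitfield *bf, uint32_t value)
	{
		for (; bf->mask; bf++)
			if (value & bf->mask)
				printf(" %s", bf->name);
	}

	int main(void)
	{
		uint32_t stat = 0x00000001;	/* pretend PGRAPH interrupt status */
		uint32_t addr = 0x0301218c;	/* pretend TRAPPED_ADDR readout */

		/* Field extraction mirrors the nv04 handler above. */
		uint32_t chid = (addr & 0x0f000000) >> 24;
		uint32_t subc = (addr & 0x0000e000) >> 13;
		uint32_t mthd = (addr & 0x00001ffc);

		printf("PGRAPH -");
		bitfield_print(demo_intr, stat);
		printf(" ch %u subc %u mthd 0x%04x\n",
		       (unsigned)chid, (unsigned)subc, (unsigned)mthd);
		return 0;
	}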
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae297abde..b8e3edb5c063 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
}
int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv04_instmem_suspend(struct drm_device *dev)
{
return 0;
}
void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
{
- return 0;
}
int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_mm_node *ramin = NULL;
+
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap))
+ return -ENOMEM;
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ return -ENOMEM;
+ }
+
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ gpuobj->node = ramin;
+ gpuobj->vinst = ramin->start;
return 0;
}
void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ gpuobj->node = NULL;
+ spin_unlock(&dev_priv->ramin_lock);
}
int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
+ gpuobj->pinst = gpuobj->vinst;
return 0;
}
void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
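Worth noting in the new nv04_instmem_get()/nv04_instmem_put() pair is the locking shape: the potentially sleeping drm_mm_pre_get() runs before ramin_lock is taken, the search and claim happen under the spinlock, and the outer do/while retries the whole sequence if the atomic claim comes back empty. A rough user-space analogue of that shape, with a toy slot array and a mutex standing in for drm_mm and the spinlock (all names here are hypothetical):

	/* Toy analogue of the nv04_instmem_get() flow: sleepable preparation
	 * outside the lock, search + claim inside it. The real driver loops
	 * because drm_mm_get_block_atomic() can still fail under the lock;
	 * this toy claim cannot, so no retry is needed here.
	 */
	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	#define SLOTS 8

	static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
	static int heap_busy[SLOTS];

	static int heap_pre_get(void)
	{
		/* Stand-in for drm_mm_pre_get(): anything that may sleep or
		 * allocate happens here, before the lock is taken. */
		return 0;
	}

	static int heap_get(int *slot_out)
	{
		int i, slot = -1;

		if (heap_pre_get())
			return -ENOMEM;

		pthread_mutex_lock(&heap_lock);
		for (i = 0; i < SLOTS; i++) {
			if (!heap_busy[i]) {		/* "search_free" */
				heap_busy[i] = 1;	/* "get_block_atomic" */
				slot = i;
				break;
			}
		}
		pthread_mutex_unlock(&heap_lock);

		if (slot < 0)
			return -ENOMEM;

		*slot_out = slot;
		return 0;
	}

	static void heap_put(int slot)
	{
		pthread_mutex_lock(&heap_lock);
		heap_busy[slot] = 0;	/* mirrors nv04_instmem_put() */
		pthread_mutex_unlock(&heap_lock);
	}

	int main(void)
	{
		int slot;

		if (heap_get(&slot) == 0) {
			printf("claimed slot %d\n", slot);
			heap_put(slot);
		}
		return 0;
	}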
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e501..f78181a59b4a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+ tile->addr = addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+
+ if (dev_priv->card_type == NV_20) {
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ /*
+ * Allocate some of the on-die tag memory,
+ * used to store Z compression meta-data (most
+ * likely just a bitmap determining if a given
+ * tile is compressed or not).
+ */
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ if (dev_priv->chipset >= 0x25)
+ tile->zcomp = tile->tag_mem->start |
+ (bpp == 16 ?
+ NV25_PFB_ZCOMP_MODE_16 :
+ NV25_PFB_ZCOMP_MODE_32);
+ else
+ tile->zcomp = tile->tag_mem->start |
+ NV20_PFB_ZCOMP_EN |
+ (bpp == 16 ? 0 :
+ NV20_PFB_ZCOMP_MODE_32);
+ }
+
+ tile->addr |= 3;
+ } else {
+ tile->addr |= 1;
+ }
+
+ } else {
+ tile->addr |= 1 << 31;
+ }
+}
+
void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (pitch) {
- if (dev_priv->card_type >= NV_20)
- addr |= 1;
- else
- addr |= 1 << 31;
+ if (tile->tag_mem) {
+ nv20_fb_free_tag(dev, tile->tag_mem);
+ tile->tag_mem = NULL;
}
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+ if (dev_priv->card_type == NV_20)
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ if (dev_priv->card_type == NV_20)
+ drm_mm_init(&pfb->tag_heap, 0,
+ (dev_priv->chipset >= 0x25 ?
+ 64 * 1024 : 32 * 1024));
+
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
void
nv10_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+
+ if (dev_priv->card_type == NV_20)
+ drm_mm_takedown(&pfb->tag_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd5..d2ecbff4bee1 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
+ /* Fill entries that are seen filled in dumps of the nvidia driver just
* after the channel is put into DMA mode

*/
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c9731159..8c92edb7bbcd 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
#define NV10_FIFO_NUMBER 32
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv10_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan && chan->pgraph_ctx)
nv10_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
#define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1 << 31;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
}
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
- int i;
+ int ret, i;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv10_graph_register(dev);
+ if (ret)
+ return ret;
+
+ nouveau_irq_register(dev, 12, nv10_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv10_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
void nv10_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct graph_state *ctx = chan->pgraph_ctx;
struct pipe_state *pipe = &ctx->pipe_state;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
uint32_t xfmode0, xfmode1;
int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
nouveau_wait_for_idle(dev);
- pgraph->fifo_access(dev, true);
-
return 0;
}
static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
nouveau_wait_for_idle(dev);
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
nv_wr32(dev, 0x004006b0,
nv_rd32(dev, 0x004006b0) | 0x8 << 24);
- pgraph->fifo_access(dev, true);
+ return 0;
+}
+
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+ NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+ /* celcius */
+ if (dev_priv->chipset <= 0x10) {
+ NVOBJ_CLASS(dev, 0x0056, GR);
+ } else
+ if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+ NVOBJ_CLASS(dev, 0x0096, GR);
+ } else {
+ NVOBJ_CLASS(dev, 0x0099, GR);
+ NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+ }
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
return 0;
}
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+struct nouveau_bitfield nv10_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ { NV_PGRAPH_INTR_ERROR, "ERROR" },
{}
};
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x005f, false, NULL }, /* imageblit */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0093, false, NULL }, /* surf3d */
- { 0x0094, false, NULL }, /* tex_tri */
- { 0x0095, false, NULL }, /* multitex_tri */
- { 0x0056, false, NULL }, /* celcius (nv10) */
- { 0x0096, false, NULL }, /* celcius (nv11) */
- { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
{}
};
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv10_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56eca..8464b76798d5 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
- nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
}
void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+ if (dev_priv->card_type == NV_20) {
+ nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ }
}
int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
nv20_graph_rdi(dev);
+ ret = nv20_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x40009C , 0x00000040);
if (dev_priv->chipset >= 0x25) {
- nv_wr32(dev, 0x400890, 0x00080000);
+ nv_wr32(dev, 0x400890, 0x00a8cfff);
nv_wr32(dev, 0x400610, 0x304B1FB6);
- nv_wr32(dev, 0x400B80, 0x18B82880);
+ nv_wr32(dev, 0x400B80, 0x1cbd3883);
nv_wr32(dev, 0x400B84, 0x44000000);
nv_wr32(dev, 0x400098, 0x40000080);
nv_wr32(dev, 0x400B88, 0x000000ff);
+
} else {
- nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+ nv_wr32(dev, 0x400880, 0x0008c7df);
nv_wr32(dev, 0x400094, 0x00000005);
- nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+ nv_wr32(dev, 0x400B80, 0x45eae20e);
nv_wr32(dev, 0x400B84, 0x24000000);
nv_wr32(dev, 0x400098, 0x00000040);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, 0x100300 + i * 4));
- }
nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
+
nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
return ret;
}
+ ret = nv30_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
pgraph->ctx_table->pinst >> 4);
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
return 0;
}
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x009e, false, NULL }, /* swzsurf */
- { 0x0096, false, NULL }, /* celcius */
- { 0x0097, false, NULL }, /* kelvin (nv20) */
- { 0x0597, false, NULL }, /* kelvin (nv25) */
- {}
-};
-
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x038a, false, NULL }, /* ifc (nv30) */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0389, false, NULL }, /* sifm (nv30) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0362, false, NULL }, /* surf2d (nv30) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x039e, false, NULL }, /* swzsurf */
- { 0x0397, false, NULL }, /* rankine (nv30) */
- { 0x0497, false, NULL }, /* rankine (nv35) */
- { 0x0697, false, NULL }, /* rankine (nv34) */
- {}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
+
+ /* kelvin */
+ if (dev_priv->chipset < 0x25)
+ NVOBJ_CLASS(dev, 0x0097, GR);
+ else
+ NVOBJ_CLASS(dev, 0x0597, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+ /* rankine */
+ if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0397, GR);
+ else
+ if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0697, GR);
+ else
+ if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0497, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
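The rankine selection in nv30_graph_register() uses a slightly cryptic idiom: each test masks a bitmap of low chipset nibbles, so 0x00000003 covers nibbles 0-1 (class 0x0397), 0x00000010 covers nibble 4 (class 0x0697) and 0x000001e0 covers nibbles 5-8 (class 0x0497). A small stand-alone check of that mapping, purely as an illustration of the idiom (the chipset list below is whatever the masks imply, not an authoritative table):

	/* Print which rankine class each 0x3x chipset nibble would select,
	 * using the same bitmap tests as nv30_graph_register() above.
	 */
	#include <stdio.h>

	static unsigned rankine_class(unsigned chipset)
	{
		unsigned bit = 1u << (chipset & 0x0f);

		if (0x00000003 & bit)
			return 0x0397;
		if (0x00000010 & bit)
			return 0x0697;
		if (0x000001e0 & bit)
			return 0x0497;
		return 0;	/* no rankine class registered */
	}

	int main(void)
	{
		unsigned chipset;

		for (chipset = 0x30; chipset <= 0x3f; chipset++)
			printf("chipset 0x%02x -> class 0x%04x\n",
			       chipset, rankine_class(chipset));
		return 0;
	}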
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f095128..e0135f0e2144 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr | 1;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = 0;
+}
+
static int
calc_bias(struct drm_device *dev, int k, int i, int j)
{
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
/* Init the memory timing regs at 0x10037c/0x1003ac */
if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd7..f3d9c0505f7b 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
#include "nouveau_drm.h"
void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
case 0x40:
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
break;
default:
- nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
break;
}
}
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cdd..49b9a35a9cd6 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV40_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
@@ -59,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
/* enable the fifo dma operation */
@@ -70,17 +74,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -279,6 +272,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -301,7 +295,7 @@ nv40_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin_grctx &&
chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_grctx ctx = {};
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@ nv40_graph_create_context(struct nouveau_channel *chan)
nv40_grctx_init(&ctx);
nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
+
+ /* init grctx pointer in ramfc, and on PFIFO if channel is
+ * already active there
+ */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+ if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+ nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
@@ -174,43 +205,44 @@ nv40_graph_unload_context(struct drm_device *dev)
}
void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41: /* guess */
+ case 0x42:
+ case 0x43:
+ case 0x45: /* guess */
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
case 0x44:
case 0x4a:
- case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
-
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
- break;
-
+ case 0x4c:
+ case 0x67:
default:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
}
}
@@ -232,7 +264,7 @@ nv40_graph_init(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_grctx ctx = {};
uint32_t vramsz, *cp;
- int i, j;
+ int ret, i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +288,14 @@ nv40_graph_init(struct drm_device *dev)
kfree(cp);
+ ret = nv40_graph_register(dev);
+ if (ret)
+ return ret;
+
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+ nouveau_irq_register(dev, 12, nv40_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -347,7 +384,7 @@ nv40_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv40_graph_set_tile_region(dev, i);
/* begin RAM config */
vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -364,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
break;
default:
switch (dev_priv->chipset) {
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- default:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x4e:
+ case 0x44:
+ case 0x4a:
nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
break;
+ default:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
}
nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
@@ -390,26 +430,110 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
+ nouveau_irq_unregister(dev, 12);
+}
+
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+ /* curie */
+ if (nv44_graph_class(dev))
+ NVOBJ_CLASS(dev, 0x4497, GR);
+ else
+ NVOBJ_CLASS(dev, 0x4097, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (inst == chan->ramin_grctx->pinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
}
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x3089, false, NULL }, /* sifm (nv40) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x3062, false, NULL }, /* surf2d (nv40) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x309e, false, NULL }, /* swzsurf */
- { 0x4097, false, NULL }, /* curie (nv40) */
- { 0x4497, false, NULL }, /* curie (nv44) */
- {}
-};
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+ u32 chid = nv40_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ } else
+ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ nv_mask(dev, 0x402000, 0, 0);
+ }
+ }
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
*/
static int
-nv40_graph_4097(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if ((dev_priv->chipset & 0xf0) == 0x60)
- return 0;
-
- return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
-static int
nv40_graph_vs_count(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
- if (!nv40_graph_4097(ctx->dev)) {
+ if (nv44_graph_class(ctx->dev)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
- if (nv40_graph_4097(ctx->dev)) {
+ if (!nv44_graph_class(ctx->dev)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
- int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+ int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+ vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+ cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
int
nv40_mc_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t tmp;
-
/* Power up everything; resetting each individual unit will
* be done later if needed.
*/
nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
- switch (dev_priv->chipset) {
- case 0x44:
- case 0x46: /* G72 */
- case 0x4e:
- case 0x4c: /* C51_G7X */
- tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ if (nv44_graph_class(dev)) {
+ u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
nv_wr32(dev, NV40_PMC_1700, tmp);
nv_wr32(dev, NV40_PMC_1704, 0);
nv_wr32(dev, NV40_PMC_1708, 0);
nv_wr32(dev, NV40_PMC_170C, tmp);
- break;
- default:
- break;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0c6de8..9023c4dbb449 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cursor = NULL;
struct drm_gem_object *gem;
@@ -374,8 +374,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -437,6 +436,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
.cursor_move = nv50_crtc_cursor_move,
.gamma_set = nv50_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv50_crtc_destroy,
};
@@ -453,6 +453,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -468,6 +469,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -545,7 +547,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+ nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (!nv_crtc->fb.tile_flags) {
OUT_RING(evo, drm_fb->pitch | (1 << 20));
} else {
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
- fb->nvbo->tile_mode);
+ u32 tile_mode = fb->nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ tile_mode >>= 4;
+ OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
}
if (dev_priv->chipset == 0x50)
OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c611ddea..7cc94ed9ed95 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
+static void nv50_display_isr(struct drm_device *);
+
static inline int
nv50_sor_nr(struct drm_device *dev)
{
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan = *pchan;
-
- if (!chan)
- return;
- *pchan = NULL;
-
- nouveau_gpuobj_channel_takedown(chan);
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
- if (chan->user)
- iounmap(chan->user);
-
- kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
- uint32_t tile_flags, uint32_t magic_flags,
- uint32_t offset, uint32_t limit)
-{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
- if (ret)
- return ret;
- obj->engine = NVOBJ_ENGINE_DISPLAY;
-
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- if (dev_priv->card_type < NV_C0)
- nv_wo32(obj, 20, 0x00010000);
- else
- nv_wo32(obj, 20, 0x00020000);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
-
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramht = NULL;
- struct nouveau_channel *chan;
- int ret;
-
- chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- *pchan = chan;
-
- chan->id = -1;
- chan->dev = dev;
- chan->user_get = 4;
- chan->user_put = 0;
-
- ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret) {
- NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
- if (ret) {
- NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
- if (ret) {
- NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- if (dev_priv->chipset != 0x50) {
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
- }
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &chan->pushbuf_bo);
- if (ret == 0)
- ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret) {
- NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(0), PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "Error mapping EVO control regs.\n");
- nv50_evo_channel_del(pchan);
- return -ENOMEM;
- }
-
- return 0;
-}
-
int
nv50_display_early_init(struct drm_device *dev)
{
@@ -214,17 +63,16 @@ int
nv50_display_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_connector *connector;
- uint32_t val, ram_amount;
- uint64_t start;
+ struct nouveau_channel *evo;
int ret, i;
+ u32 val;
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
/*
* I think the 0x006101XX range is some kind of main control area
* that enables things.
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
val = nv_rd32(dev, 0x0061610c + (i * 0x800));
nv_wr32(dev, 0x0061019c + (i * 0x10), val);
}
+
/* DAC */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
+
/* SOR */
for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
+
/* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
}
- /* This used to be in crtc unblank, but seems out of place there. */
- nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
- /* RAM is clamped to 256 MiB. */
- ram_amount = dev_priv->vram_size;
- NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
- if (ram_amount > 256*1024*1024)
- ram_amount = 256*1024*1024;
- nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
- nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
- nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
/* The precise purpose is unknown, I suspect it has something to do
* with text mode.
*/
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
}
}
- /* taken from nv bug #12637, attempts to un-wedge the hw if it's
- * stuck in some unspecified state
- */
- start = ptimer->read(dev);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
- while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
- if ((val & 0x9f0000) == 0x20000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x800000);
-
- if ((val & 0x3f0000) == 0x30000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x200000);
-
- if (ptimer->read(dev) - start > 1000000000ULL) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- 0x40000000, 0x40000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- return -EBUSY;
- }
-
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
- /* initialise fifo */
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
- ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
- NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
- if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
- return -EBUSY;
+ if (conn->dcb->gpio_tag == 0xff)
+ continue;
+
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+
+ ret = nv50_evo_init(dev);
if (ret)
return ret;
+ evo = dev_priv->evo;
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
ret = RING_SPACE(evo, 11);
if (ret)
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
- /* enable clock change interrupts. */
- nv_wr32(dev, 0x610028, 0x00010001);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
return 0;
}
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- }
+ nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
/* disable interrupts. */
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
/* disable hotplug interrupts */
nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)
dev->mode_config.fb_base = dev_priv->fb_phys;
- /* Create EVO channel */
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
- return ret;
- }
-
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
}
}
+ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ nouveau_irq_register(dev, 26, nv50_display_isr);
+
ret = nv50_display_init(dev);
if (ret) {
nv50_display_destroy(dev);
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
- nv50_evo_channel_del(&dev_priv->evo);
+ nouveau_irq_unregister(dev, 26);
}
static u16
@@ -660,32 +434,32 @@ static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- struct list_head *entry, *tmp;
+ struct nouveau_channel *chan, *tmp;
- list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
- chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+ list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+ nvsw.vbl_wait) {
+ if (chan->nvsw.vblsem_head != crtc)
+ continue;
nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
chan->nvsw.vblsem_rval);
list_del(&chan->nvsw.vbl_wait);
+ drm_vblank_put(dev, crtc);
}
+
+ drm_handle_vblank(dev, crtc);
}
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
- intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
nv50_display_vblank_crtc_handler(dev, 0);
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
nv50_display_vblank_crtc_handler(dev, 1);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) & ~intr);
- nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
}
static void
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
static void
nv50_display_error_handler(struct drm_device *dev)
{
- uint32_t addr, data;
-
- nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
- addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
- data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
- NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
- 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
- nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, hpd_work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector;
- const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1;
-
- spin_lock_irq(&dev_priv->hpd_state.lock);
- hpd0 = dev_priv->hpd_state.hpd0_bits;
- dev_priv->hpd_state.hpd0_bits = 0;
- hpd1 = dev_priv->hpd_state.hpd1_bits;
- dev_priv->hpd_state.hpd1_bits = 0;
- spin_unlock_irq(&dev_priv->hpd_state.lock);
-
- hpd0 &= nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- hpd1 &= nv_rd32(dev, 0xe070);
-
- plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
- unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
- change_mask = plug_mask | unplug_mask;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder_helper_funcs *helper;
- struct nouveau_connector *nv_connector =
- nouveau_connector(connector);
- struct nouveau_encoder *nv_encoder;
- struct dcb_gpio_entry *gpio;
- uint32_t reg;
- bool plugged;
-
- if (!nv_connector->dcb)
- continue;
+ u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
- gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
- if (!gpio || !(change_mask & (1 << gpio->line)))
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
continue;
- reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
- plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
- NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector)) ;
-
- if (!connector->encoder || !connector->encoder->crtc ||
- !connector->encoder->crtc->enabled)
- continue;
- nv_encoder = nouveau_encoder(connector->encoder);
- helper = connector->encoder->helper_private;
-
- if (nv_encoder->dcb->type != OUTPUT_DP)
- continue;
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+ "(0x%04x 0x%02x)\n", chid,
+ addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
- if (plugged)
- helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
- else
- helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
}
-
- drm_helper_hpd_irq_event(dev);
}
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t delayed = 0;
- if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- uint32_t hpd0_bits, hpd1_bits = 0;
-
- hpd0_bits = nv_rd32(dev, 0xe054);
- nv_wr32(dev, 0xe054, hpd0_bits);
-
- if (dev_priv->chipset >= 0x90) {
- hpd1_bits = nv_rd32(dev, 0xe074);
- nv_wr32(dev, 0xe074, hpd1_bits);
- }
-
- spin_lock(&dev_priv->hpd_state.lock);
- dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
- dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
- spin_unlock(&dev_priv->hpd_state.lock);
-
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
- }
-
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
if (!intr0 && !(intr1 & ~delayed))
break;
- if (intr0 & 0x00010000) {
+ if (intr0 & 0x001f0000) {
nv50_display_error_handler(dev);
- intr0 &= ~0x00010000;
+ intr0 &= ~0x001f0000;
}
if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
}
}
}
-
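With this change the display code no longer exports nv50_display_irq_handler(); instead it registers nv50_display_isr() for PMC interrupt bit 26 through nouveau_irq_register() and drops it again with nouveau_irq_unregister() in nv50_display_destroy(). The shared dispatcher itself lives outside this diff, so the following is only a minimal, userspace-style sketch of the assumed pattern, with invented names (example_device, example_irq_register, example_irq_dispatch):

#include <stdint.h>

#define EXAMPLE_NR_INTR_BITS 32

struct example_device;
typedef void (*example_isr_t)(struct example_device *);

/* one handler slot per PMC interrupt status bit */
static example_isr_t example_handlers[EXAMPLE_NR_INTR_BITS];

static void
example_irq_register(int bit, example_isr_t isr)
{
	if (bit >= 0 && bit < EXAMPLE_NR_INTR_BITS)
		example_handlers[bit] = isr;	/* e.g. bit 26 -> display ISR */
}

static void
example_irq_dispatch(struct example_device *dev, uint32_t pmc_intr_0)
{
	int bit;

	/* run the registered handler for every pending status bit */
	for (bit = 0; bit < EXAMPLE_NR_INTR_BITS; bit++) {
		if ((pmc_intr_0 & (1u << bit)) && example_handlers[bit])
			example_handlers[bit](dev);
	}
}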
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b85ee0..f0e30b78ef6b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 000000000000..0ea090f4244a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_channel *evo = *pevo;
+
+ if (!evo)
+ return;
+ *pevo = NULL;
+
+ dev_priv = evo->dev->dev_private;
+ dev_priv->evo_alloc &= ~(1 << evo->id);
+
+ nouveau_gpuobj_channel_takedown(evo);
+ nouveau_bo_unmap(evo->pushbuf_bo);
+ nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+ if (evo->user)
+ iounmap(evo->user);
+
+ kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+ u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+ u32 flags5)
+{
+ struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+ struct drm_device *dev = evo->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(obj, 4, limit);
+ nv_wo32(obj, 8, offset);
+ nv_wo32(obj, 12, 0x00000000);
+ nv_wo32(obj, 16, 0x00000000);
+ nv_wo32(obj, 20, flags5);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(evo, name, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo;
+ int ret;
+
+ evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+ if (!evo)
+ return -ENOMEM;
+ *pevo = evo;
+
+ for (evo->id = 0; evo->id < 5; evo->id++) {
+ if (dev_priv->evo_alloc & (1 << evo->id))
+ continue;
+
+ dev_priv->evo_alloc |= (1 << evo->id);
+ break;
+ }
+
+ if (evo->id == 5) {
+ kfree(evo);
+ return -ENODEV;
+ }
+
+ evo->dev = dev;
+ evo->user_get = 4;
+ evo->user_put = 0;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ false, true, &evo->pushbuf_bo);
+ if (ret == 0)
+ ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ ret = nouveau_bo_map(evo->pushbuf_bo);
+ if (ret) {
+ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+ if (!evo->user) {
+ NV_ERROR(dev, "Error mapping EVO control regs.\n");
+ nv50_evo_channel_del(pevo);
+ return -ENOMEM;
+ }
+
+ /* bind primary evo channel's ramht to the channel */
+ if (dev_priv->evo && evo != dev_priv->evo)
+ nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id, ret, i;
+ u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+ u32 tmp;
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x009f0000) == 0x00020000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x003f0000) == 0x00030000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+ /* initialise fifo */
+ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+ NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+ NV50_PDISPLAY_EVO_DMA_CB_VALID);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ return -EBUSY;
+ }
+
+ /* enable error reporting on the channel */
+ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+ evo->dma.max = (4096/4) - 2;
+ evo->dma.put = 0;
+ evo->dma.cur = evo->dma.put;
+ evo->dma.free = evo->dma.max - evo->dma.cur;
+
+ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(evo, 0);
+
+ return 0;
+}
+
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id;
+
+ nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ }
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
+ struct nouveau_channel *evo;
+ int ret;
+
+ /* create primary evo channel, the one we use for modesetting
+ * purposes
+ */
+ ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ if (ret)
+ return ret;
+ evo = dev_priv->evo;
+
+ /* setup object management on it, any other evo channel will
+ * use this also as there's no per-channel support on the
+ * hardware
+ */
+ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+ NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+ if (ret) {
+ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ /* create some default objects for the scanout memtypes we support */
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+ 0, 0xffffffff, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00020000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ } else {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->evo) {
+ ret = nv50_evo_create(dev);
+ if (ret)
+ return ret;
+ }
+
+ return nv50_evo_channel_init(dev_priv->evo);
+}
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->evo) {
+ nv50_evo_channel_fini(dev_priv->evo);
+ nv50_evo_channel_del(&dev_priv->evo);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae13343bcec..aa4f0d3cea8e 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
*
*/
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+ u32 tile_flags, u32 magic_flags,
+ u32 offset, u32 limit, u32 flags5);
+
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -111,3 +120,4 @@
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2c..50290dea0ac4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+struct nv50_fb_priv {
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c08_page) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+ __free_page(priv->r100c08_page);
+ kfree(priv);
+ return -EFAULT;
+ }
+
+ dev_priv->engine.fb.priv = priv;
+ return 0;
+}
+
int
nv50_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nv50_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
/* Not a clue what this is exactly. Without pointing it at a
* scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
* cause IOMMU "read from address 0" errors (rh#561267)
*/
- nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+ nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
switch (dev_priv->chipset) {
case 0x50:
- nv_wr32(dev, 0x100c90, 0x0707ff);
+ nv_wr32(dev, 0x100c90, 0x000707ff);
break;
case 0xa3:
case 0xa5:
case 0xa8:
- nv_wr32(dev, 0x100c90, 0x0d0fff);
+ nv_wr32(dev, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(dev, 0x100c90, 0x089d1fff);
break;
default:
- nv_wr32(dev, 0x100c90, 0x1d07ff);
+ nv_wr32(dev, 0x100c90, 0x001d07ff);
break;
}
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = dev_priv->engine.fb.priv;
+ if (!priv)
+ return;
+ dev_priv->engine.fb.priv = NULL;
+
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ kfree(priv);
}
void
nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
u32 trap[6], idx, chinst;
int i, ch;
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
return;
chinst = (trap[2] << 16) | trap[1];
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
- struct nouveau_channel *chan = dev_priv->fifos[ch];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
continue;
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (chinst == chan->ramin->vinst >> 12)
break;
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
"channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048eddbc..791ded1c5c6d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
-void
+int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
- RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
if (rect->rop != ROP_COPY) {
BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 3);
}
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
@@ -80,9 +91,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
OUT_RING(chan, 0);
OUT_RING(chan, region->sy);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
- if (RING_SPACE(chan, push + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
dwords -= push;
@@ -148,6 +149,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
+ return 0;
}
int
@@ -157,12 +159,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_gpuobj *eng2d = NULL;
- uint64_t fb;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
- fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
switch (info->var.bits_per_pixel) {
case 8:
format = 0xf3;
@@ -190,12 +189,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
- nouveau_gpuobj_ref(NULL, &eng2d);
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
if (ret)
return ret;
@@ -253,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -262,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
return 0;
}
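The fbcon hooks above now return an error code instead of falling back to the cfb_* software paths themselves; the common wrapper that performs that fallback is not part of this file. A minimal sketch of such a caller, assuming only the int-returning nv50_fbcon_fillrect() from this diff (the wrapper name example_fbcon_fillrect is invented):

#include <linux/errno.h>
#include <linux/fb.h>

/* the accelerated hook as changed in this diff (normally from nouveau_fbcon.h) */
int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);

static void
example_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	int ret = -ENODEV;

	/* only try the GPU path while the console is live and acceleration
	 * has not already been disabled */
	if (info->state == FBINFO_STATE_RUNNING &&
	    !(info->flags & FBINFO_HWACCEL_DISABLED))
		ret = nv50_fbcon_fillrect(info, rect);

	if (ret) {
		/* give up on acceleration and draw in software instead */
		info->flags |= FBINFO_HWACCEL_DISABLED;
		cfb_fillrect(info, rect);
	}
}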
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c10..8dd04c5dac67 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
static void
nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
/* We never schedule channel 0 or 127 */
for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+ if (dev_priv->channels.ptr[i] &&
+ dev_priv->channels.ptr[i]->ramfc) {
nv_wo32(cur, (nr * 4), i);
nr++;
}
@@ -60,7 +62,7 @@ static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[channel];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
uint32_t inst;
NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
NV_DEBUG(dev, "\n");
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->fifos[i])
+ if (dev_priv->channels.ptr[i])
nv50_fifo_channel_enable(dev, i);
else
nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
if (!pfifo->playlist[0])
return;
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+
nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
}
ramfc = chan->ramfc;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
/* This will ensure the channel is seen as disabled. */
nouveau_gpuobj_ref(chan->ramfc, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
nv50_fifo_channel_disable(dev, 127);
nv50_fifo_playlist_update(dev);
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->cache);
}
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 5);
+ nv50_vm_flush_engine(dev, 5);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2bf3d61..6b149c0cc06d 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+ struct list_head handlers;
+ spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ bool inhibit;
+
+ struct dcb_gpio_entry *gpio;
+
+ void (*handler)(void *data, int state);
+ void *data;
+};
+
static int
nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
return 0;
}
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ struct dcb_gpio_entry *gpio;
+ unsigned long flags;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+ if (!gpioh)
+ return -ENOMEM;
+
+ INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+ gpioh->dev = dev;
+ gpioh->gpio = gpio;
+ gpioh->handler = handler;
+ gpioh->data = data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&gpioh->head, &priv->handlers);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
void
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
- u32 reg, mask;
+ unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio) {
- NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
+ if (!gpio)
return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+ if (gpioh->gpio != gpio ||
+ gpioh->handler != handler ||
+ gpioh->data != data)
+ continue;
+ list_del(&gpioh->head);
+ kfree(gpioh);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return false;
reg = gpio->line < 16 ? 0xe050 : 0xe070;
mask = 0x00010001 << (gpio->line & 0xf);
nv_wr32(dev, reg + 4, mask);
- nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->handlers);
+ spin_lock_init(&priv->lock);
+ pgpio->priv = priv;
+ return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ kfree(pgpio->priv);
+ pgpio->priv = NULL;
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+ int ret;
+
+ if (!pgpio->priv) {
+ ret = nv50_gpio_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev)
nv_wr32(dev, 0xe074, 0xffffffff);
}
+ nouveau_irq_register(dev, 21, nv50_gpio_isr);
return 0;
}
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nouveau_irq_unregister(dev, 21);
+
+ nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+ struct nv50_gpio_handler *gpioh =
+ container_of(work, struct nv50_gpio_handler, work);
+ struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ unsigned long flags;
+ int state;
+
+ state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+ if (state < 0)
+ return;
+
+ gpioh->handler(gpioh->data, state);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gpioh->inhibit = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo, ch;
+
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ ch = hi | lo;
+
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(gpioh, &priv->handlers, head) {
+ if (!(ch & (1 << gpioh->gpio->line)))
+ continue;
+
+ if (gpioh->inhibit)
+ continue;
+ gpioh->inhibit = true;
+
+ queue_work(dev_priv->wq, &gpioh->work);
+ }
+ spin_unlock(&priv->lock);
+}
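The old hardwired hotplug bottom half is replaced by nv50_gpio_irq_register()/nv50_gpio_irq_unregister(), which let any consumer attach a callback to a DCB GPIO tag; the ISR then schedules the callback from a workqueue with the current line state. The consumer side is not shown in this diff, so the sketch below is only an illustration, assuming the usual nouveau driver headers and inventing the names example_hpd_handler and example_enable_hpd:

static void
example_hpd_handler(void *data, int state)
{
	struct drm_connector *connector = data;

	/* called from process context; state is the GPIO level read back
	 * by pgpio->get().  A real handler would re-probe the connector
	 * and raise a hotplug uevent here. */
	(void)connector;
	(void)state;
}

static int
example_enable_hpd(struct drm_device *dev, struct drm_connector *connector,
		   enum dcb_gpio_tag hpd_tag)
{
	/* have nv50_gpio.c invoke our callback whenever the line toggles */
	return nv50_gpio_irq_register(dev, hpd_tag,
				      example_hpd_handler, connector);
}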
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af610..37e21d2be95b 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
static void
nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 12, nv50_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
nv_wr32(dev, 0x400138, 0xffffffff);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
nv50_graph_init_regs(dev);
- nv50_graph_init_intr(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
return ret;
+ ret = nv50_graph_register(dev);
+ if (ret)
+ return ret;
+ nv50_graph_init_intr(dev);
return 0;
}
@@ -158,6 +168,8 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nv_wr32(dev, 0x40013c, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev)
inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pgraph_refs);
return 0;
}
@@ -242,18 +255,34 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin)
return;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+ pgraph->fifo_access(dev, false);
+
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
+ pgraph->fifo_access(dev, true);
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+ atomic_dec(&chan->vm->pgraph_refs);
}
static int
@@ -306,7 +335,7 @@ nv50_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv50_graph_context_switch(struct drm_device *dev)
{
uint32_t inst;
@@ -322,8 +351,8 @@ nv50_graph_context_switch(struct drm_device *dev)
}
static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct nouveau_gpuobj *gpuobj;
@@ -340,8 +369,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
return -ERANGE;
@@ -351,16 +380,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
chan->nvsw.vblsem_rval = data;
return 0;
}
static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +397,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
return -EINVAL;
- if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
- nv_wr32(dev, NV50_PDISPLAY_INTR_1,
- NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) |
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
- }
+ drm_vblank_get(dev, data);
+ chan->nvsw.vblsem_head = data;
list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
return 0;
}
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
- { 0x018c, nv50_graph_nvsw_dma_vblsem },
- { 0x0400, nv50_graph_nvsw_vblsem_offset },
- { 0x0404, nv50_graph_nvsw_vblsem_release_val },
- { 0x0408, nv50_graph_nvsw_vblsem_release },
- {}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct nouveau_page_flip_state s;
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
- { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
- { 0x0030, false, NULL }, /* null */
- { 0x5039, false, NULL }, /* m2mf */
- { 0x502d, false, NULL }, /* 2d */
- { 0x50c0, false, NULL }, /* compute */
- { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
- { 0x5097, false, NULL }, /* tesla (nv50) */
- { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
- { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
- { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
- {}
-};
+ if (!nouveau_finish_page_flip(chan, &s)) {
+ /* XXX - Do something here */
+ }
+
+ return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+ /* tesla */
+ if (dev_priv->chipset == 0x50)
+ NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+ else
+ if (dev_priv->chipset < 0xa0)
+ NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+ else {
+ switch (dev_priv->chipset) {
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ NVOBJ_CLASS(dev, 0x8397, GR);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ NVOBJ_CLASS(dev, 0x8597, GR);
+ break;
+ case 0xaf:
+ NVOBJ_CLASS(dev, 0x8697, GR);
+ break;
+ }
+ }
+
+ /* compute */
+ NVOBJ_CLASS(dev, 0x50c0, GR);
+ if (dev_priv->chipset > 0xa0 &&
+ dev_priv->chipset != 0xaa &&
+ dev_priv->chipset != 0xac)
+ NVOBJ_CLASS(dev, 0x85c0, GR);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
void
nv50_graph_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
}
void
@@ -449,8 +518,535 @@ nv86_graph_tlb_flush(struct drm_device *dev)
nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
}
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
+
+static struct nouveau_enum nv50_mp_exec_error_names[] =
+{
+ { 3, "STACK_UNDERFLOW" },
+ { 4, "QUADON_ACTIVE" },
+ { 8, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x40, "BREAKPOINT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "IN" },
+ { 0x00000004, "OUT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
+ { 0x00000004, "INVALID_VALUE" },
+ { 0x00000005, "INVALID_ENUM" },
+ { 0x00000008, "INVALID_OBJECT" },
+ { 0x00000009, "READ_ONLY_OBJECT" },
+ { 0x0000000a, "SUPERVISOR_OBJECT" },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
+ { 0x0000000c, "INVALID_BITFIELD" },
+ { 0x0000000d, "BEGIN_END_ACTIVE" },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
+ { 0x00000010, "RT_DOUBLE_BIND" },
+ { 0x00000011, "RT_TYPES_MISMATCH" },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA" },
+ { 0x00000015, "FP_TOO_FEW_REGS" },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA" },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT" },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT" },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT" },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
+ { 0x0000001f, "RT_BPP128_WITH_MS8" },
+ { 0x00000021, "Z_OUT_OF_BOUNDS" },
+ { 0x00000023, "XY_OUT_OF_BOUNDS" },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
+ { 0x00000046, "LAYER_ID_NEEDS_GP" },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "COMPUTE_QUERY" },
+ { 0x00000010, "ILLEGAL_MTHD" },
+ { 0x00000020, "ILLEGAL_CLASS" },
+ { 0x00000040, "DOUBLE_NOTIFY" },
+ { 0x00001000, "CONTEXT_SWITCH" },
+ { 0x00010000, "BUFFER_NOTIFY" },
+ { 0x00100000, "DATA_ERROR" },
+ { 0x00200000, "TRAP" },
+ { 0x01000000, "SINGLE_STEP" },
+ {}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+ ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_enum_print(nv50_mp_exec_error_names, status);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+ pc & 0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ nv50_fb_vm_trap(dev, display, name);
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x00010000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x00010000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ nv50_fb_vm_trap(dev, display, name);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
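+
+/* Note: 0x001540 appears to hold the unit enable masks -- bits 0-15
+ * select which TPs are present (checked above), and bits 24-27 are
+ * checked by nv50_pgraph_mp_trap when walking the MPs.  Pre-NVA0 chips
+ * space the per-TP register blocks 0x1000 apart, NVA0+ chips 0x800
+ * apart, hence the two ways of computing ustatus_addr. */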
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+ u32 status = nv_rd32(dev, 0x400108);
+ u32 ustatus;
+
+ if (!status && display) {
+ NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+ return 1;
+ }
+
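+	/* Each source below is handled the same way: read (or have
+	 * nv50_pgraph_tp_trap read) the relevant ustatus register, log
+	 * the details when 'display' is set, write 0xc0000000 back to
+	 * acknowledge it, then clear the source's bit in 0x400108 and in
+	 * the local status copy.  The unknown 0x402000 source is the one
+	 * deliberate exception to the last step. */
+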
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ nv_wr32(dev, 0x400500, 0x00000000);
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ u32 addr = nv_rd32(dev, 0x400808);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 datal = nv_rd32(dev, 0x40080c);
+ u32 datah = nv_rd32(dev, 0x400810);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 r848 = nv_rd32(dev, 0x400848);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x%08x "
+ "400808 0x%08x 400848 0x%08x\n",
+ chid, inst, subc, class, mthd, datah,
+ datal, addr, r848);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x400808, 0);
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+
+ if (ustatus & 0x00000002) {
+ u32 addr = nv_rd32(dev, 0x40084c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, 0x40085c);
+ u32 class = nv_rd32(dev, 0x400814);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x 40084c 0x%08x\n",
+ chid, inst, subc, class, mthd,
+ data, addr);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x40084c, 0);
+ ustatus &= ~0x00000002;
+ }
+
+ if (ustatus && display) {
+ NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+ "0x%08x)\n", ustatus);
+ }
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ if (!status)
+ return 0;
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+ nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+ nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+ }
+
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+ nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ " %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+
+ }
+
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH - TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH - TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH - TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+ nv_wr32(dev, 0x400108, status);
+ }
+
+ return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, 0x400100))) {
+ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+ u32 chid = nv50_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+ mthd, data))
+ show &= ~0x00000010;
+ }
+
+ if (stat & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, 0x400100, 0x00001000);
+ nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+ nv50_graph_context_switch(dev);
+ stat &= ~0x00001000;
+ show &= ~0x00001000;
+ }
+
+ show = (show && nouveau_ratelimit()) ? show : 0;
+
+ if (show & 0x00100000) {
+ u32 ecode = nv_rd32(dev, 0x400110);
+ NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+ nouveau_enum_print(nv50_data_error_names, ecode);
+ printk("\n");
+ }
+
+ if (stat & 0x00200000) {
+ if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+ show &= ~0x00200000;
+ }
+
+ nv_wr32(dev, 0x400100, stat);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ if (show) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv50_graph_intr, show);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b7647..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
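+
+/* BAR1 is the framebuffer aperture and BAR3 the instance memory (RAMIN)
+ * aperture.  Each gets its own VM below, sized to match the
+ * corresponding PCI BAR; BAR1's VM is based at 512MiB, BAR3's at 0. */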
struct nv50_instmem_priv {
uint32_t save1700[5]; /* 0x1700->0x1710 */
- struct nouveau_gpuobj *pramin_pt;
- struct nouveau_gpuobj *pramin_bar;
- struct nouveau_gpuobj *fb_bar;
+ struct nouveau_gpuobj *bar1_dmaobj;
+ struct nouveau_gpuobj *bar3_dmaobj;
};
static void
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
return;
nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan)
}
static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
struct nouveau_channel *chan;
- int ret;
+ int ret, i;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size,
return ret;
}
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+ }
+
+ ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
chan->ramin->pinst + fc,
chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv;
struct nouveau_channel *chan;
+ struct nouveau_vm *vm;
int ret, i;
u32 tmp;
@@ -127,112 +146,87 @@ nv50_instmem_init(struct drm_device *dev)
ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
if (ret) {
NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
+ goto error;
}
- /* we need a channel to plug into the hw to control the BARs */
- ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+ /* BAR3 */
+ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+ &dev_priv->bar3_vm);
if (ret)
- return ret;
- chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+ goto error;
- /* allocate page table for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->pramin_pt);
+ ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
if (ret)
- return ret;
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
- nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
- nv_wo32(chan->vm_pd, 0x0004, 0);
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
- /* DMA object for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+ ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
if (ret)
- return ret;
- nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
- nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
-
- /* map channel into PRAMIN, gpuobj didn't do it for us */
- ret = nv50_instmem_bind(dev, chan->ramin);
+ goto error;
+ dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar3_dmaobj);
if (ret)
- return ret;
+ goto error;
- /* poke regs... */
nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
- tmp = nv_ri32(dev, 0);
- nv_wi32(dev, 0, ~tmp);
- if (nv_ri32(dev, 0) != ~tmp) {
- NV_ERROR(dev, "PRAMIN readback failed\n");
- return -EIO;
- }
- nv_wi32(dev, 0, tmp);
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
+ dev_priv->engine.instmem.flush(dev);
dev_priv->ramin_available = true;
- /* Determine VM layout */
- dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
- dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
- dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = dev_priv->vram_size;
- if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
- dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
- dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
- dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
- dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
- NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
- dev_priv->vm_gart_base,
- dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
- NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
- dev_priv->vm_vram_base,
- dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
- /* VRAM page table(s), mapped into VM at +1GiB */
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
- 0, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->vm_vram_pt[i]);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
- dev_priv->vm_vram_pt_nr = i;
- return ret;
- }
- dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
- nv_wo32(chan->vm_pd, 0x10 + (i*8),
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
+ tmp = nv_ro32(chan->ramin, 0);
+ nv_wo32(chan->ramin, 0, ~tmp);
+ if (nv_ro32(chan->ramin, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ ret = -EIO;
+ goto error;
}
+ nv_wo32(chan->ramin, 0, tmp);
- /* DMA object for FB BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
+ /* BAR1 */
+ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
if (ret)
- return ret;
- nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
- pci_resource_len(dev->pdev, 1) - 1);
- nv_wo32(priv->fb_bar, 0x08, 0x40000000);
- nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
- nv_wo32(priv->fb_bar, 0x10, 0x00000000);
- nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+ goto error;
- dev_priv->engine.instmem.flush(dev);
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
- nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar1_dmaobj);
+ if (ret)
+ goto error;
+
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+	/* Create the shared channel VM; space is reserved at the beginning
+	 * to catch "NULL pointer" references.
+	 */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &dev_priv->chan_vm);
+ if (ret)
+ return ret;
+
return 0;
+
+error:
+ nv50_instmem_takedown(dev);
+ return ret;
}
void
@@ -240,7 +234,7 @@ nv50_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@ nv50_instmem_takedown(struct drm_device *dev)
dev_priv->ramin_available = false;
- /* Restore state from before init */
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
- nouveau_gpuobj_ref(NULL, &priv->fb_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
- /* Destroy dummy channel */
- if (chan) {
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
- dev_priv->vm_vram_pt_nr = 0;
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+ dev_priv->channels.ptr[127] = 0;
+ nv50_channel_del(&dev_priv->channels.ptr[0]);
- nv50_channel_del(&dev_priv->fifos[0]);
- dev_priv->fifos[127] = NULL;
- }
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+ if (dev_priv->ramin_heap.free_stack.next)
+ drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
kfree(priv);
@@ -276,16 +270,8 @@ int
nv50_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
- int i;
- ramin->im_backing_suspend = vmalloc(ramin->size);
- if (!ramin->im_backing_suspend)
- return -ENOMEM;
-
- for (i = 0; i < ramin->size; i += 4)
- ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+ dev_priv->ramin_available = false;
return 0;
}
@@ -294,146 +280,124 @@ nv50_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
- dev_priv->ramin_available = false;
- dev_priv->ramin_base = ~0;
- for (i = 0; i < ramin->size; i += 4)
- nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
- dev_priv->ramin_available = true;
- vfree(ramin->im_backing_suspend);
- ramin->im_backing_suspend = NULL;
-
/* Poke the relevant regs, and pray it works :) */
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
nv_wr32(dev, NV50_PUNK_UNK1710, 0);
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ dev_priv->ramin_available = true;
}
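+
+/* Per-object bookkeeping for the allocator below: 'vram' is the backing
+ * VRAM allocation, 'chan_vma' the optional mapping into the shared
+ * channel VM (NVOBJ_FLAG_VM objects only), and 'align' the alignment
+ * the object was requested with. */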
+struct nv50_gpuobj_node {
+ struct nouveau_vram *vram;
+ struct nouveau_vma chan_vma;
+ u32 align;
+};
+
+
int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_device *dev = gpuobj->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node = NULL;
int ret;
- if (gpuobj->im_backing)
- return -EINVAL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->align = align;
- *sz = ALIGN(*sz, 4096);
- if (*sz == 0)
- return -EINVAL;
+ size = (size + 4095) & ~4095;
+ align = max(align, (u32)4096);
- ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
+ ret = vram->get(dev, size, align, 0, 0, &node->vram);
if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ kfree(node);
return ret;
}
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
+ gpuobj->vinst = node->vram->offset;
+
+ if (gpuobj->flags & NVOBJ_FLAG_VM) {
+ u32 flags = NV_MEM_ACCESS_RW;
+ if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+ flags |= NV_MEM_ACCESS_SYS;
+
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
+ &node->chan_vma);
+ if (ret) {
+ vram->put(dev, &node->vram);
+ kfree(node);
+ return ret;
+ }
+
+ nouveau_vm_map(&node->chan_vma, node->vram);
+ gpuobj->vinst = node->chan_vma.offset;
}
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ gpuobj->size = size;
+ gpuobj->node = node;
return 0;
}
void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node;
+
+ node = gpuobj->node;
+ gpuobj->node = NULL;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
+ if (node->chan_vma.node) {
+ nouveau_vm_unmap(&node->chan_vma);
+ nouveau_vm_put(&node->chan_vma);
}
+ vram->put(dev, &node->vram);
+ kfree(node);
}
int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- vram = gpuobj->vinst;
-
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
- vram |= 1;
- if (dev_priv->vram_sys_base) {
- vram += dev_priv->vram_sys_base;
- vram |= 0x30;
- }
-
- while (pte < pte_end) {
- nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
- nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
- vram += 0x1000;
- pte += 2;
- }
- dev_priv->engine.instmem.flush(dev);
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct nv50_gpuobj_node *node = gpuobj->node;
+ int ret;
- nv50_vm_flush(dev, 6);
+ ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+ NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+ if (ret)
+ return ret;
- gpuobj->im_bound = 1;
+ nouveau_vm_map(&node->vram->bar_vma, node->vram);
+ gpuobj->pinst = node->vram->bar_vma.offset;
return 0;
}
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- /* can happen during late takedown */
- if (unlikely(!dev_priv->ramin_available))
- return 0;
+ struct nv50_gpuobj_node *node = gpuobj->node;
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
- while (pte < pte_end) {
- nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
- nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
- pte += 2;
+ if (node->vram->bar_vma.node) {
+ nouveau_vm_unmap(&node->vram->bar_vma);
+ nouveau_vm_put(&node->vram->bar_vma);
}
- dev_priv->engine.instmem.flush(dev);
-
- gpuobj->im_bound = 0;
- return 0;
}
void
@@ -452,11 +416,3 @@ nv84_instmem_flush(struct drm_device *dev)
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
- nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
- NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 000000000000..459ff08241e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2])
+{
+ struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+ u64 phys = 0xdeadcafe00000000ULL;
+ u32 coverage = 0;
+
+ if (pgt[0]) {
+ phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+ coverage = (pgt[0]->size >> 3) << 12;
+ } else
+ if (pgt[1]) {
+ phys = 0x00000001 | pgt[1]->vinst; /* present */
+ coverage = (pgt[1]->size >> 3) << 16;
+ }
+
+ if (phys & 1) {
+ if (coverage <= 32 * 1024 * 1024)
+ phys |= 0x60;
+ else if (coverage <= 64 * 1024 * 1024)
+ phys |= 0x40;
+ else if (coverage < 128 * 1024 * 1024)
+ phys |= 0x20;
+ }
+
+ nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
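+
+/* Rough PDE layout as used above (inferred from this function, not from
+ * documentation): bit 0 marks the entry present, bit 1 selects a
+ * small-page (4KiB) table rather than a large-page one, and bits 5-6
+ * hint at how much address space the table covers (0x60 for <=32MiB,
+ * 0x40 for <=64MiB, 0x20 for <128MiB, nothing beyond that).  Empty
+ * entries get 0xdeadcafe in the high word so stale lookups stand out. */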
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u64 phys, u32 memtype, u32 target)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+ phys |= 1; /* present */
+ phys |= (u64)memtype << 40;
+
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ if (target == 0 && dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys |= target << 4;
+
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= (1 << 6);
+
+ if (!(vma->access & NV_MEM_ACCESS_WO))
+ phys |= (1 << 3);
+
+ return phys;
+}
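+
+/* PTE bits as assembled above (an informal summary): bit 0 present,
+ * bit 3 read-only (set when write access wasn't requested), bits 4-5
+ * the target (0 = VRAM, 2/3 = system memory), bit 6 system access
+ * allowed, and the memory type/kind in bits 40 and up. */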
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 block;
+ int i;
+
+ phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ pte <<= 3;
+ cnt <<= 3;
+
+ while (cnt) {
+ u32 offset_h = upper_32_bits(phys);
+ u32 offset_l = lower_32_bits(phys);
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 3);
+ if (cnt >= block && !(pte & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
+
+ phys += block << (vma->node->type - 3);
+ cnt -= block;
+
+ while (block) {
+ nv_wo32(pgt, pte + 0, offset_l);
+ nv_wo32(pgt, pte + 4, offset_h);
+ pte += 8;
+ block -= 8;
+ }
+ }
+}
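+
+/* Note on the loop above: 'pte' and 'cnt' are converted to byte offsets
+ * (8 bytes per PTE).  Each pass of the inner for-loop picks the largest
+ * naturally-aligned power-of-two run of PTEs (128 down to 1) that still
+ * fits, and stores its log2 in bits 7-9 of the low PTE word as a
+ * contiguity hint; every PTE in that run is then written with the same
+ * base address, which is apparently how the hardware expects contiguous
+ * blocks to be described. */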
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ pinstmem->flush(vm->dev);
+
+ /* BAR */
+ if (vm != dev_priv->chan_vm) {
+ nv50_vm_flush_engine(vm->dev, 6);
+ return;
+ }
+
+ pfifo->tlb_flush(vm->dev);
+
+ if (atomic_read(&vm->pgraph_refs))
+ pgraph->tlb_flush(vm->dev);
+ if (atomic_read(&vm->pcrypt_refs))
+ pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 000000000000..58e98ad36347
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static int types[0x80] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+ if (likely(type < ARRAY_SIZE(types) && types[type]))
+ return true;
+ return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *this;
+ struct nouveau_vram *vram;
+
+ vram = *pvram;
+ *pvram = NULL;
+ if (unlikely(vram == NULL))
+ return;
+
+ mutex_lock(&mm->mutex);
+ while (!list_empty(&vram->regions)) {
+ this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+ list_del(&this->rl_entry);
+ nouveau_mm_put(mm, this);
+ }
+ mutex_unlock(&mm->mutex);
+
+ kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ if (!types[type])
+ return -EINVAL;
+ size >>= 12;
+ align >>= 12;
+ size_nc >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
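+
+/* Worked example with made-up register values: 4 enabled partitions,
+ * 8 banks and colbits = 10 give rowsize = 4 * 8 * 1024 * 8 = 256KiB;
+ * rowbitsa = 12 then predicts 256KiB << 12 = 1GiB of VRAM (plus
+ * rowsize << rowbitsb when 0x100200 bit 2 is set).  The row block size
+ * is that 256KiB, tripled when 0x100250 bit 0 is set. */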
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ULL;
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+ dev_priv->vram_rblock_size = 4096;
+ break;
+ default:
+ dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 000000000000..ec18ae1c3886
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ &chan->crypt_ctx);
+ if (ret)
+ return ret;
+
+ nv_wo32(ramin, 0xa0, 0x00190000);
+ nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+ nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+ nv_wo32(ramin, 0xac, 0);
+ nv_wo32(ramin, 0xb0, 0);
+ nv_wo32(ramin, 0xb4, 0);
+
+ dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pcrypt_refs);
+ return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ u32 inst;
+
+ if (!chan->crypt_ctx)
+ return;
+
+ inst = (chan->ramin->vinst >> 12);
+ inst |= 0x80000000;
+
+	/* Mark the context as invalid if it's still on the hardware;
+	 * skipping this causes issues the next time PCRYPT is used,
+	 * unsurprisingly :)
+	 */
+ nv_wr32(dev, 0x10200c, 0x00000000);
+ if (nv_rd32(dev, 0x102188) == inst)
+ nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+ if (nv_rd32(dev, 0x10218c) == inst)
+ nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+ nv_wr32(dev, 0x10200c, 0x00000010);
+
+ nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+ atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ if (!pcrypt->registered) {
+ NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+ pcrypt->registered = true;
+ }
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ nouveau_irq_register(dev, 14, nv84_crypt_isr);
+ nv_wr32(dev, 0x102130, 0xffffffff);
+ nv_wr32(dev, 0x102140, 0xffffffbf);
+
+ nv_wr32(dev, 0x10200c, 0x00000010);
+ return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x102140, 0x00000000);
+ nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x102130);
+ u32 mthd = nv_rd32(dev, 0x102190);
+ u32 data = nv_rd32(dev, 0x102194);
+ u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+ int show = nouveau_ratelimit();
+
+ if (show) {
+ NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ stat, mthd, data, inst);
+ }
+
+ nv_wr32(dev, 0x102130, stat);
+ nv_wr32(dev, 0x10200c, 0x10);
+
+ nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 000000000000..fa5d4c234383
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
+
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 1);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING (chan, rect->color);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ OUT_RING (chan, rect->dx);
+ OUT_RING (chan, rect->dy);
+ OUT_RING (chan, rect->dx + rect->width);
+ OUT_RING (chan, rect->dy + rect->height);
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ }
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ OUT_RING (chan, region->dx);
+ OUT_RING (chan, region->dy);
+ OUT_RING (chan, region->width);
+ OUT_RING (chan, region->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sy);
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ uint32_t *palette = info->pseudo_palette;
+ int ret;
+
+ if (image->depth != 1)
+ return -ENODEV;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ width = ALIGN(image->width, 32);
+ dwords = (width * image->height) >> 5;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ OUT_RING (chan, palette[image->bg_color] | mask);
+ OUT_RING (chan, palette[image->fg_color] | mask);
+ } else {
+ OUT_RING (chan, image->bg_color);
+ OUT_RING (chan, image->fg_color);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ OUT_RING (chan, image->width);
+ OUT_RING (chan, image->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dy);
+
+ while (dwords) {
+ int push = dwords > 2047 ? 2047 : dwords;
+
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
+
+ dwords -= push;
+
+ BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ OUT_RINGp(chan, data, push);
+ data += push;
+ }
+
+ FIRE_RING(chan);
+ return 0;
+}
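+
+/* The imageblit above expands a 1bpp font bitmap: the width is padded
+ * to a multiple of 32 pixels so each dword carries 32 mono pixels, and
+ * the dwords are streamed through method 0x0860 at most 2047 at a time,
+ * matching the ring space requested for each burst. */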
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ int ret, format;
+
+ ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+ if (ret)
+ return ret;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = 0xf3;
+ break;
+ case 15:
+ format = 0xf8;
+ break;
+ case 16:
+ format = 0xe8;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32, just use 24.. */
+ format = 0xe6;
+ break;
+ case 2: /* depth 30 */
+ format = 0xd1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = RING_SPACE(chan, 60);
+ if (ret) {
+ WARN_ON(1);
+ nouveau_fbcon_gpu_lockup(info);
+ return ret;
+ }
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ OUT_RING (chan, 0x0000902d);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ OUT_RING (chan, 0x55);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ OUT_RING (chan, 4);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ OUT_RING (chan, 2);
+ OUT_RING (chan, 1);
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ FIRE_RING (chan);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b95fbc1..e6f92c541dba 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+ struct nouveau_vma user_vma;
+ int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+ struct nouveau_bo *user;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
+ for (i = 0, p = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
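+
+/* The playlist is double-buffered: each update fills the idle copy with
+ * an 8-byte entry ({chid, 0x00000004}) for every channel whose enable
+ * bit is set in 0x003004 + chid * 8, then points the hardware at it via
+ * 0x002270 (address >> 12) and 0x002274 (flags | entry count), and
+ * waits on 0x00227c for the update to complete. */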
void
nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@ nvc0_fifo_channel_id(struct drm_device *dev)
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_chan *fifoch;
+ u64 ib_virt, user_vinst;
+ int ret;
+
+ chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
+ if (!chan->fifo_priv)
+ return -ENOMEM;
+ fifoch = chan->fifo_priv;
+
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &fifoch->user);
+ if (ret)
+ goto error;
+
+ ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_map(fifoch->user);
+ if (ret) {
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ fifoch->user->bo.mem.mm_node);
+
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user_vma.offset + (chan->id * 0x1000),
+ PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+
+ /* zero channel regs */
+ nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
+
+ /* ramfc */
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+ chan->ramin->vinst, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
+ nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
+ nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
+ nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
+ nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
+ nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
+ nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
+ nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
+ nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+ (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(dev);
return 0;
+
+error:
+ pfifo->destroy_context(chan);
+ return ret;
}
void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct nvc0_fifo_chan *fifoch;
+
+ nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+
+ nvc0_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ fifoch = chan->fifo_priv;
+ chan->fifo_priv = NULL;
+ if (!fifoch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
+ if (fifoch->user) {
+ nouveau_bo_unmap(fifoch->user);
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ }
+ kfree(fifoch);
}
int
@@ -77,14 +243,213 @@ nvc0_fifo_unload_context(struct drm_device *dev)
return 0;
}
+static void
+nvc0_fifo_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+
+ priv = pfifo->priv;
+ if (!priv)
+ return;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ kfree(priv);
+}
+
void
nvc0_fifo_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, 0x002140, 0x00000000);
+ nvc0_fifo_destroy(dev);
+}
+
+static int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfifo->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ return 0;
+
+error:
+ nvc0_fifo_destroy(dev);
+ return ret;
}
int
nvc0_fifo_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret, i;
+
+ if (!pfifo->priv) {
+ ret = nvc0_fifo_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pfifo->priv;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+ nv_wr32(dev, 0x002204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < 3; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
return 0;
}
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+ { 0, "PGRAPH" },
+ { 3, "PEEPHOLE" },
+ { 4, "BAR1" },
+ { 5, "BAR3" },
+ { 7, "PFIFO" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+ { 0, "PT_NOT_PRESENT" },
+ { 1, "PT_TOO_SHORT" },
+ { 2, "PAGE_NOT_PRESENT" },
+ { 3, "VM_LIMIT_EXCEEDED" },
+ {}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
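+/* Decode a PFIFO MMU fault: per-unit fault info lives at 0x002800 + unit * 0x10
+ * (instance, low/high virtual address, status).
+ */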
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+ printk("\n");
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
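+/* Top-level PFIFO interrupt handler: 0x00259c carries one bit per unit with a
+ * pending MMU fault, 0x0025a0 one bit per PSUBFIFO with a pending interrupt.
+ */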
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ }
+
+ nv_wr32(dev, 0x2140, 0);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a5177a8d8..eb18a7e89f5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,17 @@
* Authors: Ben Skeggs
*/
+#include <linux/firmware.h>
+
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
+static void nvc0_graph_isr(struct drm_device *);
+static void nvc0_runk140_isr(struct drm_device *);
+static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
void
nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +45,754 @@ nvc0_graph_channel(struct drm_device *dev)
return NULL;
}
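+/* Generate the reference context image once: load the channel, let
+ * nvc0_grctx_generate() fill in the state, then save a copy of the resulting
+ * image in priv->grctx_vals for initialising later channels.
+ */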
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nvc0_grctx_generate(chan);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+}
+
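+/* Build the per-channel list of (register, value) pairs in grch->mmio;
+ * grctx offsets 0x10/0x14/0x18 point the context at this list.
+ */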
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i = 0, gpc, tp, ret;
+ u32 magic;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+ nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ }
+ }
+
+ grch->mmio_nr = i / 2;
+ return 0;
+}
+
int
nvc0_graph_create_context(struct nouveau_channel *chan)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ struct nvc0_graph_chan *grch;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!chan->pgraph_ctx)
+ return -ENOMEM;
+ grch = chan->pgraph_ctx;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ chan->ramin_grctx = grch->grctx;
+ grctx = grch->grctx;
+
+ ret = nvc0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nvc0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ pinstmem->flush(dev);
return 0;
+
+error:
+ pgraph->destroy_context(chan);
+ return ret;
}
void
nvc0_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct nvc0_graph_chan *grch;
+
+ grch = chan->pgraph_ctx;
+ chan->pgraph_ctx = NULL;
+ if (!grch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->ramin_grctx = NULL;
}
int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
return 0;
}
int
nvc0_graph_unload_context(struct drm_device *dev)
{
- return 0;
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ return nvc0_graph_unload_context_to(dev, inst);
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+
+ priv = pgraph->priv;
+ if (!priv)
+ return;
+
+ nouveau_irq_unregister(dev, 12);
+ nouveau_irq_unregister(dev, 25);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+ kfree(priv);
}
void
nvc0_graph_takedown(struct drm_device *dev)
{
+ nvc0_graph_destroy(dev);
+}
+
+static int
+nvc0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret, gpc, i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pgraph->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tp_total += priv->tp_nr[gpc];
+ }
+
+ /*XXX: these need figuring out... */
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+ priv->magic_not_rop_nr = 0x07;
+ /* filled values up to tp_total, the rest 0 */
+ priv->magicgpc980[0] = 0x22111000;
+ priv->magicgpc980[1] = 0x00000233;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x000ba2e9;
+ } else
+ if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+ priv->magic_not_rop_nr = 0x05;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x00233222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00092493;
+ } else
+ if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+ priv->magic_not_rop_nr = 0x06;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x03332222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00088889;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x01;
+ priv->magicgpc980[0] = 0x02321100;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00124925;
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+ priv->tp_nr[3], priv->rop_nr);
+ /* use 0xc3's values... */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ }
+
+ nouveau_irq_register(dev, 12, nvc0_graph_isr);
+ nouveau_irq_register(dev, 25, nvc0_runk140_isr);
+ NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+ NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+ return 0;
+
+error:
+ nvc0_graph_destroy(dev);
+ return ret;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x00006fe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x013901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc;
+
+ /*      TP       ROP UNKVAL(magic_not_rop_nr)
+  * 450: 4/0/0/0  2   3
+  * 460: 3/4/0/0  4   1
+  * 465: 3/4/4/0  4   7
+  * 470: 3/3/4/4  5   5
+  * 480: 3/4/4/4  6   6
+  *
+  * magicgpc918
+  * 450: 00200000 00000000001000000000000000000000
+  * 460: 00124925 00000000000100100100100100100101
+  * 465: 000ba2e9 00000000000010111010001011101001
+  * 470: 00092493 00000000000010010010010010010011
+  * 480: 00088889 00000000000010001000100010001001
+  *
+  * magicgpc980[]: filled values up to tp_total, remainder 0
+  * 450: 00003210 00000000 00000000 00000000
+  * 460: 02321100 00000000 00000000 00000000
+  * 465: 22111000 00000233 00000000 00000000
+  * 470: 11110000 00233222 00000000 00000000
+  * 480: 11110000 03332222 00000000 00000000
+  */
+
+ nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+}
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+ nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+ nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x40601c, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc, tp;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
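+/* Upload a fuc microcode pair from external firmware files: the data segment
+ * is streamed through +0x1c4, then the code segment through +0x184, bumping
+ * the upload page index (+0x188) every 64 dwords.
+ */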
+static int
+nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
+ const char *code_fw, const char *data_fw)
+{
+ const struct firmware *fw;
+ char name[32];
+ int ret, i;
+
+ snprintf(name, sizeof(name), "nouveau/%s", data_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", data_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
+ release_firmware(fw);
+
+ snprintf(name, sizeof(name), "nouveau/%s", code_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", code_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
+ }
+ release_firmware(fw);
+
+ return 0;
+}
+
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ u32 r000260;
+ int ret;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
+ if (ret == 0)
+ ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
+ nv_wr32(dev, 0x000260, r000260);
+
+ if (ret)
+ return ret;
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
int
nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret;
+
dev_priv->engine.graph.accel_blocked = true;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ break;
+ default:
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ if (nouveau_noaccel != 0)
+ return 0;
+ break;
+ }
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ if (!pgraph->priv) {
+ ret = nvc0_graph_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgraph->priv;
+
+ nvc0_graph_init_obj418880(dev);
+ nvc0_graph_init_regs(dev);
+ /* nvc0_graph_init_unimplemented_magics(dev); */
+ nvc0_graph_init_gpc_0(dev);
+ /* nvc0_graph_init_unimplemented_c242(dev); */
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nvc0_graph_init_units(dev);
+ nvc0_graph_init_gpc_1(dev);
+ nvc0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nvc0_graph_init_ctxctl(dev);
+ if (ret == 0)
+ dev_priv->engine.graph.accel_blocked = false;
return 0;
}
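+/* Map a PGRAPH context instance address back to a channel id; returns
+ * engine.fifo.channels when no matching channel is found.
+ */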
+static int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nvc0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ u32 trap = nv_rd32(dev, 0x400108);
+ NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+ nv_wr32(dev, 0x409c20, ustat);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
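+/* Interrupts from the not-yet-understood 0x140000 units: read back and log
+ * both status words, nothing is acked here.
+ */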
+static void
+nvc0_runk140_isr(struct drm_device *dev)
+{
+ u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
+
+ while (units) {
+ u32 unit = ffs(units) - 1;
+ u32 reg = 0x140000 + unit * 0x2000;
+ u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
+ u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
+
+ NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+ units &= ~(1 << unit);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 000000000000..40e26f9c56c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_priv {
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tp_nr[GPC_MAX];
+ u8 tp_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+ u32 magicgpc980[4];
+ u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 000000000000..f880ff776db8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
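+/* Submit a single init command via 0x400200/0x400204 and spin until the
+ * busy bit in 0x400700 clears.
+ */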
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
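+/* Inject a single method call for the given class via the 0x404488/0x40448c
+ * registers.
+ */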
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+static void
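+/* Default method state for the 0x9097 (Fermi 3D) class, written into the
+ * context being generated via nv_mthd().
+ */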
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+ nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+ nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+ /* in trace, right after 0x90c0, not here */
+ nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+ nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, 0x404004, 0x00000000);
+ nv_wr32(dev, 0x404008, 0x00000000);
+ nv_wr32(dev, 0x40400c, 0x00000000);
+ nv_wr32(dev, 0x404010, 0x00000000);
+ nv_wr32(dev, 0x404014, 0x00000000);
+ nv_wr32(dev, 0x404018, 0x00000000);
+ nv_wr32(dev, 0x40401c, 0x00000000);
+ nv_wr32(dev, 0x404020, 0x00000000);
+ nv_wr32(dev, 0x404024, 0x00000000);
+ nv_wr32(dev, 0x404028, 0x00000000);
+ nv_wr32(dev, 0x40402c, 0x00000000);
+ nv_wr32(dev, 0x404044, 0x00000000);
+ nv_wr32(dev, 0x404094, 0x00000000);
+ nv_wr32(dev, 0x404098, 0x00000000);
+ nv_wr32(dev, 0x40409c, 0x00000000);
+ nv_wr32(dev, 0x4040a0, 0x00000000);
+ nv_wr32(dev, 0x4040a4, 0x00000000);
+ nv_wr32(dev, 0x4040a8, 0x00000000);
+ nv_wr32(dev, 0x4040ac, 0x00000000);
+ nv_wr32(dev, 0x4040b0, 0x00000000);
+ nv_wr32(dev, 0x4040b4, 0x00000000);
+ nv_wr32(dev, 0x4040b8, 0x00000000);
+ nv_wr32(dev, 0x4040bc, 0x00000000);
+ nv_wr32(dev, 0x4040c0, 0x00000000);
+ nv_wr32(dev, 0x4040c4, 0x00000000);
+ nv_wr32(dev, 0x4040c8, 0xf0000087);
+ nv_wr32(dev, 0x4040d4, 0x00000000);
+ nv_wr32(dev, 0x4040d8, 0x00000000);
+ nv_wr32(dev, 0x4040dc, 0x00000000);
+ nv_wr32(dev, 0x4040e0, 0x00000000);
+ nv_wr32(dev, 0x4040e4, 0x00000000);
+ nv_wr32(dev, 0x4040e8, 0x00001000);
+ nv_wr32(dev, 0x4040f8, 0x00000000);
+ nv_wr32(dev, 0x404130, 0x00000000);
+ nv_wr32(dev, 0x404134, 0x00000000);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x0000002e);
+ nv_wr32(dev, 0x404154, 0x00000400);
+ nv_wr32(dev, 0x404158, 0x00000200);
+ nv_wr32(dev, 0x404164, 0x00000055);
+ nv_wr32(dev, 0x404168, 0x00000000);
+ nv_wr32(dev, 0x404174, 0x00000000);
+ nv_wr32(dev, 0x404178, 0x00000000);
+ nv_wr32(dev, 0x40417c, 0x00000000);
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x00000000);
+ nv_wr32(dev, 0x404408, 0x00000000);
+ nv_wr32(dev, 0x40440c, 0x00000000);
+ nv_wr32(dev, 0x404410, 0x00000000);
+ nv_wr32(dev, 0x404414, 0x00000000);
+ nv_wr32(dev, 0x404418, 0x00000000);
+ nv_wr32(dev, 0x40441c, 0x00000000);
+ nv_wr32(dev, 0x404420, 0x00000000);
+ nv_wr32(dev, 0x404424, 0x00000000);
+ nv_wr32(dev, 0x404428, 0x00000000);
+ nv_wr32(dev, 0x40442c, 0x00000000);
+ nv_wr32(dev, 0x404430, 0x00000000);
+ nv_wr32(dev, 0x404434, 0x00000000);
+ nv_wr32(dev, 0x404438, 0x00000000);
+ nv_wr32(dev, 0x404460, 0x00000000);
+ nv_wr32(dev, 0x404464, 0x00000000);
+ nv_wr32(dev, 0x404468, 0x00ffffff);
+ nv_wr32(dev, 0x40446c, 0x00000000);
+ nv_wr32(dev, 0x404480, 0x00000001);
+ nv_wr32(dev, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x00000015);
+ nv_wr32(dev, 0x404608, 0x00000000);
+ nv_wr32(dev, 0x40460c, 0x00002e00);
+ nv_wr32(dev, 0x404610, 0x00000100);
+ nv_wr32(dev, 0x404618, 0x00000000);
+ nv_wr32(dev, 0x40461c, 0x00000000);
+ nv_wr32(dev, 0x404620, 0x00000000);
+ nv_wr32(dev, 0x404624, 0x00000000);
+ nv_wr32(dev, 0x404628, 0x00000000);
+ nv_wr32(dev, 0x40462c, 0x00000000);
+ nv_wr32(dev, 0x404630, 0x00000000);
+ nv_wr32(dev, 0x404634, 0x00000000);
+ nv_wr32(dev, 0x404638, 0x00000004);
+ nv_wr32(dev, 0x40463c, 0x00000000);
+ nv_wr32(dev, 0x404640, 0x00000000);
+ nv_wr32(dev, 0x404644, 0x00000000);
+ nv_wr32(dev, 0x404648, 0x00000000);
+ nv_wr32(dev, 0x40464c, 0x00000000);
+ nv_wr32(dev, 0x404650, 0x00000000);
+ nv_wr32(dev, 0x404654, 0x00000000);
+ nv_wr32(dev, 0x404658, 0x00000000);
+ nv_wr32(dev, 0x40465c, 0x007f0100);
+ nv_wr32(dev, 0x404660, 0x00000000);
+ nv_wr32(dev, 0x404664, 0x00000000);
+ nv_wr32(dev, 0x404668, 0x00000000);
+ nv_wr32(dev, 0x40466c, 0x00000000);
+ nv_wr32(dev, 0x404670, 0x00000000);
+ nv_wr32(dev, 0x404674, 0x00000000);
+ nv_wr32(dev, 0x404678, 0x00000000);
+ nv_wr32(dev, 0x40467c, 0x00000002);
+ nv_wr32(dev, 0x404680, 0x00000000);
+ nv_wr32(dev, 0x404684, 0x00000000);
+ nv_wr32(dev, 0x404688, 0x00000000);
+ nv_wr32(dev, 0x40468c, 0x00000000);
+ nv_wr32(dev, 0x404690, 0x00000000);
+ nv_wr32(dev, 0x404694, 0x00000000);
+ nv_wr32(dev, 0x404698, 0x00000000);
+ nv_wr32(dev, 0x40469c, 0x00000000);
+ nv_wr32(dev, 0x4046a0, 0x007f0080);
+ nv_wr32(dev, 0x4046a4, 0x00000000);
+ nv_wr32(dev, 0x4046a8, 0x00000000);
+ nv_wr32(dev, 0x4046ac, 0x00000000);
+ nv_wr32(dev, 0x4046b0, 0x00000000);
+ nv_wr32(dev, 0x4046b4, 0x00000000);
+ nv_wr32(dev, 0x4046b8, 0x00000000);
+ nv_wr32(dev, 0x4046bc, 0x00000000);
+ nv_wr32(dev, 0x4046c0, 0x00000000);
+ nv_wr32(dev, 0x4046c4, 0x00000000);
+ nv_wr32(dev, 0x4046c8, 0x00000000);
+ nv_wr32(dev, 0x4046cc, 0x00000000);
+ nv_wr32(dev, 0x4046d0, 0x00000000);
+ nv_wr32(dev, 0x4046d4, 0x00000000);
+ nv_wr32(dev, 0x4046d8, 0x00000000);
+ nv_wr32(dev, 0x4046dc, 0x00000000);
+ nv_wr32(dev, 0x4046e0, 0x00000000);
+ nv_wr32(dev, 0x4046e4, 0x00000000);
+ nv_wr32(dev, 0x4046e8, 0x00000000);
+ nv_wr32(dev, 0x4046f0, 0x00000000);
+ nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x00000000);
+ nv_wr32(dev, 0x404704, 0x00000000);
+ nv_wr32(dev, 0x404708, 0x00000000);
+ nv_wr32(dev, 0x40470c, 0x00000000);
+ nv_wr32(dev, 0x404710, 0x00000000);
+ nv_wr32(dev, 0x404714, 0x00000000);
+ nv_wr32(dev, 0x404718, 0x00000000);
+ nv_wr32(dev, 0x40471c, 0x00000000);
+ nv_wr32(dev, 0x404720, 0x00000000);
+ nv_wr32(dev, 0x404724, 0x00000000);
+ nv_wr32(dev, 0x404728, 0x00000000);
+ nv_wr32(dev, 0x40472c, 0x00000000);
+ nv_wr32(dev, 0x404730, 0x00000000);
+ nv_wr32(dev, 0x404734, 0x00000100);
+ nv_wr32(dev, 0x404738, 0x00000000);
+ nv_wr32(dev, 0x40473c, 0x00000000);
+ nv_wr32(dev, 0x404740, 0x00000000);
+ nv_wr32(dev, 0x404744, 0x00000000);
+ nv_wr32(dev, 0x404748, 0x00000000);
+ nv_wr32(dev, 0x40474c, 0x00000000);
+ nv_wr32(dev, 0x404750, 0x00000000);
+ nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ nv_wr32(dev, 0x405838, 0x00000000);
+ nv_wr32(dev, 0x405854, 0x00000000);
+ nv_wr32(dev, 0x405870, 0x00000001);
+ nv_wr32(dev, 0x405874, 0x00000001);
+ nv_wr32(dev, 0x405878, 0x00000001);
+ nv_wr32(dev, 0x40587c, 0x00000001);
+ nv_wr32(dev, 0x405a00, 0x00000000);
+ nv_wr32(dev, 0x405a04, 0x00000000);
+ nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x000103c1);
+ nv_wr32(dev, 0x406028, 0x00000001);
+ nv_wr32(dev, 0x40602c, 0x00000001);
+ nv_wr32(dev, 0x406030, 0x00000001);
+ nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x00000000);
+ nv_wr32(dev, 0x4064ac, 0x00003fff);
+ nv_wr32(dev, 0x4064b4, 0x00000000);
+ nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x00000023);
+ nv_wr32(dev, 0x40780c, 0x0a418820);
+ nv_wr32(dev, 0x407810, 0x062080e6);
+ nv_wr32(dev, 0x407814, 0x020398a4);
+ nv_wr32(dev, 0x407818, 0x0e629062);
+ nv_wr32(dev, 0x40781c, 0x0a418820);
+ nv_wr32(dev, 0x407820, 0x000000e6);
+ nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x00000000);
+ nv_wr32(dev, 0x408004, 0x00000000);
+ nv_wr32(dev, 0x408008, 0x00000018);
+ nv_wr32(dev, 0x40800c, 0x00000000);
+ nv_wr32(dev, 0x408010, 0x00000000);
+ nv_wr32(dev, 0x408014, 0x00000069);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* ROPC_BROADCAST */
+ nv_wr32(dev, 0x408800, 0x02802a3c);
+ nv_wr32(dev, 0x408804, 0x00000040);
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ nv_wr32(dev, 0x408900, 0x0080b801);
+ break;
+ case 0xc3:
+ case 0xc4:
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ break;
+ }
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ nv_wr32(dev, 0x40890c, 0x00000000);
+ nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+ int i;
+
+	/* GPC_BROADCAST */
+ nv_wr32(dev, 0x418380, 0x00000016);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x418408, 0x00000000);
+ nv_wr32(dev, 0x41840c, 0x00001008);
+ nv_wr32(dev, 0x418410, 0x0fff0fff);
+ nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418450, 0x00000000);
+ nv_wr32(dev, 0x418454, 0x00000000);
+ nv_wr32(dev, 0x418458, 0x00000000);
+ nv_wr32(dev, 0x41845c, 0x00000000);
+ nv_wr32(dev, 0x418460, 0x00000000);
+ nv_wr32(dev, 0x418464, 0x00000000);
+ nv_wr32(dev, 0x418468, 0x00000001);
+ nv_wr32(dev, 0x41846c, 0x00000000);
+ nv_wr32(dev, 0x418470, 0x00000000);
+ nv_wr32(dev, 0x418600, 0x0000001f);
+ nv_wr32(dev, 0x418684, 0x0000000f);
+ nv_wr32(dev, 0x418700, 0x00000002);
+ nv_wr32(dev, 0x418704, 0x00000080);
+ nv_wr32(dev, 0x418708, 0x00000000);
+ nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x418710, 0x00000000);
+ nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418808, 0x00000000);
+ nv_wr32(dev, 0x41880c, 0x00000000);
+ nv_wr32(dev, 0x418810, 0x00000000);
+ nv_wr32(dev, 0x418828, 0x00008442);
+ nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x4188d8, 0x00000008);
+ nv_wr32(dev, 0x4188e0, 0x01000000);
+ nv_wr32(dev, 0x4188e8, 0x00000000);
+ nv_wr32(dev, 0x4188ec, 0x00000000);
+ nv_wr32(dev, 0x4188f0, 0x00000000);
+ nv_wr32(dev, 0x4188f4, 0x00000000);
+ nv_wr32(dev, 0x4188f8, 0x00000000);
+ nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x41891c, 0x00ff00ff);
+ nv_wr32(dev, 0x418924, 0x00000000);
+ nv_wr32(dev, 0x418928, 0x00ffff00);
+ nv_wr32(dev, 0x41892c, 0x0000ff00);
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+ nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+ }
+ nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b08, 0x0a418820);
+ nv_wr32(dev, 0x418b0c, 0x062080e6);
+ nv_wr32(dev, 0x418b10, 0x020398a4);
+ nv_wr32(dev, 0x418b14, 0x0e629062);
+ nv_wr32(dev, 0x418b18, 0x0a418820);
+ nv_wr32(dev, 0x418b1c, 0x000000e6);
+ nv_wr32(dev, 0x418bb8, 0x00000103);
+ nv_wr32(dev, 0x418c08, 0x00000001);
+ nv_wr32(dev, 0x418c10, 0x00000000);
+ nv_wr32(dev, 0x418c14, 0x00000000);
+ nv_wr32(dev, 0x418c18, 0x00000000);
+ nv_wr32(dev, 0x418c1c, 0x00000000);
+ nv_wr32(dev, 0x418c20, 0x00000000);
+ nv_wr32(dev, 0x418c24, 0x00000000);
+ nv_wr32(dev, 0x418c28, 0x00000000);
+ nv_wr32(dev, 0x418c2c, 0x00000000);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x00000001);
+ nv_wr32(dev, 0x419000, 0x00000780);
+ nv_wr32(dev, 0x419004, 0x00000000);
+ nv_wr32(dev, 0x419008, 0x00000000);
+ nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419848, 0x00000000);
+ nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419888, 0x00000000);
+ nv_wr32(dev, 0x419a00, 0x000001f0);
+ nv_wr32(dev, 0x419a04, 0x00000001);
+ nv_wr32(dev, 0x419a08, 0x00000023);
+ nv_wr32(dev, 0x419a0c, 0x00020000);
+ nv_wr32(dev, 0x419a10, 0x00000000);
+ nv_wr32(dev, 0x419a14, 0x00000200);
+ nv_wr32(dev, 0x419a1c, 0x00000000);
+ nv_wr32(dev, 0x419a20, 0x00000800);
+ if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419ac4, 0x0007f440); /* 0xc3 */
+ nv_wr32(dev, 0x419b00, 0x0a418820);
+ nv_wr32(dev, 0x419b04, 0x062080e6);
+ nv_wr32(dev, 0x419b08, 0x020398a4);
+ nv_wr32(dev, 0x419b0c, 0x0e629062);
+ nv_wr32(dev, 0x419b10, 0x0a418820);
+ nv_wr32(dev, 0x419b14, 0x000000e6);
+ nv_wr32(dev, 0x419bd0, 0x00900103);
+ nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be4, 0x00000000);
+ nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c04, 0x00000006);
+ nv_wr32(dev, 0x419c08, 0x00000002);
+ nv_wr32(dev, 0x419c20, 0x00000000);
+ nv_wr32(dev, 0x419cbc, 0x28137606);
+ nv_wr32(dev, 0x419ce8, 0x00000000);
+ nv_wr32(dev, 0x419cf4, 0x00000183);
+ nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d24, 0x00001fff);
+ nv_wr32(dev, 0x419e04, 0x00000000);
+ nv_wr32(dev, 0x419e08, 0x00000000);
+ nv_wr32(dev, 0x419e0c, 0x00000000);
+ nv_wr32(dev, 0x419e10, 0x00000002);
+ nv_wr32(dev, 0x419e44, 0x001beff2);
+ nv_wr32(dev, 0x419e48, 0x00000000);
+ nv_wr32(dev, 0x419e4c, 0x0000000f);
+ nv_wr32(dev, 0x419e50, 0x00000000);
+ nv_wr32(dev, 0x419e54, 0x00000000);
+ nv_wr32(dev, 0x419e58, 0x00000000);
+ nv_wr32(dev, 0x419e5c, 0x00000000);
+ nv_wr32(dev, 0x419e60, 0x00000000);
+ nv_wr32(dev, 0x419e64, 0x00000000);
+ nv_wr32(dev, 0x419e68, 0x00000000);
+ nv_wr32(dev, 0x419e6c, 0x00000000);
+ nv_wr32(dev, 0x419e70, 0x00000000);
+ nv_wr32(dev, 0x419e74, 0x00000000);
+ nv_wr32(dev, 0x419e78, 0x00000000);
+ nv_wr32(dev, 0x419e7c, 0x00000000);
+ nv_wr32(dev, 0x419e80, 0x00000000);
+ nv_wr32(dev, 0x419e84, 0x00000000);
+ nv_wr32(dev, 0x419e88, 0x00000000);
+ nv_wr32(dev, 0x419e8c, 0x00000000);
+ nv_wr32(dev, 0x419e90, 0x00000000);
+ nv_wr32(dev, 0x419e98, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419ee0, 0x00011110);
+ nv_wr32(dev, 0x419f50, 0x00000000);
+ nv_wr32(dev, 0x419f54, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i, gpc, tp, id;
+ u32 r000260, tmp;
+
+ r000260 = nv_rd32(dev, 0x000260);
+ nv_wr32(dev, 0x000260, r000260 & ~1);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nvc0_grctx_generate_dispatch(dev);
+ nvc0_grctx_generate_macro(dev);
+ nvc0_grctx_generate_m2mf(dev);
+ nvc0_grctx_generate_unk47xx(dev);
+ nvc0_grctx_generate_shaders(dev);
+ nvc0_grctx_generate_unk60xx(dev);
+ nvc0_grctx_generate_unk64xx(dev);
+ nvc0_grctx_generate_tpbus(dev);
+ nvc0_grctx_generate_ccache(dev);
+ nvc0_grctx_generate_rop(dev);
+ nvc0_grctx_generate_gpc(dev);
+ nvc0_grctx_generate_tp(dev);
+
+ nv_wr32(dev, 0x404154, 0x00000000);
+
+ /* fuc "mmio list" writes */
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+ }
+
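+	/* assign sequential ids to each present TP, and write per-GPC TP counts */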
+ for (tp = 0, id = 0; tp < 4; tp++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tp < priv->tp_nr[gpc]) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+ }
+ }
+
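+	/* pack per-GPC TP counts into one word, 4 bits per GPC */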
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tp_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x00000000);
+ nv_wr32(dev, 0x405874, 0x00000000);
+ nv_wr32(dev, 0x406030, 0x00000000);
+ nv_wr32(dev, 0x405878, 0x00000000);
+ nv_wr32(dev, 0x406034, 0x00000000);
+ nv_wr32(dev, 0x40587c, 0x00000000);
+
+ if (1) {
+ const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+ u8 tpnr[GPC_MAX];
+ u8 data[32];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ memset(data, 0x1f, sizeof(data));
+
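+		/* distribute TPs round-robin across GPCs, 0x1f fills unused slots */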
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+ data[tp] = gpc;
+ }
+
+ for (i = 0; i < max / 4; i++)
+ nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ }
+
+ if (1) {
+ u32 data[6] = {}, data2[2] = {};
+ u8 tpnr[GPC_MAX];
+ u8 shift, ntpcv;
+
+ /* calculate first set of magics */
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+
+ data[tp / 6] |= gpc << ((tp % 6) * 5);
+ }
+
+ for (; tp < 32; tp++)
+ data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tp_total;
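+		/* shift tp_total until bit 4 is set, recording the shift */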
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = (ntpcv << 16);
+ data2[0] |= (shift << 21);
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+		/* GPC_BROADCAST */
+ nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+		/* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr |
+ data2[0]);
+ nv_wr32(dev, 0x419be4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+		/* UNK78xx */
+ nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+ }
+
+ if (1) {
+ u32 tp_mask = 0, tp_set = 0;
+ u8 tpnr[GPC_MAX];
+
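+		/* per-GPC TP presence mask, spread across 32 slots at 0x406800/0x406c00 */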
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+		for (i = 0, gpc = -1; i < 32; i++) {
+ int ltp = i * (priv->tp_total - 1) / 32;
+
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ tp_set |= 1 << ((gpc * 8) + tp);
+
+ do {
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ } while (ltp == (++i * (priv->tp_total - 1) / 32));
+ i--;
+ }
+ }
+
+ nv_wr32(dev, 0x400208, 0x80000000);
+
+ nv_icmd(dev, 0x00001000, 0x00000004);
+ nv_icmd(dev, 0x000000a9, 0x0000ffff);
+ nv_icmd(dev, 0x00000038, 0x0fac6881);
+ nv_icmd(dev, 0x0000003d, 0x00000001);
+ nv_icmd(dev, 0x000000e8, 0x00000400);
+ nv_icmd(dev, 0x000000e9, 0x00000400);
+ nv_icmd(dev, 0x000000ea, 0x00000400);
+ nv_icmd(dev, 0x000000eb, 0x00000400);
+ nv_icmd(dev, 0x000000ec, 0x00000400);
+ nv_icmd(dev, 0x000000ed, 0x00000400);
+ nv_icmd(dev, 0x000000ee, 0x00000400);
+ nv_icmd(dev, 0x000000ef, 0x00000400);
+ nv_icmd(dev, 0x00000078, 0x00000300);
+ nv_icmd(dev, 0x00000079, 0x00000300);
+ nv_icmd(dev, 0x0000007a, 0x00000300);
+ nv_icmd(dev, 0x0000007b, 0x00000300);
+ nv_icmd(dev, 0x0000007c, 0x00000300);
+ nv_icmd(dev, 0x0000007d, 0x00000300);
+ nv_icmd(dev, 0x0000007e, 0x00000300);
+ nv_icmd(dev, 0x0000007f, 0x00000300);
+ nv_icmd(dev, 0x00000050, 0x00000011);
+ nv_icmd(dev, 0x00000058, 0x00000008);
+ nv_icmd(dev, 0x00000059, 0x00000008);
+ nv_icmd(dev, 0x0000005a, 0x00000008);
+ nv_icmd(dev, 0x0000005b, 0x00000008);
+ nv_icmd(dev, 0x0000005c, 0x00000008);
+ nv_icmd(dev, 0x0000005d, 0x00000008);
+ nv_icmd(dev, 0x0000005e, 0x00000008);
+ nv_icmd(dev, 0x0000005f, 0x00000008);
+ nv_icmd(dev, 0x00000208, 0x00000001);
+ nv_icmd(dev, 0x00000209, 0x00000001);
+ nv_icmd(dev, 0x0000020a, 0x00000001);
+ nv_icmd(dev, 0x0000020b, 0x00000001);
+ nv_icmd(dev, 0x0000020c, 0x00000001);
+ nv_icmd(dev, 0x0000020d, 0x00000001);
+ nv_icmd(dev, 0x0000020e, 0x00000001);
+ nv_icmd(dev, 0x0000020f, 0x00000001);
+ nv_icmd(dev, 0x00000081, 0x00000001);
+ nv_icmd(dev, 0x00000085, 0x00000004);
+ nv_icmd(dev, 0x00000088, 0x00000400);
+ nv_icmd(dev, 0x00000090, 0x00000300);
+ nv_icmd(dev, 0x00000098, 0x00001001);
+ nv_icmd(dev, 0x000000e3, 0x00000001);
+ nv_icmd(dev, 0x000000da, 0x00000001);
+ nv_icmd(dev, 0x000000f8, 0x00000003);
+ nv_icmd(dev, 0x000000fa, 0x00000001);
+ nv_icmd(dev, 0x0000009f, 0x0000ffff);
+ nv_icmd(dev, 0x000000a0, 0x0000ffff);
+ nv_icmd(dev, 0x000000a1, 0x0000ffff);
+ nv_icmd(dev, 0x000000a2, 0x0000ffff);
+ nv_icmd(dev, 0x000000b1, 0x00000001);
+ nv_icmd(dev, 0x000000b2, 0x00000000);
+ nv_icmd(dev, 0x000000b3, 0x00000000);
+ nv_icmd(dev, 0x000000b4, 0x00000000);
+ nv_icmd(dev, 0x000000b5, 0x00000000);
+ nv_icmd(dev, 0x000000b6, 0x00000000);
+ nv_icmd(dev, 0x000000b7, 0x00000000);
+ nv_icmd(dev, 0x000000b8, 0x00000000);
+ nv_icmd(dev, 0x000000b9, 0x00000000);
+ nv_icmd(dev, 0x000000ba, 0x00000000);
+ nv_icmd(dev, 0x000000bb, 0x00000000);
+ nv_icmd(dev, 0x000000bc, 0x00000000);
+ nv_icmd(dev, 0x000000bd, 0x00000000);
+ nv_icmd(dev, 0x000000be, 0x00000000);
+ nv_icmd(dev, 0x000000bf, 0x00000000);
+ nv_icmd(dev, 0x000000c0, 0x00000000);
+ nv_icmd(dev, 0x000000c1, 0x00000000);
+ nv_icmd(dev, 0x000000c2, 0x00000000);
+ nv_icmd(dev, 0x000000c3, 0x00000000);
+ nv_icmd(dev, 0x000000c4, 0x00000000);
+ nv_icmd(dev, 0x000000c5, 0x00000000);
+ nv_icmd(dev, 0x000000c6, 0x00000000);
+ nv_icmd(dev, 0x000000c7, 0x00000000);
+ nv_icmd(dev, 0x000000c8, 0x00000000);
+ nv_icmd(dev, 0x000000c9, 0x00000000);
+ nv_icmd(dev, 0x000000ca, 0x00000000);
+ nv_icmd(dev, 0x000000cb, 0x00000000);
+ nv_icmd(dev, 0x000000cc, 0x00000000);
+ nv_icmd(dev, 0x000000cd, 0x00000000);
+ nv_icmd(dev, 0x000000ce, 0x00000000);
+ nv_icmd(dev, 0x000000cf, 0x00000000);
+ nv_icmd(dev, 0x000000d0, 0x00000000);
+ nv_icmd(dev, 0x000000d1, 0x00000000);
+ nv_icmd(dev, 0x000000d2, 0x00000000);
+ nv_icmd(dev, 0x000000d3, 0x00000000);
+ nv_icmd(dev, 0x000000d4, 0x00000000);
+ nv_icmd(dev, 0x000000d5, 0x00000000);
+ nv_icmd(dev, 0x000000d6, 0x00000000);
+ nv_icmd(dev, 0x000000d7, 0x00000000);
+ nv_icmd(dev, 0x000000d8, 0x00000000);
+ nv_icmd(dev, 0x000000d9, 0x00000000);
+ nv_icmd(dev, 0x00000210, 0x00000040);
+ nv_icmd(dev, 0x00000211, 0x00000040);
+ nv_icmd(dev, 0x00000212, 0x00000040);
+ nv_icmd(dev, 0x00000213, 0x00000040);
+ nv_icmd(dev, 0x00000214, 0x00000040);
+ nv_icmd(dev, 0x00000215, 0x00000040);
+ nv_icmd(dev, 0x00000216, 0x00000040);
+ nv_icmd(dev, 0x00000217, 0x00000040);
+ nv_icmd(dev, 0x00000218, 0x0000c080);
+ nv_icmd(dev, 0x00000219, 0x0000c080);
+ nv_icmd(dev, 0x0000021a, 0x0000c080);
+ nv_icmd(dev, 0x0000021b, 0x0000c080);
+ nv_icmd(dev, 0x0000021c, 0x0000c080);
+ nv_icmd(dev, 0x0000021d, 0x0000c080);
+ nv_icmd(dev, 0x0000021e, 0x0000c080);
+ nv_icmd(dev, 0x0000021f, 0x0000c080);
+ nv_icmd(dev, 0x000000ad, 0x0000013e);
+ nv_icmd(dev, 0x000000e1, 0x00000010);
+ nv_icmd(dev, 0x00000290, 0x00000000);
+ nv_icmd(dev, 0x00000291, 0x00000000);
+ nv_icmd(dev, 0x00000292, 0x00000000);
+ nv_icmd(dev, 0x00000293, 0x00000000);
+ nv_icmd(dev, 0x00000294, 0x00000000);
+ nv_icmd(dev, 0x00000295, 0x00000000);
+ nv_icmd(dev, 0x00000296, 0x00000000);
+ nv_icmd(dev, 0x00000297, 0x00000000);
+ nv_icmd(dev, 0x00000298, 0x00000000);
+ nv_icmd(dev, 0x00000299, 0x00000000);
+ nv_icmd(dev, 0x0000029a, 0x00000000);
+ nv_icmd(dev, 0x0000029b, 0x00000000);
+ nv_icmd(dev, 0x0000029c, 0x00000000);
+ nv_icmd(dev, 0x0000029d, 0x00000000);
+ nv_icmd(dev, 0x0000029e, 0x00000000);
+ nv_icmd(dev, 0x0000029f, 0x00000000);
+ nv_icmd(dev, 0x000003b0, 0x00000000);
+ nv_icmd(dev, 0x000003b1, 0x00000000);
+ nv_icmd(dev, 0x000003b2, 0x00000000);
+ nv_icmd(dev, 0x000003b3, 0x00000000);
+ nv_icmd(dev, 0x000003b4, 0x00000000);
+ nv_icmd(dev, 0x000003b5, 0x00000000);
+ nv_icmd(dev, 0x000003b6, 0x00000000);
+ nv_icmd(dev, 0x000003b7, 0x00000000);
+ nv_icmd(dev, 0x000003b8, 0x00000000);
+ nv_icmd(dev, 0x000003b9, 0x00000000);
+ nv_icmd(dev, 0x000003ba, 0x00000000);
+ nv_icmd(dev, 0x000003bb, 0x00000000);
+ nv_icmd(dev, 0x000003bc, 0x00000000);
+ nv_icmd(dev, 0x000003bd, 0x00000000);
+ nv_icmd(dev, 0x000003be, 0x00000000);
+ nv_icmd(dev, 0x000003bf, 0x00000000);
+ nv_icmd(dev, 0x000002a0, 0x00000000);
+ nv_icmd(dev, 0x000002a1, 0x00000000);
+ nv_icmd(dev, 0x000002a2, 0x00000000);
+ nv_icmd(dev, 0x000002a3, 0x00000000);
+ nv_icmd(dev, 0x000002a4, 0x00000000);
+ nv_icmd(dev, 0x000002a5, 0x00000000);
+ nv_icmd(dev, 0x000002a6, 0x00000000);
+ nv_icmd(dev, 0x000002a7, 0x00000000);
+ nv_icmd(dev, 0x000002a8, 0x00000000);
+ nv_icmd(dev, 0x000002a9, 0x00000000);
+ nv_icmd(dev, 0x000002aa, 0x00000000);
+ nv_icmd(dev, 0x000002ab, 0x00000000);
+ nv_icmd(dev, 0x000002ac, 0x00000000);
+ nv_icmd(dev, 0x000002ad, 0x00000000);
+ nv_icmd(dev, 0x000002ae, 0x00000000);
+ nv_icmd(dev, 0x000002af, 0x00000000);
+ nv_icmd(dev, 0x00000420, 0x00000000);
+ nv_icmd(dev, 0x00000421, 0x00000000);
+ nv_icmd(dev, 0x00000422, 0x00000000);
+ nv_icmd(dev, 0x00000423, 0x00000000);
+ nv_icmd(dev, 0x00000424, 0x00000000);
+ nv_icmd(dev, 0x00000425, 0x00000000);
+ nv_icmd(dev, 0x00000426, 0x00000000);
+ nv_icmd(dev, 0x00000427, 0x00000000);
+ nv_icmd(dev, 0x00000428, 0x00000000);
+ nv_icmd(dev, 0x00000429, 0x00000000);
+ nv_icmd(dev, 0x0000042a, 0x00000000);
+ nv_icmd(dev, 0x0000042b, 0x00000000);
+ nv_icmd(dev, 0x0000042c, 0x00000000);
+ nv_icmd(dev, 0x0000042d, 0x00000000);
+ nv_icmd(dev, 0x0000042e, 0x00000000);
+ nv_icmd(dev, 0x0000042f, 0x00000000);
+ nv_icmd(dev, 0x000002b0, 0x00000000);
+ nv_icmd(dev, 0x000002b1, 0x00000000);
+ nv_icmd(dev, 0x000002b2, 0x00000000);
+ nv_icmd(dev, 0x000002b3, 0x00000000);
+ nv_icmd(dev, 0x000002b4, 0x00000000);
+ nv_icmd(dev, 0x000002b5, 0x00000000);
+ nv_icmd(dev, 0x000002b6, 0x00000000);
+ nv_icmd(dev, 0x000002b7, 0x00000000);
+ nv_icmd(dev, 0x000002b8, 0x00000000);
+ nv_icmd(dev, 0x000002b9, 0x00000000);
+ nv_icmd(dev, 0x000002ba, 0x00000000);
+ nv_icmd(dev, 0x000002bb, 0x00000000);
+ nv_icmd(dev, 0x000002bc, 0x00000000);
+ nv_icmd(dev, 0x000002bd, 0x00000000);
+ nv_icmd(dev, 0x000002be, 0x00000000);
+ nv_icmd(dev, 0x000002bf, 0x00000000);
+ nv_icmd(dev, 0x00000430, 0x00000000);
+ nv_icmd(dev, 0x00000431, 0x00000000);
+ nv_icmd(dev, 0x00000432, 0x00000000);
+ nv_icmd(dev, 0x00000433, 0x00000000);
+ nv_icmd(dev, 0x00000434, 0x00000000);
+ nv_icmd(dev, 0x00000435, 0x00000000);
+ nv_icmd(dev, 0x00000436, 0x00000000);
+ nv_icmd(dev, 0x00000437, 0x00000000);
+ nv_icmd(dev, 0x00000438, 0x00000000);
+ nv_icmd(dev, 0x00000439, 0x00000000);
+ nv_icmd(dev, 0x0000043a, 0x00000000);
+ nv_icmd(dev, 0x0000043b, 0x00000000);
+ nv_icmd(dev, 0x0000043c, 0x00000000);
+ nv_icmd(dev, 0x0000043d, 0x00000000);
+ nv_icmd(dev, 0x0000043e, 0x00000000);
+ nv_icmd(dev, 0x0000043f, 0x00000000);
+ nv_icmd(dev, 0x000002c0, 0x00000000);
+ nv_icmd(dev, 0x000002c1, 0x00000000);
+ nv_icmd(dev, 0x000002c2, 0x00000000);
+ nv_icmd(dev, 0x000002c3, 0x00000000);
+ nv_icmd(dev, 0x000002c4, 0x00000000);
+ nv_icmd(dev, 0x000002c5, 0x00000000);
+ nv_icmd(dev, 0x000002c6, 0x00000000);
+ nv_icmd(dev, 0x000002c7, 0x00000000);
+ nv_icmd(dev, 0x000002c8, 0x00000000);
+ nv_icmd(dev, 0x000002c9, 0x00000000);
+ nv_icmd(dev, 0x000002ca, 0x00000000);
+ nv_icmd(dev, 0x000002cb, 0x00000000);
+ nv_icmd(dev, 0x000002cc, 0x00000000);
+ nv_icmd(dev, 0x000002cd, 0x00000000);
+ nv_icmd(dev, 0x000002ce, 0x00000000);
+ nv_icmd(dev, 0x000002cf, 0x00000000);
+ nv_icmd(dev, 0x000004d0, 0x00000000);
+ nv_icmd(dev, 0x000004d1, 0x00000000);
+ nv_icmd(dev, 0x000004d2, 0x00000000);
+ nv_icmd(dev, 0x000004d3, 0x00000000);
+ nv_icmd(dev, 0x000004d4, 0x00000000);
+ nv_icmd(dev, 0x000004d5, 0x00000000);
+ nv_icmd(dev, 0x000004d6, 0x00000000);
+ nv_icmd(dev, 0x000004d7, 0x00000000);
+ nv_icmd(dev, 0x000004d8, 0x00000000);
+ nv_icmd(dev, 0x000004d9, 0x00000000);
+ nv_icmd(dev, 0x000004da, 0x00000000);
+ nv_icmd(dev, 0x000004db, 0x00000000);
+ nv_icmd(dev, 0x000004dc, 0x00000000);
+ nv_icmd(dev, 0x000004dd, 0x00000000);
+ nv_icmd(dev, 0x000004de, 0x00000000);
+ nv_icmd(dev, 0x000004df, 0x00000000);
+ nv_icmd(dev, 0x00000720, 0x00000000);
+ nv_icmd(dev, 0x00000721, 0x00000000);
+ nv_icmd(dev, 0x00000722, 0x00000000);
+ nv_icmd(dev, 0x00000723, 0x00000000);
+ nv_icmd(dev, 0x00000724, 0x00000000);
+ nv_icmd(dev, 0x00000725, 0x00000000);
+ nv_icmd(dev, 0x00000726, 0x00000000);
+ nv_icmd(dev, 0x00000727, 0x00000000);
+ nv_icmd(dev, 0x00000728, 0x00000000);
+ nv_icmd(dev, 0x00000729, 0x00000000);
+ nv_icmd(dev, 0x0000072a, 0x00000000);
+ nv_icmd(dev, 0x0000072b, 0x00000000);
+ nv_icmd(dev, 0x0000072c, 0x00000000);
+ nv_icmd(dev, 0x0000072d, 0x00000000);
+ nv_icmd(dev, 0x0000072e, 0x00000000);
+ nv_icmd(dev, 0x0000072f, 0x00000000);
+ nv_icmd(dev, 0x000008c0, 0x00000000);
+ nv_icmd(dev, 0x000008c1, 0x00000000);
+ nv_icmd(dev, 0x000008c2, 0x00000000);
+ nv_icmd(dev, 0x000008c3, 0x00000000);
+ nv_icmd(dev, 0x000008c4, 0x00000000);
+ nv_icmd(dev, 0x000008c5, 0x00000000);
+ nv_icmd(dev, 0x000008c6, 0x00000000);
+ nv_icmd(dev, 0x000008c7, 0x00000000);
+ nv_icmd(dev, 0x000008c8, 0x00000000);
+ nv_icmd(dev, 0x000008c9, 0x00000000);
+ nv_icmd(dev, 0x000008ca, 0x00000000);
+ nv_icmd(dev, 0x000008cb, 0x00000000);
+ nv_icmd(dev, 0x000008cc, 0x00000000);
+ nv_icmd(dev, 0x000008cd, 0x00000000);
+ nv_icmd(dev, 0x000008ce, 0x00000000);
+ nv_icmd(dev, 0x000008cf, 0x00000000);
+ nv_icmd(dev, 0x00000890, 0x00000000);
+ nv_icmd(dev, 0x00000891, 0x00000000);
+ nv_icmd(dev, 0x00000892, 0x00000000);
+ nv_icmd(dev, 0x00000893, 0x00000000);
+ nv_icmd(dev, 0x00000894, 0x00000000);
+ nv_icmd(dev, 0x00000895, 0x00000000);
+ nv_icmd(dev, 0x00000896, 0x00000000);
+ nv_icmd(dev, 0x00000897, 0x00000000);
+ nv_icmd(dev, 0x00000898, 0x00000000);
+ nv_icmd(dev, 0x00000899, 0x00000000);
+ nv_icmd(dev, 0x0000089a, 0x00000000);
+ nv_icmd(dev, 0x0000089b, 0x00000000);
+ nv_icmd(dev, 0x0000089c, 0x00000000);
+ nv_icmd(dev, 0x0000089d, 0x00000000);
+ nv_icmd(dev, 0x0000089e, 0x00000000);
+ nv_icmd(dev, 0x0000089f, 0x00000000);
+ nv_icmd(dev, 0x000008e0, 0x00000000);
+ nv_icmd(dev, 0x000008e1, 0x00000000);
+ nv_icmd(dev, 0x000008e2, 0x00000000);
+ nv_icmd(dev, 0x000008e3, 0x00000000);
+ nv_icmd(dev, 0x000008e4, 0x00000000);
+ nv_icmd(dev, 0x000008e5, 0x00000000);
+ nv_icmd(dev, 0x000008e6, 0x00000000);
+ nv_icmd(dev, 0x000008e7, 0x00000000);
+ nv_icmd(dev, 0x000008e8, 0x00000000);
+ nv_icmd(dev, 0x000008e9, 0x00000000);
+ nv_icmd(dev, 0x000008ea, 0x00000000);
+ nv_icmd(dev, 0x000008eb, 0x00000000);
+ nv_icmd(dev, 0x000008ec, 0x00000000);
+ nv_icmd(dev, 0x000008ed, 0x00000000);
+ nv_icmd(dev, 0x000008ee, 0x00000000);
+ nv_icmd(dev, 0x000008ef, 0x00000000);
+ nv_icmd(dev, 0x000008a0, 0x00000000);
+ nv_icmd(dev, 0x000008a1, 0x00000000);
+ nv_icmd(dev, 0x000008a2, 0x00000000);
+ nv_icmd(dev, 0x000008a3, 0x00000000);
+ nv_icmd(dev, 0x000008a4, 0x00000000);
+ nv_icmd(dev, 0x000008a5, 0x00000000);
+ nv_icmd(dev, 0x000008a6, 0x00000000);
+ nv_icmd(dev, 0x000008a7, 0x00000000);
+ nv_icmd(dev, 0x000008a8, 0x00000000);
+ nv_icmd(dev, 0x000008a9, 0x00000000);
+ nv_icmd(dev, 0x000008aa, 0x00000000);
+ nv_icmd(dev, 0x000008ab, 0x00000000);
+ nv_icmd(dev, 0x000008ac, 0x00000000);
+ nv_icmd(dev, 0x000008ad, 0x00000000);
+ nv_icmd(dev, 0x000008ae, 0x00000000);
+ nv_icmd(dev, 0x000008af, 0x00000000);
+ nv_icmd(dev, 0x000008f0, 0x00000000);
+ nv_icmd(dev, 0x000008f1, 0x00000000);
+ nv_icmd(dev, 0x000008f2, 0x00000000);
+ nv_icmd(dev, 0x000008f3, 0x00000000);
+ nv_icmd(dev, 0x000008f4, 0x00000000);
+ nv_icmd(dev, 0x000008f5, 0x00000000);
+ nv_icmd(dev, 0x000008f6, 0x00000000);
+ nv_icmd(dev, 0x000008f7, 0x00000000);
+ nv_icmd(dev, 0x000008f8, 0x00000000);
+ nv_icmd(dev, 0x000008f9, 0x00000000);
+ nv_icmd(dev, 0x000008fa, 0x00000000);
+ nv_icmd(dev, 0x000008fb, 0x00000000);
+ nv_icmd(dev, 0x000008fc, 0x00000000);
+ nv_icmd(dev, 0x000008fd, 0x00000000);
+ nv_icmd(dev, 0x000008fe, 0x00000000);
+ nv_icmd(dev, 0x000008ff, 0x00000000);
+ nv_icmd(dev, 0x0000094c, 0x000000ff);
+ nv_icmd(dev, 0x0000094d, 0xffffffff);
+ nv_icmd(dev, 0x0000094e, 0x00000002);
+ nv_icmd(dev, 0x000002ec, 0x00000001);
+ nv_icmd(dev, 0x00000303, 0x00000001);
+ nv_icmd(dev, 0x000002e6, 0x00000001);
+ nv_icmd(dev, 0x00000466, 0x00000052);
+ nv_icmd(dev, 0x00000301, 0x3f800000);
+ nv_icmd(dev, 0x00000304, 0x30201000);
+ nv_icmd(dev, 0x00000305, 0x70605040);
+ nv_icmd(dev, 0x00000306, 0xb8a89888);
+ nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x0000030a, 0x00ffff00);
+ nv_icmd(dev, 0x0000030b, 0x0000001a);
+ nv_icmd(dev, 0x0000030c, 0x00000001);
+ nv_icmd(dev, 0x00000318, 0x00000001);
+ nv_icmd(dev, 0x00000340, 0x00000000);
+ nv_icmd(dev, 0x00000375, 0x00000001);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x0000037d, 0x00000006);
+ nv_icmd(dev, 0x000003a0, 0x00000002);
+ nv_icmd(dev, 0x000003aa, 0x00000001);
+ nv_icmd(dev, 0x000003a9, 0x00000001);
+ nv_icmd(dev, 0x00000380, 0x00000001);
+ nv_icmd(dev, 0x00000360, 0x00000040);
+ nv_icmd(dev, 0x00000366, 0x00000000);
+ nv_icmd(dev, 0x00000367, 0x00000000);
+ nv_icmd(dev, 0x00000368, 0x00001fff);
+ nv_icmd(dev, 0x00000370, 0x00000000);
+ nv_icmd(dev, 0x00000371, 0x00000000);
+ nv_icmd(dev, 0x00000372, 0x003fffff);
+ nv_icmd(dev, 0x0000037a, 0x00000012);
+ nv_icmd(dev, 0x000005e0, 0x00000022);
+ nv_icmd(dev, 0x000005e1, 0x00000022);
+ nv_icmd(dev, 0x000005e2, 0x00000022);
+ nv_icmd(dev, 0x000005e3, 0x00000022);
+ nv_icmd(dev, 0x000005e4, 0x00000022);
+ nv_icmd(dev, 0x00000619, 0x00000003);
+ nv_icmd(dev, 0x00000811, 0x00000003);
+ nv_icmd(dev, 0x00000812, 0x00000004);
+ nv_icmd(dev, 0x00000813, 0x00000006);
+ nv_icmd(dev, 0x00000814, 0x00000008);
+ nv_icmd(dev, 0x00000815, 0x0000000b);
+ nv_icmd(dev, 0x00000800, 0x00000001);
+ nv_icmd(dev, 0x00000801, 0x00000001);
+ nv_icmd(dev, 0x00000802, 0x00000001);
+ nv_icmd(dev, 0x00000803, 0x00000001);
+ nv_icmd(dev, 0x00000804, 0x00000001);
+ nv_icmd(dev, 0x00000805, 0x00000001);
+ nv_icmd(dev, 0x00000632, 0x00000001);
+ nv_icmd(dev, 0x00000633, 0x00000002);
+ nv_icmd(dev, 0x00000634, 0x00000003);
+ nv_icmd(dev, 0x00000635, 0x00000004);
+ nv_icmd(dev, 0x00000654, 0x3f800000);
+ nv_icmd(dev, 0x00000657, 0x3f800000);
+ nv_icmd(dev, 0x00000655, 0x3f800000);
+ nv_icmd(dev, 0x00000656, 0x3f800000);
+ nv_icmd(dev, 0x000006cd, 0x3f800000);
+ nv_icmd(dev, 0x000007f5, 0x3f800000);
+ nv_icmd(dev, 0x000007dc, 0x39291909);
+ nv_icmd(dev, 0x000007dd, 0x79695949);
+ nv_icmd(dev, 0x000007de, 0xb9a99989);
+ nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007e8, 0x00003210);
+ nv_icmd(dev, 0x000007e9, 0x00007654);
+ nv_icmd(dev, 0x000007ea, 0x00000098);
+ nv_icmd(dev, 0x000007ec, 0x39291909);
+ nv_icmd(dev, 0x000007ed, 0x79695949);
+ nv_icmd(dev, 0x000007ee, 0xb9a99989);
+ nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007f0, 0x00003210);
+ nv_icmd(dev, 0x000007f1, 0x00007654);
+ nv_icmd(dev, 0x000007f2, 0x00000098);
+ nv_icmd(dev, 0x000005a5, 0x00000001);
+ nv_icmd(dev, 0x00000980, 0x00000000);
+ nv_icmd(dev, 0x00000981, 0x00000000);
+ nv_icmd(dev, 0x00000982, 0x00000000);
+ nv_icmd(dev, 0x00000983, 0x00000000);
+ nv_icmd(dev, 0x00000984, 0x00000000);
+ nv_icmd(dev, 0x00000985, 0x00000000);
+ nv_icmd(dev, 0x00000986, 0x00000000);
+ nv_icmd(dev, 0x00000987, 0x00000000);
+ nv_icmd(dev, 0x00000988, 0x00000000);
+ nv_icmd(dev, 0x00000989, 0x00000000);
+ nv_icmd(dev, 0x0000098a, 0x00000000);
+ nv_icmd(dev, 0x0000098b, 0x00000000);
+ nv_icmd(dev, 0x0000098c, 0x00000000);
+ nv_icmd(dev, 0x0000098d, 0x00000000);
+ nv_icmd(dev, 0x0000098e, 0x00000000);
+ nv_icmd(dev, 0x0000098f, 0x00000000);
+ nv_icmd(dev, 0x00000990, 0x00000000);
+ nv_icmd(dev, 0x00000991, 0x00000000);
+ nv_icmd(dev, 0x00000992, 0x00000000);
+ nv_icmd(dev, 0x00000993, 0x00000000);
+ nv_icmd(dev, 0x00000994, 0x00000000);
+ nv_icmd(dev, 0x00000995, 0x00000000);
+ nv_icmd(dev, 0x00000996, 0x00000000);
+ nv_icmd(dev, 0x00000997, 0x00000000);
+ nv_icmd(dev, 0x00000998, 0x00000000);
+ nv_icmd(dev, 0x00000999, 0x00000000);
+ nv_icmd(dev, 0x0000099a, 0x00000000);
+ nv_icmd(dev, 0x0000099b, 0x00000000);
+ nv_icmd(dev, 0x0000099c, 0x00000000);
+ nv_icmd(dev, 0x0000099d, 0x00000000);
+ nv_icmd(dev, 0x0000099e, 0x00000000);
+ nv_icmd(dev, 0x0000099f, 0x00000000);
+ nv_icmd(dev, 0x000009a0, 0x00000000);
+ nv_icmd(dev, 0x000009a1, 0x00000000);
+ nv_icmd(dev, 0x000009a2, 0x00000000);
+ nv_icmd(dev, 0x000009a3, 0x00000000);
+ nv_icmd(dev, 0x000009a4, 0x00000000);
+ nv_icmd(dev, 0x000009a5, 0x00000000);
+ nv_icmd(dev, 0x000009a6, 0x00000000);
+ nv_icmd(dev, 0x000009a7, 0x00000000);
+ nv_icmd(dev, 0x000009a8, 0x00000000);
+ nv_icmd(dev, 0x000009a9, 0x00000000);
+ nv_icmd(dev, 0x000009aa, 0x00000000);
+ nv_icmd(dev, 0x000009ab, 0x00000000);
+ nv_icmd(dev, 0x000009ac, 0x00000000);
+ nv_icmd(dev, 0x000009ad, 0x00000000);
+ nv_icmd(dev, 0x000009ae, 0x00000000);
+ nv_icmd(dev, 0x000009af, 0x00000000);
+ nv_icmd(dev, 0x000009b0, 0x00000000);
+ nv_icmd(dev, 0x000009b1, 0x00000000);
+ nv_icmd(dev, 0x000009b2, 0x00000000);
+ nv_icmd(dev, 0x000009b3, 0x00000000);
+ nv_icmd(dev, 0x000009b4, 0x00000000);
+ nv_icmd(dev, 0x000009b5, 0x00000000);
+ nv_icmd(dev, 0x000009b6, 0x00000000);
+ nv_icmd(dev, 0x000009b7, 0x00000000);
+ nv_icmd(dev, 0x000009b8, 0x00000000);
+ nv_icmd(dev, 0x000009b9, 0x00000000);
+ nv_icmd(dev, 0x000009ba, 0x00000000);
+ nv_icmd(dev, 0x000009bb, 0x00000000);
+ nv_icmd(dev, 0x000009bc, 0x00000000);
+ nv_icmd(dev, 0x000009bd, 0x00000000);
+ nv_icmd(dev, 0x000009be, 0x00000000);
+ nv_icmd(dev, 0x000009bf, 0x00000000);
+ nv_icmd(dev, 0x000009c0, 0x00000000);
+ nv_icmd(dev, 0x000009c1, 0x00000000);
+ nv_icmd(dev, 0x000009c2, 0x00000000);
+ nv_icmd(dev, 0x000009c3, 0x00000000);
+ nv_icmd(dev, 0x000009c4, 0x00000000);
+ nv_icmd(dev, 0x000009c5, 0x00000000);
+ nv_icmd(dev, 0x000009c6, 0x00000000);
+ nv_icmd(dev, 0x000009c7, 0x00000000);
+ nv_icmd(dev, 0x000009c8, 0x00000000);
+ nv_icmd(dev, 0x000009c9, 0x00000000);
+ nv_icmd(dev, 0x000009ca, 0x00000000);
+ nv_icmd(dev, 0x000009cb, 0x00000000);
+ nv_icmd(dev, 0x000009cc, 0x00000000);
+ nv_icmd(dev, 0x000009cd, 0x00000000);
+ nv_icmd(dev, 0x000009ce, 0x00000000);
+ nv_icmd(dev, 0x000009cf, 0x00000000);
+ nv_icmd(dev, 0x000009d0, 0x00000000);
+ nv_icmd(dev, 0x000009d1, 0x00000000);
+ nv_icmd(dev, 0x000009d2, 0x00000000);
+ nv_icmd(dev, 0x000009d3, 0x00000000);
+ nv_icmd(dev, 0x000009d4, 0x00000000);
+ nv_icmd(dev, 0x000009d5, 0x00000000);
+ nv_icmd(dev, 0x000009d6, 0x00000000);
+ nv_icmd(dev, 0x000009d7, 0x00000000);
+ nv_icmd(dev, 0x000009d8, 0x00000000);
+ nv_icmd(dev, 0x000009d9, 0x00000000);
+ nv_icmd(dev, 0x000009da, 0x00000000);
+ nv_icmd(dev, 0x000009db, 0x00000000);
+ nv_icmd(dev, 0x000009dc, 0x00000000);
+ nv_icmd(dev, 0x000009dd, 0x00000000);
+ nv_icmd(dev, 0x000009de, 0x00000000);
+ nv_icmd(dev, 0x000009df, 0x00000000);
+ nv_icmd(dev, 0x000009e0, 0x00000000);
+ nv_icmd(dev, 0x000009e1, 0x00000000);
+ nv_icmd(dev, 0x000009e2, 0x00000000);
+ nv_icmd(dev, 0x000009e3, 0x00000000);
+ nv_icmd(dev, 0x000009e4, 0x00000000);
+ nv_icmd(dev, 0x000009e5, 0x00000000);
+ nv_icmd(dev, 0x000009e6, 0x00000000);
+ nv_icmd(dev, 0x000009e7, 0x00000000);
+ nv_icmd(dev, 0x000009e8, 0x00000000);
+ nv_icmd(dev, 0x000009e9, 0x00000000);
+ nv_icmd(dev, 0x000009ea, 0x00000000);
+ nv_icmd(dev, 0x000009eb, 0x00000000);
+ nv_icmd(dev, 0x000009ec, 0x00000000);
+ nv_icmd(dev, 0x000009ed, 0x00000000);
+ nv_icmd(dev, 0x000009ee, 0x00000000);
+ nv_icmd(dev, 0x000009ef, 0x00000000);
+ nv_icmd(dev, 0x000009f0, 0x00000000);
+ nv_icmd(dev, 0x000009f1, 0x00000000);
+ nv_icmd(dev, 0x000009f2, 0x00000000);
+ nv_icmd(dev, 0x000009f3, 0x00000000);
+ nv_icmd(dev, 0x000009f4, 0x00000000);
+ nv_icmd(dev, 0x000009f5, 0x00000000);
+ nv_icmd(dev, 0x000009f6, 0x00000000);
+ nv_icmd(dev, 0x000009f7, 0x00000000);
+ nv_icmd(dev, 0x000009f8, 0x00000000);
+ nv_icmd(dev, 0x000009f9, 0x00000000);
+ nv_icmd(dev, 0x000009fa, 0x00000000);
+ nv_icmd(dev, 0x000009fb, 0x00000000);
+ nv_icmd(dev, 0x000009fc, 0x00000000);
+ nv_icmd(dev, 0x000009fd, 0x00000000);
+ nv_icmd(dev, 0x000009fe, 0x00000000);
+ nv_icmd(dev, 0x000009ff, 0x00000000);
+ nv_icmd(dev, 0x00000468, 0x00000004);
+ nv_icmd(dev, 0x0000046c, 0x00000001);
+ nv_icmd(dev, 0x00000470, 0x00000000);
+ nv_icmd(dev, 0x00000471, 0x00000000);
+ nv_icmd(dev, 0x00000472, 0x00000000);
+ nv_icmd(dev, 0x00000473, 0x00000000);
+ nv_icmd(dev, 0x00000474, 0x00000000);
+ nv_icmd(dev, 0x00000475, 0x00000000);
+ nv_icmd(dev, 0x00000476, 0x00000000);
+ nv_icmd(dev, 0x00000477, 0x00000000);
+ nv_icmd(dev, 0x00000478, 0x00000000);
+ nv_icmd(dev, 0x00000479, 0x00000000);
+ nv_icmd(dev, 0x0000047a, 0x00000000);
+ nv_icmd(dev, 0x0000047b, 0x00000000);
+ nv_icmd(dev, 0x0000047c, 0x00000000);
+ nv_icmd(dev, 0x0000047d, 0x00000000);
+ nv_icmd(dev, 0x0000047e, 0x00000000);
+ nv_icmd(dev, 0x0000047f, 0x00000000);
+ nv_icmd(dev, 0x00000480, 0x00000000);
+ nv_icmd(dev, 0x00000481, 0x00000000);
+ nv_icmd(dev, 0x00000482, 0x00000000);
+ nv_icmd(dev, 0x00000483, 0x00000000);
+ nv_icmd(dev, 0x00000484, 0x00000000);
+ nv_icmd(dev, 0x00000485, 0x00000000);
+ nv_icmd(dev, 0x00000486, 0x00000000);
+ nv_icmd(dev, 0x00000487, 0x00000000);
+ nv_icmd(dev, 0x00000488, 0x00000000);
+ nv_icmd(dev, 0x00000489, 0x00000000);
+ nv_icmd(dev, 0x0000048a, 0x00000000);
+ nv_icmd(dev, 0x0000048b, 0x00000000);
+ nv_icmd(dev, 0x0000048c, 0x00000000);
+ nv_icmd(dev, 0x0000048d, 0x00000000);
+ nv_icmd(dev, 0x0000048e, 0x00000000);
+ nv_icmd(dev, 0x0000048f, 0x00000000);
+ nv_icmd(dev, 0x00000490, 0x00000000);
+ nv_icmd(dev, 0x00000491, 0x00000000);
+ nv_icmd(dev, 0x00000492, 0x00000000);
+ nv_icmd(dev, 0x00000493, 0x00000000);
+ nv_icmd(dev, 0x00000494, 0x00000000);
+ nv_icmd(dev, 0x00000495, 0x00000000);
+ nv_icmd(dev, 0x00000496, 0x00000000);
+ nv_icmd(dev, 0x00000497, 0x00000000);
+ nv_icmd(dev, 0x00000498, 0x00000000);
+ nv_icmd(dev, 0x00000499, 0x00000000);
+ nv_icmd(dev, 0x0000049a, 0x00000000);
+ nv_icmd(dev, 0x0000049b, 0x00000000);
+ nv_icmd(dev, 0x0000049c, 0x00000000);
+ nv_icmd(dev, 0x0000049d, 0x00000000);
+ nv_icmd(dev, 0x0000049e, 0x00000000);
+ nv_icmd(dev, 0x0000049f, 0x00000000);
+ nv_icmd(dev, 0x000004a0, 0x00000000);
+ nv_icmd(dev, 0x000004a1, 0x00000000);
+ nv_icmd(dev, 0x000004a2, 0x00000000);
+ nv_icmd(dev, 0x000004a3, 0x00000000);
+ nv_icmd(dev, 0x000004a4, 0x00000000);
+ nv_icmd(dev, 0x000004a5, 0x00000000);
+ nv_icmd(dev, 0x000004a6, 0x00000000);
+ nv_icmd(dev, 0x000004a7, 0x00000000);
+ nv_icmd(dev, 0x000004a8, 0x00000000);
+ nv_icmd(dev, 0x000004a9, 0x00000000);
+ nv_icmd(dev, 0x000004aa, 0x00000000);
+ nv_icmd(dev, 0x000004ab, 0x00000000);
+ nv_icmd(dev, 0x000004ac, 0x00000000);
+ nv_icmd(dev, 0x000004ad, 0x00000000);
+ nv_icmd(dev, 0x000004ae, 0x00000000);
+ nv_icmd(dev, 0x000004af, 0x00000000);
+ nv_icmd(dev, 0x000004b0, 0x00000000);
+ nv_icmd(dev, 0x000004b1, 0x00000000);
+ nv_icmd(dev, 0x000004b2, 0x00000000);
+ nv_icmd(dev, 0x000004b3, 0x00000000);
+ nv_icmd(dev, 0x000004b4, 0x00000000);
+ nv_icmd(dev, 0x000004b5, 0x00000000);
+ nv_icmd(dev, 0x000004b6, 0x00000000);
+ nv_icmd(dev, 0x000004b7, 0x00000000);
+ nv_icmd(dev, 0x000004b8, 0x00000000);
+ nv_icmd(dev, 0x000004b9, 0x00000000);
+ nv_icmd(dev, 0x000004ba, 0x00000000);
+ nv_icmd(dev, 0x000004bb, 0x00000000);
+ nv_icmd(dev, 0x000004bc, 0x00000000);
+ nv_icmd(dev, 0x000004bd, 0x00000000);
+ nv_icmd(dev, 0x000004be, 0x00000000);
+ nv_icmd(dev, 0x000004bf, 0x00000000);
+ nv_icmd(dev, 0x000004c0, 0x00000000);
+ nv_icmd(dev, 0x000004c1, 0x00000000);
+ nv_icmd(dev, 0x000004c2, 0x00000000);
+ nv_icmd(dev, 0x000004c3, 0x00000000);
+ nv_icmd(dev, 0x000004c4, 0x00000000);
+ nv_icmd(dev, 0x000004c5, 0x00000000);
+ nv_icmd(dev, 0x000004c6, 0x00000000);
+ nv_icmd(dev, 0x000004c7, 0x00000000);
+ nv_icmd(dev, 0x000004c8, 0x00000000);
+ nv_icmd(dev, 0x000004c9, 0x00000000);
+ nv_icmd(dev, 0x000004ca, 0x00000000);
+ nv_icmd(dev, 0x000004cb, 0x00000000);
+ nv_icmd(dev, 0x000004cc, 0x00000000);
+ nv_icmd(dev, 0x000004cd, 0x00000000);
+ nv_icmd(dev, 0x000004ce, 0x00000000);
+ nv_icmd(dev, 0x000004cf, 0x00000000);
+ nv_icmd(dev, 0x00000510, 0x3f800000);
+ nv_icmd(dev, 0x00000511, 0x3f800000);
+ nv_icmd(dev, 0x00000512, 0x3f800000);
+ nv_icmd(dev, 0x00000513, 0x3f800000);
+ nv_icmd(dev, 0x00000514, 0x3f800000);
+ nv_icmd(dev, 0x00000515, 0x3f800000);
+ nv_icmd(dev, 0x00000516, 0x3f800000);
+ nv_icmd(dev, 0x00000517, 0x3f800000);
+ nv_icmd(dev, 0x00000518, 0x3f800000);
+ nv_icmd(dev, 0x00000519, 0x3f800000);
+ nv_icmd(dev, 0x0000051a, 0x3f800000);
+ nv_icmd(dev, 0x0000051b, 0x3f800000);
+ nv_icmd(dev, 0x0000051c, 0x3f800000);
+ nv_icmd(dev, 0x0000051d, 0x3f800000);
+ nv_icmd(dev, 0x0000051e, 0x3f800000);
+ nv_icmd(dev, 0x0000051f, 0x3f800000);
+ nv_icmd(dev, 0x00000520, 0x000002b6);
+ nv_icmd(dev, 0x00000529, 0x00000001);
+ nv_icmd(dev, 0x00000530, 0xffff0000);
+ nv_icmd(dev, 0x00000531, 0xffff0000);
+ nv_icmd(dev, 0x00000532, 0xffff0000);
+ nv_icmd(dev, 0x00000533, 0xffff0000);
+ nv_icmd(dev, 0x00000534, 0xffff0000);
+ nv_icmd(dev, 0x00000535, 0xffff0000);
+ nv_icmd(dev, 0x00000536, 0xffff0000);
+ nv_icmd(dev, 0x00000537, 0xffff0000);
+ nv_icmd(dev, 0x00000538, 0xffff0000);
+ nv_icmd(dev, 0x00000539, 0xffff0000);
+ nv_icmd(dev, 0x0000053a, 0xffff0000);
+ nv_icmd(dev, 0x0000053b, 0xffff0000);
+ nv_icmd(dev, 0x0000053c, 0xffff0000);
+ nv_icmd(dev, 0x0000053d, 0xffff0000);
+ nv_icmd(dev, 0x0000053e, 0xffff0000);
+ nv_icmd(dev, 0x0000053f, 0xffff0000);
+ nv_icmd(dev, 0x00000585, 0x0000003f);
+ nv_icmd(dev, 0x00000576, 0x00000003);
+ nv_icmd(dev, 0x00000586, 0x00000040);
+ nv_icmd(dev, 0x00000582, 0x00000080);
+ nv_icmd(dev, 0x00000583, 0x00000080);
+ nv_icmd(dev, 0x000005c2, 0x00000001);
+ nv_icmd(dev, 0x00000638, 0x00000001);
+ nv_icmd(dev, 0x00000639, 0x00000001);
+ nv_icmd(dev, 0x0000063a, 0x00000002);
+ nv_icmd(dev, 0x0000063b, 0x00000001);
+ nv_icmd(dev, 0x0000063c, 0x00000001);
+ nv_icmd(dev, 0x0000063d, 0x00000002);
+ nv_icmd(dev, 0x0000063e, 0x00000001);
+ nv_icmd(dev, 0x000008b8, 0x00000001);
+ nv_icmd(dev, 0x000008b9, 0x00000001);
+ nv_icmd(dev, 0x000008ba, 0x00000001);
+ nv_icmd(dev, 0x000008bb, 0x00000001);
+ nv_icmd(dev, 0x000008bc, 0x00000001);
+ nv_icmd(dev, 0x000008bd, 0x00000001);
+ nv_icmd(dev, 0x000008be, 0x00000001);
+ nv_icmd(dev, 0x000008bf, 0x00000001);
+ nv_icmd(dev, 0x00000900, 0x00000001);
+ nv_icmd(dev, 0x00000901, 0x00000001);
+ nv_icmd(dev, 0x00000902, 0x00000001);
+ nv_icmd(dev, 0x00000903, 0x00000001);
+ nv_icmd(dev, 0x00000904, 0x00000001);
+ nv_icmd(dev, 0x00000905, 0x00000001);
+ nv_icmd(dev, 0x00000906, 0x00000001);
+ nv_icmd(dev, 0x00000907, 0x00000001);
+ nv_icmd(dev, 0x00000908, 0x00000002);
+ nv_icmd(dev, 0x00000909, 0x00000002);
+ nv_icmd(dev, 0x0000090a, 0x00000002);
+ nv_icmd(dev, 0x0000090b, 0x00000002);
+ nv_icmd(dev, 0x0000090c, 0x00000002);
+ nv_icmd(dev, 0x0000090d, 0x00000002);
+ nv_icmd(dev, 0x0000090e, 0x00000002);
+ nv_icmd(dev, 0x0000090f, 0x00000002);
+ nv_icmd(dev, 0x00000910, 0x00000001);
+ nv_icmd(dev, 0x00000911, 0x00000001);
+ nv_icmd(dev, 0x00000912, 0x00000001);
+ nv_icmd(dev, 0x00000913, 0x00000001);
+ nv_icmd(dev, 0x00000914, 0x00000001);
+ nv_icmd(dev, 0x00000915, 0x00000001);
+ nv_icmd(dev, 0x00000916, 0x00000001);
+ nv_icmd(dev, 0x00000917, 0x00000001);
+ nv_icmd(dev, 0x00000918, 0x00000001);
+ nv_icmd(dev, 0x00000919, 0x00000001);
+ nv_icmd(dev, 0x0000091a, 0x00000001);
+ nv_icmd(dev, 0x0000091b, 0x00000001);
+ nv_icmd(dev, 0x0000091c, 0x00000001);
+ nv_icmd(dev, 0x0000091d, 0x00000001);
+ nv_icmd(dev, 0x0000091e, 0x00000001);
+ nv_icmd(dev, 0x0000091f, 0x00000001);
+ nv_icmd(dev, 0x00000920, 0x00000002);
+ nv_icmd(dev, 0x00000921, 0x00000002);
+ nv_icmd(dev, 0x00000922, 0x00000002);
+ nv_icmd(dev, 0x00000923, 0x00000002);
+ nv_icmd(dev, 0x00000924, 0x00000002);
+ nv_icmd(dev, 0x00000925, 0x00000002);
+ nv_icmd(dev, 0x00000926, 0x00000002);
+ nv_icmd(dev, 0x00000927, 0x00000002);
+ nv_icmd(dev, 0x00000928, 0x00000001);
+ nv_icmd(dev, 0x00000929, 0x00000001);
+ nv_icmd(dev, 0x0000092a, 0x00000001);
+ nv_icmd(dev, 0x0000092b, 0x00000001);
+ nv_icmd(dev, 0x0000092c, 0x00000001);
+ nv_icmd(dev, 0x0000092d, 0x00000001);
+ nv_icmd(dev, 0x0000092e, 0x00000001);
+ nv_icmd(dev, 0x0000092f, 0x00000001);
+ nv_icmd(dev, 0x00000648, 0x00000001);
+ nv_icmd(dev, 0x00000649, 0x00000001);
+ nv_icmd(dev, 0x0000064a, 0x00000001);
+ nv_icmd(dev, 0x0000064b, 0x00000001);
+ nv_icmd(dev, 0x0000064c, 0x00000001);
+ nv_icmd(dev, 0x0000064d, 0x00000001);
+ nv_icmd(dev, 0x0000064e, 0x00000001);
+ nv_icmd(dev, 0x0000064f, 0x00000001);
+ nv_icmd(dev, 0x00000650, 0x00000001);
+ nv_icmd(dev, 0x00000658, 0x0000000f);
+ nv_icmd(dev, 0x000007ff, 0x0000000a);
+ nv_icmd(dev, 0x0000066a, 0x40000000);
+ nv_icmd(dev, 0x0000066b, 0x10000000);
+ nv_icmd(dev, 0x0000066c, 0xffff0000);
+ nv_icmd(dev, 0x0000066d, 0xffff0000);
+ nv_icmd(dev, 0x000007af, 0x00000008);
+ nv_icmd(dev, 0x000007b0, 0x00000008);
+ nv_icmd(dev, 0x000007f6, 0x00000001);
+ nv_icmd(dev, 0x000006b2, 0x00000055);
+ nv_icmd(dev, 0x000007ad, 0x00000003);
+ nv_icmd(dev, 0x00000937, 0x00000001);
+ nv_icmd(dev, 0x00000971, 0x00000008);
+ nv_icmd(dev, 0x00000972, 0x00000040);
+ nv_icmd(dev, 0x00000973, 0x0000012c);
+ nv_icmd(dev, 0x0000097c, 0x00000040);
+ nv_icmd(dev, 0x00000979, 0x00000003);
+ nv_icmd(dev, 0x00000975, 0x00000020);
+ nv_icmd(dev, 0x00000976, 0x00000001);
+ nv_icmd(dev, 0x00000977, 0x00000020);
+ nv_icmd(dev, 0x00000978, 0x00000001);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095e, 0x20164010);
+ nv_icmd(dev, 0x0000095f, 0x00000020);
+ nv_icmd(dev, 0x00000683, 0x00000006);
+ nv_icmd(dev, 0x00000685, 0x003fffff);
+ nv_icmd(dev, 0x00000687, 0x00000c48);
+ nv_icmd(dev, 0x000006a0, 0x00000005);
+ nv_icmd(dev, 0x00000840, 0x00300008);
+ nv_icmd(dev, 0x00000841, 0x04000080);
+ nv_icmd(dev, 0x00000842, 0x00300008);
+ nv_icmd(dev, 0x00000843, 0x04000080);
+ nv_icmd(dev, 0x00000818, 0x00000000);
+ nv_icmd(dev, 0x00000819, 0x00000000);
+ nv_icmd(dev, 0x0000081a, 0x00000000);
+ nv_icmd(dev, 0x0000081b, 0x00000000);
+ nv_icmd(dev, 0x0000081c, 0x00000000);
+ nv_icmd(dev, 0x0000081d, 0x00000000);
+ nv_icmd(dev, 0x0000081e, 0x00000000);
+ nv_icmd(dev, 0x0000081f, 0x00000000);
+ nv_icmd(dev, 0x00000848, 0x00000000);
+ nv_icmd(dev, 0x00000849, 0x00000000);
+ nv_icmd(dev, 0x0000084a, 0x00000000);
+ nv_icmd(dev, 0x0000084b, 0x00000000);
+ nv_icmd(dev, 0x0000084c, 0x00000000);
+ nv_icmd(dev, 0x0000084d, 0x00000000);
+ nv_icmd(dev, 0x0000084e, 0x00000000);
+ nv_icmd(dev, 0x0000084f, 0x00000000);
+ nv_icmd(dev, 0x00000850, 0x00000000);
+ nv_icmd(dev, 0x00000851, 0x00000000);
+ nv_icmd(dev, 0x00000852, 0x00000000);
+ nv_icmd(dev, 0x00000853, 0x00000000);
+ nv_icmd(dev, 0x00000854, 0x00000000);
+ nv_icmd(dev, 0x00000855, 0x00000000);
+ nv_icmd(dev, 0x00000856, 0x00000000);
+ nv_icmd(dev, 0x00000857, 0x00000000);
+ nv_icmd(dev, 0x00000738, 0x00000000);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ab, 0x00000002);
+ nv_icmd(dev, 0x000006ac, 0x00000080);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x000006bb, 0x000000cf);
+ nv_icmd(dev, 0x000006ce, 0x2a712488);
+ nv_icmd(dev, 0x00000739, 0x4085c000);
+ nv_icmd(dev, 0x0000073a, 0x00000080);
+ nv_icmd(dev, 0x00000786, 0x80000100);
+ nv_icmd(dev, 0x0000073c, 0x00010100);
+ nv_icmd(dev, 0x0000073d, 0x02800000);
+ nv_icmd(dev, 0x00000787, 0x000000cf);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x00000836, 0x00000001);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x00000944, 0x00000022);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000002);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000014);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000001);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+ nv_wr32(dev, 0x404154, 0x00000400);
+
+ nvc0_grctx_generate_9097(dev);
+ nvc0_grctx_generate_902d(dev);
+ nvc0_grctx_generate_9039(dev);
+ nvc0_grctx_generate_90c0(dev);
+
+ nv_wr32(dev, 0x000260, r000260);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78a9088..c09091749054 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,206 +25,207 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+struct nvc0_instmem_priv {
+ struct nouveau_gpuobj *bar1_pgd;
+ struct nouveau_channel *bar1;
+ struct nouveau_gpuobj *bar3_pgd;
+ struct nouveau_channel *bar3;
+ struct nouveau_gpuobj *chan_pgd;
+};
int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *size)
+nvc0_instmem_suspend(struct drm_device *dev)
{
- int ret;
-
- *size = ALIGN(*size, 4096);
- if (*size == 0)
- return -EINVAL;
-
- ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
- if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
- }
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ dev_priv->ramin_available = false;
return 0;
}
void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
- }
+ nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+ nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+ dev_priv->ramin_available = true;
}
-int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+ struct nouveau_channel *chan;
+
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- vram = gpuobj->vinst;
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan,
+ struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+ struct nouveau_channel *chan;
+ int ret;
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- vram += 4096;
- pte++;
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- if (1) {
- u32 chan = nv_rd32(dev, 0x1700) << 16;
- nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
- nv_wr32(dev, 0x100cbc, 0x80000005);
+ ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- gpuobj->im_bound = 1;
- return 0;
-}
-
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), 0);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- pte++;
+ ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- gpuobj->im_bound = 0;
- return 0;
-}
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+ nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
- nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
+ *pchan = chan;
+ return 0;
}
int
-nvc0_instmem_suspend(struct drm_device *dev)
+nvc0_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf;
- int i;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvc0_instmem_priv *priv;
+ struct nouveau_vm *vm = NULL;
+ int ret;
- dev_priv->susres.ramin_copy = vmalloc(65536);
- if (!dev_priv->susres.ramin_copy)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- buf = dev_priv->susres.ramin_copy;
-
- for (i = 0; i < 65536; i += 4)
- buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ pinstmem->priv = priv;
+
+ /* BAR3 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL,
+ (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+ NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+ priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+ if (ret)
+ goto error;
+
+ /* BAR1 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+ priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+ if (ret)
+ goto error;
+
+ /* channel vm */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+ if (ret)
+ goto error;
+
+ nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ nvc0_instmem_resume(dev);
return 0;
+error:
+ nvc0_instmem_takedown(dev);
+ return ret;
}
void
-nvc0_instmem_resume(struct drm_device *dev)
+nvc0_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf = dev_priv->susres.ramin_copy;
- u64 chan;
- int i;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_vm *vm = NULL;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- nv_wr32(dev, 0x001700, chan >> 16);
+ nvc0_instmem_suspend(dev);
- for (i = 0; i < 65536; i += 4)
- nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
+ nv_wr32(dev, 0x1704, 0x00000000);
+ nv_wr32(dev, 0x1714, 0x00000000);
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-}
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
-int
-nvc0_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
- int ret, i;
-
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- imem = 4096 + 4096 + 32768;
-
- nv_wr32(dev, 0x001700, chan >> 16);
-
- /* channel setup */
- nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700208, lower_32_bits(lim3));
- nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
- /* point pgd -> pgt */
- nv_wr32(dev, 0x701000, 0);
- nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
- /* point pgt -> physical vram for channel */
- pgt3 = 0x2000;
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* clear rest of pgt */
- for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, 0);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* point bar3 at the channel */
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-
- /* Global PRAMIN heap */
- ret = drm_mm_init(&dev_priv->ramin_heap, imem,
- dev_priv->ramin_size - imem);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
- }
+ nvc0_channel_del(&priv->bar1);
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
- return 0;
-}
+ nvc0_channel_del(&priv->bar3);
+ nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+ nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 000000000000..e4e83c2caf5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+ struct nouveau_gpuobj *pgt[2])
+{
+ u32 pde[2] = { 0, 0 };
+
+ if (pgt[0])
+ pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+ if (pgt[1])
+ pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+ nv_wo32(pgd, (index * 8) + 0, pde[0]);
+ nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+ phys >>= 8;
+
+ phys |= 0x00000001; /* present */
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= 0x00000002;
+
+ phys |= ((u64)target << 32);
+ phys |= ((u64)memtype << 36);
+
+ return phys;
+}
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 next = 1 << (vma->node->type - 8);
+
+ phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ phys += next;
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct drm_device *dev = vm->dev;
+ struct nouveau_vm_pgd *vpgd;
+ u32 r100c80, engine;
+
+ pinstmem->flush(vm->dev);
+
+ if (vm == dev_priv->chan_vm)
+ engine = 1;
+ else
+ engine = 5;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ r100c80 = nv_rd32(dev, 0x100c80);
+ nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+ if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
+ NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 000000000000..858eda5dedd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+ case 0x0000:
+ case 0xfe00:
+ case 0xdb00:
+ case 0x1100:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ size >>= 12;
+ align >>= 12;
+ ncmin >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ dev_priv->vram_rblock_size = 4096;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a585613..fe0f253089ac 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
#define NV_PCRTC_START 0x00600800
#define NV_PCRTC_CONFIG 0x00600804
# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
-# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
+# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0)
+# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
#define NV_PCRTC_CURSOR_CONFIG 0x00600810
# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2028d2..e47eecfc2df4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_trace_points.o ni.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179d1bfa..c61c3fe9fb98 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
#define GRAPH_OBJECT_TYPE_ROUTER 0x4
/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
+#define GRAPH_OBJECT_TYPE_GENERIC 0x7
/****************************************************/
/* Encoder Object ID Definition */
@@ -64,6 +66,9 @@
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
+#define ENCODER_OBJECT_ID_ALMOND 0x22
+#define ENCODER_OBJECT_ID_TRAVIS 0x23
+#define ENCODER_OBJECT_ID_NUTMEG 0x22
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -108,6 +113,7 @@
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
/* deleted */
@@ -124,6 +130,7 @@
#define GENERIC_OBJECT_ID_GLSYNC 0x01
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -360,6 +367,26 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
@@ -421,6 +448,14 @@
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
+#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
@@ -621,6 +665,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b9f13e..258fa5e7a2d9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
- uint32_t dst, src1, src2, saved;
+ uint32_t dst, mask, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- SDEBUG(" src1: ");
- src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
- SDEBUG(" src2: ");
- src2 = atom_get_src(ctx, attr, ptr);
- dst &= src1;
- dst |= src2;
+ mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+ SDEBUG(" mask: 0x%08x", mask);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= mask;
+ dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a239df3..04b269d14a59 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
+#define ATOM_PPLL0 2
+#define ATOM_EXT_PLL1 8
+#define ATOM_EXT_PLL2 9
+#define ATOM_EXT_CLOCK 10
#define ATOM_PPLL_INVALID 0xFF
+#define ENCODER_REFCLK_SRC_P1PLL 0
+#define ENCODER_REFCLK_SRC_P2PLL 1
+#define ENCODER_REFCLK_SRC_DCPLL 2
+#define ENCODER_REFCLK_SRC_EXTCLK 3
+#define ENCODER_REFCLK_SRC_INVALID 0xFF
+
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER
/*Image can't be updated, while Driver needs to carry the new table! */
}ATOM_COMMON_TABLE_HEADER;
+/****************************************************************************/
+// Structure stores the ROM header.
+/****************************************************************************/
typedef struct _ATOM_ROM_HEADER
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER
#define USHORT void*
#endif
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
+#define GetDispObjectInfo EnableYUV
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
+#define ADJUST_MC_SETTING_PARAM 3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulClockFreq:24;
+#else
+ ULONG ulClockFreq:24;
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG 0x80
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ union
+ {
+ UCHAR ucCntlFlag; //Output Flags
+ UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+ };
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_ENCODER_CONFIG_LINKA 0x00
#define ATOM_ENCODER_CONFIG_LINKB 0x04
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
+#define ATOM_ENCODER_MODE_DVO 16
+#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
{
@@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
@@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in the register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved:3;
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
UCHAR ucReserved:3;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in the register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V3;
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
-
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
{
@@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
UCHAR ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// The following function's ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is used; the DISABLE sub-function is likewise used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved:2;
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= Changed compared to previous version
+#else
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= Changed compared to previous version
+ UCHAR ucReserved:2;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (in the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ union{
+ ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HPD programming. New compared to previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
// define ucBitPerColor:
#define PANEL_BPC_UNDEFINE 0x00
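A minimal sketch of how a driver might fill the V4 encoder-control parameters above for a DP setup on DIG1 at 2.70 GHz (editor's illustration, not part of this patch; fill_dig_encoder_setup_v4 is a hypothetical helper, ATOM_ENCODER_CMD_SETUP and ATOM_ENCODER_MODE_DP are existing defines referenced in the comments above, bpc_code is one of the PANEL_* bit-per-color codes of which only PANEL_BPC_UNDEFINE is visible in this hunk, and cpu_to_le16 is the usual kernel byte-order helper used elsewhere in this patch):

static void fill_dig_encoder_setup_v4(DIG_ENCODER_CONTROL_PARAMETERS_V4 *args,
                                      USHORT pixel_clock_10khz, UCHAR lane_num,
                                      UCHAR bpc_code, UCHAR hpd_id)
{
        args->usPixelClock = cpu_to_le16(pixel_clock_10khz);
        args->ucConfig = ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER |
                         ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
        args->ucAction = ATOM_ENCODER_CMD_SETUP;
        args->ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args->ucLaneNum = lane_num;      /* lanes to enable */
        args->ucBitPerColor = bpc_code;  /* only meaningful for DP setup */
        args->ucHPD_ID = hpd_id;         /* 1-6, or 0 to skip HPD programming */
}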
@@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
{
union
@@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+ UCHAR ucLaneSel;
+ union
+ {
+ UCHAR ucLaneSet;
+ struct {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+#else
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+#endif
+ };
+ };
+}ATOM_DP_VS_MODE_V4;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when initializing uniphy, the lower 8 bits carry the connector type defined in objectid.h
+ ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode. Redefined compared to previous version
+ };
+ union
+ {
+ ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
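The ucConfig byte of UNIPHYTransmitterControl V1.4 is composed from the bit definitions above. One plausible combination, shown only as an editor's sketch (coherent single-link on UNIPHY CD, link A, DIG1 encoder, reference clock from P1PLL; example_uniphy_cfg_v4 is a hypothetical helper):

static UCHAR example_uniphy_cfg_v4(void)
{
        return ATOM_TRANSMITTER_CONFIG_V4_COHERENT |
               ATOM_TRANSMITTER_CONFIG_V4_LINKA |
               ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER |
               ATOM_TRANSMITTER_CONFIG_V4_P1PLL |
               ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2;   /* UNIPHY CD */
}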
+
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+ union{
+ USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
+ USHORT usConnectorId; // connector id, valid when ucAction = INIT
+ };
+ UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucAction; //
+ UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+ UCHAR ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+ ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
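ExternalEncoderControl V1.3 takes the action and config codes listed above. A minimal sketch of a SETUP call on the first external encoder at 2.70 GHz (editor's illustration, not part of this patch; fill_ext_encoder_setup_v3 is a hypothetical helper and ATOM_ENCODER_MODE_DP is an existing define referenced earlier in this header):

static void fill_ext_encoder_setup_v3(EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *args,
                                      USHORT pixel_clock_10khz, UCHAR lane_num,
                                      UCHAR bpc_code)
{
        args->usPixelClock = cpu_to_le16(pixel_clock_10khz);
        args->ucConfig = EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 |
                         EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
        args->ucAction = EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP;
        args->ucEncoderMode = ATOM_ENCODER_MODE_DP;
        args->ucLaneNum = lane_num;
        args->ucBitPerColor = bpc_code;
}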
+
+
/****************************************************************************/
// Structures used by DAC1OuputControlTable
// DAC2OuputControlTable
@@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2
#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
+
typedef struct _PIXEL_CLOCK_PARAMETERS_V3
{
USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5
#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to previous version.
+#else
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to previous version.
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+ union{
+ CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
+ ULONG ulDispEngClkFreq; // dispclk frequency
+ };
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= 1: VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0:24bpp, =1:36bpp, =2:30bpp, =3:48bpp (matches PIXEL_CLOCK_V6_MISC_HDMI_* below)
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTLAIN( default mode )
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
+
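In PIXEL_CLOCK_PARAMETERS_V6 the CRTC id occupies bits [31:24] and the 24-bit pixel clock (10 kHz units) bits [23:0] of the same dword, so the union lets the whole value be composed and byte-swapped in one step. A minimal sketch, assuming the caller already has the divider values (editor's illustration, not part of this patch; fill_pixel_clock_v6 is a hypothetical helper; ucTransmitterID/ucEncoderMode are omitted here and come from objectId.h in real code):

static void fill_pixel_clock_v6(PIXEL_CLOCK_PARAMETERS_V6 *args,
                                ULONG clock_10khz, UCHAR crtc_id, UCHAR pll_id,
                                USHORT fb_div, ULONG fb_div_frac,
                                UCHAR post_div, UCHAR ref_div)
{
        args->ulDispEngClkFreq = cpu_to_le32(((ULONG)crtc_id << 24) |
                                             (clock_10khz & 0xFFFFFF));
        args->usFbDiv = cpu_to_le16(fb_div);
        args->ulFbDivDecFrac = cpu_to_le32(fb_div_frac); /* 20-bit fraction, 1..999999 */
        args->ucPostDiv = post_div;
        args->ucRefDiv = ref_div;
        args->ucPpll = pll_id;                           /* ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL */
        args->ucMiscInfo = PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
}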
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
{
USHORT usPixelClock; // target pixel clock
- UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
- UCHAR ucReserved[3];
+ UCHAR ucExtTransmitterID; // external encoder id.
+ UCHAR ucReserved[2];
}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
// usDispPllConfig v1.2 for RoadRunner
@@ -1314,7 +1629,7 @@ typedef struct _GET_ENGINE_CLOCK_PARAMETERS
typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
{
USHORT usPrescale; //Ratio between Engine clock and I2C clock
- USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID
+ USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID
USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status
 //When used as input: lower byte is 'bytes to read': currently limited to 128 bytes or 1 byte
UCHAR ucSlaveAddr; //Read from which slave
@@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
/****************************************************************************/
// Structures used by PowerConnectorDetectionTable
/****************************************************************************/
@@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
+// Used by DCE5.0
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+ USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
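ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 adds a fractional amount and a PLL-select field to the SS type byte. A minimal sketch of a centre-spread enable on P1PLL (editor's illustration, not part of this patch; fill_ss_on_ppll_v3 is a hypothetical helper, ATOM_ENABLE is an existing define referenced in the structure comment, and the amount/step/fraction values would come from the ASIC SS tables):

static void fill_ss_on_ppll_v3(ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 *args,
                               USHORT amount, USHORT step, USHORT amount_frac)
{
        args->ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD |
                                     ATOM_PPLL_SS_TYPE_V3_P1PLL;
        args->ucEnable = ATOM_ENABLE;
        args->usSpreadSpectrumAmount = cpu_to_le16(amount);          /* FBDIV[7:0] | NFRAC[11:8] */
        args->usSpreadSpectrumStep = cpu_to_le16(step);              /* SS_STEP_SIZE_DSFRAC */
        args->usSpreadSpectrumAmountFrac = cpu_to_le16(amount_frac); /* SS_AMOUNT_DSFRAC */
}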
+
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
/**************************************************************************/
@@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT DAC_Info; // Will be obsolete from R600
- USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT TMDS_Info; // Will be obsolete from R600
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
@@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
 USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
+// For backward compatibility
+#define LVDS_Info LCD_Info
+
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
+
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
@@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;
+
/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/
@@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1
UCHAR ucReserved4[3];
}ATOM_FIRMWARE_INFO_V2_1;
+//the structure below to be used from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved[2];
+ ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+ ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
+ UCHAR ucReserved3; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved9[3];
+ USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ USHORT usReserved12;
+ ULONG ulReserved10[3]; // Newly added compared to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
-ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
@@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
+#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
+
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
@@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12
#define PANEL_RANDOM_DITHER 0x80
#define PANEL_RANDOM_DITHER_MASK 0x80
+#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
+
+/****************************************************************************/
+// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+typedef struct _ATOM_LCD_INFO_V13
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ ULONG ulReserved0;
+ UCHAR ucLCD_Misc; // Reorganized in V13
+ // Bit0: {=0:single, =1:dual},
+ // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
+ // Bit3:2: {Grey level}
+ // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
+ // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
+ // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+ // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+ // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+ // Bit7-3: Reserved
+ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+ USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+ ULONG ulReserved[4];
+}ATOM_LCD_INFO_V13;
+
+#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID, no change compared to previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates @usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to previous version
-#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP, no change compared to previous version
typedef struct _ATOM_PATCH_RECORD_MODE
{
@@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO
#define MAX_DTD_MODE_IN_VRAM 6
#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
-#define DFP_ENCODER_TYPE_OFFSET 0x80
-#define DP_ENCODER_LANE_NUM_OFFSET 0x84
-#define DP_ENCODER_LINK_RATE_OFFSET 0x88
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
+#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
#define ATOM_HWICON1_SURFACE_ADDR 0
#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
-#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
+#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512
//The size below is in Kb!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+#define ATOM_VRAM_RESERVE_V2_SIZE 32
+
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
@@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;
+typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
{
UCHAR ucNumOfDispPath;
@@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
#define EXT_AUXDDC_LUTINDEX_7 7
#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDP_Lane3_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane0_Source:2;
+#else
+ UCHAR ucDP_Lane0_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping.
+//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDVI_CLK_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA2_Source:2;
+#else
+ UCHAR ucDVI_DATA2_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
typedef struct _EXT_DISPLAY_PATH
{
USHORT usDeviceTag; //A bit vector to show what devices are supported
@@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH
UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
USHORT usExtEncoderObjId; //external encoder object id
- USHORT usReserved[3];
+ union{
+ UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+ UCHAR ucReserved;
+ USHORT usReserved[2];
}EXT_DISPLAY_PATH;
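A value of 0 in the new ucChannelMapping union keeps the default one-to-one lane mapping; otherwise each two-bit field selects the GPU TX pin (0-3) feeding that connector lane. A minimal sketch of reading one lane (editor's illustration, not part of this patch; ext_path_dp_lane0_src is a hypothetical helper):

static UCHAR ext_path_dp_lane0_src(const EXT_DISPLAY_PATH *path)
{
        if (path->ucChannelMapping == 0)
                return 0;                               /* default mapping: DP_Lane0 <- TX0 */
        return path->asDPMapping.ucDP_Lane0_Source;     /* 0..3 => GPU pin TX0..TX3 */
}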
#define NUMBER_OF_UCHAR_FOR_GUID 16
@@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
- UCHAR Reserved [7]; // for potential expansion
+ UCHAR uc3DStereoPinId; // use for eDP panel
+ UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 //Related definitions, all records are different but they have a common header
@@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
 #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+#define ATOM_ENCODER_CAP_RECORD_TYPE 20
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
+
+typedef struct _ATOM_ENCODER_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ union {
+ USHORT usEncoderCap;
+ struct {
+#if ATOM_BIG_ENDIAN
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+#else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
+#endif
+ };
+ };
+}ATOM_ENCODER_CAP_RECORD;
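A minimal sketch of testing the new DP1.2 HBR2 capability bit on an encoder cap record (editor's illustration, not part of this patch; encoder_cap_has_hbr2 is a hypothetical helper and le16_to_cpu is the usual kernel byte-order helper):

static int encoder_cap_has_hbr2(const ATOM_ENCODER_CAP_RECORD *rec)
{
        return (le16_to_cpu(rec->usEncoderCap) & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
}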
+
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL
#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
#define VOLTAGE_CONTROL_ID_DS4402 0x04
+#define VOLTAGE_CONTROL_ID_UP6266 0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
+#define VOLTAGE_CONTROL_ID_VT1556M 0x07
+#define VOLTAGE_CONTROL_ID_CHL822x 0x08
+#define VOLTAGE_CONTROL_ID_VT1586M 0x09
typedef struct _ATOM_VOLTAGE_OBJECT
{
@@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+ ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
+ ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+ ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+ USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
+ USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
- ULONG ulReserved1[8];
+ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
- ULONG ulReserved2[3];
+ ULONG ulSB_MMIO_Base_Addr;
+ USHORT usRequestedPWMFreqInHz;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucHtcHystLmt;
+ ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- USHORT usExtDispConnInfoOffset;
- UCHAR ucHtcTmpLmt;
- UCHAR ucTjOffset;
+ USHORT usNBP0Voltage;
+ USHORT usNBP1Voltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
ULONG ulCSR_M3_ARB_CNTL_UVD[10];
ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
- ULONG ulReserved3[42];
+ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+ ULONG ulGMCRestoreResetTime;
+ ULONG ulMinimumNClk;
+ ULONG ulIdleNClk;
+ ULONG ulDDR_DLL_PowerUpTime;
+ ULONG ulDDR_PLL_PowerUpTime;
+ USHORT usPCIEClkSSPercentage;
+ USHORT usPCIEClkSSType;
+ USHORT usLvdsSSPercentage;
+ USHORT usLvdsSSpreadRateIn10Hz;
+ USHORT usHDMISSPercentage;
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+ ULONG ulReserved3[21];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
+
+
/**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
-//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-//ulReserved1[8] Reserved by now, must be 0x0.
-//ulBootUpReqDisplayVector VBIOS boot up display IDs
-// ATOM_DEVICE_CRT1_SUPPORT 0x0001
-// ATOM_DEVICE_CRT2_SUPPORT 0x0010
-// ATOM_DEVICE_DFP1_SUPPORT 0x0008
-// ATOM_DEVICE_DFP6_SUPPORT 0x0040
-// ATOM_DEVICE_DFP2_SUPPORT 0x0080
-// ATOM_DEVICE_DFP3_SUPPORT 0x0200
-// ATOM_DEVICE_DFP4_SUPPORT 0x0400
-// ATOM_DEVICE_DFP5_SUPPORT 0x0800
-// ATOM_DEVICE_LCD1_SUPPORT 0x0002
-//ulOtherDisplayMisc Other display related flags, not defined yet.
-//ulGPUCapInfo TBD
-//ulReserved2[3] must be 0x0 for the reserved.
-//ulSystemConfig TBD
-//ulCPUCapInfo TBD
-//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber System memory channel numbers.
-//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
-//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Llano/Ontario projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_CRT2_SUPPORT 0x0010
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: Other display related flags, not defined yet.
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                    =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+              bit[3]=0: Enable HW AUX mode detection logic
+                    =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                       1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use;
+ VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
+ Changing BL using VBIOS function is functional in both driver and non-driver present environment;
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
+ that BL control from GPU is expected.
+ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+ Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
+ it's per platform
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                       To calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays to provide the available list of SCLK and corresponding voltage, ordered from low to high
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel ( not including eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel ( not including eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
**********************************************************************************************************************/
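A minimal sketch of decoding two of the fields exactly as the description above defines them (editor's illustration, not part of this patch; parse_igp_info_v6 is a hypothetical helper and igp points at the already-mapped IntegratedSystemInfo data table):

static void parse_igp_info_v6(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp)
{
        ULONG sysconf = le32_to_cpu(igp->ulSystemConfig);
        USHORT pwm_hz = le16_to_cpu(igp->usRequestedPWMFreqInHz);
        int pcie_power_gating = !!(sysconf &
                ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE);
        int gpu_controls_backlight = (pwm_hz != 0); /* 0 => backlight not GPU/SW controlled */

        (void)pcie_power_gating;
        (void)gpu_controls_backlight;
}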
/**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
#define ASIC_INTERNAL_SS_ON_LVDS 6
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
//Byte aligned defintion for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80
typedef struct _ATOM_MC_INIT_PARAM_TABLE
{
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _32Mx32 0x33
#define _64Mx8 0x41
#define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define QIMONDA INFINEON
#define PROMOS MOSEL
#define KRETON INFINEON
+#define ELIXIR NANYA
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
-#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_ROM_START_ADDRESS 0x1b800
#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
//uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number, ends with '0'.
+}ATOM_VRAM_MODULE_V7;
typedef struct _ATOM_VRAM_INFO_V2
{
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
// ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usReserved[4];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicates ATOM_VRAM_MODULE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
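ucNumOfVRAMModule gives the real number of entries in aVramInfo, and each ATOM_VRAM_MODULE_V7 carries its own usModuleSize. A minimal sketch of walking the list by that size field (editor's illustration, not part of this patch; for_each_vram_module_v7 is a hypothetical helper, and stepping by usModuleSize rather than sizeof() is an assumption based on the field's description):

static void for_each_vram_module_v7(ATOM_VRAM_INFO_HEADER_V2_1 *vram_info)
{
        ATOM_VRAM_MODULE_V7 *module = vram_info->aVramInfo;
        UCHAR i;

        for (i = 0; i < vram_info->ucNumOfVRAMModule; i++) {
                /* module->ucMemoryType, module->ucChannelNum, ... consumed here */
                module = (ATOM_VRAM_MODULE_V7 *)
                         ((UCHAR *)module + le16_to_cpu(module->usModuleSize));
        }
}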
+
typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO;
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
+
typedef struct _ASIC_ENCODER_INFO
{
UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
/* /obselete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and Output parameters.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+ DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
+
+
// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
#define SELECT_DCIO_IMPCAL 4
#define SELECT_DCIO_DIG 6
#define SELECT_CRTC_PIXEL_RATE 7
+#define SELECT_VGA_BLK 8
/****************************************************************************/
//Portion VI: Definitinos for vbios MC scratch registers that driver used
@@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usReserved[2];
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
+ ULONG ulReserved;
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
@@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
@@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
- UCHAR ucUnused1[3];
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
} ATOM_PPLIB_NONCLOCK_INFO;
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
+ UCHAR ucEngineClockHigh; //clockfrequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
+ //please initialize to 0
+ UCHAR rsv;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ //this is for Sumo
+ ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
+}ClockInfoArray;
+
+typedef struct NonClockInfoArray{
+
+ //how many non-clock levels we have; normally the same as the number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
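
Because older tables carry 12-byte (VER1) entries and newer ones 24-byte (VER2) entries, the array above should be indexed by ucEntrySize rather than sizeof(); a sketch, illustrative only and not part of the patch:

static const ATOM_PPLIB_NONCLOCK_INFO *
non_clock_info_entry(const NonClockInfoArray *nca, UCHAR idx)
{
	const UCHAR *base = (const UCHAR *)nca->nonClockInfo;

	/* only VER2 (24-byte) entries contain usClassification2/ulVCLK/ulDCLK */
	return (const ATOM_PPLIB_NONCLOCK_INFO *)(base + (u32)idx * nca->ucEntrySize);
}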
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
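
One way the dependency table above can be consumed is to pick the voltage of the first entry whose clock covers a requested frequency, sketched here for illustration; the helper name, the 10 kHz convention and the fallback policy are assumptions, not taken from this patch:

static USHORT voltage_for_clock(const ATOM_PPLIB_Clock_Voltage_Dependency_Table *t,
				u32 clock_10khz)
{
	UCHAR i;

	if (t->ucNumEntries == 0)
		return 0;

	for (i = 0; i < t->ucNumEntries; i++) {
		u32 entry_clock = ((u32)t->entries[i].ucClockHigh << 16) |
				  t->entries[i].usClockLow;

		if (entry_clock >= clock_10khz)
			return t->entries[i].usVoltage;
	}

	/* requested clock above all entries: fall back to the last one */
	return t->entries[t->ucNumEntries - 1].usVoltage;
}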
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
/**************************************************************************/
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa6ee44..a4e5e53e0a62 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
switch (radeon_crtc->rmx_type) {
case RMX_CENTER:
- args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
- args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
- args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
- args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
if (a1 > a2) {
- args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
- args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
} else if (a2 > a1) {
- args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
- args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
}
break;
case RMX_FULL:
default:
- args.usOverscanRight = radeon_crtc->h_border;
- args.usOverscanLeft = radeon_crtc->h_border;
- args.usOverscanBottom = radeon_crtc->v_border;
- args.usOverscanTop = radeon_crtc->v_border;
+ args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+ args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+ args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+ args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
break;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
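
The conversions added above reflect that ATOM table parameter blocks are little-endian; a condensed sketch of the pattern (the struct type name is assumed from context, and the helper is illustrative, not part of the patch):

static void set_overscan_args(SET_CRTC_OVERSCAN_PS_ALLOCATION *args,
			      u16 top, u16 bottom, u16 left, u16 right)
{
	/* on little-endian hosts cpu_to_le16() compiles away;
	 * on big-endian hosts it byte-swaps for the ATOM interpreter */
	args->usOverscanTop    = cpu_to_le16(top);
	args->usOverscanBottom = cpu_to_le16(bottom);
	args->usOverscanLeft   = cpu_to_le16(left);
	args->usOverscanRight  = cpu_to_le16(right);
}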
@@ -403,6 +403,7 @@ union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,24 +418,47 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+ args.v3.ucSpreadSpectrumType = ss->type;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ break;
+ case ATOM_PPLL2:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ break;
+ case ATOM_DCPLL:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ args.v2.ucEnable = enable;
+ } else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type;
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
- args.v2.usSpreadSpectrumAmount = ss->amount;
- args.v2.usSpreadSpectrumStep = ss->step;
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
- args.v2.usSpreadSpectrumAmount = ss->amount;
- args.v2.usSpreadSpectrumStep = ss->step;
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
- args.v2.usSpreadSpectrumAmount = 0;
- args.v2.usSpreadSpectrumStep = 0;
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
@@ -514,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -531,23 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
dp_clock = dig_connector->dp_clock;
}
}
-#if 0 /* doesn't work properly on some laptops */
+
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
+ if (ASIC_IS_AVIVO(rdev))
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
}
-#endif
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ pll->flags |= RADEON_PLL_IS_LCD;
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -582,14 +610,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
- if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- if (ss_enabled)
- args.v1.ucConfig |=
- ADJUST_DISPLAY_CONFIG_SS_ENABLE;
- } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ if (ss_enabled)
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
- }
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -600,12 +623,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- if (ss_enabled)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
@@ -625,18 +648,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- if (ss_enabled)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
- } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
- if (ss_enabled)
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;
- } else {
+ } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
@@ -646,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
@@ -673,9 +691,14 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
+ PIXEL_CLOCK_PARAMETERS_V6 v6;
};
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+ u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +721,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
* SetPixelClock provides the dividers
*/
args.v5.ucCRTC = ATOM_CRTC_INVALID;
- args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.usPixelClock = cpu_to_le16(dispclk);
args.v5.ucPpll = ATOM_DCPLL;
break;
+ case 6:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+ args.v6.ucPpll = ATOM_DCPLL;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -784,6 +814,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucEncoderMode = encoder_mode;
args.v5.ucPpll = pll_id;
break;
+ case 6:
+ args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+ args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+ args.v6.ucRefDiv = ref_div;
+ args.v6.usFbDiv = cpu_to_le16(fb_div);
+ args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v6.ucPostDiv = post_div;
+ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v6.ucTransmitterID = encoder_id;
+ args.v6.ucEncoderMode = encoder_mode;
+ args.v6.ucPpll = pll_id;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -915,8 +957,12 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+ else
+ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
@@ -945,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
}
}
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -958,6 +1004,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
int r;
/* no fb bound */
@@ -1009,11 +1056,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
case 16:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
break;
case 24:
case 32:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
@@ -1058,6 +1111,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+ WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1079,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
- else
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
@@ -1114,6 +1162,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *target_fb;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
int r;
/* no fb bound */
@@ -1167,12 +1216,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
break;
case 24:
case 32:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
@@ -1212,6 +1267,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
radeon_crtc->crtc_offset, (u32) fb_location);
WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1233,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
- AVIVO_D1MODE_INTERLEAVE_EN);
- else
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
@@ -1262,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
else
@@ -1277,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+ return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
else
@@ -1377,7 +1428,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
- atombios_crtc_set_dcpll(crtc);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d44b8d..695de9a38506 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
- int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
+ int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
- if ((lanes == 0) || (bw == 0))
+ if ((lanes == 0) || (dp_clock == 0))
return MODE_CLOCK_HIGH;
return MODE_OK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c361a12..d270b3ff896b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,20 +39,87 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
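
A sketch of how the three helpers above pair up, inferred from the pflip irq get/put calls and the update-pending handshake; the call order and the wrapper name are assumptions, not something this patch defines:

static void example_page_flip(struct radeon_device *rdev, int crtc_id, u64 new_crtc_base)
{
	/* arm the pageflip interrupt and force flips to the vblank interval */
	evergreen_pre_page_flip(rdev, crtc_id);

	/* latch the new scanout address; returns the update-pending status */
	evergreen_page_flip(rdev, crtc_id, new_crtc_base);

	/* ... once the flip interrupt signals completion ... */
	evergreen_post_page_flip(rdev, crtc_id);
}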
/* get temperature in millidegrees */
-u32 evergreen_get_temp(struct radeon_device *rdev)
+int evergreen_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
u32 actual_temp = 0;
- if ((temp >> 10) & 1)
- actual_temp = 0;
- else if ((temp >> 9) & 1)
+ if (temp & 0x400)
+ actual_temp = -256;
+ else if (temp & 0x200)
actual_temp = 255;
- else
- actual_temp = (temp >> 1) & 0xff;
+ else if (temp & 0x100) {
+ actual_temp = temp & 0x1ff;
+ actual_temp |= ~0x1ff;
+ } else
+ actual_temp = temp & 0xff;
+
+ return (actual_temp * 1000) / 2;
+}
+
+int sumo_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+ int actual_temp = temp - 49;
return actual_temp * 1000;
}
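
The decode in evergreen_get_temp() packs saturation flags and a sign bit into the raw ASIC_T field; a restatement as a standalone helper with the apparent meaning of each bit spelled out (the bit interpretation is inferred from the code above, not documented in this patch):

static int decode_asic_t_millidegrees(u32 temp)
{
	int actual_temp;

	if (temp & 0x400)		/* bit 10: reading pegged low */
		actual_temp = -256;
	else if (temp & 0x200)		/* bit 9: reading pegged high */
		actual_temp = 255;
	else if (temp & 0x100)		/* bit 8: negative, sign-extend 9 bits */
		actual_temp = (int)(temp & 0x1ff) - 0x200;
	else				/* plain positive 8-bit value */
		actual_temp = temp & 0xff;

	/* the raw value is in 0.5 degree C steps */
	return (actual_temp * 1000) / 2;
}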
@@ -337,16 +404,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
case 0:
case 4:
default:
- return 3840 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
case 1:
case 5:
- return 5760 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
case 2:
case 6:
- return 7680 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
case 3:
case 7:
- return 1920 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
}
}
@@ -890,31 +969,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1031,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ }
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1083,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -1057,11 +1152,17 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+ tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+ WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+ }
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
- WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
@@ -1084,6 +1185,22 @@ static void evergreen_mc_program(struct radeon_device *rdev)
/*
* CP.
*/
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ /* set to DX10/11 mode */
+ radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(rdev, 1);
+ /* FIXME: implement */
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, ib->length_dw);
+}
+
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
@@ -1094,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
return -EINVAL;
r700_cp_stop(rdev);
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1135,7 +1256,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, evergreen_default_size + 15);
+ r = radeon_ring_lock(rdev, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
@@ -1168,6 +1289,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* */
+
radeon_ring_unlock_commit(rdev);
return 0;
@@ -1208,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_WPTR, 0);
/* set the wb address whether it's enabled or not */
- WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ RB_RPTR_SWAP(2) |
+#endif
+ ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -1285,11 +1415,15 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
+ case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
@@ -1384,6 +1518,46 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_HEMLOCK:
+ case CHIP_CYPRESS:
+ case CHIP_BARTS:
+ tcp_chan_steer_lo = 0x54763210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ default:
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1669,90 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
+ case CHIP_PALM:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_BARTS:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 7;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_TURKS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 6;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
}
/* Initialize HDP */
@@ -1636,6 +1894,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
+ case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
@@ -1687,6 +1946,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ evergreen_program_channel_remap(rdev);
+
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
@@ -1769,9 +2030,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
GS_PRIO(2) |
ES_PRIO(3));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
/* no vertex cache */
sq_config &= ~VC_ENABLE;
+ break;
+ default:
+ break;
+ }
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
@@ -1783,10 +2051,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
ps_thread_count = 96;
- else
+ break;
+ default:
ps_thread_count = 128;
+ break;
+ }
sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
@@ -1817,14 +2090,21 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
- else
+ break;
+ default:
vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ break;
+ }
vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -1904,12 +2184,18 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
- /* size in MB on evergreen */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* size in bytes on fusion */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ } else {
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ }
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
@@ -1917,8 +2203,30 @@ int evergreen_mc_init(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
- /* FIXME: implement for evergreen */
- return false;
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -1926,6 +2234,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
struct evergreen_mc_save save;
u32 grbm_reset = 0;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return 0;
+
dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
@@ -2011,17 +2322,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2362,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2388,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[2]) {
+ if (rdev->irq.crtc_vblank_int[2] ||
+ rdev->irq.pflip[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[3]) {
+ if (rdev->irq.crtc_vblank_int[3] ||
+ rdev->irq.pflip[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[4]) {
+ if (rdev->irq.crtc_vblank_int[4] ||
+ rdev->irq.pflip[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[5]) {
+ if (rdev->irq.crtc_vblank_int[5] ||
+ rdev->irq.pflip[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2130,10 +2452,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2476,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2,
- u32 *disp_int_cont3,
- u32 *disp_int_cont4,
- u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
- *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
- *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
- *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
- if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
- if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
- if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2570,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
void evergreen_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
@@ -2273,8 +2613,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
u32 ring_index;
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
unsigned long flags;
bool queue_hotplug = false;
@@ -2295,31 +2633,34 @@ int evergreen_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- src_id = rdev->ih.ring[ring_index] & 0xff;
- src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+ src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+ src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
switch (src_id) {
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -2331,17 +2672,21 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
- if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -2353,17 +2698,21 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[2])
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
- if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
@@ -2375,17 +2724,21 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[3])
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
- if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
@@ -2397,17 +2750,21 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[4])
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
- if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
@@ -2419,17 +2776,21 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[5])
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
- if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
@@ -2441,43 +2802,43 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int_cont & DC_HPD2_INTERRUPT) {
- disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
- if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
- if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
- disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
- if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
- disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
- if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
- disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -2516,7 +2877,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -2527,12 +2888,31 @@ static int evergreen_startup(struct radeon_device *rdev)
{
int r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
+ /* enable pcie gen2 link */
+ if (!ASIC_IS_DCE5(rdev))
+ evergreen_pcie_gen2_enable(rdev);
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = btc_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
}
evergreen_mc_program(rdev);
@@ -2551,6 +2931,11 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
+ /* XXX: ontario has problems blitting to gart at the moment */
+ if (rdev->family == CHIP_PALM) {
+ rdev->asic->copy = NULL;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -2653,27 +3038,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
return 0;
}
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
- u32 reg;
-
- /* first check CRTCs */
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
- if (reg & EVERGREEN_CRTC_MASTER_EN)
- return true;
-
- /* then check MEM_SIZE, in case the crtcs are off */
- if (RREG32(CONFIG_MEMSIZE))
- return true;
-
- return false;
-}
-
/* Plan is to move initialization in that function and use
* helper function so that radeon_device_init pretty much
* do nothing more than calling asic specific function. This
@@ -2710,7 +3074,7 @@ int evergreen_init(struct radeon_device *rdev)
if (radeon_asic_reset(rdev))
dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
- if (!evergreen_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -2800,3 +3164,55 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, speed_cntl;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
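
The sequence above only requests the gen2 transition by strapping LC_GEN2_EN_STRAP; it does not confirm the result. A minimal sketch of how a caller could check whether the link actually trained to gen2, assuming only the RREG32_PCIE_P accessor and the LC_CURRENT_DATA_RATE bit defined in evergreend.h later in this patch (the helper name is illustrative, not part of the change):

static bool evergreen_pcie_link_is_gen2(struct radeon_device *rdev)
{
	/* LC_CURRENT_DATA_RATE is set once the link is running at the gen2 rate,
	 * i.e. it reflects the negotiated speed rather than the request above */
	u32 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);

	return !!(speed_cntl & LC_CURRENT_DATA_RATE);
}
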
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e590110dd4..2adfb03f479b 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 24));
+ cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
/* high addr, stride */
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= (2 << 30);
+#endif
/* xyzw swizzles */
sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
@@ -147,7 +150,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, 48, gpu_addr);
else
@@ -171,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
sq_tex_resource_word0 = (1 << 0); /* 2D */
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0);
+ sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
/* xyzw swizzles */
sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
@@ -219,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
radeon_ring_write(rdev, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+ radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+ (2 << 2) |
+#endif
+ DI_INDEX_SIZE_16_BIT);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
@@ -230,7 +239,7 @@ draw_auto(struct radeon_device *rdev)
}
-/* emits 30 */
+/* emits 36 */
static void
set_default_state(struct radeon_device *rdev)
{
@@ -243,6 +252,8 @@ set_default_state(struct radeon_device *rdev)
int num_hs_threads, num_ls_threads;
int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
int num_hs_stack_entries, num_ls_stack_entries;
+ u64 gpu_addr;
+ int dwords;
switch (rdev->family) {
case CHIP_CEDAR:
@@ -331,9 +342,95 @@ set_default_state(struct radeon_device *rdev)
num_hs_stack_entries = 85;
num_ls_stack_entries = 85;
break;
+ case CHIP_PALM:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_BARTS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_TURKS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_CAICOS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 10;
+ num_gs_threads = 10;
+ num_es_threads = 10;
+ num_hs_threads = 10;
+ num_ls_threads = 10;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
}
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
sq_config = 0;
else
sq_config = VC_ENABLE;
@@ -409,6 +506,18 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
+ /* set to DX10/11 mode */
+ radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(rdev, 1);
+
+ /* emit an IB pointing at default state */
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(rdev, dwords);
+
}
static inline uint32_t i2f(uint32_t input)
@@ -439,8 +548,10 @@ static inline uint32_t i2f(uint32_t input)
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
- int r;
+ int i, r, dwords;
void *ptr;
+ u32 packet2s[16];
+ int num_packet2s = 0;
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
@@ -448,8 +559,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
- rdev->r600_blit.state_len = 0;
- obj_size = 0;
+
+ rdev->r600_blit.state_len = evergreen_default_size;
+
+ dwords = rdev->r600_blit.state_len;
+ while (dwords & 0xf) {
+ packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+ dwords++;
+ }
+
+ obj_size = dwords * 4;
+ obj_size = ALIGN(obj_size, 256);
rdev->r600_blit.vs_offset = obj_size;
obj_size += evergreen_vs_size * 4;
@@ -479,8 +599,16 @@ int evergreen_blit_init(struct radeon_device *rdev)
return r;
}
- memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
- memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < evergreen_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+ for (i = 0; i < evergreen_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
@@ -564,7 +692,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
- ring_size += 46; /* shaders + def state */
+ ring_size += 52; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
@@ -572,7 +700,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
if (r)
return r;
- set_default_state(rdev); /* 30 */
+ set_default_state(rdev); /* 36 */
set_shaders(rdev); /* 16 */
return 0;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
0x00000000,
0x3c000000,
0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x000a0000,
+#else
0x00080000,
+#endif
0x00000000,
0x1c000000,
0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
0x00000008,
+#endif
0x00000000,
};
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36fd5..c781c92c3451 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
+#define EVERGREEN_GRPH_UPDATE 0x6844
+# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
+# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
@@ -178,6 +183,7 @@
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c44359..eb4acf4528ff 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
+#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
@@ -164,11 +165,13 @@
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
-
+/* evergreen */
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x7FF0000
#define ASIC_T_SHIFT 16
+/* APU */
+#define CG_THERMAL_STATUS 0x678
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
@@ -181,6 +184,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -200,6 +204,7 @@
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_FB_LOCATION 0x2024
+#define MC_FUS_VM_FB_OFFSET 0x2898
#define MC_VM_MB_L1_TLB0_CNTL 0x2234
#define MC_VM_MB_L1_TLB1_CNTL 0x2238
#define MC_VM_MB_L1_TLB2_CNTL 0x223C
@@ -236,6 +241,7 @@
#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
#define PA_SC_LINE_STIPPLE_STATE 0x8B10
#define SCRATCH_REG0 0x8500
@@ -349,6 +355,9 @@
#define SYNC_WALKER (1 << 25)
#define SYNC_ALIGNER (1 << 26)
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
@@ -574,6 +583,44 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
@@ -603,10 +650,11 @@
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
@@ -644,14 +692,14 @@
# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
-# define PACKET3_CB11_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
# define PACKET3_FULL_CACHE_ENA (1 << 20)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
-# define PACKET3_SMX_ACTION_ENA (1 << 28)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
last_reg = strtol(last_reg_s, NULL, 16);
do {
- if (fgets(buf, 1024, file) == NULL)
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
return -1;
+ }
len = strlen(buf);
if (ftell(file) == end)
done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
fprintf(stderr,
"Error matching regular expression %d in %s\n",
r, filename);
+ fclose(file);
return -1;
} else {
buf[match[0].rm_eo] = 0;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 000000000000..5e0bef80ad7f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00916a00}
+};
+
+int btc_mc_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ u32 mem_type, running, blackout = 0;
+ u32 *io_mc_regs;
+ int i;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ io_mc_regs = (u32 *)&barts_io_mc_regs;
+ break;
+ case CHIP_TURKS:
+ io_mc_regs = (u32 *)&turks_io_mc_regs;
+ break;
+ case CHIP_CAICOS:
+ default:
+ io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ break;
+ }
+
+ mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+ if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
+ if (running) {
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+ }
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
+ /* load the MC ucode */
+ fw_data = (const __be32 *)rdev->mc_fw->data;
+ for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+ udelay(10);
+
+ if (running)
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+ }
+
+ return 0;
+}
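
For clarity, the gate at the top of the load path decodes the memory type out of MC_SEQ_MISC0 using the mask and shift added in nid.h below. A standalone sketch of that check, mirroring the code above (the helper name is illustrative only):

static bool btc_vram_is_gddr5(struct radeon_device *rdev)
{
	/* MC_SEQ_MISC0[31:28] encodes the memory type; a value of 5 means GDDR5 */
	u32 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >>
		       MC_SEQ_MISC0_GDDR5_SHIFT;

	return mem_type == MC_SEQ_MISC0_GDDR5_VALUE;
}
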
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ chip_name = "BARTS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_TURKS:
+ chip_name = "TURKS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_CAICOS:
+ chip_name = "CAICOS";
+ rlc_chip_name = "BTC";
+ break;
+ default: BUG();
+ }
+
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mc_fw->size != mc_req_size) {
+ printk(KERN_ERR
+ "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "ni_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ release_firmware(rdev->mc_fw);
+ rdev->mc_fw = NULL;
+ }
+ return err;
+}
+
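
ni_init_microcode() above repeats the same request-then-validate step for the PFP, ME, RLC and MC images. A sketch of how that pattern could be factored out, kept here purely as an illustration (the helper and its signature are hypothetical, not kernel API):

static int ni_request_ucode(const struct firmware **fw, const char *fw_name,
			    size_t req_size, struct device *dev)
{
	/* fetch one firmware image and reject it if the size is unexpected */
	int err = request_firmware(fw, fw_name, dev);

	if (err)
		return err;
	if ((*fw)->size != req_size) {
		printk(KERN_ERR "ni: Bogus length %zu in firmware \"%s\"\n",
		       (*fw)->size, fw_name);
		return -EINVAL;
	}
	return 0;
}
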
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 000000000000..5db7b7d6feb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL 0x6840
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x68b4
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x68c4
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL 0x68d4
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x68f0
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x6960
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x6964
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x6a80
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 000000000000..f7b445390e02
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_SEQ_SUP_CNTL 0x28c8
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_IO_PAD_CNTL_D0 0x29d0
+#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
+#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f74b0..93fa735c8c1a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+ tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+ /* make sure pending bit is asserted */
+ tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen as late as possible in the vblank interval.
+ * same field for crtc1/2
+ */
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+ /* Lock the graphics update lock */
+ /* update the scanout addresses */
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
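
Taken together, the three hooks above bracket a flip: arm the vblank-synchronized flip and the pflip interrupt, latch the new scanout base, and drop the interrupt reference once the flip has completed. A rough sketch of the intended ordering as seen from the generic pageflip path (function name and arguments are placeholders, not part of this patch; in practice the completion runs from the interrupt handler rather than inline):

static void r100_flip_sequence_example(struct radeon_device *rdev,
				       int crtc_id, u64 new_crtc_base)
{
	r100_pre_page_flip(rdev, crtc_id);            /* flip at vblank, pflip irq on */
	r100_page_flip(rdev, crtc_id, new_crtc_base); /* latch the new scanout base */
	/* the pflip interrupt then calls radeon_crtc_handle_flip(rdev, crtc_id) */
	r100_post_page_flip(rdev, crtc_id);           /* release the pflip irq reference */
}
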
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -622,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev)
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
@@ -971,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
REG_SET(RADEON_INDIRECT1_START, indirect1_start));
- WREG32(0x718, 0);
- WREG32(0x744, 0x00004D4D);
+ WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+ WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev);
r = radeon_ring_test(rdev);
@@ -1367,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
+ track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
@@ -1379,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
+ track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
@@ -1394,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T0_0:
case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1411,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T1_0:
case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1428,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T2_0:
case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1445,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1468,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1495,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
@@ -1512,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
default:
break;
}
+ track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
+ track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
@@ -1542,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+ track->tex_dirty = true;
break;
case RADEON_PP_TEX_PITCH_0:
case RADEON_PP_TEX_PITCH_1:
case RADEON_PP_TEX_PITCH_2:
i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
track->textures[i].pitch = idx_value + 32;
+ track->tex_dirty = true;
break;
case RADEON_PP_TXFILTER_0:
case RADEON_PP_TXFILTER_1:
@@ -1561,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
+ track->tex_dirty = true;
break;
case RADEON_PP_TXFORMAT_0:
case RADEON_PP_TXFORMAT_1:
@@ -1613,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+ track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_FACES_0:
case RADEON_PP_CUBIC_FACES_1:
@@ -1623,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
+ track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2026,12 +2105,13 @@ int r100_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
u32 status, tmp;
+ int ret = 0;
- r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -2071,11 +2151,11 @@ int r100_asic_reset(struct radeon_device *rdev)
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
void r100_set_common_regs(struct radeon_device *rdev)
@@ -2286,10 +2366,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
temp = RREG32(RADEON_CONFIG_CNTL);
if (state == false) {
- temp &= ~(1<<8);
- temp |= (1<<9);
+ temp &= ~RADEON_CFG_VGA_RAM_EN;
+ temp |= RADEON_CFG_VGA_IO_DIS;
} else {
- temp &= ~(1<<9);
+ temp &= ~RADEON_CFG_VGA_IO_DIS;
}
WREG32(RADEON_CONFIG_CNTL, temp);
}
@@ -3257,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
unsigned long size;
unsigned prim_walk;
unsigned nverts;
- unsigned num_cb = track->num_cb;
+ unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
- if (!track->zb_cb_clear && !track->color_channel_mask &&
+ if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
@@ -3280,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
return -EINVAL;
}
}
- if (track->z_enabled) {
+ track->cb_dirty = false;
+
+ if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
@@ -3297,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
return -EINVAL;
}
}
+ track->zb_dirty = false;
+
+ if (track->aa_dirty && track->aaresolve) {
+ if (track->aa.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ return -EINVAL;
+ }
+ /* I believe the format comes from colorbuffer0. */
+ size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+ size += track->aa.offset;
+ if (size > radeon_bo_size(track->aa.robj)) {
+ DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->aa_dirty = false;
+
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
@@ -3356,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
prim_walk);
return -EINVAL;
}
- return r100_cs_track_texture_check(rdev, track);
+
+ if (track->tex_dirty) {
+ track->tex_dirty = false;
+ return r100_cs_track_texture_check(rdev, track);
+ }
+ return 0;
}
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ track->tex_dirty = true;
+ track->aa_dirty = true;
+
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
@@ -3376,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
+ track->aaresolve = false;
+ track->aa.robj = NULL;
}
for (i = 0; i < track->num_cb; i++) {
@@ -3461,7 +3577,7 @@ int r100_ring_test(struct radeon_device *rdev)
if (i < rdev->usec_timeout) {
DRM_INFO("ring test succeeded in %d usecs\n", i);
} else {
- DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -3685,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
- /* Initialize GPU configuration (# pipes, ...) */
-// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
unsigned compress_format;
};
-struct r100_cs_track_limits {
- unsigned num_cb;
- unsigned num_texture;
- unsigned max_levels;
-};
-
struct r100_cs_track {
- struct radeon_device *rdev;
unsigned num_cb;
unsigned num_texture;
unsigned maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
struct r100_cs_track_array arrays[11];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
+ struct r100_cs_track_cb aa;
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
bool zb_cb_clear;
bool blend_read_enable;
+ bool cb_dirty;
+ bool zb_dirty;
+ bool tex_dirty;
+ bool aa_dirty;
+ bool aaresolve;
};
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c678d4..eab91760fae0 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
#define C_000360_CUR2_LOCK 0x7FFFFFFF
-#define R_0003C2_GENMO_WT 0x0003C0
+#define R_0003C2_GENMO_WT 0x0003C2
#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
+ track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
+ track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case R200_PP_CUBIC_OFFSET_F1_0:
case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
+ track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
default:
break;
}
+ track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
+ track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+ track->tex_dirty = true;
break;
case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = idx_value + 32;
+ track->tex_dirty = true;
break;
case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
+ track->tex_dirty = true;
break;
case R200_PP_TXMULTI_CTL_0:
case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].tex_coord_type = 1;
break;
}
+ track->tex_dirty = true;
break;
case R200_PP_TXFORMAT_0:
case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+ track->tex_dirty = true;
break;
case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
+ track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d3480d93..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
mb();
}
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE (1 << 3)
+
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
}
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24) |
- 0xc;
+ R300_PTE_WRITEABLE | R300_PTE_READABLE;
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
- WREG32_PCIE(0x18, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_EN;
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -405,12 +408,13 @@ int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
u32 status, tmp;
+ int ret = 0;
- r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -451,11 +455,11 @@ int r300_asic_reset(struct radeon_device *rdev)
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
r100_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
/*
@@ -558,10 +562,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev)
/* FIXME wait for idle */
- if (rdev->family < CHIP_R600)
- link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
- else
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -666,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
+ track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
@@ -678,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
+ track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
@@ -716,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tmp |= tile_flags;
ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
break;
/* Tracked registers */
case 0x2084:
@@ -742,10 +746,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (p->rdev->family < CHIP_RV515) {
track->maxy -= 1440;
}
+ track->cb_dirty = true;
+ track->zb_dirty = true;
break;
case 0x4E00:
/* RB3D_CCTL */
+ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+ p->rdev->cmask_filp != p->filp) {
+ DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ return -EINVAL;
+ }
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+ track->cb_dirty = true;
break;
case 0x4E38:
case 0x4E3C:
@@ -787,6 +799,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 15:
track->cb[i].cpp = 2;
break;
+ case 5:
+ if (p->rdev->family < CHIP_RV515) {
+ DRM_ERROR("Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
+ return -EINVAL;
+ }
+ /* Pass through. */
case 6:
track->cb[i].cpp = 4;
break;
@@ -801,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
((idx_value >> 21) & 0xF));
return -EINVAL;
}
+ track->cb_dirty = true;
break;
case 0x4F00:
/* ZB_CNTL */
@@ -809,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
} else {
track->z_enabled = false;
}
+ track->zb_dirty = true;
break;
case 0x4F10:
/* ZB_FORMAT */
@@ -825,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
(idx_value & 0xF));
return -EINVAL;
}
+ track->zb_dirty = true;
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
@@ -848,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->zb.pitch = idx_value & 0x3FFC;
+ track->zb_dirty = true;
break;
case 0x4104:
+ /* TX_ENABLE */
for (i = 0; i < 16; i++) {
bool enabled;
enabled = !!(idx_value & (1 << i));
track->textures[i].enabled = enabled;
}
+ track->tex_dirty = true;
break;
case 0x44C0:
case 0x44C4:
@@ -885,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
+ case R300_TX_FORMAT_FL_I16:
case R300_TX_FORMAT_Y8X8:
case R300_TX_FORMAT_Z5Y6X5:
case R300_TX_FORMAT_Z6Y5X5:
@@ -897,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
+ case R300_TX_FORMAT_FL_I16A16:
case R300_TX_FORMAT_Z11Y11X10:
case R300_TX_FORMAT_Z10Y11X11:
case R300_TX_FORMAT_W8Z8Y8X8:
@@ -938,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
return -EINVAL;
- break;
}
+ track->tex_dirty = true;
break;
case 0x4400:
case 0x4404:
@@ -967,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_h = false;
}
+ track->tex_dirty = true;
break;
case 0x4500:
case 0x4504:
@@ -1004,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
+ track->tex_dirty = true;
break;
case 0x4480:
case 0x4484:
@@ -1033,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = !!tmp;
tmp = (idx_value >> 22) & 0xF;
track->textures[i].txdepth = tmp;
+ track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1047,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
+ track->cb_dirty = true;
break;
case 0x43a4:
/* SC_HYPERZ_EN */
@@ -1060,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4f1c:
/* ZB_BW_CNTL */
track->zb_cb_clear = !!(idx_value & (1 << 5));
+ track->cb_dirty = true;
+ track->zb_dirty = true;
if (p->rdev->hyperz_filp != p->filp) {
if (idx_value & (R300_HIZ_ENABLE |
R300_RD_COMP_ENABLE |
@@ -1071,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
+ track->cb_dirty = true;
break;
- case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+ case R300_RB3D_AARESOLVE_OFFSET:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->aa.robj = reloc->robj;
+ track->aa.offset = idx_value;
+ track->aa_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_RB3D_AARESOLVE_PITCH:
+ track->aa.pitch = idx_value & 0x3FFE;
+ track->aa_dirty = true;
+ break;
+ case R300_RB3D_AARESOLVE_CTL:
+ track->aaresolve = idx_value & 0x1;
+ track->aa_dirty = true;
break;
case 0x4f30: /* ZB_MASK_OFFSET */
case 0x4f34: /* ZB_ZMASK_PITCH */
@@ -1199,6 +1252,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
if (p->rdev->hyperz_filp != p->filp)
return -EINVAL;
break;
+ case PACKET3_3D_CLEAR_CMASK:
+ if (p->rdev->cmask_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
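/*
 * Illustrative only, not part of this patch: how rv370_pcie_gart_set_page()
 * above packs a 40-bit, 4 KiB-aligned bus address into a 32-bit GART PTE
 * using the R300_PTE_WRITEABLE/R300_PTE_READABLE flags it introduces.
 * The low 12 address bits are zero for an aligned page, so the flag bits
 * never collide with address bits.  The function name is hypothetical.
 */
static u32 rv370_pack_pte_sketch(u64 addr)
{
	u32 pte;

	pte  = (u32)(addr & 0xffffffffULL) >> 8;	/* addr[31:8]  -> pte[23:0] */
	pte |= (u32)((addr >> 32) & 0xff) << 24;	/* addr[39:32] -> pte[31:24] */
	pte |= R300_PTE_WRITEABLE | R300_PTE_READABLE;	/* GPU may read and write */
	return pte;
}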
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
+#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
+#define R300_RB3D_AARESOLVE_PITCH 0x4E84
#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c60d9df..1f519a5ffb8c 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_3D_CLEAR_HIZ 0x37
+#define PACKET3_3D_CLEAR_CMASK 0x38
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c387346f93a9..0b59ed7c7d2c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
/* get max number of pipes */
- gb_pipe_select = RREG32(0x402C);
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
/* SE chips have 1 pipe */
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29b..fc437059918f 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c8677f9e385..2ce80d976568 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev)
WREG32(0x4128, 0xFF);
}
r420_pipes_init(rdev);
- gb_pipe_select = RREG32(0x402C);
- tmp = RREG32(0x170C);
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
pipe_select_current = (tmp >> 2) & 3;
tmp = (1 << pipe_select_current) |
(((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7c896b..de88624d5f87 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -91,14 +94,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
-u32 rv6xx_get_temp(struct radeon_device *rdev)
+int rv6xx_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
+ int actual_temp = temp & 0xff;
- return temp * 1000;
+ if (temp & 0x100)
+ actual_temp -= 256;
+
+ return actual_temp * 1000;
}
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1164,7 +1172,7 @@ static void r600_mc_program(struct radeon_device *rdev)
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platform.
*/
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
@@ -1283,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 tmp;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return 0;
+
dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
@@ -2009,6 +2020,10 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
break;
+ case CHIP_PALM:
+ chip_name = "PALM";
+ rlc_chip_name = "SUMO";
+ break;
default: BUG();
}
@@ -2090,7 +2105,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
r600_cp_stop(rdev);
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2177,7 +2196,11 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_WPTR, 0);
/* set the wb address whether it's enabled or not */
- WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+ RB_RPTR_SWAP(2) |
+#endif
+ ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2350,28 +2373,13 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
/* FIXME: implement */
}
-
-bool r600_card_posted(struct radeon_device *rdev)
-{
- uint32_t reg;
-
- /* first check CRTCs */
- reg = RREG32(D1CRTC_CONTROL) |
- RREG32(D2CRTC_CONTROL);
- if (reg & CRTC_EN)
- return true;
-
- /* then check MEM_SIZE, in case the crtcs are off */
- if (RREG32(CONFIG_MEMSIZE))
- return true;
-
- return false;
-}
-
int r600_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -2525,7 +2533,7 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -2628,7 +2636,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
@@ -2874,6 +2886,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3012,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
+ u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3049,13 @@ int r600_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3081,6 +3098,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3122,35 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void r600_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
if (ASIC_IS_DCE3(rdev)) {
- *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
} else {
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = 0;
- }
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ }
+ rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+ if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3161,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
}
- if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3172,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3183,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (ASIC_IS_DCE32(rdev)) {
- if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3216,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
void r600_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
r600_disable_interrupt_state(rdev);
}
@@ -3262,7 +3282,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
@@ -3283,30 +3303,34 @@ int r600_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- src_id = rdev->ih.ring[ring_index] & 0xff;
- src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+ src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+ src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
switch (src_id) {
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -3318,17 +3342,21 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D2_VLINE_INTERRUPT) {
- disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -3340,43 +3368,43 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int & DC_HPD2_INTERRUPT) {
- disp_int &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 4:
- if (disp_int_cont & DC_HPD3_INTERRUPT) {
- disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 5:
- if (disp_int_cont & DC_HPD4_INTERRUPT) {
- disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 10:
- if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
- if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -3419,7 +3447,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3508,3 +3536,222 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ u32 link_width_cntl, mask, target_reg;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* FIXME wait for idle */
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ default:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ }
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RENEGOTIATE_EN |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= mask;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ /* some northbridges can renegotiate the link rather than requiring
+ * a complete re-config.
+ * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+ */
+ if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+ link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+ else
+ link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ if (rdev->family >= CHIP_RV770)
+ target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+ else
+ target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+ /* wait for lane set to complete */
+ link_width_cntl = RREG32(target_reg);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32(target_reg);
+
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+ u16 link_cntl2;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* only RV6xx+ chips are supported */
+ if (rdev->family <= CHIP_R600)
+ return;
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ WREG32(MM_CFGREGS_CNTL, 0x8);
+ link_cntl2 = RREG32(0x4088);
+ WREG32(MM_CFGREGS_CNTL, 0);
+ /* not supported yet */
+ if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+ return;
+ }
+
+ speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+ speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+ speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+ speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl &= ~LC_POINT_7_PLUS_EN;
+ WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ } else {
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
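/*
 * Illustrative only, not part of this patch: the 9-bit two's-complement
 * decode that the rv6xx_get_temp() change above performs.  ASIC_T is an
 * unsigned 9-bit field; bit 8 is the sign, so a raw value of 0x1F0
 * decodes to -16 degC, and the result is reported in millidegrees.
 * The function name is hypothetical.
 */
static int decode_asic_t_sketch(u32 asic_t_field)
{
	int t = asic_t_field & 0xff;	/* low 8 bits of the reading */

	if (asic_t_field & 0x100)	/* sign bit set: value is negative */
		t -= 256;
	return t * 1000;		/* degC -> millidegrees C */
}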
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
for (i = 0; i < r6xx_vs_size; i++)
- vs[i] = r6xx_vs[i];
+ vs[i] = cpu_to_le32(r6xx_vs[i]);
for (i = 0; i < r6xx_ps_size; i++)
- ps[i] = r6xx_ps[i];
+ ps[i] = cpu_to_le32(r6xx_ps[i]);
dev_priv->blit_vb->used = 512;
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
DRM_DEBUG("\n");
sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= (2 << 30);
+#endif
BEGIN_RING(9);
OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
OUT_RING(DI_PT_RECTLIST);
OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+ OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa07f0db..41f7aafc97c4 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 27));
+ cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= (2 << 30);
+#endif
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
if (h < 1)
h = 1;
- sq_tex_resource_word0 = (1 << 0);
+ sq_tex_resource_word0 = (1 << 0) | (1 << 3);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
((w - 1) << 19));
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
radeon_ring_write(rdev, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+ radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+ (2 << 2) |
+#endif
+ DI_INDEX_SIZE_16_BIT);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
int r600_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
- int r, dwords;
+ int i, r, dwords;
void *ptr;
u32 packet2s[16];
int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
dwords = rdev->r600_blit.state_len;
while (dwords & 0xf) {
- packet2s[num_packet2s++] = PACKET2(0);
+ packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
dwords++;
}
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
- memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
- memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+ for (i = 0; i < r6xx_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+ for (i = 0; i < r6xx_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
0x00000000,
0x3c000000,
0x68cd1000,
+#ifdef __BIG_ENDIAN
+ 0x000a0000,
+#else
0x00080000,
+#endif
0x00000000,
};
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
r600_do_cp_stop(dev_priv);
RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
R600_RB_NO_UPDATE |
R600_RB_BLKSZ(15) |
R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
r600_do_cp_stop(dev_priv);
RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
R600_RB_NO_UPDATE |
- (15 << 8) |
- (3 << 0));
+ R600_RB_BLKSZ(15) |
+ R600_RB_BUFSZ(3));
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
if (!dev_priv->writeback_works) {
/* Disable writeback to avoid unnecessary bus master transfer */
- RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
- RADEON_RB_NO_UPDATE);
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ RADEON_READ(R600_CP_RB_CNTL) |
+ R600_RB_NO_UPDATE);
RADEON_WRITE(R600_SCRATCH_UMSK, 0);
}
}
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
- RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ R600_RB_RPTR_WR_ENA);
RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
+ dev_priv->gart_vm_start;
}
RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
- rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (rptr_addr & 0xfffffffc));
RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
upper_32_bits(rptr_addr));
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
{
u64 scratch_addr;
- scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+ scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
scratch_addr += R600_SCRATCH_REG_OFFSET;
scratch_addr >>= 8;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e0890210..153095fba62f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
+ dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+ dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+ dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+ array_mode,
+ track->cb_color_bo_offset[i], tmp,
+ radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
}
}
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
- __func__, __LINE__, height);
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+ dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
}
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
/* XXX check height as well... */
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
- __func__, __LINE__, base_offset);
+ dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
- __func__, __LINE__, mip_offset);
+ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 33cda016b083..f869897c7456 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -81,7 +81,11 @@
#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
-
+#define R600_D1GRPH_SWAP_CONTROL 0x610C
+# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
#define R600_HDP_NONSURFACE_BASE 0x2c04
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4f410f..04bac0bbd3ec 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_BASE 0xC100
#define CP_RB_CNTL 0xC104
-#define RB_BUFSZ(x) ((x)<<0)
-#define RB_BLKSZ(x) ((x)<<8)
-#define RB_NO_UPDATE (1<<27)
-#define RB_RPTR_WR_ENA (1<<31)
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
+#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
@@ -728,6 +729,54 @@
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
+#define D1GRPH_INTERRUPT_STATUS 0x6158
+#define D2GRPH_INTERRUPT_STATUS 0x6958
+# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL 0x615c
+#define D2GRPH_INTERRUPT_CONTROL 0x695c
+# define DxGRPH_PFLIP_INT_MASK (1 << 0)
+# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+# define LC_POINT_7_PLUS_EN (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d44..56c48b67ef3d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -91,6 +92,7 @@ extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -177,9 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
void rs690_pm_info(struct radeon_device *rdev);
-extern u32 rv6xx_get_temp(struct radeon_device *rdev);
-extern u32 rv770_get_temp(struct radeon_device *rdev);
-extern u32 evergreen_get_temp(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
/*
* Fences.
@@ -259,13 +262,12 @@ struct radeon_bo {
};
struct radeon_bo_list {
- struct list_head list;
+ struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
- bool reserved;
};
/*
@@ -377,11 +379,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* IRQS.
*/
+
+struct radeon_unpin_work {
+ struct work_struct work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct radeon_fence *fence;
+ struct drm_pending_vblank_event *event;
+ struct radeon_bo *old_rbo;
+ u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+ u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 d1grph_int;
+ u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 disp_int_cont3;
+ u32 disp_int_cont4;
+ u32 disp_int_cont5;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+ struct r500_irq_stat_regs r500;
+ struct r600_irq_stat_regs r600;
+ struct evergreen_irq_stat_regs evergreen;
+};
+
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
+ bool pflip[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
@@ -392,12 +439,17 @@ struct radeon_irq {
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
+ union radeon_irq_stat_regs stat_regs;
+ spinlock_t pflip_lock[6];
+ int pflip_refcount[6];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
* CP & ring.
@@ -687,6 +739,8 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
};
struct radeon_voltage {
@@ -758,8 +812,7 @@ struct radeon_pm {
fixed20_12 sclk;
fixed20_12 mclk;
fixed20_12 needed_bandwidth;
- /* XXX: use a define for num power modes */
- struct radeon_power_state power_state[8];
+ struct radeon_power_state *power_state;
/* number of valid power states */
int num_power_states;
int current_power_state_index;
@@ -770,6 +823,9 @@ struct radeon_pm {
u32 current_sclk;
u32 current_mclk;
u32 current_vddc;
+ u32 default_sclk;
+ u32 default_mclk;
+ u32 default_vddc;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
@@ -881,6 +937,10 @@ struct radeon_asic {
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+ /* pageflipping */
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
/*
@@ -975,6 +1035,7 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1091,11 +1152,11 @@ struct radeon_device {
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
+ const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
- struct workqueue_struct *wq;
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@ struct radeon_device {
uint8_t audio_status_bits;
uint8_t audio_category_code;
- bool powered_down;
struct notifier_block acpi_nb;
- /* only one userspace can use Hyperz features at a time */
+ /* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
+ struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
@@ -1188,6 +1249,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1261,6 +1324,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+ (rdev->ddev->pdev->device == 0x9443) || \
+ (rdev->ddev->pdev->device == 0x944B) || \
+ (rdev->ddev->pdev->device == 0x9506) || \
+ (rdev->ddev->pdev->device == 0x9509) || \
+ (rdev->ddev->pdev->device == 0x950F) || \
+ (rdev->ddev->pdev->device == 0x689C) || \
+ (rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -1269,6 +1340,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+ (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
/*
* BIOS helpers.
@@ -1344,6 +1418,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
/* Common functions */
/* AGP */
@@ -1372,67 +1449,7 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-
-/* rv200,rv250,rv280 */
-extern void r200_set_safe_registers(struct radeon_device *rdev);
-
-/* r300,r350,rv350,rv370,rv380 */
-extern void r300_set_reg_safe(struct radeon_device *rdev);
-extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_mc_init(struct radeon_device *rdev);
-extern void r300_clock_startup(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
-extern int rv370_pcie_gart_init(struct radeon_device *rdev);
-extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
-extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
-extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* r420,r423,rv410 */
-extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
-extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-extern void r420_pipes_init(struct radeon_device *rdev);
-
-/* rv515 */
-struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
-};
-extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-extern void rv515_vga_render_disable(struct radeon_device *rdev);
-extern void rv515_set_safe_registers(struct radeon_device *rdev);
-extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_clock_startup(struct radeon_device *rdev);
-extern void rv515_debugfs(struct radeon_device *rdev);
-extern int rv515_suspend(struct radeon_device *rdev);
-
-/* rs400 */
-extern int rs400_gart_init(struct radeon_device *rdev);
-extern int rs400_gart_enable(struct radeon_device *rdev);
-extern void rs400_gart_adjust_size(struct radeon_device *rdev);
-extern void rs400_gart_disable(struct radeon_device *rdev);
-extern void rs400_gart_fini(struct radeon_device *rdev);
-
-/* rs600 */
-extern void rs600_set_safe_registers(struct radeon_device *rdev);
-extern int rs600_irq_set(struct radeon_device *rdev);
-extern void rs600_irq_disable(struct radeon_device *rdev);
-
-/* rs690, rs740 */
-extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
- struct drm_display_mode *mode1,
- struct drm_display_mode *mode2);
-
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@ extern int evergreen_irq_set(struct radeon_device *rdev);
extern int evergreen_blit_init(struct radeon_device *rdev);
extern void evergreen_blit_fini(struct radeon_device *rdev);
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int btc_mc_load_microcode(struct radeon_device *rdev);
+
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
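
Editor's note: the radeon.h hunks above turn the fixed power_state[8] array into a dynamically sized pointer, add per-ASIC page-flip hooks, introduce 16-bit MMIO accessors (RREG16/WREG16) alongside the existing 8/32-bit ones, and define the DCE4.1/DCE5 family checks. The block below is only an illustrative, standalone C sketch of the 16-bit accessor pattern; demo_device, demo_rreg16/demo_wreg16 and the register offset are invented for the example, and the real macros wrap readw()/writew() on the ioremap()'d rdev->rmmio with the usual MMIO semantics.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_device {
	uint8_t *mmio_base;	/* stands in for the mapped register BAR */
};

static uint16_t demo_rreg16(const struct demo_device *d, uint32_t reg)
{
	uint16_t v;

	memcpy(&v, d->mmio_base + reg, sizeof(v));	/* RREG16(reg) analogue */
	return v;
}

static void demo_wreg16(struct demo_device *d, uint32_t reg, uint16_t v)
{
	memcpy(d->mmio_base + reg, &v, sizeof(v));	/* WREG16(reg, v) analogue */
}

int main(void)
{
	static uint8_t fake_bar[256];			/* pretend BAR backing store */
	struct demo_device dev = { .mmio_base = fake_bar };

	demo_wreg16(&dev, 0x10, 0xBEEF);
	printf("reg 0x10 = 0x%04X\n", (unsigned)demo_rreg16(&dev, 0x10));
	return 0;
}
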
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89ecbf74..e75d63b8e21d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@ static struct radeon_asic r600_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@ static struct radeon_asic rv770_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic evergreen_asic = {
@@ -720,7 +759,96 @@ static struct radeon_asic evergreen_asic = {
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
+static struct radeon_asic sumo_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic btc_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
@@ -749,6 +877,9 @@ static struct radeon_asic evergreen_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_HEMLOCK:
rdev->asic = &evergreen_asic;
break;
+ case CHIP_PALM:
+ rdev->asic = &sumo_asic;
+ break;
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ rdev->asic = &btc_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -849,7 +988,9 @@ int radeon_asic_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE41(rdev))
+ rdev->num_crtc = 2;
+ else if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
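
Editor's note: radeon_asic.c wires the new pre_page_flip/page_flip/post_page_flip callbacks into every family's ops table and leaves hooks a family does not support (for example get_pcie_lanes/set_pcie_lanes in sumo_asic) as NULL. The sketch below is a self-contained, hypothetical illustration of that optional-hook dispatch style; none of the struct or function names are taken from the driver.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

struct demo_ops {
	void (*pre_page_flip)(int crtc);
	uint32_t (*page_flip)(int crtc, uint64_t base);
	void (*post_page_flip)(int crtc);
	int (*get_pcie_lanes)(void);	/* optional: may be NULL on some families */
};

static void demo_pre_flip(int crtc) { printf("pre  flip, crtc %d\n", crtc); }
static void demo_post_flip(int crtc) { printf("post flip, crtc %d\n", crtc); }
static uint32_t demo_flip(int crtc, uint64_t base)
{
	printf("flip crtc %d to base 0x%llx\n", crtc, (unsigned long long)base);
	return 0;
}

static const struct demo_ops igp_ops = {
	.pre_page_flip	= demo_pre_flip,
	.page_flip	= demo_flip,
	.post_page_flip	= demo_post_flip,
	.get_pcie_lanes	= NULL,		/* unsupported on this demo family */
};

int main(void)
{
	const struct demo_ops *ops = &igp_ops;

	ops->pre_page_flip(0);
	ops->page_flip(0, 0x100000);
	ops->post_page_flip(0);

	if (ops->get_pcie_lanes)	/* optional hooks are NULL-checked before use */
		printf("lanes: %d\n", ops->get_pcie_lanes());
	else
		printf("pcie lane query not supported\n");
	return 0;
}
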
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 740988244143..c59bd98a2029 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+ struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
/*
* r200,rv250,rs300,rv280
*/
extern int r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
struct radeon_fence *fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
/*
* r300,r350,rv350,rv380
@@ -159,6 +168,15 @@ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -168,6 +186,10 @@ extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -180,6 +202,12 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+
/*
* rs600.
@@ -191,6 +219,7 @@ extern int rs600_suspend(struct radeon_device *rdev);
extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+
/*
* rs690,rs740
@@ -216,10 +250,21 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode1,
+ struct drm_display_mode *mode2);
/*
* rv515
*/
+struct rv515_mc_save {
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 d1crtc_control;
+ u32 d2crtc_control;
+};
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+
/*
* r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@ extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
extern void rv770_pm_misc(struct radeon_device *rdev);
+extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
/*
* evergreen
@@ -299,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
@@ -314,5 +371,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
#endif
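
Editor's note: many of the radeon_atombios.c hunks that follow wrap BIOS-table fields in le16_to_cpu()/le32_to_cpu() so comparisons such as the 0x1936 clock-mask register index also hold on big-endian hosts. The snippet below is an illustrative userspace sketch of that byte-order handling, assuming a hand-rolled demo_le16_to_cpu() helper rather than the kernel's.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_le16_to_cpu(const uint8_t *p)
{
	/* assemble from bytes so the result is host-order independent */
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* pretend these two bytes were read from a BIOS data table */
	const uint8_t raw[2] = { 0x36, 0x19 };	/* little-endian 0x1936 */
	uint16_t reg_index = demo_le16_to_cpu(raw);

	if (reg_index == 0x1936)
		printf("clk mask register index: 0x%04x\n", (unsigned)reg_index);
	return 0;
}
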
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3382d9..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
- uint32_t supported_device);
+ uint32_t supported_device, u16 caps);
/* from radeon_connector.c */
extern void
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
- (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
- (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
- (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
- (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
- gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
@@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
- struct radeon_device *rdev = dev->dev_private;
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*line_mux = 0x90;
}
+ /* mac rv630, rv730, others */
+ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+ }
+
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +431,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+ /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+ * on the laptop and a DVI port on the docking station and
+ * both share the same encoder, hpd pin, and ddc line.
+ * So while the bios table is technically correct,
+ * we drop the DVI port here since xrandr has no concept of
+ * encoders and will try and drive both connectors
+ * with different crtcs which isn't possible on the hardware
+ * side and leaves no crtcs for LVDS or VGA.
+ */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
- struct radeon_gpio_rec gpio;
-
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 6);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ /* actually it's a DVI-D port not DVI-I */
*connector_type = DRM_MODE_CONNECTOR_DVID;
- } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
- (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 7);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ return false;
}
}
@@ -525,6 +533,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_ENCODER_OBJECT_TABLE *enc_obj;
ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +558,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usEncoderObjectTableOffset));
router_obj = (ATOM_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +666,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
-
- radeon_add_atom_encoder(dev,
- encoder_obj,
- le16_to_cpu
- (path->
- usDeviceTag));
+ for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+ u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+ ATOM_ENCODER_CAP_RECORD *cap_record;
+ u16 caps = 0;
+ while (record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_ENCODER_CAP_RECORD_TYPE:
+ cap_record = (ATOM_ENCODER_CAP_RECORD *)
+ record;
+ caps = le16_to_cpu(cap_record->usEncoderCap);
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ radeon_add_atom_encoder(dev,
+ encoder_obj,
+ le16_to_cpu
+ (path->
+ usDeviceTag),
+ caps);
+ }
+ }
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
@@ -995,7 +1028,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_get_encoder_enum(dev,
(1 << i),
dac),
- (1 << i));
+ (1 << i),
+ 0);
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
@@ -1074,6 +1108,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
ATOM_FIRMWARE_INFO_V2_1 info_21;
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1128,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_min = 64800;
else
p1pll->pll_out_min = 20000;
- } else if (p1pll->pll_out_min > 64800) {
- /* Limiting the pll output range is a good thing generally as
- * it limits the number of possible pll combinations for a given
- * frequency presumably to the ones that work best on each card.
- * However, certain duallink DVI monitors seem to like
- * pll combinations that would be limited by this at least on
- * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
- * family.
- */
- p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -1148,8 +1173,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
*p2pll = *p1pll;
/* system clock */
- spll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+ else
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
spll->reference_div = 0;
spll->pll_out_min =
@@ -1171,8 +1200,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
/* memory clock */
- mpll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+ else
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
mpll->reference_div = 0;
mpll->pll_out_min =
@@ -1201,8 +1234,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
if (ASIC_IS_DCE4(rdev)) {
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
- if (rdev->clock.default_dispclk == 0)
- rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ if (rdev->clock.default_dispclk == 0) {
+ if (ASIC_IS_DCE5(rdev))
+ rdev->clock.default_dispclk = 54000; /* 540 Mhz */
+ else
+ rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
}
@@ -1237,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
data_offset);
switch (crev) {
case 1:
- if (igp_info->info.ulBootUpMemoryClock)
+ if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
return true;
break;
case 2:
- if (igp_info->info_2.ulBootUpSidePortClock)
+ if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
return true;
break;
default:
@@ -1337,6 +1374,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
return false;
}
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+ u8 frev, crev;
+ u16 percentage = 0, rate = 0;
+
+ /* get any igp specific overrides */
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+ break;
+ }
+ if (percentage)
+ ss->percentage = percentage;
+ if (rate)
+ ss->rate = rate;
+ }
+}
+
union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO info;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1368,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
for (i = 0; i < num_indices; i++) {
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+ (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1382,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+ (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1396,11 +1470,13 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+ (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ if (rdev->flags & RADEON_IS_IGP)
+ radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
}
@@ -1477,6 +1553,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+ lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+ lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1489,6 +1568,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
else
lvds->linkb = false;
+ /* parse the lcd record table */
+ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+ ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+ ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+ bool bad_record = false;
+ u8 *record = (u8 *)(mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+ while (*record != ATOM_RECORD_END_TYPE) {
+ switch (*record) {
+ case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+ record += sizeof(ATOM_PATCH_RECORD_MODE);
+ break;
+ case LCD_RTS_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_RTS_RECORD);
+ break;
+ case LCD_CAP_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+ break;
+ case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+ int edid_size =
+ max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+ edid = kmalloc(edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+ fake_edid_record->ucFakeEDIDLength);
+
+ if (drm_edid_is_valid(edid))
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ else
+ kfree(edid);
+ }
+ }
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+ lvds->native_mode.width_mm = panel_res_record->usHSize;
+ lvds->native_mode.height_mm = panel_res_record->usVSize;
+ record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+ break;
+ default:
+ DRM_ERROR("Bad LCD record %d\n", *record);
+ bad_record = true;
+ break;
+ }
+ if (bad_record)
+ break;
+ }
+ }
}
return lvds;
}
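
Editor's note: the LCD patch-table hunk above walks a list of variable-size records until ATOM_RECORD_END_TYPE and bails out on an unknown type. The standalone sketch below shows the same record-walking pattern with invented record types and sizes; it is not the driver's table layout.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RECORD_END   0x00
#define DEMO_RECORD_SMALL 0x01	/* 2-byte payload */
#define DEMO_RECORD_LARGE 0x02	/* 4-byte payload */

int main(void)
{
	/* type byte followed by its payload, closed by the end marker */
	const uint8_t table[] = {
		DEMO_RECORD_SMALL, 0x11, 0x22,
		DEMO_RECORD_LARGE, 0xaa, 0xbb, 0xcc, 0xdd,
		DEMO_RECORD_END,
	};
	const uint8_t *record = table;

	while (*record != DEMO_RECORD_END) {
		switch (*record) {
		case DEMO_RECORD_SMALL:
			printf("small record: %02x %02x\n", record[1], record[2]);
			record += 1 + 2;	/* skip type byte + payload */
			break;
		case DEMO_RECORD_LARGE:
			printf("large record: %02x..%02x\n", record[1], record[4]);
			record += 1 + 4;
			break;
		default:
			fprintf(stderr, "bad record %d, stopping\n", *record);
			return 1;		/* mirrors the bad_record bail-out */
		}
	}
	return 0;
}
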
@@ -1740,510 +1872,642 @@ static const char *pp_lib_thermal_controller_names[] = {
"RV6xx",
"RV770",
"adt7473",
+ "NONE",
"External GPIO",
"Evergreen",
- "adt7473 with internal",
-
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
};
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
-void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+ int state_index,
+ u32 misc, u32 misc2)
+{
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+}
+
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
+ u32 misc, misc2 = 0;
+ int num_modes = 0, i;
+ int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+ union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
- u32 misc, misc2 = 0, sclk, mclk;
- union power_info *power_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- struct _ATOM_PPLIB_STATE *power_state;
- int num_modes = 0, i, j;
- int state_index = 0, mode_index = 0;
- struct radeon_i2c_bus_rec i2c_bus;
-
- rdev->pm.default_power_state_index = -1;
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- if (frev < 4) {
- /* add the i2c bus for thermal/fan chip */
- if (power_info->info.ucOverdriveThermalController > 0) {
- DRM_INFO("Possible %s thermal controller at 0x%02x\n",
- thermal_controller_names[power_info->info.ucOverdriveThermalController],
- power_info->info.ucOverdriveControllerAddress >> 1);
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = thermal_controller_names[power_info->info.
- ucOverdriveThermalController];
- info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* add the i2c bus for thermal/fan chip */
+ if (power_info->info.ucOverdriveThermalController > 0) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ /* last mode is usually default, array is low to high */
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- num_modes = power_info->info.ucNumOfPowerModeEntries;
- if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
- num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
- /* last mode is usually default, array is low to high */
- for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- switch (frev) {
- case 1:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 2:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 3:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
- if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
- true;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
- power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
- }
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- /* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[state_index - 1].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index - 1;
- rdev->pm.power_state[state_index - 1].default_clock_mode =
- &rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ }
+ }
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+ ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
- int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
- uint8_t fw_frev, fw_crev;
- uint16_t fw_data_offset, vddc = 0;
- union firmware_info *firmware_info;
- ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
-
- if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
- &fw_frev, &fw_crev, &fw_data_offset)) {
- firmware_info =
- (union firmware_info *)(mode_info->atom_context->bios +
- fw_data_offset);
- vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
+ }
+ }
+}
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
- } else {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
+static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+ u16 vddc = 0;
- }
- }
- /* first mode is usually default, followed by low to high */
- for (i = 0; i < power_info->info_4.ucNumStates; i++) {
- mode_index = 0;
- power_state = (struct _ATOM_PPLIB_STATE *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usStateArrayOffset) +
- i * power_info->info_4.ucStateEntrySize);
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
- (power_state->ucNonClockStateIndex *
- power_info->info_4.ucNonClockSize));
- for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
- if (rdev->flags & RADEON_IS_IGP) {
- struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
- sclk |= clock_info->ucLowEngineClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
- continue;
- /* voltage works differently on IGPs */
- mode_index++;
- } else if (ASIC_IS_DCE4(rdev)) {
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- /* XXX usVDDCI */
- mode_index++;
- } else {
- struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- mode_index++;
- }
- }
- rdev->pm.power_state[state_index].num_clock_modes = mode_index;
- if (mode_index) {
- misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- rdev->pm.power_state[state_index].pcie_lanes =
- ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
- ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
- switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- }
- rdev->pm.power_state[state_index].flags = 0;
- if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- rdev->pm.power_state[state_index].flags |=
- RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- /* patch the table values with the default sclk/mclk from firmware info */
- for (j = 0; j < mode_index; j++) {
- rdev->pm.power_state[state_index].clock_info[j].mclk =
- rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[j].sclk =
- rdev->clock.default_sclk;
- if (vddc)
- rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
- vddc;
- }
- }
- state_index++;
- }
- }
- /* if multiple clock modes, mark the lowest as no display */
- for (i = 0; i < state_index; i++) {
- if (rdev->pm.power_state[i].num_clock_modes > 1)
- rdev->pm.power_state[i].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- /* first mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[0].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = 0;
- rdev->pm.power_state[0].default_clock_mode =
- &rdev->pm.power_state[0].clock_info[0];
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ }
+
+ return vddc;
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+ int j;
+ u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+ u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ if (ASIC_IS_DCE5(rdev)) {
+ /* NI chips post without MC ucode, so default clocks are strobe mode only */
+ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+ rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+ rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ } else {
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
}
}
+ }
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ union pplib_clock_info *clock_info)
+{
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
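+ /* voltage works differently on IGPs */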
+ if (rdev->family >= CHIP_PALM) {
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ } else {
+ sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+ sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+ mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->evergreen.usVDDC);
} else {
- /* add the default mode */
- rdev->pm.power_state[state_index].type =
+ sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+ sclk |= clock_info->r600.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+ mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->r600.usVDDC);
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ return false;
+ } else {
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ return false;
+ }
+ return true;
+}
+
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ power_info->pplib.ucNumStates, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ /* first mode is usually default, followed by low to high */
+ for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+ i * power_info->pplib.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+ (power_state->v1.ucNonClockStateIndex *
+ power_info->pplib.ucNonClockSize));
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- rdev->pm.power_state[state_index].pcie_lanes = 16;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].flags = 0;
- state_index++;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, non_clock_array_index, clock_array_index;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ struct StateArray *state_array;
+ struct ClockInfoArray *clock_info_array;
+ struct NonClockInfoArray *non_clock_info_array;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ state_array = (struct StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)&state_array->states[i];
+ /* XXX this might be an inagua bug... */
+ non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int state_index = 0;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ switch (frev) {
+ case 1:
+ case 2:
+ case 3:
+ state_index = radeon_atombios_parse_power_table_1_3(rdev);
+ break;
+ case 4:
+ case 5:
+ state_index = radeon_atombios_parse_power_table_4_5(rdev);
+ break;
+ case 6:
+ state_index = radeon_atombios_parse_power_table_6(rdev);
+ break;
+ default:
+ break;
+ }
+ } else {
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
+ }
}
rdev->pm.num_power_states = state_index;
@@ -2269,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- return args.ulReturnEngineClock;
+ return le32_to_cpu(args.ulReturnEngineClock);
}
uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2278,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- return args.ulReturnMemoryClock;
+ return le32_to_cpu(args.ulReturnMemoryClock);
}
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2287,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
- args.ulTargetEngineClock = eng_clock; /* 10 khz */
+ args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -2301,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
if (rdev->flags & RADEON_IS_IGP)
return;
- args.ulTargetMemoryClock = mem_clock; /* 10 khz */
+ args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -2359,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
/* tell the bios not to handle mode switching */
- bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2410,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
else
bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
- if (lock)
+ if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
- else
+ bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+ } else {
bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+ bios_6_scratch |= ATOM_S6_ACC_MODE;
+ }
if (rdev->family >= CHIP_R600)
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b50dcf5..1aba85cad1a8 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return true;
}
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -416,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP)
return igp_read_bios_from_vram(rdev);
+ else if (rdev->family >= CHIP_BARTS)
+ return ni_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_RV770)
return r700_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b8075f6e7..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
return true;
}
+/* this is used for atom LCDs as well */
struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
if (rdev->mode_info.bios_hardcoded_edid)
return rdev->mode_info.bios_hardcoded_edid;
@@ -1503,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
+ } else if ((rdev->pdev->device == 0x4150) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4150)) {
+ /* Mac G5 9600 */
+ rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -2021,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
&hpd);
break;
+ case CT_MAC_G5_9600:
+ DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* ADC - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -2441,6 +2489,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
rdev->pm.default_power_state_index = -1;
+ /* allocate 2 power states */
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+ if (!rdev->pm.power_state) {
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = 0;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ return;
+ }
+
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
if (offset) {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a7459e..22b7e3dc0eca 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
+ /* add the width/height from vbios tables if available */
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
@@ -1216,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1256,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1299,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a2705f12..35b5eb8fbe2a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
+ p->relocs[i].lobj.rdomain = r->read_domains;
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
- INIT_LIST_HEAD(&p->relocs[i].lobj.list);
radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ &p->validated);
}
}
return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error && parser->ib) {
- radeon_bo_list_fence(&parser->validated, parser->ib->fence);
- }
- radeon_bo_list_unreserve(&parser->validated);
+
+ if (!error && parser->ib)
+ ttm_eu_fence_buffer_objects(&parser->validated,
+ parser->ib->fence);
+ else
+ ttm_eu_backoff_reservation(&parser->validated);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a13f48..4954e2d6ffa2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@ static const char radeon_family_name[][16] = {
"JUNIPER",
"CYPRESS",
"HEMLOCK",
+ "PALM",
+ "BARTS",
+ "TURKS",
+ "CAICOS",
"LAST",
};
@@ -224,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true;
}
}
+ /* always use writeback/events on NI */
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->wb.enabled = true;
+ rdev->wb.use_event = true;
+ }
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
@@ -335,7 +344,12 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE41(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@ void radeon_check_arguments(struct radeon_device *rdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
- rdev->powered_down = false;
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_resume_kms(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_suspend_kms(dev, pmm);
- /* don't suspend or resume card normally */
- rdev->powered_down = true;
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -704,11 +718,6 @@ int radeon_device_init(struct radeon_device *rdev,
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- /* setup workqueue */
- rdev->wq = create_workqueue("radeon");
- if (rdev->wq == NULL)
- return -ENOMEM;
-
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
@@ -773,6 +782,7 @@ int radeon_device_init(struct radeon_device *rdev,
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
vga_switcheroo_register_client(rdev->pdev,
radeon_switcheroo_set_state,
+ NULL,
radeon_switcheroo_can_switch);
r = radeon_init(rdev);
@@ -806,7 +816,6 @@ void radeon_device_fini(struct radeon_device *rdev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
- destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
@@ -835,7 +844,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* turn off display hw */
@@ -882,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- acquire_console_sem();
+ console_lock();
radeon_fbdev_set_suspend(rdev, 1);
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -893,14 +902,14 @@ int radeon_resume_kms(struct drm_device *dev)
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- acquire_console_sem();
+ console_lock();
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev)) {
- release_console_sem();
+ console_unlock();
return -1;
}
pci_set_master(dev->pdev);
@@ -911,7 +920,7 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_restore_bios_scratch_regs(rdev);
radeon_fbdev_set_suspend(rdev, 0);
- release_console_sem();
+ console_unlock();
/* reset hpd state */
radeon_hpd_init(rdev);
@@ -927,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev)
int radeon_gpu_reset(struct radeon_device *rdev)
{
int r;
+ int resched;
radeon_save_bios_scratch_regs(rdev);
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
r = radeon_asic_reset(rdev);
@@ -937,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
return 0;
}
/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6c063c..3e7e7f9eb781 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
-static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
}
}
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
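+ /* program the 256 entry hardware gamma LUT */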
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+
+}
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_DCE4(rdev))
- evergreen_crtc_load_lut(crtc);
+ if (ASIC_IS_DCE5(rdev))
+ dce5_crtc_load_lut(crtc);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_crtc_load_lut(crtc);
else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
@@ -183,12 +245,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
kfree(radeon_crtc);
}
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+ struct radeon_unpin_work *work =
+ container_of(__work, struct radeon_unpin_work, work);
+ int r;
+
+ /* unpin of the old buffer */
+ r = radeon_bo_reserve(work->old_rbo, false);
+ if (likely(r == 0)) {
+ r = radeon_bo_unpin(work->old_rbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to unpin buffer after flip\n");
+ }
+ radeon_bo_unreserve(work->old_rbo);
+ } else
+ DRM_ERROR("failed to reserve buffer after flip\n");
+ kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_unpin_work *work;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ u32 update_pending;
+ int vpos, hpos;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+ !radeon_fence_signaled(work->fence)) {
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+ /* New pageflip, or just completion of a previous one? */
+ if (!radeon_crtc->deferred_flip_completion) {
+ /* do the flip (mmio) */
+ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+ } else {
+ /* This is just a completion of a flip queued in crtc
+ * at last invocation. Make sure we go directly to
+ * completion routine.
+ */
+ update_pending = 0;
+ radeon_crtc->deferred_flip_completion = 0;
+ }
+
+ /* Has the pageflip already completed in crtc, or is it certain
+ * to complete in this vblank?
+ */
+ if (update_pending &&
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ &vpos, &hpos)) &&
+ (vpos >= 0) &&
+ (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+ /* crtc didn't flip in this target vblank interval,
+ * but flip is pending in crtc. It will complete it
+ * in next vblank interval, so complete the flip at
+ * next vblank irq.
+ */
+ radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* Pageflip (will be) certainly completed in this vblank. Clean up. */
+ radeon_crtc->unpin_work = NULL;
+
+ /* wakeup userspace */
+ if (work->event) {
+ e = work->event;
+ e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+ drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ radeon_fence_unref(&work->fence);
+ radeon_post_page_flip(work->rdev, work->crtc_id);
+ schedule_work(&work->work);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ struct radeon_fence *fence;
+ struct radeon_unpin_work *work;
+ unsigned long flags;
+ u32 tiling_flags, pitch_pixels;
+ u64 base;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ r = radeon_fence_create(rdev, &fence);
+ if (unlikely(r != 0)) {
+ kfree(work);
+ DRM_ERROR("flip queue: failed to create fence.\n");
+ return -ENOMEM;
+ }
+ work->event = event;
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->fence = radeon_fence_ref(fence);
+ old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ /* schedule unpin of the old buffer */
+ obj = old_radeon_fb->obj;
+ rbo = obj->driver_private;
+ work->old_rbo = rbo;
+ INIT_WORK(&work->work, radeon_unpin_work_func);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (radeon_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ radeon_fence_unref(&fence);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ radeon_crtc->unpin_work = work;
+ radeon_crtc->deferred_flip_completion = 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* pin the new buffer */
+ obj = new_radeon_fb->obj;
+ rbo = obj->driver_private;
+
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+ work->old_rbo, rbo);
+
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ /* crtc offset is from display base addr not FB location */
+ base -= radeon_crtc->legacy_display_base_addr;
+ pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev)) {
+ base &= ~0x7ff;
+ } else {
+ int byteshift = fb->bits_per_pixel >> 4;
+ int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+ }
+ } else {
+ int offset = crtc->y * pitch_pixels + crtc->x;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ default:
+ offset *= 1;
+ break;
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ }
+ base += offset;
+ }
+ base &= ~7;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* update crtc fb */
+ crtc->fb = fb;
+
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
+
+ /* 32 ought to cover us */
+ r = radeon_ring_lock(rdev, 32);
+ if (r) {
+ DRM_ERROR("failed to lock the ring before flip\n");
+ goto pflip_cleanup2;
+ }
+
+ /* emit the fence */
+ radeon_fence_emit(rdev, fence);
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ /* fire the ring */
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+
+pflip_cleanup2:
+ drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_unpin(rbo);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ radeon_fence_unref(&fence);
+ kfree(work);
+
+ return r;
+}
+
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
+ .page_flip = radeon_crtc_page_flip,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_legacy_init_crtc(dev, radeon_crtc);
}
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -260,6 +582,8 @@ static const char *encoder_names[34] = {
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
+ "NUTMEG",
+ "TRAVIS",
};
static const char *connector_names[15] = {
@@ -417,9 +741,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
- /* some servers provide a hardcoded edid in rom for KVMs */
- if (!radeon_connector->edid)
- radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -448,6 +780,125 @@ static int radeon_ddc_dump(struct drm_connector *connector)
return ret;
}
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+ u32 target_clock,
+ u32 post_div,
+ u32 ref_div,
+ u32 *fb_div,
+ u32 *frac_fb_div)
+{
+ u32 tmp = post_div * ref_div;
+
+ tmp *= target_clock;
+ *fb_div = tmp / pll->reference_freq;
+ *frac_fb_div = tmp % pll->reference_freq;
+
+ if (*fb_div > pll->max_feedback_div)
+ *fb_div = pll->max_feedback_div;
+ else if (*fb_div < pll->min_feedback_div)
+ *fb_div = pll->min_feedback_div;
+}
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+ u32 target_clock)
+{
+ u32 vco, post_div, tmp;
+
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ return pll->post_div;
+
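+ /* pick which VCO limit to target based on the pll flags */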
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_min;
+ else
+ vco = pll->pll_out_min;
+ } else {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_max;
+ else
+ vco = pll->pll_out_max;
+ }
+
+ post_div = vco / target_clock;
+ tmp = vco % target_clock;
+
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (tmp)
+ post_div++;
+ } else {
+ if (!tmp)
+ post_div--;
+ }
+
+ if (post_div > pll->max_post_div)
+ post_div = pll->max_post_div;
+ else if (post_div < pll->min_post_div)
+ post_div = pll->min_post_div;
+
+ return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ u32 freq,
+ u32 *dot_clock_p,
+ u32 *fb_div_p,
+ u32 *frac_fb_div_p,
+ u32 *ref_div_p,
+ u32 *post_div_p)
+{
+ u32 target_clock = freq / 10;
+ u32 post_div = avivo_get_post_div(pll, target_clock);
+ u32 ref_div = pll->min_ref_div;
+ u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ ref_div = pll->reference_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
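+ /* express the remainder as 1/100ths of a feedback divider step, then round it into 1/10ths, carrying into fb_div */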
+ frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+ if (frac_fb_div >= 5) {
+ frac_fb_div -= 5;
+ frac_fb_div = frac_fb_div / 10;
+ frac_fb_div++;
+ }
+ if (frac_fb_div >= 10) {
+ fb_div++;
+ frac_fb_div = 0;
+ }
+ } else {
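+ /* integer-only feedback divider: round fb_div to the nearest integer and step ref_div until the result is within 0.1% (MAX_TOLERANCE) of the target clock */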
+ while (ref_div <= pll->max_ref_div) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+ &fb_div, &frac_fb_div);
+ if (frac_fb_div >= (pll->reference_freq / 2))
+ fb_div++;
+ frac_fb_div = 0;
+ tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+ tmp = (tmp * 10000) / target_clock;
+
+ if (tmp > (10000 + MAX_TOLERANCE))
+ ref_div++;
+ else if (tmp >= (10000 - MAX_TOLERANCE))
+ break;
+ else
+ ref_div++;
+ }
+ }
+
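+ /* resulting dot clock = reference_freq * (fb_div + frac_fb_div / 10) / (ref_div * post_div) */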
+ *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+ (ref_div * post_div * 10);
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = frac_fb_div;
+ *ref_div_p = ref_div;
+ *post_div_p = post_div;
+ DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
+
+/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
uint64_t mod;
@@ -458,13 +909,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -494,6 +945,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
pll_out_max = pll->pll_out_max;
}
+ if (pll_out_min > 64800)
+ pll_out_min = 64800;
+
if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
@@ -633,6 +1087,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
*frac_fb_div_p = best_frac_feedback_div;
*ref_div_p = best_ref_div;
*post_div_p = best_post_div;
+ DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+ best_ref_div, best_post_div);
+
}
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -849,7 +1307,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->ddev->mode_config.max_width = 16384;
+ rdev->ddev->mode_config.max_height = 16384;
+ } else if (ASIC_IS_AVIVO(rdev)) {
rdev->ddev->mode_config.max_width = 8192;
rdev->ddev->mode_config.max_height = 8192;
} else {
@@ -1019,7 +1480,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
/*
* Retrieve current video scanout position of crtc on a given gpu.
*
- * \param rdev Device to query.
+ * \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1492,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \return Flags, or'ed together as follows:
*
- * RADEON_SCANOUTPOS_VALID = Query successfull.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
+ struct radeon_device *rdev = dev->dev_private;
+
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else if (ASIC_IS_AVIVO(rdev)) {
if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1575,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1585,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
}
@@ -1133,13 +1596,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) {
/* Yes: Decode. */
- ret |= RADEON_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
}
else {
/* No: Fake something reasonable which gives at least ok results. */
- vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0;
}
@@ -1155,7 +1618,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
- vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal;
}
@@ -1164,7 +1627,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* In vblank? */
if (in_vbl)
- ret |= RADEON_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_INVBL;
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f2d048..275b26a708d6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ * 2.8.0 - pageflip support, r500 US_FORMAT regs, r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 7
+#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -97,6 +104,7 @@ int radeon_tv = 1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -140,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -296,6 +307,8 @@ static struct drm_driver kms_driver = {
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+ .get_scanout_position = radeon_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = radeon_debugfs_init,
.debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
#define R600_CP_RB_CNTL 0xc104
# define R600_RB_BUFSZ(x) ((x) << 0)
# define R600_RB_BLKSZ(x) ((x) << 8)
+# define R600_BUF_SWAP_32BIT (2 << 16)
# define R600_RB_NO_UPDATE (1 << 27)
# define R600_RB_RPTR_WR_ENA (1 << 31)
#define R600_CP_RB_RPTR_WR 0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943df966b..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
- * DCE 4.0
- * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -737,6 +743,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -752,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
}
/* no dig encoder assigned */
@@ -784,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
} else {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -823,6 +849,7 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -883,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = connector_object_id;
+ args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
@@ -917,10 +944,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
- if (is_dp && rdev->clock.dp_extclk)
- args.v3.acConfig.ucRefClkSource = 2; /* external src */
- else
- args.v3.acConfig.ucRefClkSource = pll_id;
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = 3; /* external src */
+ else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+ } else {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -1028,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
if (!ASIC_IS_DCE4(rdev))
return;
- if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) ||
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
return;
@@ -1044,6 +1079,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
static void
@@ -1054,6 +1090,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
@@ -1158,6 +1227,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
+ bool is_dce5_dac = false;
+ bool is_dce5_dvo = false;
memset(&args, 0, sizeof(args));
@@ -1180,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (ASIC_IS_DCE3(rdev))
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dvo = true;
+ else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dac = true;
+ else {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
+ } else if (is_dce5_dac) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (is_dce5_dvo) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
- action = ATOM_ENABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
+ else
+ action = ATOM_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- action = ATOM_DISABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+ else
+ action = ATOM_DISABLE;
break;
}
atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1465,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
- /* XXX check DCE4 */
- if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
- if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
- AVIVO_D1MODE_INTERLEAVE_EN);
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ } else {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ }
}
}
@@ -1483,27 +1598,35 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+ /* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (ASIC_IS_DCE41(rdev)) {
if (dig->linkb)
return 1;
else
return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
}
}
@@ -1610,7 +1733,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
if (ext_encoder) {
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2056,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1970,6 +2102,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2162,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066dcabd..1ca55eb09ad3 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@ enum radeon_family {
CHIP_JUNIPER,
CHIP_CYPRESS,
CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32be5e8..cc44bdfec80f 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
+ int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd->pitch * mode_cmd->height;
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -247,8 +250,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb281dfaf..171b0b2e3a64 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
} else
radeon_fence_ring_emit(rdev, fence);
+ trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@ retry:
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
}
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and fence isn't
* signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f5..9ec830c77af0 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
unsigned i;
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@@ -101,16 +101,23 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
}
int radeon_irq_kms_init(struct radeon_device *rdev)
{
+ int i;
int r = 0;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
spin_lock_init(&rdev->irq.sw_lock);
+ for (i = 0; i < rdev->num_crtc; i++)
+ spin_lock_init(&rdev->irq.pflip_lock[i]);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
@@ -121,7 +128,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
+ ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
(!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
@@ -148,6 +155,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
+ flush_work_sync(&rdev->hotplug_work);
}
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +183,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
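+/* Per-crtc pageflip interrupts are refcounted: the first get enables them, the last put disables them (only while irqs are installed). */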
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
+ rdev->irq.pflip[crtc] = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
+ rdev->irq.pflip[crtc] = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c6ebbd..8387d32caaa7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@ out:
return r;
}
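+/* Grant or revoke exclusive use of a feature for a client: value 1 requests ownership for applier, 0 releases it; *value is rewritten to report whether applier now holds it. */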
+static void radeon_set_filp_rights(struct drm_device *dev,
+ struct drm_file **owner,
+ struct drm_file *applier,
+ uint32_t *value)
+{
+ mutex_lock(&dev->struct_mutex);
+ if (*value == 1) {
+ /* wants rights */
+ if (!*owner)
+ *owner = applier;
+ } else if (*value == 0) {
+ /* revokes rights */
+ if (*owner == applier)
+ *owner = NULL;
+ }
+ *value = *owner == applier ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+}
/*
- * Userspace get informations ioctl
+ * Userspace get information ioctl
*/
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
@@ -173,18 +191,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
- if (value == 1) {
- /* wants hyper-z */
- if (!rdev->hyperz_filp)
- rdev->hyperz_filp = filp;
- } else if (value == 0) {
- /* revokes hyper-z */
- if (rdev->hyperz_filp == filp)
- rdev->hyperz_filp = NULL;
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+ break;
+ case RADEON_INFO_WANT_CMASK:
+ /* The same logic as Hyper-Z. */
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+ return -EINVAL;
}
- value = rdev->hyperz_filp == filp ? 1 : 0;
- mutex_unlock(&dev->struct_mutex);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
+ break;
+ case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+ /* return clock value in KHz */
+ value = rdev->clock.spll.reference_freq * 10;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +222,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev->powered_down)
- return -EINVAL;
return 0;
}
@@ -232,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
struct radeon_device *rdev = dev->dev_private;
if (rdev->hyperz_filp == file_priv)
rdev->hyperz_filp = NULL;
+ if (rdev->cmask_filp == file_priv)
+ rdev->cmask_filp = NULL;
}
/*
@@ -277,6 +294,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
radeon_irq_set(rdev);
}
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get associated drm_crtc: */
+ drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags,
+ drmcrtc);
+}
/*
* IOCTL.
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e6384d40..cf0638c3b7c7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
DRM_DEBUG_KMS("\n");
if (!use_bios_divs) {
- radeon_compute_pll(pll, mode->clock,
- &freq, &feedback_div, &frac_fb_div,
- &reference_div, &post_divider);
+ radeon_compute_pll_legacy(pll, mode->clock,
+ &freq, &feedback_div, &frac_fb_div,
+ &reference_div, &post_divider);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f9e059..a670caaee29e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
#define RADEON_PLL_IS_LCD (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
struct radeon_pll {
/* reference frequency */
@@ -208,6 +209,7 @@ enum radeon_connector_table {
CT_EMAC,
CT_RN50_POWER,
CT_MAC_X800,
+ CT_MAC_G5_9600,
};
enum radeon_dvo_chip {
@@ -277,6 +279,9 @@ struct radeon_crtc {
fixed20_12 hsc;
struct drm_display_mode native_mode;
int pll_id;
+ /* page flipping */
+ struct radeon_unpin_work *unpin_work;
+ int deferred_flip_completion;
};
struct radeon_encoder_primary_dac {
@@ -376,6 +381,7 @@ struct radeon_encoder {
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
+ u16 caps;
};
struct radeon_connector_atom_dig {
@@ -442,10 +448,6 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
-/* radeon_get_crtc_scanoutpos() return flags */
-#define RADEON_SCANOUTPOS_VALID (1 << 0)
-#define RADEON_SCANOUTPOS_INVBL (1 << 1)
-#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -510,13 +512,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
-extern void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p);
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ u32 freq,
+ u32 *dot_clock_p,
+ u32 *fb_div_p,
+ u32 *frac_fb_div_p,
+ u32 *ref_div_p,
+ u32 *post_div_p);
extern void radeon_setup_encoder_clones(struct drm_device *dev);
@@ -562,11 +572,12 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
-extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -662,4 +673,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d0049aa5..7d6b8e88f746 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@ retry:
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&bo->rdev->gem.mutex);
}
+ trace_radeon_bo_create(bo);
return 0;
}
@@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head)
{
if (lobj->wdomain) {
- list_add(&lobj->list, head);
+ list_add(&lobj->tv.head, head);
} else {
- list_add_tail(&lobj->list, head);
- }
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
- int r;
-
- list_for_each_entry(lobj, head, list){
- r = radeon_bo_reserve(lobj->bo, false);
- if (unlikely(r != 0))
- return r;
- lobj->reserved = true;
- }
- return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
-
- list_for_each_entry(lobj, head, list) {
- /* only unreserve object we successfully reserved */
- if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
- radeon_bo_unreserve(lobj->bo);
+ list_add_tail(&lobj->tv.head, head);
}
}
@@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head)
u32 domain;
int r;
- list_for_each_entry(lobj, head, list) {
- lobj->reserved = false;
- }
- r = radeon_bo_list_reserve(head);
+ r = ttm_eu_reserve_buffers(head);
if (unlikely(r != 0)) {
return r;
}
- list_for_each_entry(lobj, head, list) {
+ list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head)
return 0;
}
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
-
- list_for_each_entry(lobj, head, list) {
- bo = lobj->bo;
- spin_lock(&bo->tbo.lock);
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- spin_unlock(&bo->tbo.lock);
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
- }
-}
-
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244a..22d4c237dea5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
- spin_lock(&bo->tbo.lock);
+ spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.lock);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef32c68..2aed03bde4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (radeon_gui_idle(rdev)) {
sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk;
- if (sclk > rdev->clock.default_sclk)
- sclk = rdev->clock.default_sclk;
+ if (sclk > rdev->pm.default_sclk)
+ sclk = rdev->pm.default_sclk;
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
- if (mclk > rdev->clock.default_mclk)
- mclk = rdev->clock.default_mclk;
+ if (mclk > rdev->pm.default_mclk)
+ mclk = rdev->pm.default_mclk;
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
- }
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
DRM_ERROR("invalid power method!\n");
goto fail;
@@ -437,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
- u32 temp;
+ int temp;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
@@ -447,8 +440,12 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
temp = rv770_get_temp(rdev);
break;
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
+ case THERMAL_TYPE_SUMO:
+ temp = sumo_get_temp(rdev);
+ break;
default:
temp = 0;
break;
@@ -487,6 +484,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
void radeon_pm_suspend(struct radeon_device *rdev)
{
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
- flush_wq = true;
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_sclk = rdev->clock.default_sclk;
- rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
@@ -564,6 +567,8 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.default_sclk = rdev->clock.default_sclk;
+ rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_combios_get_power_modes(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
}
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
if (ret)
return ret;
+
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->acpi_nb.notifier_call = radeon_acpi_event;
register_acpi_notifier(&rdev->acpi_nb);
#endif
- INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -609,25 +624,20 @@ int radeon_pm_init(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- /* cancel work */
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
/* reset default clocks */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -636,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
#endif
}
+ if (rdev->pm.power_state)
+ kfree(rdev->pm.power_state);
+
radeon_hwmon_fini(rdev);
}
@@ -686,12 +699,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
@@ -720,9 +733,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
- if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
- !(vbl_status & RADEON_SCANOUTPOS_INVBL))
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+ !(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
@@ -796,8 +809,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
radeon_pm_set_clocks(rdev);
}
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +827,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 64928814de53..ec93a75369e6 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
#include "r500_reg.h"
#include "r600_reg.h"
#include "evergreen_reg.h"
+#include "ni_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -320,6 +321,15 @@
# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
+# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
+# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
+# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
#define RADEON_CACHE_CNTL 0x1724
#define RADEON_CACHE_LINE 0x0f0c /* PCI */
@@ -365,6 +375,8 @@
#define RADEON_CONFIG_APER_SIZE 0x0108
#define RADEON_CONFIG_BONDS 0x00e8
#define RADEON_CONFIG_CNTL 0x00e0
+# define RADEON_CFG_VGA_RAM_EN (1 << 8)
+# define RADEON_CFG_VGA_IO_DIS (1 << 9)
# define RADEON_CFG_ATI_REV_A11 (0 << 16)
# define RADEON_CFG_ATI_REV_A12 (1 << 16)
# define RADEON_CFG_ATI_REV_A13 (2 << 16)
@@ -422,6 +434,7 @@
# define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0
@@ -509,6 +522,8 @@
# define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
#define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 000000000000..eafd8160a155
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+ TP_PROTO(struct radeon_bo *bo),
+ TP_ARGS(bo),
+ TP_STRUCT__entry(
+ __field(struct radeon_bo *, bo)
+ __field(u32, pages)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->pages = bo->tbo.num_pages;
+ ),
+ TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 000000000000..8175993df84d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b6a1d4..e5b2cf10cbf4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -787,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].show = &radeon_mm_dump_table;
radeon_mem_types_list[i].driver_features = 0;
if (i == 0)
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
else
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
}
/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd39d787..9177f9191837 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@ evergreen 0x9400
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x000286F8 GDS_ADDR_SIZE
+0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d92005..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4114 SU_TEX_WRAP_PS3
0x4118 PS3_ENABLE
@@ -304,6 +303,22 @@ rv515 0x6d40
0x4630 US_CODE_ADDR
0x4634 US_CODE_RANGE
0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
@@ -445,9 +460,7 @@ rv515 0x6d40
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -468,9 +481,6 @@ rv515 0x6d40
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -480,4 +490,5 @@ rv515 0x6d40
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5512e4e5e636..c76283d9eb3d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE (1 << 3)
+
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4) |
- 0xc;
+ RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
rdev->gart.table.ram.ptr[i] = entry;
return 0;
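
The new RS400_PTE_* names simply decompose the old magic constant: 0xc is bit 2 (writeable) plus bit 3 (readable). A standalone sketch of the page-table-entry construction with a hypothetical 40-bit DMA address:

#include <stdint.h>
#include <stdio.h>

#define RS400_PAGE_MASK		(~0xfffULL)	/* assumes 4 KiB pages */
#define RS400_PTE_WRITEABLE	(1u << 2)
#define RS400_PTE_READABLE	(1u << 3)

static uint32_t rs400_make_pte(uint64_t addr)
{
	/* Low 32 bits hold the page address, address bits [39:32] land in
	 * entry bits [11:4], and the access flags replace the old literal 0xc. */
	return (uint32_t)(addr & RS400_PAGE_MASK) |
	       (uint32_t)(((addr >> 32) & 0xff) << 4) |
	       RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
}

int main(void)
{
	printf("pte = 0x%08x\n", rs400_make_pte(0x123456789000ULL));	/* 0x5678934c */
	printf("old 0xc == 0x%x\n", RS400_PTE_WRITEABLE | RS400_PTE_READABLE);
	return 0;
}
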
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 2)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "rs400: Failed to wait MC idle while "
- "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
+ "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
}
}
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
- tmp = RREG32_MC(0x100);
+ tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
- tmp = RREG32(0x134);
+ tmp = RREG32(RS690_HDP_FB_LOCATION);
seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
} else {
tmp = RREG32(RADEON_AGP_BASE);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6b..5afe294ed51f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -289,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
int rs600_asic_reset(struct radeon_device *rdev)
{
- u32 status, tmp;
-
struct rv515_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
- /* Stops all mc clients */
- rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -342,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
rv515_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
/*
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
}
if (G_000044_DISPLAY_INT_STAT(irqs)) {
- *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+ rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1));
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else {
- *r500_disp_int = 0;
+ rdev->irq.stat_regs.r500.disp_int = 0;
}
if (irqs) {
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
void rs600_irq_disable(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
mdelay(1);
- rs600_irq_ack(rdev, &tmp);
+ rs600_irq_ack(rdev);
}
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status, msi_rearm;
- uint32_t r500_disp_int;
+ u32 status, msi_rearm;
bool queue_hotplug = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
- status = rs600_irq_ack(rdev, &r500_disp_int);
- if (!status && !r500_disp_int) {
+ status = rs600_irq_ack(rdev);
+ if (!status && !rdev->irq.stat_regs.r500.disp_int) {
return IRQ_NONE;
}
- while (status || r500_disp_int) {
+ while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
- if (G_000044_SW_INT(status))
+ if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev);
+ }
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
- status = rs600_irq_ack(rdev, &r500_disp_int);
+ status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..6638c8e4c81b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
switch (crev) {
case 1:
tmp.full = dfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- if (info->info.usK8MemoryClock)
+ if (le16_to_cpu(info->info.usK8MemoryClock))
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
else if (rdev->clock.default_mclk) {
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
break;
case 2:
tmp.full = dfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- if (info->info_v2.ulBootUpUMAClock)
- rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+ if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+ rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
else if (rdev->clock.default_mclk)
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
else
rdev->pm.igp_system_mclk.full = dfixed_const(66700);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d569f41f4ae..64b57af93714 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev)
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(0x170C, 0));
- radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+ radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
rv515_vga_render_disable(rdev);
r420_pipes_init(rdev);
- gb_pipe_select = RREG32(0x402C);
- tmp = RREG32(0x170C);
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
pipe_select_current = (tmp >> 2) & 3;
tmp = (1 << pipe_select_current) |
(((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8cee33..d8ba67690656 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,20 +41,60 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ if (radeon_crtc->crtc_id) {
+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ } else {
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ }
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
/* get temperature in millidegrees */
-u32 rv770_get_temp(struct radeon_device *rdev)
+int rv770_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
- u32 actual_temp = 0;
-
- if ((temp >> 9) & 1)
- actual_temp = 0;
- else
- actual_temp = (temp >> 1) & 0xff;
-
- return actual_temp * 1000;
+ int actual_temp;
+
+ if (temp & 0x400)
+ actual_temp = -256;
+ else if (temp & 0x200)
+ actual_temp = 255;
+ else if (temp & 0x100) {
+ actual_temp = temp & 0x1ff;
+ actual_temp |= ~0x1ff;
+ } else
+ actual_temp = temp & 0xff;
+
+ return (actual_temp * 1000) / 2;
}
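
The reworked temperature read now returns a signed value: the raw ASIC_T field is sign-extended when bit 8 is set, the 0x400 and 0x200 patterns are mapped to the extremes, and each count is worth 0.5 degrees, so the result is reported in millidegrees instead of being zeroed whenever bit 9 was set. A standalone transcription of the decode with a couple of sample raw values:

#include <stdint.h>
#include <stdio.h>

/* Same logic as the new rv770_get_temp(), minus the register read. */
static int rv770_decode_temp(uint32_t temp)
{
	int actual_temp;

	if (temp & 0x400)
		actual_temp = -256;		/* special pattern: report -256 */
	else if (temp & 0x200)
		actual_temp = 255;		/* special pattern: report 255 */
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;		/* sign-extend the 9-bit value */
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* millidegrees Celsius */
}

int main(void)
{
	printf("%d\n", rv770_decode_temp(0x080));	/* 128 * 0.5 ->  64000 */
	printf("%d\n", rv770_decode_temp(0x1f0));	/* -16 * 0.5 ->  -8000 */
	return 0;
}
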
void rv770_pm_misc(struct radeon_device *rdev)
@@ -281,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return -EINVAL;
r700_cp_stop(rdev);
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
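
The CP_RB_CNTL write now uses the RB_BLKSZ()/RB_BUFSZ() field macros instead of raw shifts and, on big-endian builds, also sets the 32-bit byte-swap mode for ring-buffer fetches. A standalone sketch composing the same value, with the macro definitions mirrored from the rv770d.h hunk further down:

#include <stdint.h>
#include <stdio.h>

/* Mirrored from rv770d.h (see the header hunk below). */
#define RB_BUFSZ(x)	((x) << 0)
#define RB_BLKSZ(x)	((x) << 8)
#define RB_NO_UPDATE	(1 << 27)
#define BUF_SWAP_32BIT	(2 << 16)

int main(void)
{
	uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

#ifdef __BIG_ENDIAN	/* kernel convention: defined only on big-endian builds */
	cntl |= BUF_SWAP_32BIT;
#endif
	/* Identical to the old literal (15 << 8) | (3 << 0) on little-endian. */
	printf("CP_RB_CNTL = 0x%08x\n", cntl);
	return 0;
}
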
@@ -489,6 +533,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer, mc_shared_chremap, tmp;
+ bool force_no_swizzle;
+
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ case 2:
+ case 3:
+ if (force_no_swizzle)
+ mc_shared_chremap = 0x00fac688;
+ else
+ mc_shared_chremap = 0x00bbc298;
+ break;
+ }
+
+ if (rdev->family == CHIP_RV740)
+ tcp_chan_steer = 0x00ef2a60;
+ else
+ tcp_chan_steer = 0x00fac688;
+
+ WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -688,6 +775,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ rv770_program_channel_remap(rdev);
+
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1045,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
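
On AGP parts the new r700_vram_gtt_location() packs VRAM into whichever gap around the GTT aperture is larger (below it, size_bf, or above it, size_af) and clamps the reported size when it does not fit. A standalone walk-through of that branch logic with hypothetical aperture numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical layout: 512 MiB GTT aperture starting at 1 GiB. */
	uint64_t gtt_start = 0x40000000ULL;
	uint64_t gtt_end   = 0x5fffffffULL;
	uint64_t vram_size = 0x80000000ULL;		/* 2 GiB of VRAM */
	uint64_t size_bf   = gtt_start;			/* room below the GTT */
	uint64_t size_af   = 0xFFFFFFFFULL - gtt_end + 1;	/* room above it */
	uint64_t vram_start;

	if (size_bf > size_af) {
		if (vram_size > size_bf)
			vram_size = size_bf;		/* "limiting VRAM" */
		vram_start = gtt_start - vram_size;
	} else {
		if (vram_size > size_af)
			vram_size = size_af;
		vram_start = gtt_end;			/* as written in the patch */
	}

	printf("VRAM: %lluM 0x%08llX - 0x%08llX\n",
	       (unsigned long long)(vram_size >> 20),
	       (unsigned long long)vram_start,
	       (unsigned long long)(vram_start + vram_size - 1));
	return 0;
}
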
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -996,7 +1124,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
@@ -1006,6 +1134,9 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1146,7 +1277,7 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev)) {
+ if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -1244,3 +1375,78 @@ void rv770_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, tmp;
+ u16 link_cntl2;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW |
+ LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20e81dc..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_CNTL 0xC104
-#define RB_BUFSZ(x) ((x)<<0)
-#define RB_BLKSZ(x) ((x)<<8)
-#define RB_NO_UPDATE (1<<27)
-#define RB_RPTR_WR_ENA (1<<31)
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
@@ -138,6 +138,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -303,6 +304,7 @@
#define BILINEAR_PRECISION_8_BIT (1 << 31)
#define TCP_CNTL 0x9610
+#define TCP_CHAN_STEER 0x9614
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -351,4 +353,49 @@
#define SRBM_STATUS 0x0E50
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a78540..af61fc29e843 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
int put_count = 0;
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
/**
* Deadlock avoidance for multi-bo reserving.
*/
- if (use_sequence && bo->seq_valid &&
- (sequence - bo->val_seq < (1 << 31))) {
- return -EAGAIN;
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
}
if (no_wait)
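
The reworked check above distinguishes two cases: an identical sequence number means this thread already reserved the object (-EDEADLK), while the unsigned difference test detects that our sequence is at or ahead of the current holder's, so the holder will not back off and we must (-EAGAIN). The wraparound-safe comparison can be exercised in isolation:

#include <stdint.h>
#include <stdio.h>

/* (a - b) < 2^31 in 32-bit arithmetic means "a is at or ahead of b",
 * and keeps working after the sequence counter wraps around. */
static int seq_not_older(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
	printf("%d\n", seq_not_older(5, 3));		/* 1: 5 is ahead of 3 */
	printf("%d\n", seq_not_older(3, 5));		/* 0: 3 is behind 5 */
	printf("%d\n", seq_not_older(2, 0xfffffffeu));	/* 1: ahead across the wrap */
	return 0;
}
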
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
BUG();
}
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free)
+{
+ kref_sub(&bo->list_kref, count,
+ (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return ret;
}
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->glob;
spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ttm_bo_unreserve_locked(bo);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
int ret = 0;
if (old_is_pci || new_is_pci ||
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
- ttm_bo_unmap_virtual(bo);
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+ ret = ttm_mem_io_lock(old_man, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(old_man);
+ }
/*
* Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
}
if (bo->mem.mm_node) {
- spin_lock(&bo->lock);
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
- spin_unlock(&bo->lock);
} else
bo->offset = 0;
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
-
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int put_count;
int ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!bo->sync_obj) {
spin_lock(&glob->lru_lock);
/**
- * Lock inversion between bo::reserve and bo::lock here,
+ * Lock inversion between bo:reserve and bdev::fence_lock here,
* but that's OK, since we're only trylocking.
*/
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
if (unlikely(ret == -EBUSY))
goto queue;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return;
} else {
@@ -512,7 +529,7 @@ queue:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (sync_obj) {
driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool no_wait_reserve,
bool no_wait_gpu)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret = 0;
retry:
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0))
return ret;
@@ -580,8 +598,7 @@ retry:
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return 0;
}
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved = false;
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
BUG_ON(ret != 0);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
{
int ret = 0;
struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
BUG_ON(!atomic_read(&bo->reserved));
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved = false;
+ mem.bus.io_reserved_vm = false;
+ mem.bus.io_reserved_count = 0;
/*
* Determine where to move the buffer.
*/
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
bo->destroy = destroy;
- spin_lock_init(&bo->lock);
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
+ INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved = false;
+ bo->mem.bus.io_reserved_vm = false;
+ bo->mem.bus.io_reserved_count = 0;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
BUG_ON(man->has_type);
+ man->io_reserve_fastpath = true;
+ man->use_io_reserve_lru = false;
+ mutex_init(&man->io_reserve_mutex);
+ INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
@@ -1526,7 +1554,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
-
+ bdev->val_seq = 0;
+ spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
@@ -1560,7 +1589,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ ttm_mem_io_lock(man, false);
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(man);
}
+
+
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
void *sync_obj_arg;
int ret = 0;
@@ -1663,9 +1705,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
continue;
}
@@ -1674,29 +1716,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
sync_obj_arg = bo->sync_obj_arg;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
return ret;
}
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj &&
bo->sync_obj_arg == sync_obj_arg)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
} else {
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
}
}
return 0;
@@ -1705,6 +1747,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
/*
@@ -1714,9 +1757,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
if (unlikely(ret != 0))
return ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
/**
* Wait for GPU, then move to system cached.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..77dbf408c0d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
- int ret;
+ if (likely(man->io_reserve_fastpath))
+ return 0;
+
+ if (interruptible)
+ return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+ mutex_lock(&man->io_reserve_mutex);
+ return 0;
+}
- if (!mem->bus.io_reserved) {
- mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+ struct ttm_buffer_object *bo;
+
+ if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+ return -EAGAIN;
+
+ bo = list_first_entry(&man->io_reserve_lru,
+ struct ttm_buffer_object,
+ io_reserve_lru);
+ list_del_init(&bo->io_reserve_lru);
+ ttm_bo_unmap_virtual_locked(bo);
+
+ return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (!bdev->driver->io_mem_reserve)
+ return 0;
+ if (likely(man->io_reserve_fastpath))
+ return bdev->driver->io_mem_reserve(bdev, mem);
+
+ if (bdev->driver->io_mem_reserve &&
+ mem->bus.io_reserved_count++ == 0) {
+retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (ret == -EAGAIN) {
+ ret = ttm_mem_io_evict(man);
+ if (ret == 0)
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ if (bdev->driver->io_mem_reserve &&
+ --mem->bus.io_reserved_count == 0 &&
+ bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
+
+}
+
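
ttm_mem_io_reserve()/ttm_mem_io_free() are now reference counted: only the first reserve invokes the driver's io_mem_reserve hook (retrying after evicting an entry from the io_reserve LRU on -EAGAIN), and only the last free calls io_mem_free. A minimal user-space sketch of that counting pattern, with reserve_hw()/free_hw() standing in for the driver hooks:

#include <stdio.h>

static int io_reserved_count;

static void reserve_hw(void) { printf("io_mem_reserve\n"); }	/* stand-in for the driver hook */
static void free_hw(void)    { printf("io_mem_free\n"); }

static void mem_io_reserve(void)
{
	if (io_reserved_count++ == 0)	/* first user maps the aperture */
		reserve_hw();
}

static void mem_io_free(void)
{
	if (--io_reserved_count == 0)	/* last user tears it down */
		free_hw();
}

int main(void)
{
	mem_io_reserve();	/* maps */
	mem_io_reserve();	/* only bumps the count */
	mem_io_free();		/* still mapped */
	mem_io_free();		/* unmaps */
	return 0;
}
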
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ int ret;
+
+ if (!mem->bus.io_reserved_vm) {
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[mem->mem_type];
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
+ mem->bus.io_reserved_vm = true;
+ if (man->use_io_reserve_lru)
+ list_add_tail(&bo->io_reserve_lru,
+ &man->io_reserve_lru);
}
return 0;
}
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
- if (bdev->driver->io_mem_reserve) {
- if (mem->bus.io_reserved) {
- mem->bus.io_reserved = false;
- bdev->driver->io_mem_free(bdev, mem);
- }
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (mem->bus.io_reserved_vm) {
+ mem->bus.io_reserved_vm = false;
+ list_del_init(&bo->io_reserve_lru);
+ ttm_mem_io_free(bo->bdev, mem);
}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
+ ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy = *old_mem;
+ struct ttm_mem_reg old_copy;
void *old_iomap;
void *new_iomap;
int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
mb();
out2:
- ttm_bo_free_old_node(bo);
-
+ old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
@@ -292,9 +381,10 @@ out2:
}
out1:
- ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- spin_lock_init(&fbo->lock);
init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
+ INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
+ struct ttm_buffer_object *bo = map->bo;
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int i;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
/*
* Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* move.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
} else
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
-
- ret = ttm_mem_io_reserve(bdev, &bo->mem);
- if (ret) {
- retval = VM_FAULT_SIGBUS;
+ ret = ttm_mem_io_lock(man, true);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_NOPAGE;
goto out_unlock;
}
+ ret = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_io_unlock;
+ }
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
- goto out_unlock;
+ goto out_io_unlock;
} else if (unlikely(!page)) {
break;
}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
else if (unlikely(ret != 0)) {
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
break;
}
-
+out_io_unlock:
+ ttm_mem_io_unlock(man);
out_unlock:
ttm_bo_unreserve(bo);
return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..3832fe10b4df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
if (!entry->reserved)
continue;
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+
+ }
entry->reserved = false;
- ttm_bo_unreserve(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
}
}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ spin_lock(&glob->lru_lock);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
{
+ struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
+ uint32_t val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
retry:
+ spin_lock(&glob->lru_lock);
+ val_seq = entry->bo->bdev->val_seq++;
+
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- entry->reserved = false;
- ret = ttm_bo_reserve(bo, true, false, true, val_seq);
- if (ret != 0) {
- ttm_eu_backoff_reservation(list);
- if (ret == -EAGAIN) {
- ret = ttm_bo_wait_unreserved(bo, true);
- if (unlikely(ret != 0))
- return ret;
- goto retry;
- } else
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation(list);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
ret = ttm_bo_wait_cpu(bo, false);
if (ret)
return ret;
goto retry;
}
}
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- struct ttm_bo_driver *driver = bo->bdev->driver;
- void *old_sync_obj;
+ if (list_empty(list))
+ return;
+
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
- spin_lock(&bo->lock);
- old_sync_obj = bo->sync_obj;
+ spin_lock(&bdev->fence_lock);
+ spin_lock(&glob->lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
bo->sync_obj_arg = entry->new_sync_obj_arg;
- spin_unlock(&bo->lock);
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve_locked(bo);
entry->reserved = false;
- if (old_sync_obj)
- driver->sync_obj_unref(&old_sync_obj);
+ }
+ spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
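
Taken together, the rewritten helpers reserve every buffer on the list under the global lru_lock with a val_seq drawn from the device, back off and retry when one buffer is contended, and only afterwards fence and unreserve the whole list in ttm_eu_fence_buffer_objects(). The sketch below shows just the reserve-all-or-back-off idea in plain C; buf, try_reserve and release are illustrative stand-ins, and the real code sleeps on the contended buffer instead of retrying immediately.

#include <stdbool.h>
#include <stddef.h>

struct buf { bool reserved; };

/* Stand-in try-lock: succeeds only if nobody holds the buffer. */
static bool try_reserve(struct buf *b)
{
        if (b->reserved)
                return false;
        b->reserved = true;
        return true;
}

static void release(struct buf *b)
{
        b->reserved = false;
}

/* Reserve every buffer, or release whatever we already hold and start
 * over - the same back-off pattern the execbuf helper uses to avoid
 * deadlocking against another list being reserved concurrently. */
static void reserve_all(struct buf **bufs, size_t n)
{
retry:
        for (size_t i = 0; i < n; i++) {
                if (!try_reserve(bufs[i])) {
                        while (i--)
                                release(bufs[i]);
                        /* the kernel waits for the contended buffer to
                         * become free before retrying */
                        goto retry;
                }
        }
}

int main(void)
{
        struct buf a = { false }, b = { false };
        struct buf *list[] = { &a, &b };

        reserve_all(list, 2);
        return (a.reserved && b.reserved) ? 0 : 1;
}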
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d055041..10fc01f69c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@ struct vmw_private {
*/
struct vmw_sw_context ctx;
- uint32_t val_seq;
struct mutex cmdbuf_mutex;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3528c1..41b95ed6dbcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
if (unlikely(ret != 0))
goto out_err;
- ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
- dev_priv->val_seq++);
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index fe096a7cc0d7..bfab60c938ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_start = 0;
info->fix.smem_len = fb_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
info->pseudo_palette = par->pseudo_palette;
info->screen_base = par->vmalloc;
info->screen_size = fb_size;
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 0e1edd7311ff..70e60a4bb678 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,12 +1,13 @@
config STUB_POULSBO
tristate "Intel GMA500 Stub Driver"
depends on PCI
+ depends on NET # for THERMAL
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select VIDEO_OUTPUT_CONTROL if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
+ select THERMAL if ACPI
help
Choose this option if you have a system that has Intel GMA500
(Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a22027..96c83a9a76bb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
config VGA_ARB
- bool "VGA Arbitration" if EMBEDDED
+ bool "VGA Arbitration" if EXPERT
default y
depends on PCI
help
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index c8768f38511e..e01cacba685f 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -33,6 +33,7 @@ struct vga_switcheroo_client {
struct fb_info *fb_info;
int pwr_state;
void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+ void (*reprobe)(struct pci_dev *pdev);
bool (*can_switch)(struct pci_dev *pdev);
int id;
bool active;
@@ -103,6 +104,7 @@ static void vga_switcheroo_enable(void)
int vga_switcheroo_register_client(struct pci_dev *pdev,
void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+ void (*reprobe)(struct pci_dev *pdev),
bool (*can_switch)(struct pci_dev *pdev))
{
int index;
@@ -117,6 +119,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
vgasr_priv.clients[index].pdev = pdev;
vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+ vgasr_priv.clients[index].reprobe = reprobe;
vgasr_priv.clients[index].can_switch = can_switch;
vgasr_priv.clients[index].id = -1;
if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
@@ -174,7 +177,8 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
int i;
mutex_lock(&vgasr_mutex);
for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%c:%s:%s\n", i,
+ seq_printf(m, "%d:%s:%c:%s:%s\n", i,
+ vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
vgasr_priv.clients[i].active ? '+' : ' ',
vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
pci_name(vgasr_priv.clients[i].pdev));
@@ -190,9 +194,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
static int vga_switchon(struct vga_switcheroo_client *client)
{
- int ret;
-
- ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+ if (vgasr_priv.handler->power_state)
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
@@ -203,12 +206,14 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
- vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+ if (vgasr_priv.handler->power_state)
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
-static int vga_switchto(struct vga_switcheroo_client *new_client)
+/* stage one happens before delay */
+static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
int ret;
int i;
@@ -235,10 +240,28 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);
/* swap shadow resource to denote boot VGA device has changed so X starts on new device */
- active->active = false;
-
active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+ return 0;
+}
+
+/* post delay */
+static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+{
+ int ret;
+ int i;
+ struct vga_switcheroo_client *active = NULL;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active == true) {
+ active = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
+ active->active = false;
if (new_client->fb_info) {
struct fb_event event;
@@ -250,6 +273,9 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
+ if (new_client->reprobe)
+ new_client->reprobe(new_client->pdev);
+
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
@@ -265,6 +291,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
const char *pdev_name;
int i, ret;
bool delay = false, can_switch;
+ bool just_mux = false;
int client_id = -1;
struct vga_switcheroo_client *client = NULL;
@@ -319,6 +346,15 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (strncmp(usercmd, "DIS", 3) == 0)
client_id = VGA_SWITCHEROO_DIS;
+ if (strncmp(usercmd, "MIGD", 4) == 0) {
+ just_mux = true;
+ client_id = VGA_SWITCHEROO_IGD;
+ }
+ if (strncmp(usercmd, "MDIS", 4) == 0) {
+ just_mux = true;
+ client_id = VGA_SWITCHEROO_DIS;
+ }
+
if (client_id == -1)
goto out;
@@ -330,6 +366,12 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
}
vgasr_priv.delayed_switch_active = false;
+
+ if (just_mux) {
+ ret = vgasr_priv.handler->switchto(client_id);
+ goto out;
+ }
+
/* okay we want a switch - test if devices are willing to switch */
can_switch = true;
for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
@@ -345,18 +387,22 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (can_switch == true) {
pdev_name = pci_name(client->pdev);
- ret = vga_switchto(client);
+ ret = vga_switchto_stage1(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+ printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+
+ ret = vga_switchto_stage2(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+
} else {
printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
vgasr_priv.delayed_switch_active = true;
vgasr_priv.delayed_client_id = client_id;
- /* we should at least power up the card to
- make the switch faster */
- if (client->pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(client);
+ ret = vga_switchto_stage1(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
}
out:
@@ -438,9 +484,9 @@ int vga_switcheroo_process_delayed_switch(void)
goto err;
pdev_name = pci_name(client->pdev);
- ret = vga_switchto(client);
+ ret = vga_switchto_stage2(client);
if (ret)
- printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+ printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
vgasr_priv.delayed_switch_active = false;
err = 0;
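
With the extra callback, vga_switcheroo_register_client() now takes a reprobe hook that stage 2 invokes on the newly active GPU so its driver can re-detect outputs after the mux flip. Below is a hedged sketch of a client registration against the four-argument signature shown above; the mydrv_* names and callback bodies are placeholders, not an existing driver.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void mydrv_set_gpu_state(struct pci_dev *pdev,
                                enum vga_switcheroo_state state)
{
        /* power the device up or down */
}

static void mydrv_reprobe(struct pci_dev *pdev)
{
        /* re-detect outputs now that the mux points at us */
}

static bool mydrv_can_switch(struct pci_dev *pdev)
{
        return true;    /* e.g. refuse while a framebuffer is pinned */
}

static int mydrv_register_switcheroo(struct pci_dev *pdev)
{
        return vga_switcheroo_register_client(pdev,
                                              mydrv_set_gpu_state,
                                              mydrv_reprobe,
                                              mydrv_can_switch);
}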
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65da417..ace2b1623b21 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
unsigned int (*set_vga_decode)(void *cookie, bool decode))
{
- int ret = -1;
+ int ret = -ENODEV;
struct vga_device *vgadev;
unsigned long flags;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 401acecc7f32..2560f01c1a63 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -62,9 +62,9 @@ config HID_3M_PCT
Support for 3M PCT touch screens.
config HID_A4TECH
- tristate "A4 tech mice" if EMBEDDED
+ tristate "A4 tech mice" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for A4 tech X5 and WOP-35 / Trust 450L mice.
@@ -77,9 +77,9 @@ config HID_ACRUX_FF
game controllers.
config HID_APPLE
- tristate "Apple {i,Power,Mac}Books" if EMBEDDED
+ tristate "Apple {i,Power,Mac}Books" if EXPERT
depends on (USB_HID || BT_HIDP)
- default !EMBEDDED
+ default !EXPERT
---help---
Support for some Apple devices which less or more break
HID specification.
@@ -88,9 +88,9 @@ config HID_APPLE
MacBooks, MacBook Pros and Apple Aluminum.
config HID_BELKIN
- tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED
+ tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Belkin Flip KVM and Wireless keyboard.
@@ -101,16 +101,16 @@ config HID_CANDO
Support for Cando dual touch panel.
config HID_CHERRY
- tristate "Cherry Cymotion keyboard" if EMBEDDED
+ tristate "Cherry Cymotion keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Cherry Cymotion keyboard.
config HID_CHICONY
- tristate "Chicony Tactical pad" if EMBEDDED
+ tristate "Chicony Tactical pad" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Chicony Tactical pad.
@@ -130,9 +130,9 @@ config HID_PRODIKEYS
and some additional multimedia keys.
config HID_CYPRESS
- tristate "Cypress mouse and barcode readers" if EMBEDDED
+ tristate "Cypress mouse and barcode readers" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for cypress mouse and barcode readers.
@@ -150,6 +150,16 @@ config DRAGONRISE_FF
Say Y here if you want to enable force feedback support for DragonRise Inc.
game controllers.
+config HID_EMS_FF
+ tristate "EMS Production Inc. force feedback support"
+ depends on USB_HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you want to enable force feedback support for devices by
+ EMS Production Ltd.
+ Currently the following devices are known to be supported:
+ - Trio Linker Plus II
+
config HID_EGALAX
tristate "eGalax multi-touch panel"
depends on USB_HID
@@ -164,16 +174,16 @@ config HID_ELECOM
Support for the ELECOM BM084 (bluetooth mouse).
config HID_EZKEY
- tristate "Ezkey BTC 8193 keyboard" if EMBEDDED
+ tristate "Ezkey BTC 8193 keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Ezkey BTC 8193 keyboard.
config HID_KYE
- tristate "Kye/Genius Ergo Mouse" if EMBEDDED
+ tristate "Kye/Genius Ergo Mouse" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Kye/Genius Ergo Mouse.
@@ -202,16 +212,16 @@ config HID_TWINHAN
Support for Twinhan IR remote control.
config HID_KENSINGTON
- tristate "Kensington Slimblade Trackball" if EMBEDDED
+ tristate "Kensington Slimblade Trackball" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Kensington Slimblade Trackball.
config HID_LOGITECH
- tristate "Logitech devices" if EMBEDDED
+ tristate "Logitech devices" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Logitech devices that are not fully compliant with HID standard.
@@ -266,9 +276,9 @@ config HID_MAGICMOUSE
Apple Wireless "Magic" Mouse.
config HID_MICROSOFT
- tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED
+ tristate "Microsoft non-fully HID-compliant devices" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
@@ -279,12 +289,29 @@ config HID_MOSART
Support for MosArt dual-touch panels.
config HID_MONTEREY
- tristate "Monterey Genius KB29E keyboard" if EMBEDDED
+ tristate "Monterey Genius KB29E keyboard" if EXPERT
depends on USB_HID
- default !EMBEDDED
+ default !EXPERT
---help---
Support for Monterey Genius KB29E.
+config HID_MULTITOUCH
+ tristate "HID Multitouch panels"
+ depends on USB_HID
+ ---help---
+ Generic support for HID multitouch panels.
+
+ Say Y here if you have one of the following devices:
+ - Cypress TrueTouch panels
+ - Hanvon dual touch panels
+ - Pixcir dual touch panels
+ - 'Sensing Win7-TwoFinger' panel by GeneralTouch
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-multitouch.
+
config HID_NTRIG
tristate "N-Trig touch screen"
depends on USB_HID
@@ -338,8 +365,8 @@ config HID_PICOLCD
- IR
config HID_PICOLCD_FB
- bool "Framebuffer support" if EMBEDDED
- default !EMBEDDED
+ bool "Framebuffer support" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=FB || FB=y
select FB_DEFERRED_IO
@@ -352,8 +379,8 @@ config HID_PICOLCD_FB
framebuffer device.
config HID_PICOLCD_BACKLIGHT
- bool "Backlight control" if EMBEDDED
- default !EMBEDDED
+ bool "Backlight control" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
---help---
@@ -361,16 +388,16 @@ config HID_PICOLCD_BACKLIGHT
class.
config HID_PICOLCD_LCD
- bool "Contrast control" if EMBEDDED
- default !EMBEDDED
+ bool "Contrast control" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
---help---
Provide access to PicoLCD's LCD contrast via lcd class.
config HID_PICOLCD_LEDS
- bool "GPO via leds class" if EMBEDDED
- default !EMBEDDED
+ bool "GPO via leds class" if EXPERT
+ default !EXPERT
depends on HID_PICOLCD
depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
---help---
@@ -397,6 +424,13 @@ config HID_ROCCAT_KONE
---help---
Support for Roccat Kone mouse.
+config HID_ROCCAT_KONEPLUS
+ tristate "Roccat Kone[+] mouse support"
+ depends on USB_HID
+ select HID_ROCCAT
+ ---help---
+ Support for Roccat Kone[+] mouse.
+
config HID_ROCCAT_PYRA
tristate "Roccat Pyra mouse support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index c335605b9200..6efc2a0370ad 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for the HID driver
#
-hid-objs := hid-core.o hid-input.o
+hid-y := hid-core.o hid-input.o
ifdef CONFIG_DEBUG_FS
hid-objs += hid-debug.o
@@ -11,18 +11,18 @@ obj-$(CONFIG_HID) += hid.o
hid-$(CONFIG_HIDRAW) += hidraw.o
-hid-logitech-objs := hid-lg.o
+hid-logitech-y := hid-lg.o
ifdef CONFIG_LOGITECH_FF
- hid-logitech-objs += hid-lgff.o
+ hid-logitech-y += hid-lgff.o
endif
ifdef CONFIG_LOGIRUMBLEPAD2_FF
- hid-logitech-objs += hid-lg2ff.o
+ hid-logitech-y += hid-lg2ff.o
endif
ifdef CONFIG_LOGIG940_FF
- hid-logitech-objs += hid-lg3ff.o
+ hid-logitech-y += hid-lg3ff.o
endif
ifdef CONFIG_LOGIWII_FF
- hid-logitech-objs += hid-lg4ff.o
+ hid-logitech-y += hid-lg4ff.o
endif
obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
@@ -35,6 +35,7 @@ obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MOSART) += hid-mosart.o
+obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
@@ -55,6 +57,7 @@ obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
+obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index 4fb7c7528d16..5243ae2d3730 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -246,7 +246,7 @@ static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id)
md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL);
if (!md) {
- dev_err(&hdev->dev, "cannot allocate 3M data\n");
+ hid_err(hdev, "cannot allocate 3M data\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, md);
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 1666c1684e79..902d1dfeb1b5 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -93,7 +93,7 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
if (a4 == NULL) {
- dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ hid_err(hdev, "can't alloc device descriptor\n");
ret = -ENOMEM;
goto err_free;
}
@@ -104,13 +104,13 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
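
The HID patches from here through hid-core.c convert dev_err()/dev_warn()/dev_info() calls to hid_err()/hid_warn()/hid_info(), which log through the hid_device's embedded struct device. For orientation only, wrappers of this kind are typically thin macros along the following lines (an assumption for illustration; the real definitions live in <linux/hid.h>):

/* Illustrative definitions only - not copied from this patch series. */
#define hid_err(hid, fmt, ...) \
        dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...) \
        dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...) \
        dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)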
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index eaeca564a8d3..61aa71233392 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -16,6 +16,8 @@
* any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -59,6 +61,27 @@ struct apple_key_translation {
u8 flags;
};
+static const struct apple_key_translation macbookair_fn_keys[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+ { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
+ { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
+ { KEY_F4, KEY_DASHBOARD, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
+ { KEY_F7, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
+ { KEY_F8, KEY_NEXTSONG, APPLE_FLAG_FKEY },
+ { KEY_F9, KEY_MUTE, APPLE_FLAG_FKEY },
+ { KEY_F10, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
+ { KEY_F11, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
+ { KEY_F12, KEY_EJECTCD, APPLE_FLAG_FKEY },
+ { KEY_UP, KEY_PAGEUP },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_RIGHT, KEY_END },
+ { }
+};
+
static const struct apple_key_translation apple_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_ENTER, KEY_INSERT },
@@ -146,7 +169,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
struct hid_usage *usage, __s32 value)
{
struct apple_sc *asc = hid_get_drvdata(hid);
- const struct apple_key_translation *trans;
+ const struct apple_key_translation *trans, *table;
if (usage->code == KEY_FN) {
asc->fn_on = !!value;
@@ -157,10 +180,16 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (fnmode) {
int do_translate;
- trans = apple_find_translation((hid->product < 0x21d ||
- hid->product >= 0x300) ?
- powerbook_fn_keys : apple_fn_keys,
- usage->code);
+ if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+ table = macbookair_fn_keys;
+ else if (hid->product < 0x21d || hid->product >= 0x300)
+ table = powerbook_fn_keys;
+ else
+ table = apple_fn_keys;
+
+ trans = apple_find_translation (table, usage->code);
+
if (trans) {
if (test_bit(usage->code, asc->pressed_fn))
do_translate = 1;
@@ -253,8 +282,8 @@ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
rdesc[53] == 0x65 && rdesc[59] == 0x65) {
- dev_info(&hdev->dev, "fixing up MacBook JIS keyboard report "
- "descriptor\n");
+ hid_info(hdev,
+ "fixing up MacBook JIS keyboard report descriptor\n");
rdesc[53] = rdesc[59] = 0xe7;
}
return rdesc;
@@ -324,7 +353,7 @@ static int apple_probe(struct hid_device *hdev,
asc = kzalloc(sizeof(*asc), GFP_KERNEL);
if (asc == NULL) {
- dev_err(&hdev->dev, "can't alloc apple descriptor\n");
+ hid_err(hdev, "can't alloc apple descriptor\n");
return -ENOMEM;
}
@@ -334,7 +363,7 @@ static int apple_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
@@ -345,7 +374,7 @@ static int apple_probe(struct hid_device *hdev,
ret = hid_hw_start(hdev, connect_mask);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
@@ -440,6 +469,18 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
@@ -473,7 +514,7 @@ static int __init apple_init(void)
ret = hid_register_driver(&apple_driver);
if (ret)
- printk(KERN_ERR "can't register apple driver\n");
+ pr_err("can't register apple driver\n");
return ret;
}
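
The hid-apple.c hunk above selects one of three translation tables (macbookair_fn_keys, powerbook_fn_keys or apple_fn_keys) from the USB product ID and then walks it with apple_find_translation(). A self-contained sketch of that sentinel-terminated table lookup follows; the struct layout and key codes are simplified, not the driver's exact types.

#include <stdint.h>
#include <stddef.h>

struct key_translation {
        uint16_t from;
        uint16_t to;
        uint8_t  flags;
};

/* The table ends with an all-zero entry, like the { } sentinel above. */
static const struct key_translation *
find_translation(const struct key_translation *table, uint16_t from)
{
        for (; table->from; table++)
                if (table->from == from)
                        return table;
        return NULL;
}

int main(void)
{
        static const struct key_translation fn_keys[] = {
                { 14 /* KEY_BACKSPACE */, 111 /* KEY_DELETE */, 0 },
                { }
        };
        const struct key_translation *t = find_translation(fn_keys, 14);

        return (t && t->to == 111) ? 0 : 1;
}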
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index f42ee140738a..e5b961d6ff22 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -73,14 +73,14 @@ static int axff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output reports found\n");
+ hid_err(hid, "no output reports found\n");
return -ENODEV;
}
report = list_first_entry(report_list, struct hid_report, list);
if (report->maxfield < 4) {
- dev_err(&hid->dev, "no fields in the report: %d\n", report->maxfield);
+ hid_err(hid, "no fields in the report: %d\n", report->maxfield);
return -ENODEV;
}
@@ -101,7 +101,7 @@ static int axff_init(struct hid_device *hid)
axff->report->field[3]->value[0] = 0x00;
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
- dev_info(&hid->dev, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+ hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
return 0;
@@ -114,17 +114,17 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int error;
- dev_dbg(&hdev->dev, "ACRUX HID hardware probe...");
+ dev_dbg(&hdev->dev, "ACRUX HID hardware probe...\n");
error = hid_parse(hdev);
if (error) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
return error;
}
error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (error) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
return error;
}
@@ -134,7 +134,7 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
* Do not fail device initialization completely as device
* may still be partially operable, just warn.
*/
- dev_warn(&hdev->dev,
+ hid_warn(hdev,
"Failed to enable force feedback support, error: %d\n",
error);
}
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index 4ce7aa3a519f..a1a765a5b08a 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -56,14 +56,14 @@ static int belkin_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
((quirks & BELKIN_HIDDEV) ? HID_CONNECT_HIDDEV_FORCE : 0));
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 5925bdcd417d..1ea066c55201 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -207,7 +207,7 @@ static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
if (!td) {
- dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+ hid_err(hdev, "cannot allocate Cando Touch data\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, td);
@@ -236,6 +236,8 @@ static const struct hid_device_id cando_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index e880086c2311..888ece68a47c 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -30,8 +30,7 @@ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
- dev_info(&hdev->dev, "fixing up Cherry Cymotion report "
- "descriptor\n");
+ hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
rdesc[11] = rdesc[16] = 0xff;
rdesc[12] = rdesc[17] = 0x03;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3f9673d94da9..d678cf3d33d5 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -14,6 +14,8 @@
* any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -59,7 +61,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
if (report_enum->report_id_hash[id])
return report_enum->report_id_hash[id];
- if (!(report = kzalloc(sizeof(struct hid_report), GFP_KERNEL)))
+ report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
+ if (!report)
return NULL;
if (id != 0)
@@ -90,8 +93,11 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
return NULL;
}
- if (!(field = kzalloc(sizeof(struct hid_field) + usages * sizeof(struct hid_usage)
- + values * sizeof(unsigned), GFP_KERNEL))) return NULL;
+ field = kzalloc((sizeof(struct hid_field) +
+ usages * sizeof(struct hid_usage) +
+ values * sizeof(unsigned)), GFP_KERNEL);
+ if (!field)
+ return NULL;
field->index = report->maxfield++;
report->field[field->index] = field;
@@ -172,10 +178,14 @@ static int close_collection(struct hid_parser *parser)
static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
+ struct hid_collection *collection = parser->device->collection;
int n;
- for (n = parser->collection_stack_ptr - 1; n >= 0; n--)
- if (parser->device->collection[parser->collection_stack[n]].type == type)
- return parser->device->collection[parser->collection_stack[n]].usage;
+
+ for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
+ unsigned index = parser->collection_stack[n];
+ if (collection[index].type == type)
+ return collection[index].usage;
+ }
return 0; /* we know nothing about this usage type */
}
@@ -209,7 +219,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
unsigned offset;
int i;
- if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) {
+ report = hid_register_report(parser->device, report_type, parser->global.report_id);
+ if (!report) {
dbg_hid("hid_register_report failed\n");
return -1;
}
@@ -227,7 +238,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
usages = max_t(int, parser->local.usage_index, parser->global.report_count);
- if ((field = hid_register_field(report, usages, parser->global.report_count)) == NULL)
+ field = hid_register_field(report, usages, parser->global.report_count);
+ if (!field)
return 0;
field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
@@ -652,13 +664,12 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
return -ENOMEM;
device->rsize = size;
- parser = vmalloc(sizeof(struct hid_parser));
+ parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
goto err;
}
- memset(parser, 0, sizeof(struct hid_parser));
parser->device = device;
end = start + size;
@@ -672,7 +683,8 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (dispatch_type[item.type](parser, &item)) {
dbg_hid("item %u %u %u %u parsing failed\n",
- item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
+ item.format, (unsigned)item.size,
+ (unsigned)item.type, (unsigned)item.tag);
goto err;
}
@@ -737,13 +749,14 @@ static u32 s32ton(__s32 value, unsigned n)
* Search linux-kernel and linux-usb-devel archives for "hid-core extract".
*/
-static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
+static __u32 extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n)
{
u64 x;
if (n > 32)
- printk(KERN_WARNING "HID: extract() called with n (%d) > 32! (%s)\n",
- n, current->comm);
+ hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n",
+ n, current->comm);
report += offset >> 3; /* adjust byte index */
offset &= 7; /* now only need bit offset into one byte */
@@ -760,18 +773,19 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
* endianness of register values by considering a register
* a "cached" copy of the little endiad bit stream.
*/
-static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
+static void implement(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n, __u32 value)
{
u64 x;
u64 m = (1ULL << n) - 1;
if (n > 32)
- printk(KERN_WARNING "HID: implement() called with n (%d) > 32! (%s)\n",
- n, current->comm);
+ hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
+ __func__, n, current->comm);
if (value > m)
- printk(KERN_WARNING "HID: implement() called with too large value %d! (%s)\n",
- value, current->comm);
+ hid_warn(hid, "%s() called with too large value %d! (%s)\n",
+ __func__, value, current->comm);
WARN_ON(value > m);
value &= m;
@@ -788,7 +802,7 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
* Search an array for a value.
*/
-static __inline__ int search(__s32 *array, __s32 value, unsigned n)
+static int search(__s32 *array, __s32 value, unsigned n)
{
while (n--) {
if (*array++ == value)
@@ -887,18 +901,22 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
__s32 max = field->logical_maximum;
__s32 *value;
- if (!(value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC)))
+ value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
+ if (!value)
return;
for (n = 0; n < count; n++) {
- value[n] = min < 0 ? snto32(extract(data, offset + n * size, size), size) :
- extract(data, offset + n * size, size);
+ value[n] = min < 0 ?
+ snto32(extract(hid, data, offset + n * size, size),
+ size) :
+ extract(hid, data, offset + n * size, size);
- if (!(field->flags & HID_MAIN_ITEM_VARIABLE) /* Ignore report if ErrorRollOver */
- && value[n] >= min && value[n] <= max
- && field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
- goto exit;
+ /* Ignore report if ErrorRollOver */
+ if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ value[n] >= min && value[n] <= max &&
+ field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
+ goto exit;
}
for (n = 0; n < count; n++) {
@@ -928,7 +946,8 @@ exit:
* Output the field into the report.
*/
-static void hid_output_field(struct hid_field *field, __u8 *data)
+static void hid_output_field(const struct hid_device *hid,
+ struct hid_field *field, __u8 *data)
{
unsigned count = field->report_count;
unsigned offset = field->report_offset;
@@ -937,9 +956,11 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
- implement(data, offset + n * size, size, s32ton(field->value[n], size));
+ implement(hid, data, offset + n * size, size,
+ s32ton(field->value[n], size));
else /* unsigned values */
- implement(data, offset + n * size, size, field->value[n]);
+ implement(hid, data, offset + n * size, size,
+ field->value[n]);
}
}
@@ -956,7 +977,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
- hid_output_field(report->field[n], data);
+ hid_output_field(report->device, report->field[n], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);
@@ -1169,8 +1190,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
hdev->claimed |= HID_CLAIMED_HIDRAW;
if (!hdev->claimed) {
- dev_err(&hdev->dev, "claimed by neither input, hiddev nor "
- "hidraw\n");
+ hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
return -ENODEV;
}
@@ -1210,9 +1230,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
bus = "<UNKNOWN>";
}
- dev_info(&hdev->dev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
- buf, bus, hdev->version >> 8, hdev->version & 0xff,
- type, hdev->name, hdev->phys);
+ hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
+ buf, bus, hdev->version >> 8, hdev->version & 0xff,
+ type, hdev->name, hdev->phys);
return 0;
}
@@ -1230,7 +1250,7 @@ void hid_disconnect(struct hid_device *hdev)
EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
-static const struct hid_device_id hid_blacklist[] = {
+static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
@@ -1276,6 +1296,12 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1286,17 +1312,21 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
@@ -1304,14 +1334,17 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1372,6 +1405,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1393,6 +1427,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -1499,9 +1534,9 @@ static int hid_bus_match(struct device *dev, struct device_driver *drv)
if (!hid_match_device(hdev, hdrv))
return 0;
- /* generic wants all non-blacklisted */
+ /* generic wants all that don't have specialized driver */
if (!strncmp(hdrv->name, "generic-", 8))
- return !hid_match_id(hdev, hid_blacklist);
+ return !hid_match_id(hdev, hid_have_special_driver);
return 1;
}
@@ -1611,7 +1646,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
@@ -1761,6 +1795,12 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
@@ -1952,12 +1992,12 @@ static int __init hid_init(void)
int ret;
if (hid_debug)
- printk(KERN_WARNING "HID: hid_debug is now used solely for parser and driver debugging.\n"
- "HID: debugfs is now used for inspecting the device (report descriptor, reports)\n");
+ pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
+ "debugfs is now used for inspecting the device (report descriptor, reports)\n");
ret = bus_register(&hid_bus_type);
if (ret) {
- printk(KERN_ERR "HID: can't register hid bus\n");
+ pr_err("can't register hid bus\n");
goto err;
}
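
In the hid-core.c changes, extract() and implement() now receive the hid_device only so their sanity warnings can go through hid_warn(); the bit manipulation itself is unchanged. The standalone sketch below shows what extract() computes for an n-bit little-endian field, assuming (as a simplification of hid-core's buffer handling) that eight bytes are readable from the field's first byte.

#include <stdint.h>

/* Read the n-bit field (n <= 32) that starts at bit `offset' of a
 * little-endian HID report.  Assumes at least 8 readable bytes from
 * the field's first byte - a simplification of what hid-core does. */
static uint32_t extract_bits(const uint8_t *report, unsigned offset, unsigned n)
{
        uint64_t x = 0;

        report += offset >> 3;          /* byte holding the first bit */
        offset &= 7;                    /* bit index inside that byte */

        for (int i = 0; i < 8; i++)     /* build a 64-bit LE window */
                x |= (uint64_t)report[i] << (8 * i);

        return (uint32_t)((x >> offset) & ((1ULL << n) - 1));
}

int main(void)
{
        const uint8_t report[8] = { 0xab, 0xcd, 0, 0, 0, 0, 0, 0 };

        /* the 12-bit field starting at bit 4 of ...cdab is 0xcda */
        return extract_bits(report, 4, 12) == 0xcda ? 0 : 1;
}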
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 4cd0e2345991..2f0be4c66af7 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -107,13 +107,13 @@ static int cp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 75c5e23d09d2..555382fc7417 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -26,6 +26,8 @@
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
@@ -393,7 +395,7 @@ char *hid_resolv_usage(unsigned usage, struct seq_file *f) {
buf = resolv_usage_page(usage >> 16, f);
if (IS_ERR(buf)) {
- printk(KERN_ERR "error allocating HID debug buffer\n");
+ pr_err("error allocating HID debug buffer\n");
return NULL;
}
diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-drff.c
index 968b04f9b796..afcf3d67eb02 100644
--- a/drivers/hid/hid-drff.c
+++ b/drivers/hid/hid-drff.c
@@ -96,18 +96,18 @@ static int drff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output reports found\n");
+ hid_err(hid, "no output reports found\n");
return -ENODEV;
}
report = list_first_entry(report_list, struct hid_report, list);
if (report->maxfield < 1) {
- dev_err(&hid->dev, "no fields in the report\n");
+ hid_err(hid, "no fields in the report\n");
return -ENODEV;
}
if (report->field[0]->report_count < 7) {
- dev_err(&hid->dev, "not enough values in the field\n");
+ hid_err(hid, "not enough values in the field\n");
return -ENODEV;
}
@@ -133,8 +133,8 @@ static int drff_init(struct hid_device *hid)
drff->report->field[0]->value[6] = 0x00;
usbhid_submit_report(hid, drff->report, USB_DIR_OUT);
- dev_info(&hid->dev, "Force Feedback for DragonRise Inc. game "
- "controllers by Richard Walmsley <richwalm@gmail.com>\n");
+ hid_info(hid, "Force Feedback for DragonRise Inc. "
+ "game controllers by Richard Walmsley <richwalm@gmail.com>\n");
return 0;
}
@@ -153,13 +153,13 @@ static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index 8fbff2358eb6..03bee1970d70 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -200,7 +200,7 @@ static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
if (!td) {
- dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+ hid_err(hdev, "cannot allocate eGalax data\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, td);
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 6e31f305397d..79d0c61e7214 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -24,8 +24,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
- dev_info(&hdev->dev, "Fixing up Elecom BM084 "
- "report descriptor.\n");
+ hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
rdesc[47] = 0x00;
}
return rdesc;
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
new file mode 100644
index 000000000000..81877c67caea
--- /dev/null
+++ b/drivers/hid/hid-emsff.c
@@ -0,0 +1,161 @@
+/*
+ * Force feedback support for EMS Trio Linker Plus II
+ *
+ * Copyright (c) 2010 Ignaz Forster <ignaz.forster@gmx.de>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+struct emsff_device {
+ struct hid_report *report;
+};
+
+static int emsff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct emsff_device *emsff = data;
+ int weak, strong;
+
+ weak = effect->u.rumble.weak_magnitude;
+ strong = effect->u.rumble.strong_magnitude;
+
+ dbg_hid("called with 0x%04x 0x%04x\n", strong, weak);
+
+ weak = weak * 0xff / 0xffff;
+ strong = strong * 0xff / 0xffff;
+
+ emsff->report->field[0]->value[1] = weak;
+ emsff->report->field[0]->value[2] = strong;
+
+ dbg_hid("running with 0x%02x 0x%02x\n", strong, weak);
+ usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);
+
+ return 0;
+}
+
+static int emsff_init(struct hid_device *hid)
+{
+ struct emsff_device *emsff;
+ struct hid_report *report;
+ struct hid_input *hidinput = list_first_entry(&hid->inputs,
+ struct hid_input, list);
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output reports found\n");
+ return -ENODEV;
+ }
+
+ report = list_first_entry(report_list, struct hid_report, list);
+ if (report->maxfield < 1) {
+ hid_err(hid, "no fields in the report\n");
+ return -ENODEV;
+ }
+
+ if (report->field[0]->report_count < 7) {
+ hid_err(hid, "not enough values in the field\n");
+ return -ENODEV;
+ }
+
+ emsff = kzalloc(sizeof(struct emsff_device), GFP_KERNEL);
+ if (!emsff)
+ return -ENOMEM;
+
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, emsff, emsff_play);
+ if (error) {
+ kfree(emsff);
+ return error;
+ }
+
+ emsff->report = report;
+ emsff->report->field[0]->value[0] = 0x01;
+ emsff->report->field[0]->value[1] = 0x00;
+ emsff->report->field[0]->value[2] = 0x00;
+ emsff->report->field[0]->value[3] = 0x00;
+ emsff->report->field[0]->value[4] = 0x00;
+ emsff->report->field[0]->value[5] = 0x00;
+ emsff->report->field[0]->value[6] = 0x00;
+ usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);
+
+ hid_info(hid, "force feedback for EMS based devices by Ignaz Forster <ignaz.forster@gmx.de>\n");
+
+ return 0;
+}
+
+static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err;
+ }
+
+ emsff_init(hdev);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct hid_device_id ems_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, ems_devices);
+
+static struct hid_driver ems_driver = {
+ .name = "hkems",
+ .id_table = ems_devices,
+ .probe = ems_probe,
+};
+
+static int ems_init(void)
+{
+ return hid_register_driver(&ems_driver);
+}
+
+static void ems_exit(void)
+{
+ hid_unregister_driver(&ems_driver);
+}
+
+module_init(ems_init);
+module_exit(ems_exit);
+MODULE_LICENSE("GPL");
+
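
Because hid-emsff.c registers a single memless FF_RUMBLE effect, the pad can be driven from userspace with the standard input force-feedback interface. A hedged usage sketch follows; the /dev/input/event7 path is hypothetical and error handling is trimmed.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
        int fd = open("/dev/input/event7", O_RDWR);  /* hypothetical node */
        struct ff_effect fx;
        struct input_event play;

        if (fd < 0)
                return 1;

        memset(&fx, 0, sizeof(fx));
        fx.type = FF_RUMBLE;
        fx.id = -1;                           /* let the kernel pick an id */
        fx.u.rumble.strong_magnitude = 0xc000;
        fx.u.rumble.weak_magnitude = 0x4000;  /* emsff_play scales to 0..0xff */
        fx.replay.length = 1000;              /* milliseconds */

        if (ioctl(fd, EVIOCSFF, &fx) < 0)
                return 1;

        memset(&play, 0, sizeof(play));
        play.type = EV_FF;
        play.code = fx.id;
        play.value = 1;                       /* start the effect */
        if (write(fd, &play, sizeof(play)) == -1)
                return 1;

        sleep(2);                             /* let it rumble */
        close(fd);
        return 0;
}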
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 88dfcf49a5d7..279ba530003c 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -87,7 +87,7 @@ static int gaff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output reports found\n");
+ hid_err(hid, "no output reports found\n");
return -ENODEV;
}
@@ -95,12 +95,12 @@ static int gaff_init(struct hid_device *hid)
report = list_entry(report_ptr, struct hid_report, list);
if (report->maxfield < 1) {
- dev_err(&hid->dev, "no fields in the report\n");
+ hid_err(hid, "no fields in the report\n");
return -ENODEV;
}
if (report->field[0]->report_count < 6) {
- dev_err(&hid->dev, "not enough values in the field\n");
+ hid_err(hid, "not enough values in the field\n");
return -ENODEV;
}
@@ -128,8 +128,7 @@ static int gaff_init(struct hid_device *hid)
usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
- dev_info(&hid->dev, "Force Feedback for GreenAsia 0x12"
- " devices by Lukasz Lubojanski <lukasz@lubojanski.info>\n");
+ hid_info(hid, "Force Feedback for GreenAsia 0x12 devices by Lukasz Lubojanski <lukasz@lubojanski.info>\n");
return 0;
}
@@ -148,13 +147,13 @@ static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8e11af86b014..92a0d61a7379 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -97,6 +97,12 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -134,7 +140,9 @@
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
#define USB_VENDOR_ID_CANDO 0x2087
+#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
@@ -156,6 +164,7 @@
#define USB_VENDOR_ID_CHICONY 0x04f2
#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
+#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_VENDOR_ID_CIDC 0x1677
@@ -179,6 +188,7 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
+#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
@@ -208,6 +218,9 @@
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2700 0x0020
+#define USB_VENDOR_ID_EMS 0x2006
+#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
+
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
@@ -226,6 +239,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
@@ -308,6 +322,9 @@
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
+#define USB_VENDOR_ID_HANVON 0x20b3
+#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
+
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
@@ -480,6 +497,7 @@
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d8d372bae3cc..7f552bfad32c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,6 +290,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
goto ignore;
}
+ if (field->report_type == HID_FEATURE_REPORT) {
+ if (device->driver->feature_mapping) {
+ device->driver->feature_mapping(device, hidinput, field,
+ usage);
+ }
+ goto ignore;
+ }
+
if (device->driver->input_mapping) {
int ret = device->driver->input_mapping(device, hidinput, field,
usage, &bit, &max);
@@ -319,21 +327,21 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
switch (field->application) {
case HID_GD_MOUSE:
- case HID_GD_POINTER: code += 0x110; break;
+ case HID_GD_POINTER: code += BTN_MOUSE; break;
case HID_GD_JOYSTICK:
if (code <= 0xf)
code += BTN_JOYSTICK;
else
code += BTN_TRIGGER_HAPPY;
break;
- case HID_GD_GAMEPAD: code += 0x130; break;
+ case HID_GD_GAMEPAD: code += BTN_GAMEPAD; break;
default:
switch (field->physical) {
case HID_GD_MOUSE:
- case HID_GD_POINTER: code += 0x110; break;
- case HID_GD_JOYSTICK: code += 0x120; break;
- case HID_GD_GAMEPAD: code += 0x130; break;
- default: code += 0x100;
+ case HID_GD_POINTER: code += BTN_MOUSE; break;
+ case HID_GD_JOYSTICK: code += BTN_JOYSTICK; break;
+ case HID_GD_GAMEPAD: code += BTN_GAMEPAD; break;
+ default: code += BTN_MISC;
}
}
@@ -817,14 +825,14 @@ static int hidinput_open(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
- return hid->ll_driver->open(hid);
+ return hid_hw_open(hid);
}
static void hidinput_close(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
- hid->ll_driver->close(hid);
+ hid_hw_close(hid);
}
/*
@@ -839,7 +847,6 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
struct hid_input *hidinput = NULL;
struct input_dev *input_dev;
int i, j, k;
- int max_report_type = HID_OUTPUT_REPORT;
INIT_LIST_HEAD(&hid->inputs);
@@ -856,10 +863,11 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
return -1;
}
- if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
- max_report_type = HID_INPUT_REPORT;
+ for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+ if (k == HID_OUTPUT_REPORT &&
+ hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
+ continue;
- for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
if (!report->maxfield)
@@ -871,7 +879,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!hidinput || !input_dev) {
kfree(hidinput);
input_free_device(input_dev);
- err_hid("Out of memory during hid input probe");
+ hid_err(hid, "Out of memory during hid input probe\n");
goto out_unwind;
}
@@ -912,6 +920,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
hidinput = NULL;
}
}
+ }
if (hidinput && input_register_device(hidinput->input))
goto out_cleanup;
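Note: the numeric button offsets removed in the hid-input.c hunk above are simply the input-layer button bases, so the substitution is purely cosmetic; the generated key codes do not change. For reference, the values as defined in include/linux/input.h:

	/* input-layer button bases referenced by the hunk above */
	#define BTN_MISC	0x100
	#define BTN_MOUSE	0x110
	#define BTN_JOYSTICK	0x120
	#define BTN_GAMEPAD	0x130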
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 817247ee006c..f2ba9efc3a53 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -32,8 +32,8 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
rdesc[73] == 0x95 && rdesc[74] == 0x01) {
- dev_info(&hdev->dev, "fixing up Kye/Genius Ergo Mouse report "
- "descriptor\n");
+ hid_info(hdev,
+ "fixing up Kye/Genius Ergo Mouse report descriptor\n");
rdesc[62] = 0x09;
rdesc[64] = 0x04;
rdesc[66] = 0x07;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index b629fba5a057..aef4104da141 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -53,23 +53,22 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if ((quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
- dev_info(&hdev->dev, "fixing up Logitech keyboard report "
- "descriptor\n");
+ hid_info(hdev,
+ "fixing up Logitech keyboard report descriptor\n");
rdesc[84] = rdesc[89] = 0x4d;
rdesc[85] = rdesc[90] = 0x10;
}
if ((quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
- dev_info(&hdev->dev, "fixing up rel/abs in Logitech "
- "report descriptor\n");
+ hid_info(hdev,
+ "fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
if ((quirks & LG_FF4) && *rsize >= 101 &&
rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
rdesc[47] == 0x05 && rdesc[48] == 0x09) {
- dev_info(&hdev->dev, "fixing up Logitech Speed Force Wireless "
- "button descriptor\n");
+ hid_info(hdev, "fixing up Logitech Speed Force Wireless button descriptor\n");
rdesc[41] = 0x05;
rdesc[42] = 0x09;
rdesc[47] = 0x95;
@@ -288,7 +287,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
@@ -297,7 +296,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_hw_start(hdev, connect_mask);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 4258253c36b3..3c31bc650e5d 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -72,18 +72,18 @@ int lg2ff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output report found\n");
+ hid_err(hid, "no output report found\n");
return -ENODEV;
}
report = list_entry(report_list->next, struct hid_report, list);
if (report->maxfield < 1) {
- dev_err(&hid->dev, "output report is empty\n");
+ hid_err(hid, "output report is empty\n");
return -ENODEV;
}
if (report->field[0]->report_count < 7) {
- dev_err(&hid->dev, "not enough values in the field\n");
+ hid_err(hid, "not enough values in the field\n");
return -ENODEV;
}
@@ -110,8 +110,7 @@ int lg2ff_init(struct hid_device *hid)
usbhid_submit_report(hid, report, USB_DIR_OUT);
- dev_info(&hid->dev, "Force feedback for Logitech RumblePad/Rumblepad 2 by "
- "Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
}
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index 4002832ee4af..f98644c26c1d 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -141,20 +141,20 @@ int lg3ff_init(struct hid_device *hid)
/* Find the report to use */
if (list_empty(report_list)) {
- err_hid("No output report found");
+ hid_err(hid, "No output report found\n");
return -1;
}
/* Check that the report looks ok */
report = list_entry(report_list->next, struct hid_report, list);
if (!report) {
- err_hid("NULL output report");
+ hid_err(hid, "NULL output report\n");
return -1;
}
field = report->field[0];
if (!field) {
- err_hid("NULL field");
+ hid_err(hid, "NULL field\n");
return -1;
}
@@ -169,8 +169,7 @@ int lg3ff_init(struct hid_device *hid)
if (test_bit(FF_AUTOCENTER, dev->ffbit))
dev->ff->set_autocenter = hid_lg3ff_set_autocenter;
- dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by "
- "Gary Stein <LordCnidarian@gmail.com>\n");
+ hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n");
return 0;
}
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 7eef5a2ce948..fa550c8e1d1b 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -101,20 +101,20 @@ int lg4ff_init(struct hid_device *hid)
/* Find the report to use */
if (list_empty(report_list)) {
- err_hid("No output report found");
+ hid_err(hid, "No output report found\n");
return -1;
}
/* Check that the report looks ok */
report = list_entry(report_list->next, struct hid_report, list);
if (!report) {
- err_hid("NULL output report");
+ hid_err(hid, "NULL output report\n");
return -1;
}
field = report->field[0];
if (!field) {
- err_hid("NULL field");
+ hid_err(hid, "NULL field\n");
return -1;
}
@@ -129,8 +129,7 @@ int lg4ff_init(struct hid_device *hid)
if (test_bit(FF_AUTOCENTER, dev->ffbit))
dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
- dev_info(&hid->dev, "Force feedback for Logitech Speed Force Wireless by "
- "Simon Wood <simon@mungewell.org>\n");
+ hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
return 0;
}
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 61142b76a9b1..90d0ef2c92be 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -27,6 +27,8 @@
* e-mail - mail your message to <johann.deneux@it.uu.se>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
@@ -146,7 +148,7 @@ int lgff_init(struct hid_device* hid)
/* Find the report to use */
if (list_empty(report_list)) {
- err_hid("No output report found");
+ hid_err(hid, "No output report found\n");
return -1;
}
@@ -154,7 +156,7 @@ int lgff_init(struct hid_device* hid)
report = list_entry(report_list->next, struct hid_report, list);
field = report->field[0];
if (!field) {
- err_hid("NULL field");
+ hid_err(hid, "NULL field\n");
return -1;
}
@@ -176,7 +178,7 @@ int lgff_init(struct hid_device* hid)
if ( test_bit(FF_AUTOCENTER, dev->ffbit) )
dev->ff->set_autocenter = hid_lgff_set_autocenter;
- printk(KERN_INFO "Force feedback for Logitech force feedback devices by Johann Deneux <johann.deneux@it.uu.se>\n");
+ pr_info("Force feedback for Logitech force feedback devices by Johann Deneux <johann.deneux@it.uu.se>\n");
return 0;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index e6dc15171664..698e6459fd0b 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -12,6 +12,8 @@
* any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -433,6 +435,11 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
if (!msc->input)
msc->input = hi->input;
+ /* Magic Trackpad does not give relative data after switching to MT */
+ if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+ field->flags & HID_MAIN_ITEM_RELATIVE)
+ return -1;
+
return 0;
}
@@ -446,7 +453,7 @@ static int magicmouse_probe(struct hid_device *hdev,
msc = kzalloc(sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
- dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n");
+ hid_err(hdev, "can't alloc magicmouse descriptor\n");
return -ENOMEM;
}
@@ -459,13 +466,13 @@ static int magicmouse_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "magicmouse hid parse failed\n");
+ hid_err(hdev, "magicmouse hid parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "magicmouse hw start failed\n");
+ hid_err(hdev, "magicmouse hw start failed\n");
goto err_free;
}
@@ -486,7 +493,7 @@ static int magicmouse_probe(struct hid_device *hdev,
}
if (!report) {
- dev_err(&hdev->dev, "unable to register touch report\n");
+ hid_err(hdev, "unable to register touch report\n");
ret = -ENOMEM;
goto err_stop_hw;
}
@@ -495,8 +502,7 @@ static int magicmouse_probe(struct hid_device *hdev,
ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
HID_FEATURE_REPORT);
if (ret != sizeof(feature)) {
- dev_err(&hdev->dev, "unable to request touch data (%d)\n",
- ret);
+ hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
@@ -540,7 +546,7 @@ static int __init magicmouse_init(void)
ret = hid_register_driver(&magicmouse_driver);
if (ret)
- printk(KERN_ERR "can't register magicmouse driver\n");
+ pr_err("can't register magicmouse driver\n");
return ret;
}
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index dc618c33d0a2..0f6fc54dc196 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -40,8 +40,7 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
rdesc[559] == 0x29) {
- dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
- "Model 1028 report descriptor\n");
+ hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
rdesc[557] = 0x35;
rdesc[559] = 0x45;
}
@@ -155,14 +154,14 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | ((quirks & MS_HIDINPUT) ?
HID_CONNECT_HIDINPUT_FORCE : 0));
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index c95c31e2d869..dedf757781ae 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -26,8 +26,7 @@ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
- dev_info(&hdev->dev, "fixing up button/consumer in HID report "
- "descriptor\n");
+ hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index ac5421d568f1..aed7ffe36283 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -90,6 +90,10 @@ static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case 0xff000000:
/* ignore HID features */
return -1;
+
+ case HID_UP_BUTTON:
+ /* ignore buttons */
+ return -1;
}
return 0;
@@ -199,7 +203,7 @@ static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id)
td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL);
if (!td) {
- dev_err(&hdev->dev, "cannot allocate MosArt data\n");
+ hid_err(hdev, "cannot allocate MosArt data\n");
return -ENOMEM;
}
td->valid = false;
@@ -230,6 +234,19 @@ static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
+#ifdef CONFIG_PM
+static int mosart_reset_resume(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = hdev->report_enum
+ + HID_FEATURE_REPORT;
+ struct hid_report *r = re->report_id_hash[7];
+
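+	/* the panel loses its configuration across a USB reset; re-send
+	 * feature report 7 with value 0x02 to restore the input mode */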
+ r->field[0]->value[0] = 0x02;
+ usbhid_submit_report(hdev, r, USB_DIR_OUT);
+ return 0;
+}
+#endif
+
static void mosart_remove(struct hid_device *hdev)
{
hid_hw_stop(hdev);
@@ -240,6 +257,7 @@ static void mosart_remove(struct hid_device *hdev)
static const struct hid_device_id mosart_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
{ }
};
MODULE_DEVICE_TABLE(hid, mosart_devices);
@@ -258,6 +276,9 @@ static struct hid_driver mosart_driver = {
.input_mapped = mosart_input_mapped,
.usage_table = mosart_grabbed_usages,
.event = mosart_event,
+#ifdef CONFIG_PM
+ .reset_resume = mosart_reset_resume,
+#endif
};
static int __init mosart_init(void)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
new file mode 100644
index 000000000000..07d3183fdde5
--- /dev/null
+++ b/drivers/hid/hid-multitouch.c
@@ -0,0 +1,516 @@
+/*
+ * HID driver for multitouch panels
+ *
+ * Copyright (c) 2010-2011 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2010-2011 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/input/mt.h>
+#include "usbhid/usbhid.h"
+
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+/* quirks to control the device */
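+/*
+ * NOT_SEEN_MEANS_UP: a contact absent from a frame is reported as released.
+ * SLOT_IS_CONTACTID, SLOT_IS_CONTACTNUMBER, CYPRESS: how a completed contact
+ * is matched to an input slot (see mt_compute_slot() below).
+ * VALID_IS_INRANGE, VALID_IS_CONFIDENCE: which usage marks a contact valid.
+ */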
+#define MT_QUIRK_NOT_SEEN_MEANS_UP (1 << 0)
+#define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1)
+#define MT_QUIRK_CYPRESS (1 << 2)
+#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
+#define MT_QUIRK_VALID_IS_INRANGE (1 << 4)
+#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5)
+
+struct mt_slot {
+ __s32 x, y, p, w, h;
+ __s32 contactid; /* the device ContactID assigned to this slot */
+ bool touch_state; /* is the touch valid? */
+ bool seen_in_this_frame;/* has this slot been updated */
+};
+
+struct mt_device {
+ struct mt_slot curdata; /* placeholder of incoming data */
+ struct mt_class *mtclass; /* our mt device class */
+ unsigned last_field_index; /* last field index of the report */
+ unsigned last_slot_field; /* the last field of a slot */
+ __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __u8 num_received; /* how many contacts we received */
+ __u8 num_expected; /* expected last contact index */
+ bool curvalid; /* is the current contact valid? */
+ struct mt_slot slots[0]; /* first slot */
+};
+
+struct mt_class {
+ __s32 name; /* MT_CLS */
+ __s32 quirks;
+ __s32 sn_move; /* Signal/noise ratio for move events */
+ __s32 sn_pressure; /* Signal/noise ratio for pressure events */
+ __u8 maxcontacts;
+};
+
+/* classes of device behavior */
+#define MT_CLS_DEFAULT 1
+#define MT_CLS_DUAL1 2
+#define MT_CLS_DUAL2 3
+#define MT_CLS_CYPRESS 4
+
+/*
+ * these device-dependent functions determine what slot corresponds
+ * to a valid contact that was just read.
+ */
+
+static int cypress_compute_slot(struct mt_device *td)
+{
+ if (td->curdata.contactid != 0 || td->num_received == 0)
+ return td->curdata.contactid;
+ else
+ return -1;
+}
+
+static int find_slot_from_contactid(struct mt_device *td)
+{
+ int i;
+ for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+ if (td->slots[i].contactid == td->curdata.contactid &&
+ td->slots[i].touch_state)
+ return i;
+ }
+ for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+ if (!td->slots[i].seen_in_this_frame &&
+ !td->slots[i].touch_state)
+ return i;
+ }
+	/* This should not occur. If it does, the device sent more
+	 * touches than it declared in its report descriptor and the
+	 * extra contact is simply ignored. */
+ return -1;
+}
+
+static struct mt_class mt_classes[] = {
+ { .name = MT_CLS_DEFAULT,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE,
+ .maxcontacts = 10 },
+ { .name = MT_CLS_DUAL1,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
+ { .name = MT_CLS_DUAL2,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ .maxcontacts = 2 },
+ { .name = MT_CLS_CYPRESS,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_CYPRESS,
+ .maxcontacts = 10 },
+
+ { }
+};
+
+static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ if (usage->hid == HID_DG_INPUTMODE) {
+ struct mt_device *td = hid_get_drvdata(hdev);
+ td->inputmode = field->report->id;
+ }
+}
+
+static void set_abs(struct input_dev *input, unsigned int code,
+ struct hid_field *field, int snratio)
+{
+ int fmin = field->logical_minimum;
+ int fmax = field->logical_maximum;
+ int fuzz = snratio ? (fmax - fmin) / snratio : 0;
+ input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+}
+
+static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_class *cls = td->mtclass;
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ set_abs(hi->input, ABS_MT_POSITION_X, field,
+ cls->sn_move);
+ /* touchscreen emulation */
+ set_abs(hi->input, ABS_X, field, cls->sn_move);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ set_abs(hi->input, ABS_MT_POSITION_Y, field,
+ cls->sn_move);
+ /* touchscreen emulation */
+ set_abs(hi->input, ABS_Y, field, cls->sn_move);
+ td->last_slot_field = usage->hid;
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_CONFIDENCE:
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_TIPSWITCH:
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_CONTACTID:
+ input_mt_init_slots(hi->input,
+ td->mtclass->maxcontacts);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ field->logical_maximum = 1;
+ field->logical_minimum = 0;
+ set_abs(hi->input, ABS_MT_ORIENTATION, field, 0);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_TIPPRESSURE:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_PRESSURE);
+ set_abs(hi->input, ABS_MT_PRESSURE, field,
+ cls->sn_pressure);
+ /* touchscreen emulation */
+ set_abs(hi->input, ABS_PRESSURE, field,
+ cls->sn_pressure);
+ td->last_slot_field = usage->hid;
+ return 1;
+ case HID_DG_CONTACTCOUNT:
+ td->last_field_index = field->report->maxfield - 1;
+ return 1;
+ case HID_DG_CONTACTMAX:
+			/* we don't set td->last_slot_field because
+			 * contactcount and contactmax are global to
+			 * the report */
+ return -1;
+ }
+ /* let hid-input decide for the others */
+ return 0;
+
+ case 0xff000000:
+ /* we do not want to map these: no input-oriented meaning */
+ return -1;
+ }
+
+ return 0;
+}
+
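+/* returning -1 keeps hid-input from generating events for these usages
+ * itself; mt_event() below handles them instead */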
+static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ set_bit(usage->type, hi->input->evbit);
+
+ return -1;
+}
+
+static int mt_compute_slot(struct mt_device *td)
+{
+ __s32 quirks = td->mtclass->quirks;
+
+ if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
+ return td->curdata.contactid;
+
+ if (quirks & MT_QUIRK_CYPRESS)
+ return cypress_compute_slot(td);
+
+ if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
+ return td->num_received;
+
+ return find_slot_from_contactid(td);
+}
+
+/*
+ * this function is called when a whole contact has been processed,
+ * so that it can assign it to a slot and store the data there
+ */
+static void mt_complete_slot(struct mt_device *td)
+{
+ td->curdata.seen_in_this_frame = true;
+ if (td->curvalid) {
+ int slotnum = mt_compute_slot(td);
+
+ if (slotnum >= 0 && slotnum < td->mtclass->maxcontacts)
+ td->slots[slotnum] = td->curdata;
+ }
+ td->num_received++;
+}
+
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mt_emit_event(struct mt_device *td, struct input_dev *input)
+{
+ int i;
+
+ for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+ struct mt_slot *s = &(td->slots[i]);
+ if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+ !s->seen_in_this_frame) {
+ s->touch_state = false;
+ }
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ s->touch_state);
+ if (s->touch_state) {
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, s->w);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, s->h);
+ }
+ s->seen_in_this_frame = false;
+
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+ input_sync(input);
+ td->num_received = 0;
+}
+
+
+
+static int mt_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct mt_device *td = hid_get_drvdata(hid);
+ __s32 quirks = td->mtclass->quirks;
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ td->curvalid = value;
+ break;
+ case HID_DG_TIPSWITCH:
+ if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+ td->curvalid = value;
+ td->curdata.touch_state = value;
+ break;
+ case HID_DG_CONFIDENCE:
+ if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+ td->curvalid = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->curdata.contactid = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ td->curdata.p = value;
+ break;
+ case HID_GD_X:
+ td->curdata.x = value;
+ break;
+ case HID_GD_Y:
+ td->curdata.y = value;
+ break;
+ case HID_DG_WIDTH:
+ td->curdata.w = value;
+ break;
+ case HID_DG_HEIGHT:
+ td->curdata.h = value;
+ break;
+ case HID_DG_CONTACTCOUNT:
+ /*
+ * Includes multi-packet support where subsequent
+ * packets are sent with zero contactcount.
+ */
+ if (value)
+ td->num_expected = value;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+
+ if (usage->hid == td->last_slot_field)
+ mt_complete_slot(td);
+
+ if (field->index == td->last_field_index
+ && td->num_received >= td->num_expected)
+ mt_emit_event(td, field->hidinput->input);
+
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static void mt_set_input_mode(struct hid_device *hdev)
+{
+ struct mt_device *td = hid_get_drvdata(hdev);
+ struct hid_report *r;
+ struct hid_report_enum *re;
+
+ if (td->inputmode < 0)
+ return;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[td->inputmode];
+ if (r) {
+ r->field[0]->value[0] = 0x02;
+ usbhid_submit_report(hdev, r, USB_DIR_OUT);
+ }
+}
+
+static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret, i;
+ struct mt_device *td;
+ struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+
+ for (i = 0; mt_classes[i].name ; i++) {
+ if (id->driver_data == mt_classes[i].name) {
+ mtclass = &(mt_classes[i]);
+ break;
+ }
+ }
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ td = kzalloc(sizeof(struct mt_device) +
+ mtclass->maxcontacts * sizeof(struct mt_slot),
+ GFP_KERNEL);
+ if (!td) {
+		hid_err(hdev, "cannot allocate multitouch data\n");
+ return -ENOMEM;
+ }
+ td->mtclass = mtclass;
+ td->inputmode = -1;
+ hid_set_drvdata(hdev, td);
+
+ ret = hid_parse(hdev);
+ if (ret != 0)
+ goto fail;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ goto fail;
+
+ mt_set_input_mode(hdev);
+
+ return 0;
+
+fail:
+ kfree(td);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int mt_reset_resume(struct hid_device *hdev)
+{
+ mt_set_input_mode(hdev);
+ return 0;
+}
+#endif
+
+static void mt_remove(struct hid_device *hdev)
+{
+ struct mt_device *td = hid_get_drvdata(hdev);
+ hid_hw_stop(hdev);
+ kfree(td);
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mt_devices[] = {
+
+ /* Cypress panel */
+ { .driver_data = MT_CLS_CYPRESS,
+ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
+ USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+
+ /* GeneralTouch panel */
+ { .driver_data = MT_CLS_DUAL2,
+ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+ USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+
+ /* PixCir-based panels */
+ { .driver_data = MT_CLS_DUAL1,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+ USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { .driver_data = MT_CLS_DUAL1,
+ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mt_devices);
+
+static const struct hid_usage_id mt_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mt_driver = {
+ .name = "hid-multitouch",
+ .id_table = mt_devices,
+ .probe = mt_probe,
+ .remove = mt_remove,
+ .input_mapping = mt_input_mapping,
+ .input_mapped = mt_input_mapped,
+ .feature_mapping = mt_feature_mapping,
+ .usage_table = mt_grabbed_usages,
+ .event = mt_event,
+#ifdef CONFIG_PM
+ .reset_resume = mt_reset_resume,
+#endif
+};
+
+static int __init mt_init(void)
+{
+ return hid_register_driver(&mt_driver);
+}
+
+static void __exit mt_exit(void)
+{
+ hid_unregister_driver(&mt_driver);
+}
+
+module_init(mt_init);
+module_exit(mt_exit);
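Note: hid-multitouch is structured so that supporting an additional panel normally only needs a new mt_devices entry (plus the corresponding IDs in hid-ids.h), picking whichever mt_class matches the device's report behaviour. The identifiers below are hypothetical placeholders, not real IDs:

	/* hypothetical example entry; FOOVENDOR/FOOPANEL are placeholders */
	{ .driver_data = MT_CLS_DEFAULT,
		HID_USB_DEVICE(USB_VENDOR_ID_FOOVENDOR,
			USB_DEVICE_ID_FOOVENDOR_FOOPANEL) },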
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 69169efa1e16..beb403421e72 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -130,8 +130,7 @@ static void ntrig_report_version(struct hid_device *hdev)
if (ret == 8) {
ret = ntrig_version_string(&data[2], buf);
- dev_info(&hdev->dev,
- "Firmware version: %s (%02x%02x %02x%02x)\n",
+ hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
buf, data[2], data[3], data[4], data[5]);
}
@@ -831,7 +830,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
if (!nd) {
- dev_err(&hdev->dev, "cannot allocate N-Trig data\n");
+ hid_err(hdev, "cannot allocate N-Trig data\n");
return -ENOMEM;
}
@@ -850,13 +849,13 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 2e79716dca31..e90edfc63051 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -23,8 +23,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
- dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 "
- "report descriptor.\n");
+ hid_info(hdev, "Fixing up Ortek WKB-2000 report descriptor\n");
rdesc[55] = 0x92;
}
return rdesc;
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 308d6ae48a3e..f1ea3ff8a98d 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -29,8 +29,7 @@ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
- dev_info(&hdev->dev, "fixing up Petalynx Maxter Remote report "
- "descriptor\n");
+ hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
rdesc[60] = 0xfa;
rdesc[40] = 0xfa;
}
@@ -77,13 +76,13 @@ static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index bc2e07740628..de9cf21b3494 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -253,7 +253,7 @@ static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int di
if (report->id == id)
return report;
}
- dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+ hid_warn(hdev, "No report with id 0x%x found\n", id);
return NULL;
}
@@ -1329,7 +1329,7 @@ static int picolcd_check_version(struct hid_device *hdev)
verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
if (!verinfo) {
- dev_err(&hdev->dev, "no version response from PicoLCD");
+ hid_err(hdev, "no version response from PicoLCD\n");
return -ENODEV;
}
@@ -1337,14 +1337,14 @@ static int picolcd_check_version(struct hid_device *hdev)
data->version[0] = verinfo->raw_data[1];
data->version[1] = verinfo->raw_data[0];
if (data->status & PICOLCD_BOOTLOADER) {
- dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
- verinfo->raw_data[1], verinfo->raw_data[0]);
+ hid_info(hdev, "PicoLCD, bootloader version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
} else {
- dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
- verinfo->raw_data[1], verinfo->raw_data[0]);
+ hid_info(hdev, "PicoLCD, firmware version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
}
} else {
- dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+ hid_err(hdev, "confused, got unexpected version response from PicoLCD\n");
ret = -EINVAL;
}
kfree(verinfo);
@@ -1544,7 +1544,7 @@ static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
/* prepare buffer with info about what we want to read (addr & len) */
raw_data[0] = *off & 0xff;
- raw_data[1] = (*off >> 8) && 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
raw_data[2] = s < 20 ? s : 20;
if (*off + raw_data[2] > 0xff)
raw_data[2] = 0x100 - *off;
@@ -1583,7 +1583,7 @@ static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
memset(raw_data, 0, sizeof(raw_data));
raw_data[0] = *off & 0xff;
- raw_data[1] = (*off >> 8) && 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
raw_data[2] = s < 20 ? s : 20;
if (*off + raw_data[2] > 0xff)
raw_data[2] = 0x100 - *off;
@@ -1867,6 +1867,7 @@ static void picolcd_debug_out_report(struct picolcd_data *data,
report->id, raw_size);
hid_debug_event(hdev, buff);
if (raw_size + 5 > sizeof(raw_data)) {
+ kfree(buff);
hid_debug_event(hdev, " TOO BIG\n");
return;
} else {
@@ -2328,8 +2329,7 @@ static void picolcd_init_devfs(struct picolcd_data *data,
(flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
hdev->debug_dir, data, &picolcd_debug_flash_fops);
} else if (flash_r || flash_w)
- dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
- "please submit rdesc for review\n");
+ hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n");
}
static void picolcd_exit_devfs(struct picolcd_data *data)
@@ -2457,13 +2457,13 @@ static int picolcd_init_keys(struct picolcd_data *data,
return -ENODEV;
if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
report->field[0]->report_size != 8) {
- dev_err(&hdev->dev, "unsupported KEY_STATE report");
+ hid_err(hdev, "unsupported KEY_STATE report\n");
return -EINVAL;
}
idev = input_allocate_device();
if (idev == NULL) {
- dev_err(&hdev->dev, "failed to allocate input device");
+ hid_err(hdev, "failed to allocate input device\n");
return -ENOMEM;
}
input_set_drvdata(idev, hdev);
@@ -2485,7 +2485,7 @@ static int picolcd_init_keys(struct picolcd_data *data,
input_set_capability(idev, EV_KEY, data->keycode[i]);
error = input_register_device(idev);
if (error) {
- dev_err(&hdev->dev, "error registering the input device");
+ hid_err(hdev, "error registering the input device\n");
input_free_device(idev);
return error;
}
@@ -2522,9 +2522,8 @@ static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
return error;
if (data->version[0] != 0 && data->version[1] != 3)
- dev_info(&hdev->dev, "Device with untested firmware revision, "
- "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
- dev_name(&hdev->dev));
+ hid_info(hdev, "Device with untested firmware revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
/* Setup keypad input device */
error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
@@ -2581,9 +2580,8 @@ static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data
return error;
if (data->version[0] != 1 && data->version[1] != 0)
- dev_info(&hdev->dev, "Device with untested bootloader revision, "
- "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
- dev_name(&hdev->dev));
+ hid_info(hdev, "Device with untested bootloader revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
picolcd_init_devfs(data, NULL, NULL,
picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
@@ -2605,7 +2603,7 @@ static int picolcd_probe(struct hid_device *hdev,
*/
data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
if (data == NULL) {
- dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+ hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n");
error = -ENOMEM;
goto err_no_cleanup;
}
@@ -2621,7 +2619,7 @@ static int picolcd_probe(struct hid_device *hdev,
/* Parse the device reports and start it up */
error = hid_parse(hdev);
if (error) {
- dev_err(&hdev->dev, "device report parse failed\n");
+ hid_err(hdev, "device report parse failed\n");
goto err_cleanup_data;
}
@@ -2631,25 +2629,25 @@ static int picolcd_probe(struct hid_device *hdev,
error = hid_hw_start(hdev, 0);
hdev->claimed = 0;
if (error) {
- dev_err(&hdev->dev, "hardware start failed\n");
+ hid_err(hdev, "hardware start failed\n");
goto err_cleanup_data;
}
- error = hdev->ll_driver->open(hdev);
+ error = hid_hw_open(hdev);
if (error) {
- dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+ hid_err(hdev, "failed to open input interrupt pipe for key and IR events\n");
goto err_cleanup_hid_hw;
}
error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
if (error) {
- dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ hid_err(hdev, "failed to create sysfs attributes\n");
goto err_cleanup_hid_ll;
}
error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
if (error) {
- dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ hid_err(hdev, "failed to create sysfs attributes\n");
goto err_cleanup_sysfs1;
}
@@ -2668,7 +2666,7 @@ err_cleanup_sysfs2:
err_cleanup_sysfs1:
device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
err_cleanup_hid_ll:
- hdev->ll_driver->close(hdev);
+ hid_hw_close(hdev);
err_cleanup_hid_hw:
hid_hw_stop(hdev);
err_cleanup_data:
@@ -2699,7 +2697,7 @@ static void picolcd_remove(struct hid_device *hdev)
picolcd_exit_devfs(data);
device_remove_file(&hdev->dev, &dev_attr_operation_mode);
device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
- hdev->ll_driver->close(hdev);
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
hid_set_drvdata(hdev, NULL);
@@ -2753,7 +2751,7 @@ static void __exit picolcd_exit(void)
{
hid_unregister_driver(&picolcd_driver);
#ifdef CONFIG_HID_PICOLCD_FB
- flush_scheduled_work();
+ flush_work_sync(&picolcd_fb_cleanup);
WARN_ON(fb_pending);
#endif
}
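Note: the two one-character picolcd changes above replace a logical AND with a bitwise AND when building the EEPROM offset bytes; (*off >> 8) && 0xff evaluates to 0 or 1 rather than to the high byte of the offset. A minimal illustration of the difference:

	/* why '&& 0xff' was wrong for extracting the high byte */
	loff_t off = 0x2a2;
	unsigned char truth  = (off >> 8) && 0xff;	/* 1: logical AND, truth value */
	unsigned char masked = (off >> 8) & 0xff;	/* 2: bitwise AND, actual high byte */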
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 9f41e2bd8483..06e5300d43d2 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -103,7 +103,7 @@ static int plff_init(struct hid_device *hid)
*/
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output reports found\n");
+ hid_err(hid, "no output reports found\n");
return -ENODEV;
}
@@ -112,14 +112,13 @@ static int plff_init(struct hid_device *hid)
report_ptr = report_ptr->next;
if (report_ptr == report_list) {
- dev_err(&hid->dev, "required output report is "
- "missing\n");
+ hid_err(hid, "required output report is missing\n");
return -ENODEV;
}
report = list_entry(report_ptr, struct hid_report, list);
if (report->maxfield < 1) {
- dev_err(&hid->dev, "no fields in the report\n");
+ hid_err(hid, "no fields in the report\n");
return -ENODEV;
}
@@ -137,7 +136,7 @@ static int plff_init(struct hid_device *hid)
weak = &report->field[3]->value[0];
debug("detected 4-field device");
} else {
- dev_err(&hid->dev, "not enough fields or values\n");
+ hid_err(hid, "not enough fields or values\n");
return -ENODEV;
}
@@ -164,8 +163,7 @@ static int plff_init(struct hid_device *hid)
usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
}
- dev_info(&hid->dev, "Force feedback for PantherLord/GreenAsia "
- "devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(hid, "Force feedback for PantherLord/GreenAsia devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
}
@@ -185,13 +183,13 @@ static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 48eab84f53b5..ab19f2905d27 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -16,6 +16,8 @@
* any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -130,7 +132,7 @@ static ssize_t store_channel(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(channel, S_IRUGO | S_IWUGO, show_channel,
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP, show_channel,
store_channel);
static struct device_attribute *sysfs_device_attr_channel = {
@@ -169,7 +171,7 @@ static ssize_t store_sustain(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(sustain, S_IRUGO | S_IWUGO, show_sustain,
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain,
store_sustain);
static struct device_attribute *sysfs_device_attr_sustain = {
@@ -207,7 +209,7 @@ static ssize_t store_octave(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(octave, S_IRUGO | S_IWUGO, show_octave,
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave,
store_octave);
static struct device_attribute *sysfs_device_attr_octave = {
@@ -285,11 +287,11 @@ static int pcmidi_get_output_report(struct pcmidi_snd *pm)
continue;
if (report->maxfield < 1) {
- dev_err(&hdev->dev, "output report is empty\n");
+ hid_err(hdev, "output report is empty\n");
break;
}
if (report->field[0]->report_count != 2) {
- dev_err(&hdev->dev, "field count too low\n");
+ hid_err(hdev, "field count too low\n");
break;
}
pm->pcmidi_report6 = report;
@@ -746,8 +748,8 @@ static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (*rsize == 178 &&
rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
rdesc[113] == 0xff) {
- dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
- "descriptor\n");
+ hid_info(hdev,
+ "fixing up pc-midi keyboard report descriptor\n");
rdesc[144] = 0x18; /* report 4: was 0x10 report count */
}
@@ -805,7 +807,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
if (pk == NULL) {
- dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+ hid_err(hdev, "can't alloc descriptor\n");
return -ENOMEM;
}
@@ -813,8 +815,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
pm = kzalloc(sizeof(*pm), GFP_KERNEL);
if (pm == NULL) {
- dev_err(&hdev->dev,
- "prodikeys: can't alloc descriptor\n");
+ hid_err(hdev, "can't alloc descriptor\n");
ret = -ENOMEM;
goto err_free;
}
@@ -827,7 +828,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+ hid_err(hdev, "hid parse failed\n");
goto err_free;
}
@@ -837,7 +838,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
@@ -896,7 +897,7 @@ static int pk_init(void)
ret = hid_register_driver(&pk_driver);
if (ret)
- printk(KERN_ERR "can't register prodikeys driver\n");
+ pr_err("can't register prodikeys driver\n");
return ret;
}
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
index 54d3db50605b..87a54df4d4ac 100644
--- a/drivers/hid/hid-quanta.c
+++ b/drivers/hid/hid-quanta.c
@@ -195,7 +195,7 @@ static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
if (!td) {
- dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n");
+ hid_err(hdev, "cannot allocate Quanta Touch data\n");
return -ENOMEM;
}
td->valid = false;
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index f77695762cb5..cbd8cc42e75a 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -35,6 +35,11 @@
#include "hid-roccat.h"
#include "hid-roccat-kone.h"
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+/* kone_class is used for creating sysfs attributes via roccat char device */
+static struct class *kone_class;
+
static void kone_set_settings_checksum(struct kone_settings *settings)
{
uint16_t checksum = 0;
@@ -90,8 +95,7 @@ static int kone_check_write(struct usb_device *usb_dev)
kfree(data);
return 0;
} else { /* unknown answer */
- dev_err(&usb_dev->dev, "got retval %d when checking write\n",
- *data);
+ hid_err(usb_dev, "got retval %d when checking write\n", *data);
kfree(data);
return -EIO;
}
@@ -262,7 +266,8 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kone_settings))
@@ -286,7 +291,8 @@ static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference;
@@ -319,10 +325,11 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
return sizeof(struct kone_settings);
}
-static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, int number) {
- struct device *dev = container_of(kobj, struct device, kobj);
+static ssize_t kone_sysfs_read_profilex(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count) {
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct kone_profile))
@@ -332,47 +339,18 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
count = sizeof(struct kone_profile) - off;
mutex_lock(&kone->kone_lock);
- memcpy(buf, ((char const *)&kone->profiles[number - 1]) + off, count);
+ memcpy(buf, ((char const *)&kone->profiles[*(uint *)(attr->private)]) + off, count);
mutex_unlock(&kone->kone_lock);
return count;
}
-static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
-}
-
-static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
-}
-
-static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
-}
-
-static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
-}
-
-static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
-}
-
/* Writes data only if different to stored data */
-static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, int number) {
- struct device *dev = container_of(kobj, struct device, kobj);
+static ssize_t kone_sysfs_write_profilex(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count) {
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
struct kone_profile *profile;
@@ -382,13 +360,14 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
if (off != 0 || count != sizeof(struct kone_profile))
return -EINVAL;
- profile = &kone->profiles[number - 1];
+ profile = &kone->profiles[*(uint *)(attr->private)];
mutex_lock(&kone->kone_lock);
difference = memcmp(buf, profile, sizeof(struct kone_profile));
if (difference) {
retval = kone_set_profile(usb_dev,
- (struct kone_profile const *)buf, number);
+ (struct kone_profile const *)buf,
+ *(uint *)(attr->private) + 1);
if (!retval)
memcpy(profile, buf, sizeof(struct kone_profile));
}
@@ -400,47 +379,19 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
return sizeof(struct kone_profile);
}
-static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
-}
-
-static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
-}
-
-static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
-}
-
-static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
-}
-
-static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count) {
- return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
-}
-
static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct kone_device *kone =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
}
static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct kone_device *kone =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
}
@@ -448,11 +399,15 @@ static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
static ssize_t kone_sysfs_show_weight(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_device *kone;
+ struct usb_device *usb_dev;
int weight = 0;
int retval;
+ dev = dev->parent->parent;
+ kone = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
mutex_lock(&kone->kone_lock);
retval = kone_get_weight(usb_dev, &weight);
mutex_unlock(&kone->kone_lock);
@@ -465,14 +420,16 @@ static ssize_t kone_sysfs_show_weight(struct device *dev,
static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct kone_device *kone =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
}
static ssize_t kone_sysfs_show_tcu(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct kone_device *kone =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
}
@@ -504,11 +461,15 @@ static int kone_tcu_command(struct usb_device *usb_dev, int number)
static ssize_t kone_sysfs_set_tcu(struct device *dev,
struct device_attribute *attr, char const *buf, size_t size)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_device *kone;
+ struct usb_device *usb_dev;
int retval;
unsigned long state;
+ dev = dev->parent->parent;
+ kone = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
retval = strict_strtoul(buf, 10, &state);
if (retval)
return retval;
@@ -556,7 +517,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
- dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+ hid_err(usb_dev, "couldn't set tcu state\n");
/*
* try to reread valid settings into buffer overwriting
* first error code
@@ -570,7 +531,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = size;
exit_no_settings:
- dev_err(&usb_dev->dev, "couldn't read settings\n");
+ hid_err(usb_dev, "couldn't read settings\n");
exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
@@ -579,18 +540,23 @@ exit_unlock:
static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct kone_device *kone =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
}
static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
struct device_attribute *attr, char const *buf, size_t size)
{
- struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_device *kone;
+ struct usb_device *usb_dev;
int retval;
unsigned long new_startup_profile;
+ dev = dev->parent->parent;
+ kone = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
retval = strict_strtoul(buf, 10, &new_startup_profile);
if (retval)
return retval;
@@ -617,160 +583,92 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
return size;
}
-/*
- * Read actual dpi settings.
- * Returns raw value for further processing. Refer to enum kone_polling_rates to
- * get real value.
- */
-static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
-
-static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
-
-/*
- * The mouse can be equipped with one of four supplied weights from 5 to 20
- * grams which are recognized and its value can be read out.
- * This returns the raw value reported by the mouse for easy evaluation by
- * software. Refer to enum kone_weights to get corresponding real weight.
- */
-static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
-
-/*
- * Prints firmware version stored in mouse as integer.
- * The raw value reported by the mouse is returned for easy evaluation, to get
- * the real version number the decimal point has to be shifted 2 positions to
- * the left. E.g. a value of 138 means 1.38.
- */
-static DEVICE_ATTR(firmware_version, 0440,
- kone_sysfs_show_firmware_version, NULL);
-
-/*
- * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
- * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
- */
-static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
-
-/* Prints and takes the number of the profile the mouse starts with */
-static DEVICE_ATTR(startup_profile, 0660,
- kone_sysfs_show_startup_profile,
- kone_sysfs_set_startup_profile);
-
-static struct attribute *kone_attributes[] = {
- &dev_attr_actual_dpi.attr,
- &dev_attr_actual_profile.attr,
- &dev_attr_weight.attr,
- &dev_attr_firmware_version.attr,
- &dev_attr_tcu.attr,
- &dev_attr_startup_profile.attr,
- NULL
-};
-
-static struct attribute_group kone_attribute_group = {
- .attrs = kone_attributes
-};
-
-static struct bin_attribute kone_settings_attr = {
- .attr = { .name = "settings", .mode = 0660 },
- .size = sizeof(struct kone_settings),
- .read = kone_sysfs_read_settings,
- .write = kone_sysfs_write_settings
-};
+static struct device_attribute kone_attributes[] = {
+ /*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum
+ * kone_polling_rates to get real value.
+ */
+ __ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL),
+ __ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL),
-static struct bin_attribute kone_profile1_attr = {
- .attr = { .name = "profile1", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profile1,
- .write = kone_sysfs_write_profile1
-};
+ /*
+	 * The mouse can be equipped with one of four supplied weights from 5
+	 * to 20 grams, which it recognizes and whose value can be read out.
+	 * This returns the raw value reported by the mouse for easy evaluation
+	 * by software. Refer to enum kone_weights to get the corresponding
+	 * real weight.
+ */
+ __ATTR(weight, 0440, kone_sysfs_show_weight, NULL),
-static struct bin_attribute kone_profile2_attr = {
- .attr = { .name = "profile2", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profile2,
- .write = kone_sysfs_write_profile2
-};
+ /*
+ * Prints firmware version stored in mouse as integer.
+	 * The raw value reported by the mouse is returned for easy evaluation;
+	 * to get the real version number, shift the decimal point two
+	 * positions to the left. E.g. a value of 138 means 1.38.
+ */
+ __ATTR(firmware_version, 0440,
+ kone_sysfs_show_firmware_version, NULL),
-static struct bin_attribute kone_profile3_attr = {
- .attr = { .name = "profile3", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profile3,
- .write = kone_sysfs_write_profile3
-};
+ /*
+ * Prints state of Tracking Control Unit as number where 0 = off and
+ * 1 = on. Writing 0 deactivates tcu and writing 1 calibrates and
+	 * activates the tcu.
+ */
+ __ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu),
-static struct bin_attribute kone_profile4_attr = {
- .attr = { .name = "profile4", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profile4,
- .write = kone_sysfs_write_profile4
+ /* Prints and takes the number of the profile the mouse starts with */
+ __ATTR(startup_profile, 0660,
+ kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile),
+ __ATTR_NULL
};
-static struct bin_attribute kone_profile5_attr = {
- .attr = { .name = "profile5", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profile5,
- .write = kone_sysfs_write_profile5
+static struct bin_attribute kone_bin_attributes[] = {
+ {
+ .attr = { .name = "settings", .mode = 0660 },
+ .size = sizeof(struct kone_settings),
+ .read = kone_sysfs_read_settings,
+ .write = kone_sysfs_write_settings
+ },
+ {
+ .attr = { .name = "profile1", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profilex,
+ .write = kone_sysfs_write_profilex,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profilex,
+ .write = kone_sysfs_write_profilex,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profilex,
+ .write = kone_sysfs_write_profilex,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profilex,
+ .write = kone_sysfs_write_profilex,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profilex,
+ .write = kone_sysfs_write_profilex,
+ .private = &profile_numbers[4]
+ },
+ __ATTR_NULL
};
-static int kone_create_sysfs_attributes(struct usb_interface *intf)
-{
- int retval;
-
- retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
- if (retval)
- goto exit_1;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
- if (retval)
- goto exit_2;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
- if (retval)
- goto exit_3;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
- if (retval)
- goto exit_4;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
- if (retval)
- goto exit_5;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
- if (retval)
- goto exit_6;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
- if (retval)
- goto exit_7;
-
- return 0;
-
-exit_7:
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
-exit_6:
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
-exit_5:
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
-exit_4:
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
-exit_3:
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
-exit_2:
- sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
-exit_1:
- return retval;
-}
-
-static void kone_remove_sysfs_attributes(struct usb_interface *intf)
-{
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
- sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
-}
-
static int kone_init_kone_device_struct(struct usb_device *usb_dev,
struct kone_device *kone)
{
@@ -818,32 +716,25 @@ static int kone_init_specials(struct hid_device *hdev)
kone = kzalloc(sizeof(*kone), GFP_KERNEL);
if (!kone) {
- dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, kone);
retval = kone_init_kone_device_struct(usb_dev, kone);
if (retval) {
- dev_err(&hdev->dev,
- "couldn't init struct kone_device\n");
+ hid_err(hdev, "couldn't init struct kone_device\n");
goto exit_free;
}
- retval = roccat_connect(hdev);
+ retval = roccat_connect(kone_class, hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "couldn't init char dev\n");
+ hid_err(hdev, "couldn't init char dev\n");
/* be tolerant about not getting chrdev */
} else {
kone->roccat_claimed = 1;
kone->chrdev_minor = retval;
}
-
- retval = kone_create_sysfs_attributes(intf);
- if (retval) {
- dev_err(&hdev->dev, "cannot create sysfs files\n");
- goto exit_free;
- }
} else {
hid_set_drvdata(hdev, NULL);
}
@@ -854,7 +745,6 @@ exit_free:
return retval;
}
-
static void kone_remove_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -862,7 +752,6 @@ static void kone_remove_specials(struct hid_device *hdev)
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
- kone_remove_sysfs_attributes(intf);
kone = hid_get_drvdata(hdev);
if (kone->roccat_claimed)
roccat_disconnect(kone->chrdev_minor);
@@ -876,19 +765,19 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
retval = hid_parse(hdev);
if (retval) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = kone_init_specials(hdev);
if (retval) {
- dev_err(&hdev->dev, "couldn't install mouse\n");
+ hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
@@ -1006,11 +895,24 @@ static struct hid_driver kone_driver = {
static int __init kone_init(void)
{
- return hid_register_driver(&kone_driver);
+ int retval;
+
+ /* class name has to be same as driver name */
+ kone_class = class_create(THIS_MODULE, "kone");
+ if (IS_ERR(kone_class))
+ return PTR_ERR(kone_class);
+ kone_class->dev_attrs = kone_attributes;
+ kone_class->dev_bin_attrs = kone_bin_attributes;
+
+ retval = hid_register_driver(&kone_driver);
+ if (retval)
+ class_destroy(kone_class);
+ return retval;
}
static void __exit kone_exit(void)
{
+ class_destroy(kone_class);
hid_unregister_driver(&kone_driver);
}
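
The hunks above are the heart of this change: the hand-rolled sysfs_create_group()/sysfs_create_bin_file() error ladder on the USB interface is dropped in favour of attribute arrays hung off a driver-private class, so every device registered through roccat_connect() gets the files automatically and loses them when it goes away. Below is a minimal sketch of that pattern, relying only on the older struct class dev_attrs/dev_bin_attrs hooks this patch itself uses; the example_* identifiers are illustrative and not part of the patch.

static ssize_t example_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static struct device_attribute example_attributes[] = {
	__ATTR(example, 0440, example_show, NULL),
	__ATTR_NULL
};

static struct class *example_class;

static int __init example_init(void)
{
	/* class name has to match the driver name, as noted above */
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);
	/* every device_create()d device of this class gets these files */
	example_class->dev_attrs = example_attributes;
	return 0;
}
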
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 130d6566ea82..64abb5b8a59a 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -14,14 +14,11 @@
#include <linux/types.h>
-#pragma pack(push)
-#pragma pack(1)
-
struct kone_keystroke {
uint8_t key;
uint8_t action;
uint16_t period; /* in milliseconds */
-};
+} __attribute__ ((__packed__));
enum kone_keystroke_buttons {
kone_keystroke_button_1 = 0xf0, /* left mouse button */
@@ -44,7 +41,7 @@ struct kone_button_info {
uint8_t macro_name[16]; /* can be max 15 chars long */
uint8_t count;
struct kone_keystroke keystrokes[20];
-};
+} __attribute__ ((__packed__));
enum kone_button_info_types {
/* valid button types until firmware 1.32 */
@@ -95,7 +92,7 @@ struct kone_light_info {
uint8_t red; /* range 0x00-0xff */
uint8_t green; /* range 0x00-0xff */
uint8_t blue; /* range 0x00-0xff */
-};
+} __attribute__ ((__packed__));
struct kone_profile {
uint16_t size; /* always 975 */
@@ -130,7 +127,7 @@ struct kone_profile {
struct kone_button_info button_infos[8];
uint16_t checksum; /* \brief holds checksum of struct */
-};
+} __attribute__ ((__packed__));
enum kone_polling_rates {
kone_polling_rate_125 = 1,
@@ -147,7 +144,7 @@ struct kone_settings {
uint8_t calibration_data[4];
uint8_t unknown3[2];
uint16_t checksum;
-};
+} __attribute__ ((__packed__));
/*
* 12 byte mouse event read by interrupt_read
@@ -163,7 +160,7 @@ struct kone_mouse_event {
uint8_t event;
uint8_t value; /* press = 0, release = 1 */
uint8_t macro_key; /* 0 to 8 */
-};
+} __attribute__ ((__packed__));
enum kone_mouse_events {
/* osd events are thought to be display on screen */
@@ -191,9 +188,7 @@ struct kone_roccat_report {
uint8_t event;
uint8_t value; /* holds dpi or profile value */
uint8_t key; /* macro key on overlong macro execution */
-};
-
-#pragma pack(pop)
+} __attribute__ ((__packed__));
struct kone_device {
/*
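
The header hunk above swaps the compiler-specific #pragma pack(push,1)/#pragma pack(pop) bracket for a per-struct __attribute__ ((__packed__)), the form preferred elsewhere in the kernel. Either way, the point is that these wire structs must carry no padding so their sizeof matches the fixed-length reports the mouse exchanges. A small standalone illustration follows (not part of the patch, buildable with GCC or Clang in userspace):

#include <stdint.h>
#include <stdio.h>

struct wire_example {
	uint8_t  command;
	uint16_t size;	/* would be aligned to offset 2 without packing */
	uint8_t  data;
} __attribute__ ((__packed__));

int main(void)
{
	/* prints 4; without the attribute most ABIs would print 6 */
	printf("%zu\n", sizeof(struct wire_example));
	return 0;
}
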
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
new file mode 100644
index 000000000000..1608c8d1efd6
--- /dev/null
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -0,0 +1,837 @@
+/*
+ * Roccat Kone[+] driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone[+] is an updated/improved version of the Kone with more memory
+ * and functionality and without the non-standard behaviours the Kone had.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat.h"
+#include "hid-roccat-koneplus.h"
+
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+static struct class *koneplus_class;
+
+static void koneplus_profile_activated(struct koneplus_device *koneplus,
+ uint new_profile)
+{
+ koneplus->actual_profile = new_profile;
+}
+
+static int koneplus_send_control(struct usb_device *usb_dev, uint value,
+ enum koneplus_control_requests request)
+{
+ int len;
+ struct koneplus_control *control;
+
+ if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
+ request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
+ value > 4)
+ return -EINVAL;
+
+ control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
+ if (!control)
+ return -ENOMEM;
+
+ control->command = KONEPLUS_COMMAND_CONTROL;
+ control->value = value;
+ control->request = request;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ KONEPLUS_USB_COMMAND_CONTROL, 0, control,
+ sizeof(struct koneplus_control),
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(control);
+
+ if (len != sizeof(struct koneplus_control))
+ return len;
+
+ return 0;
+}
+
+static int koneplus_receive(struct usb_device *usb_dev, uint usb_command,
+		void *buf, uint size)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ return (len != size) ? -EIO : 0;
+}
+
+static int koneplus_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct koneplus_control *control;
+
+ control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
+ if (!control)
+ return -ENOMEM;
+
+ do {
+ retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ control, sizeof(struct koneplus_control));
+
+ /* check if we get a completely wrong answer */
+ if (retval)
+ goto out;
+
+ if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) {
+ retval = 0;
+ goto out;
+ }
+
+ /* indicates that hardware needs some more time to complete action */
+ if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+ msleep(500); /* windows driver uses 1000 */
+ continue;
+ }
+
+ /* seems to be critical - replug necessary */
+ if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ dev_err(&usb_dev->dev, "koneplus_receive_control_status: "
+ "unknown response value 0x%x\n", control->value);
+ retval = -EINVAL;
+ goto out;
+
+ } while (1);
+out:
+ kfree(control);
+ return retval;
+}
+
+static int koneplus_send(struct usb_device *usb_dev, uint command,
+		void *buf, uint size)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ if (len != size)
+ return -EIO;
+
+ if (koneplus_receive_control_status(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
+ enum koneplus_control_requests request)
+{
+ int retval;
+
+ retval = koneplus_send_control(usb_dev, number, request);
+ if (retval)
+ return retval;
+
+ /* allow time to settle things - windows driver uses 500 */
+ msleep(100);
+
+ retval = koneplus_receive_control_status(usb_dev);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static int koneplus_get_info(struct usb_device *usb_dev,
+ struct koneplus_info *buf)
+{
+ return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+ buf, sizeof(struct koneplus_info));
+}
+
+static int koneplus_get_profile_settings(struct usb_device *usb_dev,
+ struct koneplus_profile_settings *buf, uint number)
+{
+ int retval;
+
+ retval = koneplus_select_profile(usb_dev, number,
+ KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+ if (retval)
+ return retval;
+
+ return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ buf, sizeof(struct koneplus_profile_settings));
+}
+
+static int koneplus_set_profile_settings(struct usb_device *usb_dev,
+ struct koneplus_profile_settings const *settings)
+{
+ return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ (void *)settings, sizeof(struct koneplus_profile_settings));
+}
+
+static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
+ struct koneplus_profile_buttons *buf, int number)
+{
+ int retval;
+
+ retval = koneplus_select_profile(usb_dev, number,
+ KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
+ if (retval)
+ return retval;
+
+ return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ buf, sizeof(struct koneplus_profile_buttons));
+}
+
+static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
+ struct koneplus_profile_buttons const *buttons)
+{
+ return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ (void *)buttons, sizeof(struct koneplus_profile_buttons));
+}
+
+/* retval is 0-4 on success, < 0 on error */
+static int koneplus_get_startup_profile(struct usb_device *usb_dev)
+{
+ struct koneplus_startup_profile *buf;
+ int retval;
+
+	buf = kmalloc(sizeof(struct koneplus_startup_profile), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+ retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+ buf, sizeof(struct koneplus_startup_profile));
+
+ if (retval)
+ goto out;
+
+ retval = buf->startup_profile;
+out:
+ kfree(buf);
+ return retval;
+}
+
+static int koneplus_set_startup_profile(struct usb_device *usb_dev,
+ int startup_profile)
+{
+ struct koneplus_startup_profile buf;
+
+ buf.command = KONEPLUS_COMMAND_STARTUP_PROFILE;
+ buf.size = sizeof(struct koneplus_startup_profile);
+ buf.startup_profile = startup_profile;
+
+ return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+			(char *)&buf, sizeof(struct koneplus_startup_profile));
+}
+
+static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&koneplus->koneplus_lock);
+ retval = koneplus_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ if (retval)
+ return retval;
+
+ return real_size;
+}
+
+static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&koneplus->koneplus_lock);
+ retval = koneplus_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ if (retval)
+ return retval;
+
+ return real_size;
+}
+
+static ssize_t koneplus_sysfs_write_macro(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_macro), KONEPLUS_USB_COMMAND_MACRO);
+}
+
+static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+}
+
+static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+}
+
+static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_tcu), KONEPLUS_USB_COMMAND_TCU);
+}
+
+static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_tcu_image), KONEPLUS_USB_COMMAND_TCU);
+}
+
+static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct koneplus_profile_settings))
+ return 0;
+
+ if (off + count > sizeof(struct koneplus_profile_settings))
+ count = sizeof(struct koneplus_profile_settings) - off;
+
+ mutex_lock(&koneplus->koneplus_lock);
+ memcpy(buf, ((void const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ return count;
+}
+
+static ssize_t koneplus_sysfs_write_profile_settings(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ int profile_number;
+ struct koneplus_profile_settings *profile_settings;
+
+ if (off != 0 || count != sizeof(struct koneplus_profile_settings))
+ return -EINVAL;
+
+ profile_number = ((struct koneplus_profile_settings const *)buf)->number;
+ profile_settings = &koneplus->profile_settings[profile_number];
+
+ mutex_lock(&koneplus->koneplus_lock);
+ difference = memcmp(buf, profile_settings,
+ sizeof(struct koneplus_profile_settings));
+ if (difference) {
+ retval = koneplus_set_profile_settings(usb_dev,
+ (struct koneplus_profile_settings const *)buf);
+ if (!retval)
+ memcpy(profile_settings, buf,
+ sizeof(struct koneplus_profile_settings));
+ }
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct koneplus_profile_settings);
+}
+
+static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct koneplus_profile_buttons))
+ return 0;
+
+ if (off + count > sizeof(struct koneplus_profile_buttons))
+ count = sizeof(struct koneplus_profile_buttons) - off;
+
+ mutex_lock(&koneplus->koneplus_lock);
+ memcpy(buf, ((void const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
+ count);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ return count;
+}
+
+static ssize_t koneplus_sysfs_write_profile_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0;
+ int difference;
+ uint profile_number;
+ struct koneplus_profile_buttons *profile_buttons;
+
+ if (off != 0 || count != sizeof(struct koneplus_profile_buttons))
+ return -EINVAL;
+
+ profile_number = ((struct koneplus_profile_buttons const *)buf)->number;
+ profile_buttons = &koneplus->profile_buttons[profile_number];
+
+ mutex_lock(&koneplus->koneplus_lock);
+ difference = memcmp(buf, profile_buttons,
+ sizeof(struct koneplus_profile_buttons));
+ if (difference) {
+ retval = koneplus_set_profile_buttons(usb_dev,
+ (struct koneplus_profile_buttons const *)buf);
+ if (!retval)
+ memcpy(profile_buttons, buf,
+ sizeof(struct koneplus_profile_buttons));
+ }
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct koneplus_profile_buttons);
+}
+
+static ssize_t koneplus_sysfs_show_startup_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct koneplus_device *koneplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->startup_profile);
+}
+
+static ssize_t koneplus_sysfs_set_startup_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct koneplus_device *koneplus;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+
+ dev = dev->parent->parent;
+ koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ mutex_lock(&koneplus->koneplus_lock);
+ retval = koneplus_set_startup_profile(usb_dev, profile);
+ mutex_unlock(&koneplus->koneplus_lock);
+ if (retval)
+ return retval;
+
+ return size;
+}
+
+static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct koneplus_device *koneplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->actual_profile);
+}
+
+static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct koneplus_device *koneplus =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->info.firmware_version);
+}
+
+static struct device_attribute koneplus_attributes[] = {
+ __ATTR(startup_profile, 0660,
+ koneplus_sysfs_show_startup_profile,
+ koneplus_sysfs_set_startup_profile),
+ __ATTR(actual_profile, 0440,
+ koneplus_sysfs_show_actual_profile, NULL),
+ __ATTR(firmware_version, 0440,
+ koneplus_sysfs_show_firmware_version, NULL),
+ __ATTR_NULL
+};
+
+static struct bin_attribute koneplus_bin_attributes[] = {
+ {
+ .attr = { .name = "sensor", .mode = 0220 },
+ .size = sizeof(struct koneplus_sensor),
+ .read = koneplus_sysfs_read_sensor,
+ .write = koneplus_sysfs_write_sensor
+ },
+ {
+ .attr = { .name = "tcu", .mode = 0220 },
+ .size = sizeof(struct koneplus_tcu),
+ .write = koneplus_sysfs_write_tcu
+ },
+ {
+ .attr = { .name = "tcu_image", .mode = 0440 },
+ .size = sizeof(struct koneplus_tcu_image),
+ .read = koneplus_sysfs_read_tcu_image
+ },
+ {
+ .attr = { .name = "profile_settings", .mode = 0220 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .write = koneplus_sysfs_write_profile_settings
+ },
+ {
+ .attr = { .name = "profile1_settings", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .read = koneplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_settings", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .read = koneplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_settings", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .read = koneplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_settings", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .read = koneplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_settings", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_settings),
+ .read = koneplus_sysfs_read_profilex_settings,
+ .private = &profile_numbers[4]
+ },
+ {
+ .attr = { .name = "profile_buttons", .mode = 0220 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .write = koneplus_sysfs_write_profile_buttons
+ },
+ {
+ .attr = { .name = "profile1_buttons", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .read = koneplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[0]
+ },
+ {
+ .attr = { .name = "profile2_buttons", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .read = koneplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[1]
+ },
+ {
+ .attr = { .name = "profile3_buttons", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .read = koneplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[2]
+ },
+ {
+ .attr = { .name = "profile4_buttons", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .read = koneplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[3]
+ },
+ {
+ .attr = { .name = "profile5_buttons", .mode = 0440 },
+ .size = sizeof(struct koneplus_profile_buttons),
+ .read = koneplus_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[4]
+ },
+ {
+ .attr = { .name = "macro", .mode = 0220 },
+ .size = sizeof(struct koneplus_macro),
+ .write = koneplus_sysfs_write_macro
+ },
+ __ATTR_NULL
+};
+
+static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
+ struct koneplus_device *koneplus)
+{
+ int retval, i;
+ static uint wait = 70; /* device will freeze with just 60 */
+
+ mutex_init(&koneplus->koneplus_lock);
+
+ koneplus->startup_profile = koneplus_get_startup_profile(usb_dev);
+
+ msleep(wait);
+ retval = koneplus_get_info(usb_dev, &koneplus->info);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < 5; ++i) {
+ msleep(wait);
+ retval = koneplus_get_profile_settings(usb_dev,
+ &koneplus->profile_settings[i], i);
+ if (retval)
+ return retval;
+
+ msleep(wait);
+ retval = koneplus_get_profile_buttons(usb_dev,
+ &koneplus->profile_buttons[i], i);
+ if (retval)
+ return retval;
+ }
+
+ koneplus_profile_activated(koneplus, koneplus->startup_profile);
+
+ return 0;
+}
+
+static int koneplus_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct koneplus_device *koneplus;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL);
+ if (!koneplus) {
+ dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, koneplus);
+
+ retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "couldn't init struct koneplus_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(koneplus_class, hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "couldn't init char dev\n");
+ } else {
+ koneplus->chrdev_minor = retval;
+ koneplus->roccat_claimed = 1;
+ }
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(koneplus);
+ return retval;
+}
+
+static void koneplus_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct koneplus_device *koneplus;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ koneplus = hid_get_drvdata(hdev);
+ if (koneplus->roccat_claimed)
+ roccat_disconnect(koneplus->chrdev_minor);
+ kfree(koneplus);
+ }
+}
+
+static int koneplus_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = koneplus_init_specials(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void koneplus_remove(struct hid_device *hdev)
+{
+ koneplus_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void koneplus_keep_values_up_to_date(struct koneplus_device *koneplus,
+ u8 const *data)
+{
+ struct koneplus_mouse_report_button const *button_report;
+
+ switch (data[0]) {
+ case KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON:
+ button_report = (struct koneplus_mouse_report_button const *)data;
+ switch (button_report->type) {
+ case KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE:
+ koneplus_profile_activated(koneplus, button_report->data1 - 1);
+ break;
+ }
+ break;
+ }
+}
+
+static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus,
+ u8 const *data)
+{
+ struct koneplus_roccat_report roccat_report;
+ struct koneplus_mouse_report_button const *button_report;
+
+ if (data[0] != KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct koneplus_mouse_report_button const *)data;
+
+ if ((button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
+ button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER) &&
+ button_report->data2 != KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS)
+ return;
+
+ roccat_report.type = button_report->type;
+ roccat_report.data1 = button_report->data1;
+ roccat_report.data2 = button_report->data2;
+ roccat_report.profile = koneplus->actual_profile + 1;
+ roccat_report_event(koneplus->chrdev_minor,
+ (uint8_t const *)&roccat_report,
+ sizeof(struct koneplus_roccat_report));
+}
+
+static int koneplus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct koneplus_device *koneplus = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ koneplus_keep_values_up_to_date(koneplus, data);
+
+ if (koneplus->roccat_claimed)
+ koneplus_report_to_chrdev(koneplus, data);
+
+ return 0;
+}
+
+static const struct hid_device_id koneplus_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, koneplus_devices);
+
+static struct hid_driver koneplus_driver = {
+ .name = "koneplus",
+ .id_table = koneplus_devices,
+ .probe = koneplus_probe,
+ .remove = koneplus_remove,
+ .raw_event = koneplus_raw_event
+};
+
+static int __init koneplus_init(void)
+{
+ int retval;
+
+ /* class name has to be same as driver name */
+ koneplus_class = class_create(THIS_MODULE, "koneplus");
+ if (IS_ERR(koneplus_class))
+ return PTR_ERR(koneplus_class);
+ koneplus_class->dev_attrs = koneplus_attributes;
+ koneplus_class->dev_bin_attrs = koneplus_bin_attributes;
+
+ retval = hid_register_driver(&koneplus_driver);
+ if (retval)
+ class_destroy(koneplus_class);
+ return retval;
+}
+
+static void __exit koneplus_exit(void)
+{
+ class_destroy(koneplus_class);
+ hid_unregister_driver(&koneplus_driver);
+}
+
+module_init(koneplus_init);
+module_exit(koneplus_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone[+] driver");
+MODULE_LICENSE("GPL v2");
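
Because the attributes in this new driver live on the roccat class device rather than on the USB interface, every show/store and bin read/write callback above first climbs two parents (class device -> HID device -> USB interface) and then follows the interface's driver data to the hid_device and on to the koneplus_device. The helper below sketches that walk; it is illustrative only and assumes, as the rest of this series arranges, that roccat_connect() creates the class device with the HID device as its parent.

/* illustrative helper, not part of the patch */
static struct koneplus_device *example_koneplus_from_dev(struct device *dev)
{
	/* class device -> HID device -> USB interface device */
	dev = dev->parent->parent;

	/* the interface's drvdata is the hid_device; its drvdata is ours */
	return hid_get_drvdata(dev_get_drvdata(dev));
}
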
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
new file mode 100644
index 000000000000..57a5c1ab7b05
--- /dev/null
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -0,0 +1,224 @@
+#ifndef __HID_ROCCAT_KONEPLUS_H
+#define __HID_ROCCAT_KONEPLUS_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+/*
+ * case 1: writes request 80 and reads value 1
+ */
+struct koneplus_control {
+ uint8_t command; /* KONEPLUS_COMMAND_CONTROL */
+ /*
+	 * value is the profile number (0-4) when requesting settings or
+	 * buttons, and 1 (status ok) when requesting status
+ */
+ uint8_t value;
+ uint8_t request;
+} __attribute__ ((__packed__));
+
+enum koneplus_control_requests {
+ KONEPLUS_CONTROL_REQUEST_STATUS = 0x00,
+ KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
+ KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x90,
+};
+
+enum koneplus_control_values {
+ KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0,
+ KONEPLUS_CONTROL_REQUEST_STATUS_OK = 1,
+ KONEPLUS_CONTROL_REQUEST_STATUS_WAIT = 3,
+};
+
+struct koneplus_startup_profile {
+ uint8_t command; /* KONEPLUS_COMMAND_STARTUP_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t startup_profile; /* Range 0-4! */
+} __attribute__ ((__packed__));
+
+struct koneplus_profile_settings {
+ uint8_t command; /* KONEPLUS_COMMAND_PROFILE_SETTINGS */
+ uint8_t size; /* always 43 */
+ uint8_t number; /* range 0-4 */
+ uint8_t advanced_sensitivity;
+ uint8_t sensitivity_x;
+ uint8_t sensitivity_y;
+ uint8_t cpi_levels_enabled;
+ uint8_t cpi_levels_x[5];
+ uint8_t cpi_startup_level; /* range 0-4 */
+ uint8_t cpi_levels_y[5]; /* range 1-60 means 100-6000 cpi */
+ uint8_t unknown1;
+ uint8_t polling_rate;
+ uint8_t lights_enabled;
+ uint8_t light_effect_mode;
+ uint8_t color_flow_effect;
+ uint8_t light_effect_type;
+ uint8_t light_effect_speed;
+ uint8_t lights[16];
+ uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_profile_buttons {
+ uint8_t command; /* KONEPLUS_COMMAND_PROFILE_BUTTONS */
+ uint8_t size; /* always 77 */
+ uint8_t number; /* range 0-4 */
+ uint8_t data[72];
+ uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_macro {
+ uint8_t command; /* KONEPLUS_COMMAND_MACRO */
+ uint16_t size; /* always 0x822 little endian */
+ uint8_t profile; /* range 0-4 */
+ uint8_t button; /* range 0-23 */
+ uint8_t data[2075];
+ uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_info {
+ uint8_t command; /* KONEPLUS_COMMAND_INFO */
+ uint8_t size; /* always 6 */
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __attribute__ ((__packed__));
+
+struct koneplus_e {
+ uint8_t command; /* KONEPLUS_COMMAND_E */
+ uint8_t size; /* always 3 */
+ uint8_t unknown; /* TODO 1; 0 before firmware update */
+} __attribute__ ((__packed__));
+
+struct koneplus_sensor {
+ uint8_t command; /* KONEPLUS_COMMAND_SENSOR */
+ uint8_t size; /* always 6 */
+ uint8_t data[4];
+} __attribute__ ((__packed__));
+
+struct koneplus_firmware_write {
+ uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE */
+ uint8_t unknown[1025];
+} __attribute__ ((__packed__));
+
+struct koneplus_firmware_write_control {
+ uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL */
+ /*
+ * value is 1 on success
+ * 3 means "not finished yet"
+ */
+ uint8_t value;
+ uint8_t unknown; /* always 0x75 */
+} __attribute__ ((__packed__));
+
+struct koneplus_tcu {
+ uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
+ uint8_t data[2];
+} __attribute__ ((__packed__));
+
+struct koneplus_tcu_image {
+ uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
+ uint8_t data[1024];
+ uint16_t checksum;
+} __attribute__ ((__packed__));
+
+enum koneplus_commands {
+ KONEPLUS_COMMAND_CONTROL = 0x4,
+ KONEPLUS_COMMAND_STARTUP_PROFILE = 0x5,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
+ KONEPLUS_COMMAND_MACRO = 0x8,
+ KONEPLUS_COMMAND_INFO = 0x9,
+ KONEPLUS_COMMAND_E = 0xe,
+ KONEPLUS_COMMAND_SENSOR = 0xf,
+ KONEPLUS_COMMAND_FIRMWARE_WRITE = 0x1b,
+ KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
+enum koneplus_usb_commands {
+ KONEPLUS_USB_COMMAND_CONTROL = 0x304,
+ KONEPLUS_USB_COMMAND_STARTUP_PROFILE = 0x305,
+ KONEPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
+ KONEPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
+ KONEPLUS_USB_COMMAND_MACRO = 0x308,
+ KONEPLUS_USB_COMMAND_INFO = 0x309,
+ KONEPLUS_USB_COMMAND_TCU = 0x30c,
+ KONEPLUS_USB_COMMAND_E = 0x30e,
+ KONEPLUS_USB_COMMAND_SENSOR = 0x30f,
+ KONEPLUS_USB_COMMAND_FIRMWARE_WRITE = 0x31b,
+ KONEPLUS_USB_COMMAND_FIRMWARE_WRITE_CONTROL = 0x31c,
+};
+
+enum koneplus_mouse_report_numbers {
+ KONEPLUS_MOUSE_REPORT_NUMBER_HID = 1,
+ KONEPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
+ KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3,
+};
+
+struct koneplus_mouse_report_button {
+ uint8_t report_number; /* always KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON */
+ uint8_t zero1;
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t zero2;
+ uint8_t unknown[2];
+} __attribute__ ((__packed__));
+
+enum koneplus_mouse_report_button_types {
+ /* data1 = new profile range 1-5 */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 0x20,
+
+ /* data1 = button number range 1-24; data2 = action */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+
+ /* data1 = button number range 1-24; data2 = action */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+
+ /* data1 = setting number range 1-5 */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+
+ /* data1 and data2 = range 0x1-0xb */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+
+	/*
+	 * data1 = 22 = next track...
+	 * data2 = action
+	 */
+ KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+enum koneplus_mouse_report_button_action {
+ KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0,
+ KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1,
+};
+
+struct koneplus_roccat_report {
+ uint8_t type;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t profile;
+} __attribute__ ((__packed__));
+
+struct koneplus_device {
+ int actual_profile;
+
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex koneplus_lock;
+
+ int startup_profile;
+ struct koneplus_info info;
+ struct koneplus_profile_settings profile_settings[5];
+ struct koneplus_profile_buttons profile_buttons[5];
+};
+
+#endif
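
The pyra changes that follow use the same trick already seen in the kone and kone[+] files: instead of five nearly identical profileN callbacks, one callback is registered five times and each bin_attribute carries a pointer to its profile index in .private. A reduced sketch of that indexing, with illustrative example_* names:

static uint example_numbers[5] = {0, 1, 2, 3, 4};

static ssize_t example_read_profilex(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
	uint index = *(uint *)attr->private;

	/* a real callback copies from the per-profile buffer chosen by index */
	pr_debug("read request for profile %u\n", index);
	return 0;
}

static struct bin_attribute example_bin_attributes[] = {
	{
		.attr = { .name = "profile1_settings", .mode = 0440 },
		.read = example_read_profilex,
		.private = &example_numbers[0]
	},
	{
		.attr = { .name = "profile2_settings", .mode = 0440 },
		.read = example_read_profilex,
		.private = &example_numbers[1]
	},
	__ATTR_NULL
};
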
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 9bf23047892a..02c58e015bee 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -27,6 +27,11 @@
#include "hid-roccat.h"
#include "hid-roccat-pyra.h"
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+/* pyra_class is used for creating sysfs attributes via roccat char device */
+static struct class *pyra_class;
+
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
@@ -87,9 +92,8 @@ static int pyra_receive_control_status(struct usb_device *usb_dev)
control.value == 1)
return 0;
else {
- dev_err(&usb_dev->dev, "receive control status: "
- "unknown response 0x%x 0x%x\n",
- control.request, control.value);
+ hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
+ control.request, control.value);
return -EINVAL;
}
}
@@ -221,9 +225,10 @@ static int pyra_set_settings(struct usb_device *usb_dev,
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, int number)
+ loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct pyra_profile_settings))
@@ -233,58 +238,19 @@ static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
count = sizeof(struct pyra_profile_settings) - off;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_settings[number]) + off,
+ memcpy(buf, ((char const *)&pyra->profile_settings[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&pyra->pyra_lock);
return count;
}
-static ssize_t pyra_sysfs_read_profile1_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_settings(fp, kobj,
- attr, buf, off, count, 0);
-}
-
-static ssize_t pyra_sysfs_read_profile2_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_settings(fp, kobj,
- attr, buf, off, count, 1);
-}
-
-static ssize_t pyra_sysfs_read_profile3_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_settings(fp, kobj,
- attr, buf, off, count, 2);
-}
-
-static ssize_t pyra_sysfs_read_profile4_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_settings(fp, kobj,
- attr, buf, off, count, 3);
-}
-
-static ssize_t pyra_sysfs_read_profile5_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_settings(fp, kobj,
- attr, buf, off, count, 4);
-}
-
static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count, int number)
+ loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct pyra_profile_buttons))
@@ -294,58 +260,19 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
count = sizeof(struct pyra_profile_buttons) - off;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_buttons[number]) + off,
+ memcpy(buf, ((char const *)&pyra->profile_buttons[*(uint *)(attr->private)]) + off,
count);
mutex_unlock(&pyra->pyra_lock);
return count;
}
-static ssize_t pyra_sysfs_read_profile1_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_buttons(fp, kobj,
- attr, buf, off, count, 0);
-}
-
-static ssize_t pyra_sysfs_read_profile2_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_buttons(fp, kobj,
- attr, buf, off, count, 1);
-}
-
-static ssize_t pyra_sysfs_read_profile3_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_buttons(fp, kobj,
- attr, buf, off, count, 2);
-}
-
-static ssize_t pyra_sysfs_read_profile4_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_buttons(fp, kobj,
- attr, buf, off, count, 3);
-}
-
-static ssize_t pyra_sysfs_read_profile5_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return pyra_sysfs_read_profilex_buttons(fp, kobj,
- attr, buf, off, count, 4);
-}
-
static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
@@ -381,7 +308,8 @@ static ssize_t pyra_sysfs_write_profile_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
@@ -417,7 +345,8 @@ static ssize_t pyra_sysfs_read_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
if (off >= sizeof(struct pyra_settings))
@@ -437,7 +366,8 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
@@ -469,255 +399,125 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct pyra_device *pyra =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
}
static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct pyra_device *pyra =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
}
static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct pyra_device *pyra =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
}
static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct pyra_device *pyra =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
}
-static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL);
-
-static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
-
-static DEVICE_ATTR(firmware_version, 0440,
- pyra_sysfs_show_firmware_version, NULL);
-
-static DEVICE_ATTR(startup_profile, 0440,
- pyra_sysfs_show_startup_profile, NULL);
-
-static struct attribute *pyra_attributes[] = {
- &dev_attr_actual_cpi.attr,
- &dev_attr_actual_profile.attr,
- &dev_attr_firmware_version.attr,
- &dev_attr_startup_profile.attr,
- NULL
-};
-
-static struct attribute_group pyra_attribute_group = {
- .attrs = pyra_attributes
+static struct device_attribute pyra_attributes[] = {
+ __ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL),
+ __ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL),
+ __ATTR(firmware_version, 0440,
+ pyra_sysfs_show_firmware_version, NULL),
+ __ATTR(startup_profile, 0440,
+ pyra_sysfs_show_startup_profile, NULL),
+ __ATTR_NULL
};
-static struct bin_attribute pyra_profile_settings_attr = {
+static struct bin_attribute pyra_bin_attributes[] = {
+ {
.attr = { .name = "profile_settings", .mode = 0220 },
.size = sizeof(struct pyra_profile_settings),
.write = pyra_sysfs_write_profile_settings
-};
-
-static struct bin_attribute pyra_profile1_settings_attr = {
+ },
+ {
.attr = { .name = "profile1_settings", .mode = 0440 },
.size = sizeof(struct pyra_profile_settings),
- .read = pyra_sysfs_read_profile1_settings
-};
-
-static struct bin_attribute pyra_profile2_settings_attr = {
+ .read = pyra_sysfs_read_profilex_settings,
+ .private = &profile_numbers[0]
+ },
+ {
.attr = { .name = "profile2_settings", .mode = 0440 },
.size = sizeof(struct pyra_profile_settings),
- .read = pyra_sysfs_read_profile2_settings
-};
-
-static struct bin_attribute pyra_profile3_settings_attr = {
+ .read = pyra_sysfs_read_profilex_settings,
+ .private = &profile_numbers[1]
+ },
+ {
.attr = { .name = "profile3_settings", .mode = 0440 },
.size = sizeof(struct pyra_profile_settings),
- .read = pyra_sysfs_read_profile3_settings
-};
-
-static struct bin_attribute pyra_profile4_settings_attr = {
+ .read = pyra_sysfs_read_profilex_settings,
+ .private = &profile_numbers[2]
+ },
+ {
.attr = { .name = "profile4_settings", .mode = 0440 },
.size = sizeof(struct pyra_profile_settings),
- .read = pyra_sysfs_read_profile4_settings
-};
-
-static struct bin_attribute pyra_profile5_settings_attr = {
+ .read = pyra_sysfs_read_profilex_settings,
+ .private = &profile_numbers[3]
+ },
+ {
.attr = { .name = "profile5_settings", .mode = 0440 },
.size = sizeof(struct pyra_profile_settings),
- .read = pyra_sysfs_read_profile5_settings
-};
-
-static struct bin_attribute pyra_profile_buttons_attr = {
+ .read = pyra_sysfs_read_profilex_settings,
+ .private = &profile_numbers[4]
+ },
+ {
.attr = { .name = "profile_buttons", .mode = 0220 },
.size = sizeof(struct pyra_profile_buttons),
.write = pyra_sysfs_write_profile_buttons
-};
-
-static struct bin_attribute pyra_profile1_buttons_attr = {
+ },
+ {
.attr = { .name = "profile1_buttons", .mode = 0440 },
.size = sizeof(struct pyra_profile_buttons),
- .read = pyra_sysfs_read_profile1_buttons
-};
-
-static struct bin_attribute pyra_profile2_buttons_attr = {
+ .read = pyra_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[0]
+ },
+ {
.attr = { .name = "profile2_buttons", .mode = 0440 },
.size = sizeof(struct pyra_profile_buttons),
- .read = pyra_sysfs_read_profile2_buttons
-};
-
-static struct bin_attribute pyra_profile3_buttons_attr = {
+ .read = pyra_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[1]
+ },
+ {
.attr = { .name = "profile3_buttons", .mode = 0440 },
.size = sizeof(struct pyra_profile_buttons),
- .read = pyra_sysfs_read_profile3_buttons
-};
-
-static struct bin_attribute pyra_profile4_buttons_attr = {
+ .read = pyra_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[2]
+ },
+ {
.attr = { .name = "profile4_buttons", .mode = 0440 },
.size = sizeof(struct pyra_profile_buttons),
- .read = pyra_sysfs_read_profile4_buttons
-};
-
-static struct bin_attribute pyra_profile5_buttons_attr = {
+ .read = pyra_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[3]
+ },
+ {
.attr = { .name = "profile5_buttons", .mode = 0440 },
.size = sizeof(struct pyra_profile_buttons),
- .read = pyra_sysfs_read_profile5_buttons
-};
-
-static struct bin_attribute pyra_settings_attr = {
+ .read = pyra_sysfs_read_profilex_buttons,
+ .private = &profile_numbers[4]
+ },
+ {
.attr = { .name = "settings", .mode = 0660 },
.size = sizeof(struct pyra_settings),
.read = pyra_sysfs_read_settings,
.write = pyra_sysfs_write_settings
+ },
+ __ATTR_NULL
};
-static int pyra_create_sysfs_attributes(struct usb_interface *intf)
-{
- int retval;
-
- retval = sysfs_create_group(&intf->dev.kobj, &pyra_attribute_group);
- if (retval)
- goto exit_1;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile_settings_attr);
- if (retval)
- goto exit_2;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile1_settings_attr);
- if (retval)
- goto exit_3;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile2_settings_attr);
- if (retval)
- goto exit_4;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile3_settings_attr);
- if (retval)
- goto exit_5;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile4_settings_attr);
- if (retval)
- goto exit_6;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile5_settings_attr);
- if (retval)
- goto exit_7;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile_buttons_attr);
- if (retval)
- goto exit_8;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile1_buttons_attr);
- if (retval)
- goto exit_9;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile2_buttons_attr);
- if (retval)
- goto exit_10;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile3_buttons_attr);
- if (retval)
- goto exit_11;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile4_buttons_attr);
- if (retval)
- goto exit_12;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_profile5_buttons_attr);
- if (retval)
- goto exit_13;
-
- retval = sysfs_create_bin_file(&intf->dev.kobj,
- &pyra_settings_attr);
- if (retval)
- goto exit_14;
-
- return 0;
-
-exit_14:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
-exit_13:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
-exit_12:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
-exit_11:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
-exit_10:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
-exit_9:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
-exit_8:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
-exit_7:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
-exit_6:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
-exit_5:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
-exit_4:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
-exit_3:
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
-exit_2:
- sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
-exit_1:
- return retval;
-}
-
-static void pyra_remove_sysfs_attributes(struct usb_interface *intf)
-{
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
- sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
- sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
-}
-
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
@@ -770,31 +570,24 @@ static int pyra_init_specials(struct hid_device *hdev)
pyra = kzalloc(sizeof(*pyra), GFP_KERNEL);
if (!pyra) {
- dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ hid_err(hdev, "can't alloc device descriptor\n");
return -ENOMEM;
}
hid_set_drvdata(hdev, pyra);
retval = pyra_init_pyra_device_struct(usb_dev, pyra);
if (retval) {
- dev_err(&hdev->dev,
- "couldn't init struct pyra_device\n");
+ hid_err(hdev, "couldn't init struct pyra_device\n");
goto exit_free;
}
- retval = roccat_connect(hdev);
+ retval = roccat_connect(pyra_class, hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "couldn't init char dev\n");
+ hid_err(hdev, "couldn't init char dev\n");
} else {
pyra->chrdev_minor = retval;
pyra->roccat_claimed = 1;
}
-
- retval = pyra_create_sysfs_attributes(intf);
- if (retval) {
- dev_err(&hdev->dev, "cannot create sysfs files\n");
- goto exit_free;
- }
} else {
hid_set_drvdata(hdev, NULL);
}
@@ -812,7 +605,6 @@ static void pyra_remove_specials(struct hid_device *hdev)
if (intf->cur_altsetting->desc.bInterfaceProtocol
== USB_INTERFACE_PROTOCOL_MOUSE) {
- pyra_remove_sysfs_attributes(intf);
pyra = hid_get_drvdata(hdev);
if (pyra->roccat_claimed)
roccat_disconnect(pyra->chrdev_minor);
@@ -826,19 +618,19 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
retval = hid_parse(hdev);
if (retval) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto exit;
}
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (retval) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto exit;
}
retval = pyra_init_specials(hdev);
if (retval) {
- dev_err(&hdev->dev, "couldn't install mouse\n");
+ hid_err(hdev, "couldn't install mouse\n");
goto exit_stop;
}
return 0;
@@ -952,11 +744,24 @@ static struct hid_driver pyra_driver = {
static int __init pyra_init(void)
{
- return hid_register_driver(&pyra_driver);
+ int retval;
+
+	/* class name has to be the same as the driver name */
+ pyra_class = class_create(THIS_MODULE, "pyra");
+ if (IS_ERR(pyra_class))
+ return PTR_ERR(pyra_class);
+ pyra_class->dev_attrs = pyra_attributes;
+ pyra_class->dev_bin_attrs = pyra_bin_attributes;
+
+ retval = hid_register_driver(&pyra_driver);
+ if (retval)
+ class_destroy(pyra_class);
+ return retval;
}
static void __exit pyra_exit(void)
{
+ class_destroy(pyra_class);
hid_unregister_driver(&pyra_driver);
}
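
The hunks above collapse the per-profile attribute definitions into one class-wide table and retire pyra_create_sysfs_attributes()/pyra_remove_sysfs_attributes() entirely: once the table is hung off the class, the driver core creates and removes the files for every device registered in that class. Below is a minimal, self-contained sketch of the same pattern with hypothetical names (demo_class, demo_bin_attributes, demo_read_profilex), assuming a kernel of this generation where struct class carries the dev_bin_attrs pointer used above. The .private pointer is what lets a single read callback serve every profileN file instead of five near-identical functions.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static uint profile_numbers[2] = { 0, 1 };

/* one callback serves every profileN file; the index rides in attr->private */
static ssize_t demo_read_profilex(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	uint index = *(uint *)attr->private;

	if (off != 0)
		return 0;
	/* a real driver would read profile 'index' from the hardware here */
	return scnprintf(buf, count, "profile %u\n", index);
}

static struct bin_attribute demo_bin_attributes[] = {
	{
		.attr = { .name = "profile1_settings", .mode = 0440 },
		.read = demo_read_profilex,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2_settings", .mode = 0440 },
		.read = demo_read_profilex,
		.private = &profile_numbers[1]
	},
	__ATTR_NULL
};

static struct class *demo_class;

static int __init demo_init(void)
{
	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class))
		return PTR_ERR(demo_class);
	/* every device created in this class gets the files automatically */
	demo_class->dev_bin_attrs = demo_bin_attributes;
	return 0;
}

static void __exit demo_exit(void)
{
	class_destroy(demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
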
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 22f80a8f26f9..14cbbe1621e0 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -14,14 +14,11 @@
#include <linux/types.h>
-#pragma pack(push)
-#pragma pack(1)
-
struct pyra_b {
uint8_t command; /* PYRA_COMMAND_B */
uint8_t size; /* always 3 */
uint8_t unknown; /* 1 */
-};
+} __attribute__ ((__packed__));
struct pyra_control {
uint8_t command; /* PYRA_COMMAND_CONTROL */
@@ -31,7 +28,7 @@ struct pyra_control {
*/
uint8_t value; /* Range 0-4 */
uint8_t request;
-};
+} __attribute__ ((__packed__));
enum pyra_control_requests {
PYRA_CONTROL_REQUEST_STATUS = 0x00,
@@ -43,7 +40,7 @@ struct pyra_settings {
uint8_t command; /* PYRA_COMMAND_SETTINGS */
uint8_t size; /* always 3 */
uint8_t startup_profile; /* Range 0-4! */
-};
+} __attribute__ ((__packed__));
struct pyra_profile_settings {
uint8_t command; /* PYRA_COMMAND_PROFILE_SETTINGS */
@@ -58,7 +55,7 @@ struct pyra_profile_settings {
uint8_t light_effect;
uint8_t handedness;
uint16_t checksum; /* byte sum */
-};
+} __attribute__ ((__packed__));
struct pyra_profile_buttons {
uint8_t command; /* PYRA_COMMAND_PROFILE_BUTTONS */
@@ -66,7 +63,7 @@ struct pyra_profile_buttons {
uint8_t number; /* Range 0-4 */
uint8_t buttons[14];
uint16_t checksum; /* byte sum */
-};
+} __attribute__ ((__packed__));
struct pyra_info {
uint8_t command; /* PYRA_COMMAND_INFO */
@@ -75,7 +72,7 @@ struct pyra_info {
uint8_t unknown1; /* always 0 */
uint8_t unknown2; /* always 1 */
uint8_t unknown3; /* always 0 */
-};
+} __attribute__ ((__packed__));
enum pyra_commands {
PYRA_COMMAND_CONTROL = 0x4,
@@ -107,13 +104,13 @@ struct pyra_mouse_event_button {
uint8_t type;
uint8_t data1;
uint8_t data2;
-};
+} __attribute__ ((__packed__));
struct pyra_mouse_event_audio {
uint8_t report_number; /* always 2 */
uint8_t type;
uint8_t unused; /* always 0 */
-};
+} __attribute__ ((__packed__));
/* hid audio controls */
enum pyra_mouse_event_audio_types {
@@ -167,9 +164,7 @@ struct pyra_roccat_report {
uint8_t type;
uint8_t value;
uint8_t key;
-};
-
-#pragma pack(pop)
+} __attribute__ ((__packed__));
struct pyra_device {
int actual_profile;
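
The header now marks each wire-format structure individually instead of wrapping the whole file in #pragma pack(push, 1) ... #pragma pack(pop); the resulting layout is the same, but the packing is scoped to exactly the structs that mirror the device's byte stream. A standalone user-space illustration of what the attribute guarantees, with hypothetical struct names and offsets as seen on typical ABIs:

#include <stdint.h>
#include <stdio.h>

/* without packing, the compiler pads 'checksum' out to its 2-byte alignment */
struct demo_report_padded {
	uint8_t command;	/* offset 0 */
	uint16_t checksum;	/* offset 2 after one padding byte; sizeof == 4 */
};

/* packed: the struct mirrors the device's byte stream exactly */
struct demo_report_packed {
	uint8_t command;	/* offset 0 */
	uint16_t checksum;	/* offset 1, no padding; sizeof == 3 */
} __attribute__ ((__packed__));

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct demo_report_padded),
	       sizeof(struct demo_report_packed));
	return 0;
}
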
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 5a6879e235ac..a14c579ea781 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -21,6 +21,8 @@
* It is inspired by hidraw, but uses only one circular buffer for all readers.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/sched.h>
@@ -65,7 +67,6 @@ struct roccat_reader {
};
static int roccat_major;
-static struct class *roccat_class;
static struct cdev roccat_cdev;
static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
@@ -165,27 +166,22 @@ static int roccat_open(struct inode *inode, struct file *file)
mutex_lock(&device->readers_lock);
if (!device) {
- printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
- minor);
+ pr_emerg("roccat device with minor %d doesn't exist\n", minor);
error = -ENODEV;
goto exit_err;
}
if (!device->open++) {
/* power on device on adding first reader */
- if (device->hid->ll_driver->power) {
- error = device->hid->ll_driver->power(device->hid,
- PM_HINT_FULLON);
- if (error < 0) {
- --device->open;
- goto exit_err;
- }
+ error = hid_hw_power(device->hid, PM_HINT_FULLON);
+ if (error < 0) {
+ --device->open;
+ goto exit_err;
}
- error = device->hid->ll_driver->open(device->hid);
+
+ error = hid_hw_open(device->hid);
if (error < 0) {
- if (device->hid->ll_driver->power)
- device->hid->ll_driver->power(device->hid,
- PM_HINT_NORMAL);
+ hid_hw_power(device->hid, PM_HINT_NORMAL);
--device->open;
goto exit_err;
}
@@ -218,8 +214,7 @@ static int roccat_release(struct inode *inode, struct file *file)
device = devices[minor];
if (!device) {
mutex_unlock(&devices_lock);
- printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
- minor);
+ pr_emerg("roccat device with minor %d doesn't exist\n", minor);
return -ENODEV;
}
@@ -231,10 +226,8 @@ static int roccat_release(struct inode *inode, struct file *file)
if (!--device->open) {
/* removing last reader */
if (device->exist) {
- if (device->hid->ll_driver->power)
- device->hid->ll_driver->power(device->hid,
- PM_HINT_NORMAL);
- device->hid->ll_driver->close(device->hid);
+ hid_hw_power(device->hid, PM_HINT_NORMAL);
+ hid_hw_close(device->hid);
} else {
kfree(device);
}
@@ -295,12 +288,14 @@ EXPORT_SYMBOL_GPL(roccat_report_event);
/*
* roccat_connect() - create a char device for special event output
+ * @class: the class that's used to create the device. Meant to hold device
+ * specific sysfs attributes.
* @hid: the hid device the char device should be connected to.
*
* Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on
* success, a negative error code on failure.
*/
-int roccat_connect(struct hid_device *hid)
+int roccat_connect(struct class *klass, struct hid_device *hid)
{
unsigned int minor;
struct roccat_device *device;
@@ -326,7 +321,7 @@ int roccat_connect(struct hid_device *hid)
return -EINVAL;
}
- device->dev = device_create(roccat_class, &hid->dev,
+ device->dev = device_create(klass, &hid->dev,
MKDEV(roccat_major, minor), NULL,
"%s%s%d", "roccat", hid->driver->name, minor);
@@ -367,10 +362,10 @@ void roccat_disconnect(int minor)
device->exist = 0; /* TODO exist maybe not needed */
- device_destroy(roccat_class, MKDEV(roccat_major, minor));
+ device_destroy(device->dev->class, MKDEV(roccat_major, minor));
if (device->open) {
- device->hid->ll_driver->close(device->hid);
+ hid_hw_close(device->hid);
wake_up_interruptible(&device->wait);
} else {
kfree(device);
@@ -398,14 +393,7 @@ static int __init roccat_init(void)
roccat_major = MAJOR(dev_id);
if (retval < 0) {
- printk(KERN_WARNING "roccat: can't get major number\n");
- return retval;
- }
-
- roccat_class = class_create(THIS_MODULE, "roccat");
- if (IS_ERR(roccat_class)) {
- retval = PTR_ERR(roccat_class);
- unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+ pr_warn("can't get major number\n");
return retval;
}
@@ -420,7 +408,6 @@ static void __exit roccat_exit(void)
dev_t dev_id = MKDEV(roccat_major, 0);
cdev_del(&roccat_cdev);
- class_destroy(roccat_class);
unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
}
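
With the class now owned by the individual driver, roccat_connect() takes it as an argument and the common roccat code only handles the char device itself. A rough sketch of how a caller is expected to use the updated interface, with hypothetical names (demo_device, demo_class, demo_claim_chrdev) standing in for the driver-specific parts:

#include <linux/hid.h>

#include "hid-roccat.h"

/* hypothetical per-device state, mirroring what the pyra driver keeps */
struct demo_device {
	int chrdev_minor;
	int roccat_claimed;
};

static struct class *demo_class;	/* created with class_create() in __init */

static int demo_claim_chrdev(struct hid_device *hdev, struct demo_device *demo)
{
	int retval;

	/*
	 * The class carries the driver's sysfs attributes; roccat only needs
	 * it to device_create() the char device under that class.
	 */
	retval = roccat_connect(demo_class, hdev);
	if (retval < 0) {
		hid_err(hdev, "couldn't init char dev\n");
		return retval;
	}

	demo->chrdev_minor = retval;	/* minor for later roccat_report_event() */
	demo->roccat_claimed = 1;
	return 0;
}
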
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
index 09e864e9f79d..5784281d613f 100644
--- a/drivers/hid/hid-roccat.h
+++ b/drivers/hid/hid-roccat.h
@@ -16,11 +16,12 @@
#include <linux/types.h>
#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
-int roccat_connect(struct hid_device *hid);
+int roccat_connect(struct class *klass, struct hid_device *hid);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data, int len);
#else
-static inline int roccat_connect(struct hid_device *hid) { return -1; }
+static inline int roccat_connect(struct class *klass,
+ struct hid_device *hid) { return -1; }
static inline void roccat_disconnect(int minor) {}
static inline int roccat_report_event(int minor, u8 const *data, int len)
{
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 35894444e000..3c1fd8af5e0c 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -57,8 +57,8 @@
static inline void samsung_irda_dev_trace(struct hid_device *hdev,
unsigned int rsize)
{
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", rsize);
+ hid_info(hdev, "fixing up Samsung IrDA %d byte report descriptor\n",
+ rsize);
}
static __u8 *samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -160,7 +160,7 @@ static int samsung_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
@@ -174,7 +174,7 @@ static int samsung_probe(struct hid_device *hdev,
ret = hid_hw_start(hdev, cmask);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index e10a7687ebf2..16f7cafc9695 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -74,26 +74,25 @@ static int sjoyff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output reports found\n");
+ hid_err(hid, "no output reports found\n");
return -ENODEV;
}
report_ptr = report_ptr->next;
if (report_ptr == report_list) {
- dev_err(&hid->dev, "required output report is "
- "missing\n");
+ hid_err(hid, "required output report is missing\n");
return -ENODEV;
}
report = list_entry(report_ptr, struct hid_report, list);
if (report->maxfield < 1) {
- dev_err(&hid->dev, "no fields in the report\n");
+ hid_err(hid, "no fields in the report\n");
return -ENODEV;
}
if (report->field[0]->report_count < 3) {
- dev_err(&hid->dev, "not enough values in the field\n");
+ hid_err(hid, "not enough values in the field\n");
return -ENODEV;
}
@@ -117,8 +116,7 @@ static int sjoyff_init(struct hid_device *hid)
sjoyff->report->field[0]->value[2] = 0x00;
usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
- dev_info(&hid->dev,
- "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
+ hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
return 0;
}
@@ -135,13 +133,13 @@ static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 677bb3da10e8..68d7b36e31e4 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -40,8 +40,7 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
*rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
- dev_info(&hdev->dev, "Fixing up Sony Vaio VGX report "
- "descriptor\n");
+ hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
rdesc[55] = 0x06;
}
return rdesc;
@@ -89,7 +88,7 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
(3 << 8) | 0xf2, ifnum, buf, 17,
USB_CTRL_GET_TIMEOUT);
if (ret < 0)
- dev_err(&hdev->dev, "can't set operational mode\n");
+ hid_err(hdev, "can't set operational mode\n");
kfree(buf);
@@ -110,7 +109,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sc = kzalloc(sizeof(*sc), GFP_KERNEL);
if (sc == NULL) {
- dev_err(&hdev->dev, "can't alloc sony descriptor\n");
+ hid_err(hdev, "can't alloc sony descriptor\n");
return -ENOMEM;
}
@@ -119,14 +118,14 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
HID_CONNECT_HIDDEV_FORCE);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
index 3171be28c3d5..b2be1d11916b 100644
--- a/drivers/hid/hid-stantum.c
+++ b/drivers/hid/hid-stantum.c
@@ -222,7 +222,7 @@ static int stantum_probe(struct hid_device *hdev,
sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL);
if (!sd) {
- dev_err(&hdev->dev, "cannot allocate Stantum data\n");
+ hid_err(hdev, "cannot allocate Stantum data\n");
return -ENOMEM;
}
sd->valid = false;
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 164ed568f6cf..d484a0043dd4 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -27,8 +27,7 @@ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
- dev_info(&hdev->dev, "fixing up Sunplus Wireless Desktop "
- "report descriptor\n");
+ hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
rdesc[106] = rdesc[111] = 0x21;
}
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 25be4e1461bd..575862b0688e 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -151,28 +151,23 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
switch (field->usage[0].hid) {
case THRUSTMASTER_USAGE_FF:
if (field->report_count < 2) {
- dev_warn(&hid->dev, "ignoring FF field "
- "with report_count < 2\n");
+ hid_warn(hid, "ignoring FF field with report_count < 2\n");
continue;
}
if (field->logical_maximum ==
field->logical_minimum) {
- dev_warn(&hid->dev, "ignoring FF field "
- "with logical_maximum "
- "== logical_minimum\n");
+ hid_warn(hid, "ignoring FF field with logical_maximum == logical_minimum\n");
continue;
}
if (tmff->report && tmff->report != report) {
- dev_warn(&hid->dev, "ignoring FF field "
- "in other report\n");
+ hid_warn(hid, "ignoring FF field in other report\n");
continue;
}
if (tmff->ff_field && tmff->ff_field != field) {
- dev_warn(&hid->dev, "ignoring "
- "duplicate FF field\n");
+ hid_warn(hid, "ignoring duplicate FF field\n");
continue;
}
@@ -185,16 +180,15 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
break;
default:
- dev_warn(&hid->dev, "ignoring unknown output "
- "usage %08x\n",
- field->usage[0].hid);
+ hid_warn(hid, "ignoring unknown output usage %08x\n",
+ field->usage[0].hid);
continue;
}
}
}
if (!tmff->report) {
- dev_err(&hid->dev, "can't find FF field in output reports\n");
+ hid_err(hid, "can't find FF field in output reports\n");
error = -ENODEV;
goto fail;
}
@@ -203,8 +197,7 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
if (error)
goto fail;
- dev_info(&hid->dev, "force feedback for ThrustMaster devices by Zinx "
- "Verituse <zinx@epicsol.org>");
+ hid_info(hid, "force feedback for ThrustMaster devices by Zinx Verituse <zinx@epicsol.org>\n");
return 0;
fail:
@@ -224,13 +217,13 @@ static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 956ed9ac19d4..613ff7b1d746 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -66,6 +66,7 @@ static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 724f46ed612f..06888323828c 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -18,6 +18,8 @@
* any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -141,8 +143,8 @@ static void wacom_poke(struct hid_device *hdev, u8 speed)
* Note that if the raw queries fail, it's not a hard failure and it
* is safe to continue
*/
- dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
- rep_data[0], ret);
+ hid_warn(hdev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
return;
}
@@ -172,7 +174,7 @@ static ssize_t wacom_store_speed(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
wacom_show_speed, wacom_store_speed);
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
@@ -312,7 +314,7 @@ static int wacom_probe(struct hid_device *hdev,
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (wdata == NULL) {
- dev_err(&hdev->dev, "can't alloc wacom descriptor\n");
+ hid_err(hdev, "can't alloc wacom descriptor\n");
return -ENOMEM;
}
@@ -321,20 +323,20 @@ static int wacom_probe(struct hid_device *hdev,
/* Parse the HID report now */
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
ret = device_create_file(&hdev->dev, &dev_attr_speed);
if (ret)
- dev_warn(&hdev->dev,
- "can't create sysfs speed attribute err: %d\n", ret);
+ hid_warn(hdev,
+ "can't create sysfs speed attribute err: %d\n", ret);
/* Set Wacom mode 2 with high reporting speed */
wacom_poke(hdev, 1);
@@ -349,8 +351,8 @@ static int wacom_probe(struct hid_device *hdev,
ret = power_supply_register(&hdev->dev, &wdata->battery);
if (ret) {
- dev_warn(&hdev->dev,
- "can't create sysfs battery attribute, err: %d\n", ret);
+ hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
+ ret);
/*
* battery attribute is not critical for the tablet, but if it
* failed then there is no need to create ac attribute
@@ -367,8 +369,8 @@ static int wacom_probe(struct hid_device *hdev,
ret = power_supply_register(&hdev->dev, &wdata->ac);
if (ret) {
- dev_warn(&hdev->dev,
- "can't create ac battery attribute, err: %d\n", ret);
+ hid_warn(hdev,
+ "can't create ac battery attribute, err: %d\n", ret);
/*
* ac attribute is not critical for the tablet, but if it
* failed then we don't want to battery attribute to exist
@@ -454,7 +456,7 @@ static int __init wacom_init(void)
ret = hid_register_driver(&wacom_driver);
if (ret)
- printk(KERN_ERR "can't register wacom driver\n");
+ pr_err("can't register wacom driver\n");
return ret;
}
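
Besides the hid_err()/hid_warn() conversion, the wacom hunk tightens the speed attribute from S_IWUGO to S_IWUSR | S_IWGRP, so the sysfs file is no longer world-writable. A small sketch of the resulting declaration pattern, with placeholder callbacks (demo_show_speed, demo_store_speed) standing in for the real handlers:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t demo_show_speed(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");	/* placeholder value */
}

static ssize_t demo_store_speed(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return count;	/* a real driver would parse and apply the value */
}

/*
 * 0664-style permissions: readable by everyone, writable by owner and group.
 * S_IWUGO would additionally set S_IWOTH and make the file world-writable.
 */
static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
		   demo_show_speed, demo_store_speed);

static int demo_add_speed_attr(struct device *dev)
{
	return device_create_file(dev, &dev_attr_speed);
}
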
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index b7acceabba80..f31fab012f2f 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -75,14 +75,14 @@ static int zpff_init(struct hid_device *hid)
int error;
if (list_empty(report_list)) {
- dev_err(&hid->dev, "no output report found\n");
+ hid_err(hid, "no output report found\n");
return -ENODEV;
}
report = list_entry(report_list->next, struct hid_report, list);
if (report->maxfield < 4) {
- dev_err(&hid->dev, "not enough fields in report\n");
+ hid_err(hid, "not enough fields in report\n");
return -ENODEV;
}
@@ -105,8 +105,7 @@ static int zpff_init(struct hid_device *hid)
zpff->report->field[3]->value[0] = 0x00;
usbhid_submit_report(hid, zpff->report, USB_DIR_OUT);
- dev_info(&hid->dev, "force feedback for Zeroplus based devices by "
- "Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(hid, "force feedback for Zeroplus based devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
}
@@ -123,13 +122,13 @@ static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
if (ret) {
- dev_err(&hdev->dev, "hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err;
}
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index aac1f9273149..e90371508fd2 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -34,9 +34,8 @@ static __u8 *zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
- dev_info(&hdev->dev,
- "fixing up zydacron remote control report "
- "descriptor\n");
+ hid_info(hdev,
+ "fixing up zydacron remote control report descriptor\n");
rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
}
@@ -172,7 +171,7 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
zc = kzalloc(sizeof(*zc), GFP_KERNEL);
if (zc == NULL) {
- dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+ hid_err(hdev, "can't alloc descriptor\n");
return -ENOMEM;
}
@@ -180,13 +179,13 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
- dev_err(&hdev->dev, "zydacron: parse failed\n");
+ hid_err(hdev, "parse failed\n");
goto err_free;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
- dev_err(&hdev->dev, "zydacron: hw start failed\n");
+ hid_err(hdev, "hw start failed\n");
goto err_free;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index e1f07483691f..468e87b53ed2 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -19,6 +19,8 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -122,15 +124,15 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
}
if (count > HID_MAX_BUFFER_SIZE) {
- printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too large report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
if (count < 2) {
- printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too short report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
@@ -192,15 +194,13 @@ static int hidraw_open(struct inode *inode, struct file *file)
dev = hidraw_table[minor];
if (!dev->open++) {
- if (dev->hid->ll_driver->power) {
- err = dev->hid->ll_driver->power(dev->hid, PM_HINT_FULLON);
- if (err < 0)
- goto out_unlock;
- }
- err = dev->hid->ll_driver->open(dev->hid);
+ err = hid_hw_power(dev->hid, PM_HINT_FULLON);
+ if (err < 0)
+ goto out_unlock;
+
+ err = hid_hw_open(dev->hid);
if (err < 0) {
- if (dev->hid->ll_driver->power)
- dev->hid->ll_driver->power(dev->hid, PM_HINT_NORMAL);
+ hid_hw_power(dev->hid, PM_HINT_NORMAL);
dev->open--;
}
}
@@ -229,9 +229,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
dev = hidraw_table[minor];
if (!--dev->open) {
if (list->hidraw->exist) {
- if (dev->hid->ll_driver->power)
- dev->hid->ll_driver->power(dev->hid, PM_HINT_NORMAL);
- dev->hid->ll_driver->close(dev->hid);
+ hid_hw_power(dev->hid, PM_HINT_NORMAL);
+ hid_hw_close(dev->hid);
} else {
kfree(list->hidraw);
}
@@ -345,6 +344,9 @@ static const struct file_operations hidraw_ops = {
.open = hidraw_open,
.release = hidraw_release,
.unlocked_ioctl = hidraw_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hidraw_ioctl,
+#endif
.llseek = noop_llseek,
};
@@ -433,7 +435,7 @@ void hidraw_disconnect(struct hid_device *hid)
device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
if (hidraw->open) {
- hid->ll_driver->close(hid);
+ hid_hw_close(hid);
wake_up_interruptible(&hidraw->wait);
} else {
kfree(hidraw);
@@ -452,7 +454,7 @@ int __init hidraw_init(void)
hidraw_major = MAJOR(dev_id);
if (result < 0) {
- printk(KERN_WARNING "hidraw: can't get major number\n");
+ pr_warn("can't get major number\n");
result = 0;
goto out;
}
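
hidraw now goes through the hid_hw_power()/hid_hw_open()/hid_hw_close() helpers rather than poking hid->ll_driver directly; the helpers tolerate transports without a power method, so the NULL checks disappear from the callers. A minimal sketch of the open-side pairing, assuming only the helpers shown in the hunks above:

#include <linux/hid.h>

/*
 * Open-side pairing after the conversion: the hid_hw_*() helpers cope with
 * transports that have no power method, so callers no longer check for it.
 */
static int demo_open_device(struct hid_device *hid)
{
	int err;

	err = hid_hw_power(hid, PM_HINT_FULLON);	/* keep the device awake */
	if (err < 0)
		return err;

	err = hid_hw_open(hid);				/* start the transport I/O */
	if (err < 0)
		hid_hw_power(hid, PM_HINT_NORMAL);	/* roll back on failure */

	return err;
}
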
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 4edb3bef94a6..0f20fd17cf06 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -45,7 +45,7 @@ config USB_HIDDEV
If unsure, say Y.
menu "USB HID Boot Protocol drivers"
- depends on USB!=n && USB_HID!=y && EMBEDDED
+ depends on USB!=n && USB_HID!=y && EXPERT
config USB_KBD
tristate "USB HIDBP Keyboard (simple Boot) support"
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 1329ecb37a1c..db3cf31c6fa1 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,15 +3,15 @@
#
# Multipart objects.
-usbhid-objs := hid-core.o hid-quirks.o
+usbhid-y := hid-core.o hid-quirks.o
# Optional parts of multipart objects.
ifeq ($(CONFIG_USB_HIDDEV),y)
- usbhid-objs += hiddev.o
+ usbhid-y += hiddev.o
endif
ifeq ($(CONFIG_HID_PID),y)
- usbhid-objs += hid-pidff.o
+ usbhid-y += hid-pidff.o
endif
obj-$(CONFIG_USB_HID) += usbhid.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 5489eab3a6bd..b336dd84036f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -67,7 +67,6 @@ MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying "
* Input submission and I/O error handler.
*/
static DEFINE_MUTEX(hid_open_mut);
-static struct workqueue_struct *resumption_waker;
static void hid_io_error(struct hid_device *hid);
static int hid_submit_out(struct hid_device *hid);
@@ -136,10 +135,10 @@ static void hid_reset(struct work_struct *work)
hid_io_error(hid);
break;
default:
- err_hid("can't reset device, %s-%s/input%d, status %d",
- hid_to_usb_dev(hid)->bus->bus_name,
- hid_to_usb_dev(hid)->devpath,
- usbhid->ifnum, rc);
+ hid_err(hid, "can't reset device, %s-%s/input%d, status %d\n",
+ hid_to_usb_dev(hid)->bus->bus_name,
+ hid_to_usb_dev(hid)->devpath,
+ usbhid->ifnum, rc);
/* FALLTHROUGH */
case -EHOSTUNREACH:
case -ENODEV:
@@ -278,18 +277,18 @@ static void hid_irq_in(struct urb *urb)
hid_io_error(hid);
return;
default: /* error */
- dev_warn(&urb->dev->dev, "input irq status %d "
- "received\n", urb->status);
+ hid_warn(urb->dev, "input irq status %d received\n",
+ urb->status);
}
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
clear_bit(HID_IN_RUNNING, &usbhid->iofl);
if (status != -EPERM) {
- err_hid("can't resubmit intr, %s-%s/input%d, status %d",
- hid_to_usb_dev(hid)->bus->bus_name,
- hid_to_usb_dev(hid)->devpath,
- usbhid->ifnum, status);
+ hid_err(hid, "can't resubmit intr, %s-%s/input%d, status %d\n",
+ hid_to_usb_dev(hid)->bus->bus_name,
+ hid_to_usb_dev(hid)->devpath,
+ usbhid->ifnum, status);
hid_io_error(hid);
}
}
@@ -300,10 +299,19 @@ static int hid_submit_out(struct hid_device *hid)
struct hid_report *report;
char *raw_report;
struct usbhid_device *usbhid = hid->driver_data;
+ int r;
report = usbhid->out[usbhid->outtail].report;
raw_report = usbhid->out[usbhid->outtail].raw_report;
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return -1;
+
+ /*
+ * if the device hasn't been woken, we leave the output
+ * to resume()
+ */
if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
usbhid->urbout->dev = hid_to_usb_dev(hid);
@@ -313,17 +321,11 @@ static int hid_submit_out(struct hid_device *hid)
dbg_hid("submitting out urb\n");
if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
- err_hid("usb_submit_urb(out) failed");
+ hid_err(hid, "usb_submit_urb(out) failed\n");
+ usb_autopm_put_interface_async(usbhid->intf);
return -1;
}
usbhid->last_out = jiffies;
- } else {
- /*
- * queue work to wake up the device.
- * as the work queue is freezeable, this is safe
- * with respect to STD and STR
- */
- queue_work(resumption_waker, &usbhid->restart_work);
}
return 0;
@@ -334,13 +336,16 @@ static int hid_submit_ctrl(struct hid_device *hid)
struct hid_report *report;
unsigned char dir;
char *raw_report;
- int len;
+ int len, r;
struct usbhid_device *usbhid = hid->driver_data;
report = usbhid->ctrl[usbhid->ctrltail].report;
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return -1;
if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
if (dir == USB_DIR_OUT) {
@@ -375,17 +380,11 @@ static int hid_submit_ctrl(struct hid_device *hid)
usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
- err_hid("usb_submit_urb(ctrl) failed");
+ usb_autopm_put_interface_async(usbhid->intf);
+ hid_err(hid, "usb_submit_urb(ctrl) failed\n");
return -1;
}
usbhid->last_ctrl = jiffies;
- } else {
- /*
- * queue work to wake up the device.
- * as the work queue is freezeable, this is safe
- * with respect to STD and STR
- */
- queue_work(resumption_waker, &usbhid->restart_work);
}
return 0;
@@ -413,8 +412,8 @@ static void hid_irq_out(struct urb *urb)
case -ENOENT:
break;
default: /* error */
- dev_warn(&urb->dev->dev, "output irq status %d "
- "received\n", urb->status);
+ hid_warn(urb->dev, "output irq status %d received\n",
+ urb->status);
}
spin_lock_irqsave(&usbhid->lock, flags);
@@ -435,6 +434,7 @@ static void hid_irq_out(struct urb *urb)
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
spin_unlock_irqrestore(&usbhid->lock, flags);
+ usb_autopm_put_interface_async(usbhid->intf);
wake_up(&usbhid->wait);
}
@@ -466,8 +466,7 @@ static void hid_ctrl(struct urb *urb)
case -EPIPE: /* report not available */
break;
default: /* error */
- dev_warn(&urb->dev->dev, "ctrl urb status %d "
- "received\n", status);
+ hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
if (unplug)
@@ -481,11 +480,13 @@ static void hid_ctrl(struct urb *urb)
wake_up(&usbhid->wait);
}
spin_unlock(&usbhid->lock);
+ usb_autopm_put_interface_async(usbhid->intf);
return;
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
spin_unlock(&usbhid->lock);
+ usb_autopm_put_interface_async(usbhid->intf);
wake_up(&usbhid->wait);
}
@@ -501,13 +502,13 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) {
if ((head = (usbhid->outhead + 1) & (HID_OUTPUT_FIFO_SIZE - 1)) == usbhid->outtail) {
- dev_warn(&hid->dev, "output queue full\n");
+ hid_warn(hid, "output queue full\n");
return;
}
usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
if (!usbhid->out[usbhid->outhead].raw_report) {
- dev_warn(&hid->dev, "output queueing failed\n");
+ hid_warn(hid, "output queueing failed\n");
return;
}
hid_output_report(report, usbhid->out[usbhid->outhead].raw_report);
@@ -532,14 +533,14 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
}
if ((head = (usbhid->ctrlhead + 1) & (HID_CONTROL_FIFO_SIZE - 1)) == usbhid->ctrltail) {
- dev_warn(&hid->dev, "control queue full\n");
+ hid_warn(hid, "control queue full\n");
return;
}
if (dir == USB_DIR_OUT) {
usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
- dev_warn(&hid->dev, "control queueing failed\n");
+ hid_warn(hid, "control queueing failed\n");
return;
}
hid_output_report(report, usbhid->ctrl[usbhid->ctrlhead].raw_report);
@@ -590,7 +591,7 @@ static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, un
return -1;
if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
- dev_warn(&dev->dev, "event field not found\n");
+ hid_warn(dev, "event field not found\n");
return -1;
}
@@ -656,7 +657,7 @@ int usbhid_open(struct hid_device *hid)
mutex_lock(&hid_open_mut);
if (!hid->open++) {
res = usb_autopm_get_interface(usbhid->intf);
- /* the device must be awake to reliable request remote wakeup */
+ /* the device must be awake to reliably request remote wakeup */
if (res < 0) {
hid->open--;
mutex_unlock(&hid_open_mut);
@@ -722,7 +723,7 @@ void usbhid_init_reports(struct hid_device *hid)
}
if (err)
- dev_warn(&hid->dev, "timeout initializing reports\n");
+ hid_warn(hid, "timeout initializing reports\n");
}
/*
@@ -857,18 +858,6 @@ static void usbhid_restart_queues(struct usbhid_device *usbhid)
usbhid_restart_ctrl_queue(usbhid);
}
-static void __usbhid_restart_queues(struct work_struct *work)
-{
- struct usbhid_device *usbhid =
- container_of(work, struct usbhid_device, restart_work);
- int r;
-
- r = usb_autopm_get_interface(usbhid->intf);
- if (r < 0)
- return;
- usb_autopm_put_interface(usbhid->intf);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1140,8 +1129,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
if (usb_endpoint_is_int_in(&interface->endpoint[n].desc))
has_in++;
if (!has_in) {
- dev_err(&intf->dev, "couldn't find an input interrupt "
- "endpoint\n");
+ hid_err(intf, "couldn't find an input interrupt endpoint\n");
return -ENODEV;
}
@@ -1206,14 +1194,13 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
init_waitqueue_head(&usbhid->wait);
INIT_WORK(&usbhid->reset_work, hid_reset);
- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
- dev_err(&intf->dev, "can't add hid device: %d\n", ret);
+ hid_err(intf, "can't add hid device: %d\n", ret);
goto err_free;
}
@@ -1241,7 +1228,6 @@ static void usbhid_disconnect(struct usb_interface *intf)
static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
del_timer_sync(&usbhid->io_retry);
- cancel_work_sync(&usbhid->restart_work);
cancel_work_sync(&usbhid->reset_work);
}
@@ -1262,7 +1248,6 @@ static int hid_pre_reset(struct usb_interface *intf)
spin_lock_irq(&usbhid->lock);
set_bit(HID_RESET_PENDING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- cancel_work_sync(&usbhid->restart_work);
hid_cease_io(usbhid);
return 0;
@@ -1461,9 +1446,6 @@ static int __init hid_init(void)
{
int retval = -ENOMEM;
- resumption_waker = create_freezeable_workqueue("usbhid_resumer");
- if (!resumption_waker)
- goto no_queue;
retval = hid_register_driver(&hid_usb_driver);
if (retval)
goto hid_register_fail;
@@ -1481,8 +1463,6 @@ usb_register_fail:
usbhid_quirks_init_fail:
hid_unregister_driver(&hid_usb_driver);
hid_register_fail:
- destroy_workqueue(resumption_waker);
-no_queue:
return retval;
}
@@ -1491,7 +1471,6 @@ static void __exit hid_exit(void)
usb_deregister(&hid_driver);
usbhid_quirks_exit();
hid_unregister_driver(&hid_usb_driver);
- destroy_workqueue(resumption_waker);
}
module_init(hid_init);
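
usbhid drops the freezable resumption workqueue and instead takes an asynchronous autopm reference before each output or control URB, releasing it from the completion handlers. A compact sketch of that reference pairing; demo_submit and its arguments are hypothetical, the real code lives in hid_submit_out()/hid_submit_ctrl() above:

#include <linux/gfp.h>
#include <linux/usb.h>

/*
 * Take an asynchronous autopm reference before queueing the URB (which lets
 * the core resume a suspended device), and release it either on submission
 * failure or from the URB's completion handler.
 */
static int demo_submit(struct usb_interface *intf, struct urb *urb)
{
	int r;

	r = usb_autopm_get_interface_async(intf);
	if (r < 0)
		return r;

	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r) {
		usb_autopm_put_interface_async(intf);	/* balance on error */
		return r;
	}

	/* on success the URB's completion handler drops the reference */
	return 0;
}
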
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index ef381d79cfa8..f91c136821f7 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -22,7 +22,7 @@
/* #define DEBUG */
-#define debug(format, arg...) pr_debug("hid-pidff: " format "\n" , ## arg)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/input.h>
#include <linux/slab.h>
@@ -220,7 +220,7 @@ static int pidff_rescale_signed(int i, struct hid_field *field)
static void pidff_set(struct pidff_usage *usage, u16 value)
{
usage->value[0] = pidff_rescale(value, 0xffff, usage->field);
- debug("calculated from %d to %d", value, usage->value[0]);
+ pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
static void pidff_set_signed(struct pidff_usage *usage, s16 value)
@@ -235,7 +235,7 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
usage->value[0] =
pidff_rescale(value, 0x7fff, usage->field);
}
- debug("calculated from %d to %d", value, usage->value[0]);
+ pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
/*
@@ -259,8 +259,9 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length;
pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length;
- debug("attack %u => %d", envelope->attack_level,
- pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
+ hid_dbg(pidff->hid, "attack %u => %d\n",
+ envelope->attack_level,
+ pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_ENVELOPE],
USB_DIR_OUT);
@@ -466,33 +467,33 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
pidff->create_new_effect_type->value[0] = efnum;
usbhid_submit_report(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
USB_DIR_OUT);
- debug("create_new_effect sent, type: %d", efnum);
+ hid_dbg(pidff->hid, "create_new_effect sent, type: %d\n", efnum);
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
pidff->block_load_status->value[0] = 0;
usbhid_wait_io(pidff->hid);
for (j = 0; j < 60; j++) {
- debug("pid_block_load requested");
+ hid_dbg(pidff->hid, "pid_block_load requested\n");
usbhid_submit_report(pidff->hid, pidff->reports[PID_BLOCK_LOAD],
USB_DIR_IN);
usbhid_wait_io(pidff->hid);
if (pidff->block_load_status->value[0] ==
pidff->status_id[PID_BLOCK_LOAD_SUCCESS]) {
- debug("device reported free memory: %d bytes",
- pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
- pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
+ hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
+ pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
+ pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
return 0;
}
if (pidff->block_load_status->value[0] ==
pidff->status_id[PID_BLOCK_LOAD_FULL]) {
- debug("not enough memory free: %d bytes",
- pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
+ hid_dbg(pidff->hid, "not enough memory free: %d bytes\n",
+ pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
return -ENOSPC;
}
}
- printk(KERN_ERR "hid-pidff: pid_block_load failed 60 times\n");
+ hid_err(pidff->hid, "pid_block_load failed 60 times\n");
return -EIO;
}
@@ -546,7 +547,8 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
struct pidff_device *pidff = dev->ff->private;
int pid_id = pidff->pid_id[effect_id];
- debug("starting to erase %d/%d", effect_id, pidff->pid_id[effect_id]);
+ hid_dbg(pidff->hid, "starting to erase %d/%d\n",
+ effect_id, pidff->pid_id[effect_id]);
/* Wait for the queue to clear. We do not want a full fifo to
prevent the effect removal. */
usbhid_wait_io(pidff->hid);
@@ -604,8 +606,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
type_id = PID_SAW_DOWN;
break;
default:
- printk(KERN_ERR
- "hid-pidff: invalid waveform\n");
+ hid_err(pidff->hid, "invalid waveform\n");
return -EINVAL;
}
@@ -696,7 +697,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
break;
default:
- printk(KERN_ERR "hid-pidff: invalid type\n");
+ hid_err(pidff->hid, "invalid type\n");
return -EINVAL;
}
@@ -704,7 +705,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff->pid_id[effect->id] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
- debug("uploaded");
+ hid_dbg(pidff->hid, "uploaded\n");
return 0;
}
@@ -770,14 +771,14 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
for (i = 0; i < report->maxfield; i++) {
if (report->field[i]->maxusage !=
report->field[i]->report_count) {
- debug("maxusage and report_count do not match, "
- "skipping");
+ pr_debug("maxusage and report_count do not match, skipping\n");
continue;
}
for (j = 0; j < report->field[i]->maxusage; j++) {
if (report->field[i]->usage[j].hid ==
(HID_UP_PID | table[k])) {
- debug("found %d at %d->%d", k, i, j);
+ pr_debug("found %d at %d->%d\n",
+ k, i, j);
usage[k].field = report->field[i];
usage[k].value =
&report->field[i]->value[j];
@@ -789,7 +790,7 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
break;
}
if (!found && strict) {
- debug("failed to locate %d", k);
+ pr_debug("failed to locate %d\n", k);
return -1;
}
}
@@ -826,8 +827,8 @@ static void pidff_find_reports(struct hid_device *hid, int report_type,
continue;
ret = pidff_check_usage(report->field[0]->logical);
if (ret != -1) {
- debug("found usage 0x%02x from field->logical",
- pidff_reports[ret]);
+ hid_dbg(hid, "found usage 0x%02x from field->logical\n",
+ pidff_reports[ret]);
pidff->reports[ret] = report;
continue;
}
@@ -845,8 +846,9 @@ static void pidff_find_reports(struct hid_device *hid, int report_type,
continue;
ret = pidff_check_usage(hid->collection[i - 1].usage);
if (ret != -1 && !pidff->reports[ret]) {
- debug("found usage 0x%02x from collection array",
- pidff_reports[ret]);
+ hid_dbg(hid,
+ "found usage 0x%02x from collection array\n",
+ pidff_reports[ret]);
pidff->reports[ret] = report;
}
}
@@ -861,7 +863,7 @@ static int pidff_reports_ok(struct pidff_device *pidff)
for (i = 0; i <= PID_REQUIRED_REPORTS; i++) {
if (!pidff->reports[i]) {
- debug("%d missing", i);
+ hid_dbg(pidff->hid, "%d missing\n", i);
return 0;
}
}
@@ -884,8 +886,7 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
report->field[i]->logical_minimum == 1)
return report->field[i];
else {
- printk(KERN_ERR "hid-pidff: logical_minimum "
- "is not 1 as it should be\n");
+ pr_err("logical_minimum is not 1 as it should be\n");
return NULL;
}
}
@@ -924,7 +925,7 @@ static int pidff_find_special_keys(int *keys, struct hid_field *fld,
*/
static int pidff_find_special_fields(struct pidff_device *pidff)
{
- debug("finding special fields");
+ hid_dbg(pidff->hid, "finding special fields\n");
pidff->create_new_effect_type =
pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT],
@@ -945,32 +946,30 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION],
0x78, 1);
- debug("search done");
+ hid_dbg(pidff->hid, "search done\n");
if (!pidff->create_new_effect_type || !pidff->set_effect_type) {
- printk(KERN_ERR "hid-pidff: effect lists not found\n");
+ hid_err(pidff->hid, "effect lists not found\n");
return -1;
}
if (!pidff->effect_direction) {
- printk(KERN_ERR "hid-pidff: direction field not found\n");
+ hid_err(pidff->hid, "direction field not found\n");
return -1;
}
if (!pidff->device_control) {
- printk(KERN_ERR "hid-pidff: device control field not found\n");
+ hid_err(pidff->hid, "device control field not found\n");
return -1;
}
if (!pidff->block_load_status) {
- printk(KERN_ERR
- "hid-pidff: block load status field not found\n");
+ hid_err(pidff->hid, "block load status field not found\n");
return -1;
}
if (!pidff->effect_operation_status) {
- printk(KERN_ERR
- "hid-pidff: effect operation field not found\n");
+ hid_err(pidff->hid, "effect operation field not found\n");
return -1;
}
@@ -982,23 +981,22 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type,
effect_types)) {
- printk(KERN_ERR "hid-pidff: no effect types found\n");
+ hid_err(pidff->hid, "no effect types found\n");
return -1;
}
if (PIDFF_FIND_SPECIAL_KEYS(status_id, block_load_status,
block_load_status) !=
sizeof(pidff_block_load_status)) {
- printk(KERN_ERR
- "hidpidff: block load status identifiers not found\n");
+ hid_err(pidff->hid,
+ "block load status identifiers not found\n");
return -1;
}
if (PIDFF_FIND_SPECIAL_KEYS(operation_id, effect_operation_status,
effect_operation_status) !=
sizeof(pidff_effect_operation_status)) {
- printk(KERN_ERR
- "hidpidff: effect operation identifiers not found\n");
+ hid_err(pidff->hid, "effect operation identifiers not found\n");
return -1;
}
@@ -1017,8 +1015,8 @@ static int pidff_find_effects(struct pidff_device *pidff,
int pidff_type = pidff->type_id[i];
if (pidff->set_effect_type->usage[pidff_type].hid !=
pidff->create_new_effect_type->usage[pidff_type].hid) {
- printk(KERN_ERR "hid-pidff: "
- "effect type number %d is invalid\n", i);
+ hid_err(pidff->hid,
+ "effect type number %d is invalid\n", i);
return -1;
}
}
@@ -1073,27 +1071,23 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
int envelope_ok = 0;
if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
- printk(KERN_ERR
- "hid-pidff: unknown set_effect report layout\n");
+ hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
- printk(KERN_ERR
- "hid-pidff: unknown pid_block_load report layout\n");
+ hid_err(pidff->hid, "unknown pid_block_load report layout\n");
return -ENODEV;
}
if (PIDFF_FIND_FIELDS(effect_operation, PID_EFFECT_OPERATION, 1)) {
- printk(KERN_ERR
- "hid-pidff: unknown effect_operation report layout\n");
+ hid_err(pidff->hid, "unknown effect_operation report layout\n");
return -ENODEV;
}
if (PIDFF_FIND_FIELDS(block_free, PID_BLOCK_FREE, 1)) {
- printk(KERN_ERR
- "hid-pidff: unknown pid_block_free report layout\n");
+ hid_err(pidff->hid, "unknown pid_block_free report layout\n");
return -ENODEV;
}
@@ -1105,27 +1099,26 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
if (!envelope_ok) {
if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
- printk(KERN_WARNING "hid-pidff: "
- "has constant effect but no envelope\n");
+ hid_warn(pidff->hid,
+ "has constant effect but no envelope\n");
if (test_and_clear_bit(FF_RAMP, dev->ffbit))
- printk(KERN_WARNING "hid-pidff: "
- "has ramp effect but no envelope\n");
+ hid_warn(pidff->hid,
+ "has ramp effect but no envelope\n");
if (test_and_clear_bit(FF_PERIODIC, dev->ffbit))
- printk(KERN_WARNING "hid-pidff: "
- "has periodic effect but no envelope\n");
+ hid_warn(pidff->hid,
+ "has periodic effect but no envelope\n");
}
if (test_bit(FF_CONSTANT, dev->ffbit) &&
PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1)) {
- printk(KERN_WARNING
- "hid-pidff: unknown constant effect layout\n");
+ hid_warn(pidff->hid, "unknown constant effect layout\n");
clear_bit(FF_CONSTANT, dev->ffbit);
}
if (test_bit(FF_RAMP, dev->ffbit) &&
PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1)) {
- printk(KERN_WARNING "hid-pidff: unknown ramp effect layout\n");
+ hid_warn(pidff->hid, "unknown ramp effect layout\n");
clear_bit(FF_RAMP, dev->ffbit);
}
@@ -1134,8 +1127,7 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
test_bit(FF_FRICTION, dev->ffbit) ||
test_bit(FF_INERTIA, dev->ffbit)) &&
PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
- printk(KERN_WARNING
- "hid-pidff: unknown condition effect layout\n");
+ hid_warn(pidff->hid, "unknown condition effect layout\n");
clear_bit(FF_SPRING, dev->ffbit);
clear_bit(FF_DAMPER, dev->ffbit);
clear_bit(FF_FRICTION, dev->ffbit);
@@ -1144,8 +1136,7 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
if (test_bit(FF_PERIODIC, dev->ffbit) &&
PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1)) {
- printk(KERN_WARNING
- "hid-pidff: unknown periodic effect layout\n");
+ hid_warn(pidff->hid, "unknown periodic effect layout\n");
clear_bit(FF_PERIODIC, dev->ffbit);
}
@@ -1184,12 +1175,12 @@ static void pidff_reset(struct pidff_device *pidff)
if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
if (i++ > 20) {
- printk(KERN_WARNING "hid-pidff: device reports "
- "%d simultaneous effects\n",
- pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+ hid_warn(pidff->hid,
+ "device reports %d simultaneous effects\n",
+ pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
break;
}
- debug("pid_pool requested again");
+ hid_dbg(pidff->hid, "pid_pool requested again\n");
usbhid_submit_report(hid, pidff->reports[PID_POOL],
USB_DIR_IN);
usbhid_wait_io(hid);
@@ -1215,7 +1206,7 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
error = pidff_request_effect_upload(pidff, 1);
if (error) {
- printk(KERN_ERR "hid-pidff: upload request failed\n");
+ hid_err(pidff->hid, "upload request failed\n");
return error;
}
@@ -1224,8 +1215,8 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
pidff_autocenter(pidff, 0xffff);
set_bit(FF_AUTOCENTER, dev->ffbit);
} else {
- printk(KERN_NOTICE "hid-pidff: "
- "device has unknown autocenter control method\n");
+ hid_notice(pidff->hid,
+ "device has unknown autocenter control method\n");
}
pidff_erase_pid(pidff,
@@ -1248,10 +1239,10 @@ int hid_pidff_init(struct hid_device *hid)
int max_effects;
int error;
- debug("starting pid init");
+ hid_dbg(hid, "starting pid init\n");
if (list_empty(&hid->report_enum[HID_OUTPUT_REPORT].report_list)) {
- debug("not a PID device, no output report");
+ hid_dbg(hid, "not a PID device, no output report\n");
return -ENODEV;
}
@@ -1265,7 +1256,7 @@ int hid_pidff_init(struct hid_device *hid)
pidff_find_reports(hid, HID_FEATURE_REPORT, pidff);
if (!pidff_reports_ok(pidff)) {
- debug("reports not ok, aborting");
+ hid_dbg(hid, "reports not ok, aborting\n");
error = -ENODEV;
goto fail;
}
@@ -1278,8 +1269,8 @@ int hid_pidff_init(struct hid_device *hid)
if (test_bit(FF_GAIN, dev->ffbit)) {
pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff);
- usbhid_submit_report(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
- USB_DIR_OUT);
+ usbhid_submit_report(hid, pidff->reports[PID_DEVICE_GAIN],
+ USB_DIR_OUT);
}
error = pidff_check_autocenter(pidff, dev);
@@ -1290,23 +1281,23 @@ int hid_pidff_init(struct hid_device *hid)
pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_maximum -
pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum +
1;
- debug("max effects is %d", max_effects);
+ hid_dbg(hid, "max effects is %d\n", max_effects);
if (max_effects > PID_EFFECTS_MAX)
max_effects = PID_EFFECTS_MAX;
if (pidff->pool[PID_SIMULTANEOUS_MAX].value)
- debug("max simultaneous effects is %d",
- pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+ hid_dbg(hid, "max simultaneous effects is %d\n",
+ pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
if (pidff->pool[PID_RAM_POOL_SIZE].value)
- debug("device memory size is %d bytes",
- pidff->pool[PID_RAM_POOL_SIZE].value[0]);
+ hid_dbg(hid, "device memory size is %d bytes\n",
+ pidff->pool[PID_RAM_POOL_SIZE].value[0]);
if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
- printk(KERN_NOTICE "hid-pidff: "
- "device does not support device managed pool\n");
+ hid_notice(hid,
+ "device does not support device managed pool\n");
goto fail;
}
@@ -1322,8 +1313,7 @@ int hid_pidff_init(struct hid_device *hid)
ff->set_autocenter = pidff_set_autocenter;
ff->playback = pidff_playback;
- printk(KERN_INFO "Force feedback for USB HID PID devices by "
- "Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
return 0;
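
hid-pidff swaps its private debug() macro for pr_fmt plus the pr_debug()/hid_dbg() helpers, so every message automatically carries the module name. A tiny sketch of the pr_fmt convention; the module name "demo" is assumed, and pr_debug output additionally requires DEBUG or dynamic debug:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* with DEBUG or dynamic debug enabled this prints "demo: starting" */
	pr_debug("starting\n");
	/* always prints, e.g. "demo: something went wrong" */
	pr_err("something went wrong\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
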
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2c185477eeb3..9a94b643ccde 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -35,7 +35,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -85,7 +84,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
-
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 984feb351a5a..af0a7c1002af 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -585,163 +585,168 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hiddev_list *list = file->private_data;
struct hiddev *hiddev = list->hiddev;
- struct hid_device *hid = hiddev->hid;
- struct usb_device *dev;
+ struct hid_device *hid;
struct hiddev_collection_info cinfo;
struct hiddev_report_info rinfo;
struct hiddev_field_info finfo;
struct hiddev_devinfo dinfo;
struct hid_report *report;
struct hid_field *field;
- struct usbhid_device *usbhid = hid->driver_data;
void __user *user_arg = (void __user *)arg;
- int i, r;
-
+ int i, r = -EINVAL;
+
/* Called without BKL by compat methods so no BKL taken */
- /* FIXME: Who or what stop this racing with a disconnect ?? */
- if (!hiddev->exist || !hid)
- return -EIO;
+ mutex_lock(&hiddev->existancelock);
+ if (!hiddev->exist) {
+ r = -ENODEV;
+ goto ret_unlock;
+ }
- dev = hid_to_usb_dev(hid);
+ hid = hiddev->hid;
switch (cmd) {
case HIDIOCGVERSION:
- return put_user(HID_VERSION, (int __user *)arg);
+ r = put_user(HID_VERSION, (int __user *)arg) ?
+ -EFAULT : 0;
+ break;
case HIDIOCAPPLICATION:
if (arg < 0 || arg >= hid->maxapplication)
- return -EINVAL;
+ break;
for (i = 0; i < hid->maxcollection; i++)
if (hid->collection[i].type ==
HID_COLLECTION_APPLICATION && arg-- == 0)
break;
- if (i == hid->maxcollection)
- return -EINVAL;
-
- return hid->collection[i].usage;
+ if (i < hid->maxcollection)
+ r = hid->collection[i].usage;
+ break;
case HIDIOCGDEVINFO:
- dinfo.bustype = BUS_USB;
- dinfo.busnum = dev->bus->busnum;
- dinfo.devnum = dev->devnum;
- dinfo.ifnum = usbhid->ifnum;
- dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor);
- dinfo.product = le16_to_cpu(dev->descriptor.idProduct);
- dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice);
- dinfo.num_applications = hid->maxapplication;
- if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
- return -EFAULT;
-
- return 0;
+ {
+ struct usb_device *dev = hid_to_usb_dev(hid);
+ struct usbhid_device *usbhid = hid->driver_data;
+
+ dinfo.bustype = BUS_USB;
+ dinfo.busnum = dev->bus->busnum;
+ dinfo.devnum = dev->devnum;
+ dinfo.ifnum = usbhid->ifnum;
+ dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor);
+ dinfo.product = le16_to_cpu(dev->descriptor.idProduct);
+ dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice);
+ dinfo.num_applications = hid->maxapplication;
+
+ r = copy_to_user(user_arg, &dinfo, sizeof(dinfo)) ?
+ -EFAULT : 0;
+ break;
+ }
case HIDIOCGFLAG:
- if (put_user(list->flags, (int __user *)arg))
- return -EFAULT;
-
- return 0;
+ r = put_user(list->flags, (int __user *)arg) ?
+ -EFAULT : 0;
+ break;
case HIDIOCSFLAG:
{
int newflags;
- if (get_user(newflags, (int __user *)arg))
- return -EFAULT;
+
+ if (get_user(newflags, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
if ((newflags & ~HIDDEV_FLAGS) != 0 ||
((newflags & HIDDEV_FLAG_REPORT) != 0 &&
(newflags & HIDDEV_FLAG_UREF) == 0))
- return -EINVAL;
+ break;
list->flags = newflags;
- return 0;
+ r = 0;
+ break;
}
case HIDIOCGSTRING:
- mutex_lock(&hiddev->existancelock);
- if (hiddev->exist)
- r = hiddev_ioctl_string(hiddev, cmd, user_arg);
- else
- r = -ENODEV;
- mutex_unlock(&hiddev->existancelock);
- return r;
+ r = hiddev_ioctl_string(hiddev, cmd, user_arg);
+ break;
case HIDIOCINITREPORT:
- mutex_lock(&hiddev->existancelock);
- if (!hiddev->exist) {
- mutex_unlock(&hiddev->existancelock);
- return -ENODEV;
- }
usbhid_init_reports(hid);
- mutex_unlock(&hiddev->existancelock);
-
- return 0;
+ r = 0;
+ break;
case HIDIOCGREPORT:
- if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
- return -EFAULT;
+ if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+ r = -EFAULT;
+ break;
+ }
if (rinfo.report_type == HID_REPORT_TYPE_OUTPUT)
- return -EINVAL;
+ break;
- if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
- return -EINVAL;
+ report = hiddev_lookup_report(hid, &rinfo);
+ if (report == NULL)
+ break;
- mutex_lock(&hiddev->existancelock);
- if (hiddev->exist) {
- usbhid_submit_report(hid, report, USB_DIR_IN);
- usbhid_wait_io(hid);
- }
- mutex_unlock(&hiddev->existancelock);
+ usbhid_submit_report(hid, report, USB_DIR_IN);
+ usbhid_wait_io(hid);
- return 0;
+ r = 0;
+ break;
case HIDIOCSREPORT:
- if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
- return -EFAULT;
+ if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+ r = -EFAULT;
+ break;
+ }
if (rinfo.report_type == HID_REPORT_TYPE_INPUT)
- return -EINVAL;
+ break;
- if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
- return -EINVAL;
+ report = hiddev_lookup_report(hid, &rinfo);
+ if (report == NULL)
+ break;
- mutex_lock(&hiddev->existancelock);
- if (hiddev->exist) {
- usbhid_submit_report(hid, report, USB_DIR_OUT);
- usbhid_wait_io(hid);
- }
- mutex_unlock(&hiddev->existancelock);
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ usbhid_wait_io(hid);
- return 0;
+ r = 0;
+ break;
case HIDIOCGREPORTINFO:
- if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
- return -EFAULT;
+ if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+ r = -EFAULT;
+ break;
+ }
- if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
- return -EINVAL;
+ report = hiddev_lookup_report(hid, &rinfo);
+ if (report == NULL)
+ break;
rinfo.num_fields = report->maxfield;
- if (copy_to_user(user_arg, &rinfo, sizeof(rinfo)))
- return -EFAULT;
-
- return 0;
+ r = copy_to_user(user_arg, &rinfo, sizeof(rinfo)) ?
+ -EFAULT : 0;
+ break;
case HIDIOCGFIELDINFO:
- if (copy_from_user(&finfo, user_arg, sizeof(finfo)))
- return -EFAULT;
+ if (copy_from_user(&finfo, user_arg, sizeof(finfo))) {
+ r = -EFAULT;
+ break;
+ }
+
rinfo.report_type = finfo.report_type;
rinfo.report_id = finfo.report_id;
- if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
- return -EINVAL;
+
+ report = hiddev_lookup_report(hid, &rinfo);
+ if (report == NULL)
+ break;
if (finfo.field_index >= report->maxfield)
- return -EINVAL;
+ break;
field = report->field[finfo.field_index];
memset(&finfo, 0, sizeof(finfo));
@@ -760,10 +765,9 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
finfo.unit_exponent = field->unit_exponent;
finfo.unit = field->unit;
- if (copy_to_user(user_arg, &finfo, sizeof(finfo)))
- return -EFAULT;
-
- return 0;
+ r = copy_to_user(user_arg, &finfo, sizeof(finfo)) ?
+ -EFAULT : 0;
+ break;
case HIDIOCGUCODE:
/* fall through */
@@ -772,57 +776,66 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case HIDIOCGUSAGES:
case HIDIOCSUSAGES:
case HIDIOCGCOLLECTIONINDEX:
- mutex_lock(&hiddev->existancelock);
- if (hiddev->exist)
- r = hiddev_ioctl_usage(hiddev, cmd, user_arg);
- else
- r = -ENODEV;
- mutex_unlock(&hiddev->existancelock);
- return r;
+ r = hiddev_ioctl_usage(hiddev, cmd, user_arg);
+ break;
case HIDIOCGCOLLECTIONINFO:
- if (copy_from_user(&cinfo, user_arg, sizeof(cinfo)))
- return -EFAULT;
+ if (copy_from_user(&cinfo, user_arg, sizeof(cinfo))) {
+ r = -EFAULT;
+ break;
+ }
if (cinfo.index >= hid->maxcollection)
- return -EINVAL;
+ break;
cinfo.type = hid->collection[cinfo.index].type;
cinfo.usage = hid->collection[cinfo.index].usage;
cinfo.level = hid->collection[cinfo.index].level;
- if (copy_to_user(user_arg, &cinfo, sizeof(cinfo)))
- return -EFAULT;
- return 0;
+ r = copy_to_user(user_arg, &cinfo, sizeof(cinfo)) ?
+ -EFAULT : 0;
+ break;
default:
-
if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ)
- return -EINVAL;
+ break;
if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGNAME(0))) {
int len;
- if (!hid->name)
- return 0;
+
+ if (!hid->name) {
+ r = 0;
+ break;
+ }
+
len = strlen(hid->name) + 1;
if (len > _IOC_SIZE(cmd))
len = _IOC_SIZE(cmd);
- return copy_to_user(user_arg, hid->name, len) ?
+ r = copy_to_user(user_arg, hid->name, len) ?
-EFAULT : len;
+ break;
}
if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGPHYS(0))) {
int len;
- if (!hid->phys)
- return 0;
+
+ if (!hid->phys) {
+ r = 0;
+ break;
+ }
+
len = strlen(hid->phys) + 1;
if (len > _IOC_SIZE(cmd))
len = _IOC_SIZE(cmd);
- return copy_to_user(user_arg, hid->phys, len) ?
+ r = copy_to_user(user_arg, hid->phys, len) ?
-EFAULT : len;
+ break;
}
}
- return -EINVAL;
+
+ret_unlock:
+ mutex_unlock(&hiddev->existancelock);
+ return r;
}
#ifdef CONFIG_COMPAT
@@ -892,7 +905,7 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hiddev->exist = 1;
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
- err_hid("Not able to get a minor for this device.");
+ hid_err(hid, "Not able to get a minor for this device\n");
hid->hiddev = NULL;
kfree(hiddev);
return -1;
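
The hiddev_ioctl() rework above folds the scattered per-command existancelock handling into one lock taken around the whole switch, a default return value of -EINVAL, and a single unlock/return path; holding the lock for the duration is what closes the disconnect race the removed FIXME complained about. A minimal standalone sketch of that shape (struct example_dev and EXAMPLE_IOCGVAL are illustrative names, not the real hiddev symbols):

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define EXAMPLE_IOCGVAL _IOR('E', 0x01, int)    /* made-up ioctl number */

struct example_dev {
        struct mutex existancelock;
        bool exist;
        int value;
};

static long example_ioctl(struct example_dev *edev, unsigned int cmd,
                          void __user *arg)
{
        long r = -EINVAL;                       /* default for unknown commands */

        mutex_lock(&edev->existancelock);
        if (!edev->exist) {                     /* device already disconnected */
                r = -ENODEV;
                goto out_unlock;
        }

        switch (cmd) {
        case EXAMPLE_IOCGVAL:
                r = put_user(edev->value, (int __user *)arg) ? -EFAULT : 0;
                break;                          /* fall through to the one unlock */
        default:
                break;                          /* keep the -EINVAL default */
        }

out_unlock:
        mutex_unlock(&edev->existancelock);
        return r;
}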
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 89d2e847dcc6..1673cac93d77 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -95,7 +95,6 @@ struct usbhid_device {
unsigned long stop_retry; /* Time to give up, in jiffies */
unsigned int retry_delay; /* Delay length in ms */
struct work_struct reset_work; /* Task context for resets */
- struct work_struct restart_work; /* waking up for output to be done in a task */
wait_queue_head_t wait; /* For sleeping */
int ledcount; /* counting the number of active leds */
};
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index a948605564fb..065817329f03 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -24,6 +24,8 @@
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -104,16 +106,18 @@ static void usb_kbd_irq(struct urb *urb)
if (usb_kbd_keycode[kbd->old[i]])
input_report_key(kbd->dev, usb_kbd_keycode[kbd->old[i]], 0);
else
- dev_info(&urb->dev->dev,
- "Unknown key (scancode %#x) released.\n", kbd->old[i]);
+ hid_info(urb->dev,
+ "Unknown key (scancode %#x) released.\n",
+ kbd->old[i]);
}
if (kbd->new[i] > 3 && memscan(kbd->old + 2, kbd->new[i], 6) == kbd->old + 8) {
if (usb_kbd_keycode[kbd->new[i]])
input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1);
else
- dev_info(&urb->dev->dev,
- "Unknown key (scancode %#x) released.\n", kbd->new[i]);
+ hid_info(urb->dev,
+ "Unknown key (scancode %#x) released.\n",
+ kbd->new[i]);
}
}
@@ -124,9 +128,9 @@ static void usb_kbd_irq(struct urb *urb)
resubmit:
i = usb_submit_urb (urb, GFP_ATOMIC);
if (i)
- err_hid ("can't resubmit intr, %s-%s/input0, status %d",
- kbd->usbdev->bus->bus_name,
- kbd->usbdev->devpath, i);
+ hid_err(urb->dev, "can't resubmit intr, %s-%s/input0, status %d",
+ kbd->usbdev->bus->bus_name,
+ kbd->usbdev->devpath, i);
}
static int usb_kbd_event(struct input_dev *dev, unsigned int type,
@@ -150,7 +154,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
*(kbd->leds) = kbd->newleds;
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
- err_hid("usb_submit_urb(leds) failed");
+ pr_err("usb_submit_urb(leds) failed\n");
return 0;
}
@@ -160,7 +164,7 @@ static void usb_kbd_led(struct urb *urb)
struct usb_kbd *kbd = urb->context;
if (urb->status)
- dev_warn(&urb->dev->dev, "led urb status %d received\n",
+ hid_warn(urb->dev, "led urb status %d received\n",
urb->status);
if (*(kbd->leds) == kbd->newleds)
@@ -169,7 +173,7 @@ static void usb_kbd_led(struct urb *urb)
*(kbd->leds) = kbd->newleds;
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
- err_hid("usb_submit_urb(leds) failed");
+ hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
}
static int usb_kbd_open(struct input_dev *dev)
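
The usbkbd changes above replace the old err_hid()/dev_*() calls with the hid_*() helpers, which prefix every message with the device, and define pr_fmt so that bare pr_*() calls pick up the module name. A short sketch of the same conventions (report_status() and hdev are made-up names; the hid_*() macros are the ones used throughout this patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hid.h>
#include <linux/printk.h>

static void report_status(struct hid_device *hdev, int status)
{
        if (status)
                hid_err(hdev, "transfer failed, status %d\n", status);
        else
                hid_dbg(hdev, "transfer ok\n");

        /* no device at hand: pr_err() still gets the module-name prefix */
        pr_err("stopping after repeated failures\n");
}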
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a56f6adf3b76..297bc9a7d6e6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
will be called k8temp.
config SENSORS_K10TEMP
- tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+ tristate "AMD Family 10h/11h/12h/14h temperature sensor"
depends on X86 && PCI
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
- the AMD Family 10h and all revisions of the AMD Family 11h
- microarchitectures.
+ the AMD Family 10h and all revisions of the AMD Family 11h,
+ 12h (Llano), and 14h (Brazos) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -274,6 +274,16 @@ config SENSORS_ATXP1
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_DS620
+ tristate "Dallas Semiconductor DS620"
+ depends on I2C
+ help
+ If you say yes here you get support for the Dallas Semiconductor
+ DS620 sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called ds620.
+
config SENSORS_DS1621
tristate "Dallas Semiconductor DS1621 and DS1625"
depends on I2C
@@ -445,13 +455,14 @@ config SENSORS_JZ4740
called jz4740-hwmon.
config SENSORS_JC42
- tristate "JEDEC JC42.4 compliant temperature sensors"
+ tristate "JEDEC JC42.4 compliant memory module temperature sensors"
depends on I2C
help
- If you say yes here you get support for Jedec JC42.4 compliant
- temperature sensors. Support will include, but not be limited to,
- ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+ If you say yes here, you get support for JEDEC JC42.4 compliant
+ temperature sensors, which are used on many DDR3 memory modules for
+ mobile devices and servers. Support will include, but not be limited
+ to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+ MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -564,7 +575,7 @@ config SENSORS_LM85
help
If you say yes here you get support for National Semiconductor LM85
sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
- EMC6D101 and EMC6D102.
+ EMC6D101, EMC6D102, and EMC6D103.
This driver can also be built as a module. If so, the module
will be called lm85.
@@ -608,8 +619,8 @@ config SENSORS_LM93
depends on I2C
select HWMON_VID
help
- If you say yes here you get support for National Semiconductor LM93
- sensor chips.
+ If you say yes here you get support for National Semiconductor LM93,
+ LM94, and compatible sensor chips.
This driver can also be built as a module. If so, the module
will be called lm93.
@@ -734,6 +745,16 @@ config SENSORS_SHT15
This driver can also be built as a module. If so, the module
will be called sht15.
+config SENSORS_SHT21
+ tristate "Sensiron humidity and temperature sensors. SHT21 and compat."
+ depends on I2C
+ help
+ If you say yes here you get support for the Sensirion SHT21 and SHT25
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht21.
+
config SENSORS_S3C
tristate "Samsung built-in ADC"
depends on S3C_ADC
@@ -789,10 +810,10 @@ config SENSORS_DME1737
will be called dme1737.
config SENSORS_EMC1403
- tristate "SMSC EMC1403 thermal sensor"
+ tristate "SMSC EMC1403/23 thermal sensor"
depends on I2C
help
- If you say yes here you get support for the SMSC EMC1403
+ If you say yes here you get support for the SMSC EMC1403/23
temperature monitoring chip.
Threshold values can be configured using sysfs.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2479b3da272c..dde02d99c238 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_PKGTEMP) += pkgtemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
+obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
+obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 03694cc17a32..8f07a9dda152 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -20,6 +20,9 @@
the custom Abit uGuru chip found on Abit uGuru motherboards. Note: because
of lack of specs the CPU/RAM voltage & frequency control is not supported!
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -220,6 +223,10 @@ struct abituguru_data {
u8 pwm_settings[ABIT_UGURU_MAX_PWMS][5];
};
+static const char *never_happen = "This should never happen.";
+static const char *report_this =
+ "Please report this to the abituguru maintainer (see MAINTAINERS)";
+
/* wait till the uguru is in the specified state */
static int abituguru_wait(struct abituguru_data *data, u8 state)
{
@@ -438,8 +445,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/* Test val is sane / usable for sensor type detection. */
if ((val < 10u) || (val > 250u)) {
- printk(KERN_WARNING ABIT_UGURU_NAME
- ": bank1-sensor: %d reading (%d) too close to limits, "
+ pr_warn("bank1-sensor: %d reading (%d) too close to limits, "
"unable to determine sensor type, skipping sensor\n",
(int)sensor_addr, (int)val);
/* assume no sensor is there for sensors for which we can't
@@ -535,10 +541,8 @@ abituguru_detect_bank1_sensor_type_exit:
3) == 3)
break;
if (i == 3) {
- printk(KERN_ERR ABIT_UGURU_NAME
- ": Fatal error could not restore original settings. "
- "This should never happen please report this to the "
- "abituguru maintainer (see MAINTAINERS)\n");
+ pr_err("Fatal error could not restore original settings. %s %s\n",
+ never_happen, report_this);
return -ENODEV;
}
return ret;
@@ -1268,14 +1272,12 @@ static int __devinit abituguru_probe(struct platform_device *pdev)
}
/* Fail safe check, this should never happen! */
if (sysfs_names_free < 0) {
- printk(KERN_ERR ABIT_UGURU_NAME ": Fatal error ran out of "
- "space for sysfs attr names. This should never "
- "happen please report to the abituguru maintainer "
- "(see MAINTAINERS)\n");
+ pr_err("Fatal error ran out of space for sysfs attr names. %s %s",
+ never_happen, report_this);
res = -ENAMETOOLONG;
goto abituguru_probe_error;
}
- printk(KERN_INFO ABIT_UGURU_NAME ": found Abit uGuru\n");
+ pr_info("found Abit uGuru\n");
/* Register sysfs hooks */
for (i = 0; i < sysfs_attr_i; i++)
@@ -1432,8 +1434,7 @@ static int __init abituguru_detect(void)
"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
if (force) {
- printk(KERN_INFO ABIT_UGURU_NAME ": Assuming Abit uGuru is "
- "present because of \"force\" parameter\n");
+ pr_info("Assuming Abit uGuru is present because of \"force\" parameter\n");
return ABIT_UGURU_BASE;
}
@@ -1467,8 +1468,7 @@ static int __init abituguru_init(void)
abituguru_pdev = platform_device_alloc(ABIT_UGURU_NAME, address);
if (!abituguru_pdev) {
- printk(KERN_ERR ABIT_UGURU_NAME
- ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
err = -ENOMEM;
goto exit_driver_unregister;
}
@@ -1479,15 +1479,13 @@ static int __init abituguru_init(void)
err = platform_device_add_resources(abituguru_pdev, &res, 1);
if (err) {
- printk(KERN_ERR ABIT_UGURU_NAME
- ": Device resource addition failed (%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(abituguru_pdev);
if (err) {
- printk(KERN_ERR ABIT_UGURU_NAME
- ": Device addition failed (%d)\n", err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
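
Besides the pr_fmt prefix, the abituguru conversion above factors the repeated "This should never happen / please report" tails into shared string constants that are appended via %s, which keeps each format string on one line. A compact sketch of that pattern (names are illustrative):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static const char *never_happen = "This should never happen.";
static const char *report_this =
        "Please report this to the maintainer (see MAINTAINERS)";

static void report_fatal(int err)
{
        /* the common tail is passed through %s %s instead of being
         * repeated in every caller's format string */
        pr_err("Fatal error %d while restoring settings. %s %s\n",
               err, never_happen, report_this);
}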
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 3cf28af614b5..48d21e22e930 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -23,6 +23,9 @@
chip found on newer Abit uGuru motherboards. Note: because of lack of specs
only reading the sensors and their settings is supported.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -608,6 +611,9 @@ static int verbose = 1;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
+static const char *never_happen = "This should never happen.";
+static const char *report_this =
+ "Please report this to the abituguru3 maintainer (see MAINTAINERS)";
/* wait while the uguru is busy (usually after a write) */
static int abituguru3_wait_while_busy(struct abituguru3_data *data)
@@ -940,15 +946,13 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
if (abituguru3_motherboards[i].id == id)
break;
if (!abituguru3_motherboards[i].id) {
- printk(KERN_ERR ABIT_UGURU3_NAME ": error unknown motherboard "
- "ID: %04X. Please report this to the abituguru3 "
- "maintainer (see MAINTAINERS)\n", (unsigned int)id);
+ pr_err("error unknown motherboard ID: %04X. %s\n",
+ (unsigned int)id, report_this);
goto abituguru3_probe_error;
}
data->sensors = abituguru3_motherboards[i].sensors;
- printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
- "ID: %04X\n", (unsigned int)id);
+ pr_info("found Abit uGuru3, motherboard ID: %04X\n", (unsigned int)id);
/* Fill the sysfs attr array */
sysfs_attr_i = 0;
@@ -957,11 +961,8 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
for (i = 0; data->sensors[i].name; i++) {
/* Fail safe check, this should never happen! */
if (i >= ABIT_UGURU3_MAX_NO_SENSORS) {
- printk(KERN_ERR ABIT_UGURU3_NAME
- ": Fatal error motherboard has more sensors "
- "then ABIT_UGURU3_MAX_NO_SENSORS. This should "
- "never happen please report to the abituguru3 "
- "maintainer (see MAINTAINERS)\n");
+ pr_err("Fatal error motherboard has more sensors then ABIT_UGURU3_MAX_NO_SENSORS. %s %s\n",
+ never_happen, report_this);
res = -ENAMETOOLONG;
goto abituguru3_probe_error;
}
@@ -983,10 +984,8 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
}
/* Fail safe check, this should never happen! */
if (sysfs_names_free < 0) {
- printk(KERN_ERR ABIT_UGURU3_NAME
- ": Fatal error ran out of space for sysfs attr names. "
- "This should never happen please report to the "
- "abituguru3 maintainer (see MAINTAINERS)\n");
+ pr_err("Fatal error ran out of space for sysfs attr names. %s %s\n",
+ never_happen, report_this);
res = -ENAMETOOLONG;
goto abituguru3_probe_error;
}
@@ -1189,8 +1188,7 @@ static int __init abituguru3_detect(void)
"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
if (force) {
- printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
- "present because of \"force\" parameter\n");
+ pr_info("Assuming Abit uGuru3 is present because of \"force\" parameter\n");
return 0;
}
@@ -1219,10 +1217,8 @@ static int __init abituguru3_init(void)
return err;
#ifdef CONFIG_DMI
- printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
- "not detected using DMI. Please send the output of "
- "\"dmidecode\" to the abituguru3 maintainer "
- "(see MAINTAINERS)\n");
+ pr_warn("this motherboard was not detected using DMI. "
+ "Please send the output of \"dmidecode\" to the abituguru3 maintainer (see MAINTAINERS)\n");
#endif
}
@@ -1233,8 +1229,7 @@ static int __init abituguru3_init(void)
abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME,
ABIT_UGURU3_BASE);
if (!abituguru3_pdev) {
- printk(KERN_ERR ABIT_UGURU3_NAME
- ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
err = -ENOMEM;
goto exit_driver_unregister;
}
@@ -1245,15 +1240,13 @@ static int __init abituguru3_init(void)
err = platform_device_add_resources(abituguru3_pdev, &res, 1);
if (err) {
- printk(KERN_ERR ABIT_UGURU3_NAME
- ": Device resource addition failed (%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(abituguru3_pdev);
if (err) {
- printk(KERN_ERR ABIT_UGURU3_NAME
- ": Device addition failed (%d)\n", err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
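
The one-line MODULE_DEVICE_TABLE(i2c, ...) addition above exports the id table as module aliases, so udev/modprobe can autoload the driver when a matching I2C device is instantiated; the same hook is added to adt7411 further down. A minimal sketch of the pattern, with "exdrv" as a made-up driver name and the probe signature of this era:

#include <linux/i2c.h>
#include <linux/module.h>

static int exdrv_probe(struct i2c_client *client,
                       const struct i2c_device_id *id)
{
        dev_info(&client->dev, "bound to %s\n", id->name);
        return 0;
}

static const struct i2c_device_id exdrv_id[] = {
        { "exdrv", 0 },
        { }
};
MODULE_DEVICE_TABLE(i2c, exdrv_id);     /* emits the i2c:exdrv module alias */

static struct i2c_driver exdrv_driver = {
        .driver   = { .name = "exdrv" },
        .probe    = exdrv_probe,
        .id_table = exdrv_id,
};
/* i2c_add_driver()/i2c_del_driver() registration omitted for brevity */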
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 0727ad250793..9e234b981b83 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -20,7 +20,7 @@
* Alarms 16-bit map of active alarms
* Analog Out 0..1250 mV output
*
- * Chassis Intrusion: clear CI latch with 'echo 1 > chassis_clear'
+ * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm'
*
* Test hardware: Intel SE440BX-2 desktop motherboard --Grant
*
@@ -476,13 +476,16 @@ static ssize_t set_aout(struct device *dev,
static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
/* chassis_clear */
-static ssize_t chassis_clear(struct device *dev,
+static ssize_t chassis_clear_legacy(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned long val = simple_strtol(buf, NULL, 10);
+ dev_warn(dev, "Attribute chassis_clear is deprecated, "
+ "use intrusion0_alarm instead\n");
+
if (val == 1) {
i2c_smbus_write_byte_data(client,
ADM9240_REG_CHASSIS_CLEAR, 0x80);
@@ -490,7 +493,29 @@ static ssize_t chassis_clear(struct device *dev,
}
return count;
}
-static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear);
+static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear_legacy);
+
+static ssize_t chassis_clear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm9240_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80);
+ data->valid = 0; /* Force cache refresh */
+ mutex_unlock(&data->update_lock);
+ dev_dbg(&client->dev, "chassis intrusion latch cleared\n");
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
+ chassis_clear, 12);
static struct attribute *adm9240_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -532,6 +557,7 @@ static struct attribute *adm9240_attributes[] = {
&dev_attr_alarms.attr,
&dev_attr_aout_output.attr,
&dev_attr_chassis_clear.attr,
+ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
&dev_attr_cpu0_vid.attr,
NULL
};
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index aac85f3aed50..c42c5a69a664 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -4,7 +4,7 @@
This driver is based on the lm75 and other lm_sensors/hwmon drivers
- Written by Steve Hardy <steve@linuxrealtime.co.uk>
+ Written by Steve Hardy <shardy@redhat.com>
Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf
@@ -271,7 +271,7 @@ static void __exit sensors_ads7828_exit(void)
i2c_del_driver(&ads7828_driver);
}
-MODULE_AUTHOR("Steve Hardy <steve@linuxrealtime.co.uk>");
+MODULE_AUTHOR("Steve Hardy <shardy@redhat.com>");
MODULE_DESCRIPTION("ADS7828 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 87d92a56a939..c6d1ce059aea 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
@@ -274,7 +276,7 @@ static int adt7470_read_temperatures(struct i2c_client *client,
i2c_smbus_write_byte_data(client, ADT7470_REG_PWM_CFG(2), pwm_cfg[1]);
if (res) {
- printk(KERN_ERR "ha ha, interrupted");
+ pr_err("ha ha, interrupted\n");
return -EAGAIN;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b6598aa557a0..4c0743660e9c 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -4,6 +4,7 @@
* computers.
*
* Copyright (C) 2007 Nicolas Boichat <nicolas@boichat.ch>
+ * Copyright (C) 2010 Henrik Rydberg <rydberg@euromail.se>
*
* Based on hdaps.c driver:
* Copyright (C) 2005 Robert Love <rml@novell.com>
@@ -26,10 +27,13 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/dmi.h>
@@ -49,6 +53,7 @@
#define APPLESMC_MAX_DATA_LENGTH 32
+/* wait up to 32 ms for a status change. */
#define APPLESMC_MIN_WAIT 0x0040
#define APPLESMC_MAX_WAIT 0x8000
@@ -73,104 +78,15 @@
#define FANS_COUNT "FNum" /* r-o ui8 */
#define FANS_MANUAL "FS! " /* r-w ui16 */
-#define FAN_ACTUAL_SPEED "F0Ac" /* r-o fpe2 (2 bytes) */
-#define FAN_MIN_SPEED "F0Mn" /* r-o fpe2 (2 bytes) */
-#define FAN_MAX_SPEED "F0Mx" /* r-o fpe2 (2 bytes) */
-#define FAN_SAFE_SPEED "F0Sf" /* r-o fpe2 (2 bytes) */
-#define FAN_TARGET_SPEED "F0Tg" /* r-w fpe2 (2 bytes) */
-#define FAN_POSITION "F0ID" /* r-o char[16] */
-
-/*
- * Temperature sensors keys (sp78 - 2 bytes).
- */
-static const char *temperature_sensors_sets[][41] = {
-/* Set 0: Macbook Pro */
- { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H",
- "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL },
-/* Set 1: Macbook2 set */
- { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "TTF0", "Th0H",
- "Th0S", "Th1H", NULL },
-/* Set 2: Macbook set */
- { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "Th0H", "Th0S",
- "Th1H", "Ts0P", NULL },
-/* Set 3: Macmini set */
- { "TC0D", "TC0P", NULL },
-/* Set 4: Mac Pro (2 x Quad-Core) */
- { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
- "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "THTG", "TH0P",
- "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
- "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P",
- "TM9S", "TN0H", "TS0C", NULL },
-/* Set 5: iMac */
- { "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P",
- "Tp0C", NULL },
-/* Set 6: Macbook3 set */
- { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
- "Th0S", "Th1H", NULL },
-/* Set 7: Macbook Air */
- { "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TC0P", "TCFP",
- "TTF0", "TW0P", "Th0H", "Tp0P", "TpFP", "Ts0P", "Ts0S", NULL },
-/* Set 8: Macbook Pro 4,1 (Penryn) */
- { "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P", "Th0H",
- "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
-/* Set 9: Macbook Pro 3,1 (Santa Rosa) */
- { "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P",
- "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
-/* Set 10: iMac 5,1 */
- { "TA0P", "TC0D", "TC0P", "TG0D", "TH0P", "TO0P", "Tm0P", NULL },
-/* Set 11: Macbook 5,1 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0P", "TN0D", "TN0P",
- "TTF0", "Th0H", "Th1H", "ThFH", "Ts0P", "Ts0S", NULL },
-/* Set 12: Macbook Pro 5,1 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
- "TG0F", "TG0H", "TG0P", "TG0T", "TG1H", "TN0D", "TN0P", "TTF0",
- "Th2H", "Tm0P", "Ts0P", "Ts0S", NULL },
-/* Set 13: iMac 8,1 */
- { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
- "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
-/* Set 14: iMac 6,1 */
- { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
- "TO0P", "Tp0P", NULL },
-/* Set 15: MacBook Air 2,1 */
- { "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0",
- "TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P",
- "Ts0S", NULL },
-/* Set 16: Mac Pro 3,1 (2 x Quad-Core) */
- { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
- "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "TH0P", "TH1P",
- "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", "TM1P",
- "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
- "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
- NULL },
-/* Set 17: iMac 9,1 */
- { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
- "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
-/* Set 18: MacBook Pro 2,2 */
- { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
- "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
-/* Set 19: Macbook Pro 5,3 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
- "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
- "Tm0P", "Ts0P", "Ts0S", NULL },
-/* Set 20: MacBook Pro 5,4 */
- { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
- "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
-/* Set 21: MacBook Pro 6,2 */
- { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
- "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
- "Ts0P", "Ts0S", NULL },
-/* Set 22: MacBook Pro 7,1 */
- { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
- "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
-};
+#define FAN_ID_FMT "F%dID" /* r-o char[16] */
/* List of keys used to read/write fan speeds */
-static const char* fan_speed_keys[] = {
- FAN_ACTUAL_SPEED,
- FAN_MIN_SPEED,
- FAN_MAX_SPEED,
- FAN_SAFE_SPEED,
- FAN_TARGET_SPEED
+static const char *const fan_speed_fmt[] = {
+ "F%dAc", /* actual speed */
+ "F%dMn", /* minimum speed (rw) */
+ "F%dMx", /* maximum speed */
+ "F%dSf", /* safe speed - not all models */
+ "F%dTg", /* target speed (manual: rw) */
};
#define INIT_TIMEOUT_MSECS 5000 /* wait up to 5s for device init ... */
@@ -184,14 +100,48 @@ static const char* fan_speed_keys[] = {
#define SENSOR_Y 1
#define SENSOR_Z 2
-/* Structure to be passed to DMI_MATCH function */
-struct dmi_match_data {
-/* Indicates whether this computer has an accelerometer. */
- int accelerometer;
-/* Indicates whether this computer has light sensors and keyboard backlight. */
- int light;
-/* Indicates which temperature sensors set to use. */
- int temperature_set;
+#define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff)
+#define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16)
+
+/* Dynamic device node attributes */
+struct applesmc_dev_attr {
+ struct sensor_device_attribute sda; /* hwmon attributes */
+ char name[32]; /* room for node file name */
+};
+
+/* Dynamic device node group */
+struct applesmc_node_group {
+ char *format; /* format string */
+ void *show; /* show function */
+ void *store; /* store function */
+ int option; /* function argument */
+ struct applesmc_dev_attr *nodes; /* dynamic node array */
+};
+
+/* AppleSMC entry - cached register information */
+struct applesmc_entry {
+ char key[5]; /* four-letter key code */
+ u8 valid; /* set when entry is successfully read once */
+ u8 len; /* bounded by APPLESMC_MAX_DATA_LENGTH */
+ char type[5]; /* four-letter type code */
+ u8 flags; /* 0x10: func; 0x40: write; 0x80: read */
+};
+
+/* Register lookup and registers common to all SMCs */
+static struct applesmc_registers {
+ struct mutex mutex; /* register read/write mutex */
+ unsigned int key_count; /* number of SMC registers */
+ unsigned int fan_count; /* number of fans */
+ unsigned int temp_count; /* number of temperature registers */
+ unsigned int temp_begin; /* temperature lower index bound */
+ unsigned int temp_end; /* temperature upper index bound */
+ int num_light_sensors; /* number of light sensors */
+ bool has_accelerometer; /* has motion sensor */
+ bool has_key_backlight; /* has keyboard backlight */
+ bool init_complete; /* true when fully initialized */
+ struct applesmc_entry *cache; /* cached key entries */
+} smcreg = {
+ .mutex = __MUTEX_INITIALIZER(smcreg.mutex),
};
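
The to_index()/to_option() helpers above decode a packed sensor_device_attribute index: the low 16 bits carry the fan or temperature number, the high 16 bits carry a per-group option such as which fan_speed_fmt entry to use. A tiny userspace demonstration of the same arithmetic (macro names here are illustrative, not the driver's):

#include <stdio.h>

#define PACK(option, index)     (((option) << 16) | ((index) & 0xffff))
#define TO_INDEX(v)             ((v) & 0xffff)
#define TO_OPTION(v)            ((v) >> 16)

int main(void)
{
        int v = PACK(4, 2);     /* e.g. option 4 = "F%dTg", fan index 2 */

        printf("index=%d option=%d\n", TO_INDEX(v), TO_OPTION(v));
        return 0;
}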
static const int debug;
@@ -203,20 +153,6 @@ static u8 backlight_state[2];
static struct device *hwmon_dev;
static struct input_polled_dev *applesmc_idev;
-/* Indicates whether this computer has an accelerometer. */
-static unsigned int applesmc_accelerometer;
-
-/* Indicates whether this computer has light sensors and keyboard backlight. */
-static unsigned int applesmc_light;
-
-/* The number of fans handled by the driver */
-static unsigned int fans_handled;
-
-/* Indicates which temperature sensors set to use. */
-static unsigned int applesmc_temperature_set;
-
-static DEFINE_MUTEX(applesmc_lock);
-
/*
* Last index written to key_at_index sysfs file, and value to use for all other
* key_at_index_* sysfs files.
@@ -238,18 +174,10 @@ static int __wait_status(u8 val)
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
udelay(us);
- if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) {
- if (debug)
- printk(KERN_DEBUG
- "Waited %d us for status %x\n",
- 2 * us - APPLESMC_MIN_WAIT, val);
+ if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val)
return 0;
- }
}
- printk(KERN_WARNING "applesmc: wait status failed: %x != %x\n",
- val, inb(APPLESMC_CMD_PORT));
-
return -EIO;
}
@@ -267,159 +195,242 @@ static int send_command(u8 cmd)
if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c)
return 0;
}
- printk(KERN_WARNING "applesmc: command failed: %x -> %x\n",
- cmd, inb(APPLESMC_CMD_PORT));
return -EIO;
}
-/*
- * applesmc_read_key - reads len bytes from a given key, and put them in buffer.
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
- */
-static int applesmc_read_key(const char* key, u8* buffer, u8 len)
+static int send_argument(const char *key)
{
int i;
- if (len > APPLESMC_MAX_DATA_LENGTH) {
- printk(KERN_ERR "applesmc_read_key: cannot read more than "
- "%d bytes\n", APPLESMC_MAX_DATA_LENGTH);
- return -EINVAL;
- }
-
- if (send_command(APPLESMC_READ_CMD))
- return -EIO;
-
for (i = 0; i < 4; i++) {
outb(key[i], APPLESMC_DATA_PORT);
if (__wait_status(0x04))
return -EIO;
}
- if (debug)
- printk(KERN_DEBUG "<%s", key);
+ return 0;
+}
+
+static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+{
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+ pr_warn("%s: read arg fail\n", key);
+ return -EIO;
+ }
outb(len, APPLESMC_DATA_PORT);
- if (debug)
- printk(KERN_DEBUG ">%x", len);
for (i = 0; i < len; i++) {
- if (__wait_status(0x05))
+ if (__wait_status(0x05)) {
+ pr_warn("%s: read data fail\n", key);
return -EIO;
+ }
buffer[i] = inb(APPLESMC_DATA_PORT);
- if (debug)
- printk(KERN_DEBUG "<%x", buffer[i]);
}
- if (debug)
- printk(KERN_DEBUG "\n");
return 0;
}
-/*
- * applesmc_write_key - writes len bytes from buffer to a given key.
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
- */
-static int applesmc_write_key(const char* key, u8* buffer, u8 len)
+static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
{
int i;
- if (len > APPLESMC_MAX_DATA_LENGTH) {
- printk(KERN_ERR "applesmc_write_key: cannot write more than "
- "%d bytes\n", APPLESMC_MAX_DATA_LENGTH);
- return -EINVAL;
- }
-
- if (send_command(APPLESMC_WRITE_CMD))
+ if (send_command(cmd) || send_argument(key)) {
+ pr_warn("%s: write arg fail\n", key);
return -EIO;
-
- for (i = 0; i < 4; i++) {
- outb(key[i], APPLESMC_DATA_PORT);
- if (__wait_status(0x04))
- return -EIO;
}
outb(len, APPLESMC_DATA_PORT);
for (i = 0; i < len; i++) {
- if (__wait_status(0x04))
+ if (__wait_status(0x04)) {
+ pr_warn("%s: write data fail\n", key);
return -EIO;
+ }
outb(buffer[i], APPLESMC_DATA_PORT);
}
return 0;
}
+static int read_register_count(unsigned int *count)
+{
+ __be32 be;
+ int ret;
+
+ ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4);
+ if (ret)
+ return ret;
+
+ *count = be32_to_cpu(be);
+ return 0;
+}
+
/*
- * applesmc_get_key_at_index - get key at index, and put the result in key
- * (char[6]). Returns zero on success or a negative error on failure. Callers
- * must hold applesmc_lock.
+ * Serialized I/O
+ *
+ * Returns zero on success or a negative error on failure.
+ * All functions below are concurrency safe - callers should NOT hold lock.
*/
-static int applesmc_get_key_at_index(int index, char* key)
+
+static int applesmc_read_entry(const struct applesmc_entry *entry,
+ u8 *buf, u8 len)
{
- int i;
- u8 readkey[4];
- readkey[0] = index >> 24;
- readkey[1] = index >> 16;
- readkey[2] = index >> 8;
- readkey[3] = index;
+ int ret;
- if (send_command(APPLESMC_GET_KEY_BY_INDEX_CMD))
- return -EIO;
+ if (entry->len != len)
+ return -EINVAL;
+ mutex_lock(&smcreg.mutex);
+ ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len);
+ mutex_unlock(&smcreg.mutex);
- for (i = 0; i < 4; i++) {
- outb(readkey[i], APPLESMC_DATA_PORT);
- if (__wait_status(0x04))
- return -EIO;
+ return ret;
+}
+
+static int applesmc_write_entry(const struct applesmc_entry *entry,
+ const u8 *buf, u8 len)
+{
+ int ret;
+
+ if (entry->len != len)
+ return -EINVAL;
+ mutex_lock(&smcreg.mutex);
+ ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len);
+ mutex_unlock(&smcreg.mutex);
+ return ret;
+}
+
+static const struct applesmc_entry *applesmc_get_entry_by_index(int index)
+{
+ struct applesmc_entry *cache = &smcreg.cache[index];
+ u8 key[4], info[6];
+ __be32 be;
+ int ret = 0;
+
+ if (cache->valid)
+ return cache;
+
+ mutex_lock(&smcreg.mutex);
+
+ if (cache->valid)
+ goto out;
+ be = cpu_to_be32(index);
+ ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4);
+ if (ret)
+ goto out;
+ ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6);
+ if (ret)
+ goto out;
+
+ memcpy(cache->key, key, 4);
+ cache->len = info[0];
+ memcpy(cache->type, &info[1], 4);
+ cache->flags = info[5];
+ cache->valid = 1;
+
+out:
+ mutex_unlock(&smcreg.mutex);
+ if (ret)
+ return ERR_PTR(ret);
+ return cache;
+}
+
+static int applesmc_get_lower_bound(unsigned int *lo, const char *key)
+{
+ int begin = 0, end = smcreg.key_count;
+ const struct applesmc_entry *entry;
+
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ if (strcmp(entry->key, key) < 0)
+ begin = middle + 1;
+ else
+ end = middle;
}
- outb(4, APPLESMC_DATA_PORT);
+ *lo = begin;
+ return 0;
+}
- for (i = 0; i < 4; i++) {
- if (__wait_status(0x05))
- return -EIO;
- key[i] = inb(APPLESMC_DATA_PORT);
+static int applesmc_get_upper_bound(unsigned int *hi, const char *key)
+{
+ int begin = 0, end = smcreg.key_count;
+ const struct applesmc_entry *entry;
+
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ if (strcmp(key, entry->key) < 0)
+ end = middle;
+ else
+ begin = middle + 1;
}
- key[4] = 0;
+ *hi = begin;
return 0;
}
-/*
- * applesmc_get_key_type - get key type, and put the result in type (char[6]).
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
- */
-static int applesmc_get_key_type(char* key, char* type)
+static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key)
{
- int i;
+ int begin, end;
+ int ret;
- if (send_command(APPLESMC_GET_KEY_TYPE_CMD))
- return -EIO;
+ ret = applesmc_get_lower_bound(&begin, key);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = applesmc_get_upper_bound(&end, key);
+ if (ret)
+ return ERR_PTR(ret);
+ if (end - begin != 1)
+ return ERR_PTR(-EINVAL);
- for (i = 0; i < 4; i++) {
- outb(key[i], APPLESMC_DATA_PORT);
- if (__wait_status(0x04))
- return -EIO;
- }
+ return applesmc_get_entry_by_index(begin);
+}
- outb(6, APPLESMC_DATA_PORT);
+static int applesmc_read_key(const char *key, u8 *buffer, u8 len)
+{
+ const struct applesmc_entry *entry;
- for (i = 0; i < 6; i++) {
- if (__wait_status(0x05))
- return -EIO;
- type[i] = inb(APPLESMC_DATA_PORT);
- }
- type[5] = 0;
+ entry = applesmc_get_entry_by_key(key);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ return applesmc_read_entry(entry, buffer, len);
+}
+
+static int applesmc_write_key(const char *key, const u8 *buffer, u8 len)
+{
+ const struct applesmc_entry *entry;
+ entry = applesmc_get_entry_by_key(key);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ return applesmc_write_entry(entry, buffer, len);
+}
+
+static int applesmc_has_key(const char *key, bool *value)
+{
+ const struct applesmc_entry *entry;
+
+ entry = applesmc_get_entry_by_key(key);
+ if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL)
+ return PTR_ERR(entry);
+
+ *value = !IS_ERR(entry);
return 0;
}
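
applesmc_get_lower_bound()/applesmc_get_upper_bound() above are classic binary-search bounds over the sorted SMC key index: an exact key exists iff the two bounds differ by one, and prefix keys such as "T" and "U" bracket the temperature block. A standalone illustration of the same idea over a made-up, sorted key list:

#include <stdio.h>
#include <string.h>

static const char *keys[] = { "F0Ac", "F0Mn", "TC0D", "TC0P", "Th0H" };
static const int nkeys = sizeof(keys) / sizeof(keys[0]);

static int lower_bound(const char *key)
{
        int begin = 0, end = nkeys;

        while (begin != end) {
                int mid = begin + (end - begin) / 2;

                if (strcmp(keys[mid], key) < 0)
                        begin = mid + 1;
                else
                        end = mid;
        }
        return begin;
}

static int upper_bound(const char *key)
{
        int begin = 0, end = nkeys;

        while (begin != end) {
                int mid = begin + (end - begin) / 2;

                if (strcmp(key, keys[mid]) < 0)
                        end = mid;
                else
                        begin = mid + 1;
        }
        return begin;
}

int main(void)
{
        /* exact key: bounds differ by exactly one */
        printf("TC0D exists: %d\n",
               upper_bound("TC0D") - lower_bound("TC0D") == 1);
        /* temperature block: all keys in ["T", "U") */
        printf("temp keys: %d\n", lower_bound("U") - lower_bound("T"));
        return 0;
}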
/*
- * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z). Callers must
- * hold applesmc_lock.
+ * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z).
*/
-static int applesmc_read_motion_sensor(int index, s16* value)
+static int applesmc_read_motion_sensor(int index, s16 *value)
{
u8 buffer[2];
int ret;
@@ -444,69 +455,120 @@ static int applesmc_read_motion_sensor(int index, s16* value)
}
/*
- * applesmc_device_init - initialize the accelerometer. Returns zero on success
- * and negative error code on failure. Can sleep.
+ * applesmc_device_init - initialize the accelerometer. Can sleep.
*/
-static int applesmc_device_init(void)
+static void applesmc_device_init(void)
{
- int total, ret = -ENXIO;
+ int total;
u8 buffer[2];
- if (!applesmc_accelerometer)
- return 0;
-
- mutex_lock(&applesmc_lock);
+ if (!smcreg.has_accelerometer)
+ return;
for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
- if (debug)
- printk(KERN_DEBUG "applesmc try %d\n", total);
if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) &&
- (buffer[0] != 0x00 || buffer[1] != 0x00)) {
- if (total == INIT_TIMEOUT_MSECS) {
- printk(KERN_DEBUG "applesmc: device has"
- " already been initialized"
- " (0x%02x, 0x%02x).\n",
- buffer[0], buffer[1]);
- } else {
- printk(KERN_DEBUG "applesmc: device"
- " successfully initialized"
- " (0x%02x, 0x%02x).\n",
- buffer[0], buffer[1]);
- }
- ret = 0;
- goto out;
- }
+ (buffer[0] != 0x00 || buffer[1] != 0x00))
+ return;
buffer[0] = 0xe0;
buffer[1] = 0x00;
applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2);
msleep(INIT_WAIT_MSECS);
}
- printk(KERN_WARNING "applesmc: failed to init the device\n");
-
-out:
- mutex_unlock(&applesmc_lock);
- return ret;
+ pr_warn("failed to init the device\n");
}
/*
- * applesmc_get_fan_count - get the number of fans. Callers must NOT hold
- * applesmc_lock.
+ * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent.
*/
-static int applesmc_get_fan_count(void)
+static int applesmc_init_smcreg_try(void)
{
+ struct applesmc_registers *s = &smcreg;
+ bool left_light_sensor, right_light_sensor;
+ u8 tmp[1];
int ret;
- u8 buffer[1];
- mutex_lock(&applesmc_lock);
+ if (s->init_complete)
+ return 0;
- ret = applesmc_read_key(FANS_COUNT, buffer, 1);
+ ret = read_register_count(&s->key_count);
+ if (ret)
+ return ret;
+
+ if (!s->cache)
+ s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
+ if (!s->cache)
+ return -ENOMEM;
- mutex_unlock(&applesmc_lock);
+ ret = applesmc_read_key(FANS_COUNT, tmp, 1);
if (ret)
return ret;
- else
- return buffer[0];
+ s->fan_count = tmp[0];
+
+ ret = applesmc_get_lower_bound(&s->temp_begin, "T");
+ if (ret)
+ return ret;
+ ret = applesmc_get_lower_bound(&s->temp_end, "U");
+ if (ret)
+ return ret;
+ s->temp_count = s->temp_end - s->temp_begin;
+
+ ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor);
+ if (ret)
+ return ret;
+ ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor);
+ if (ret)
+ return ret;
+ ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer);
+ if (ret)
+ return ret;
+ ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight);
+ if (ret)
+ return ret;
+
+ s->num_light_sensors = left_light_sensor + right_light_sensor;
+ s->init_complete = true;
+
+ pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n",
+ s->key_count, s->fan_count, s->temp_count,
+ s->has_accelerometer,
+ s->num_light_sensors,
+ s->has_key_backlight);
+
+ return 0;
+}
+
+/*
+ * applesmc_init_smcreg - Initialize register cache.
+ *
+ * Retries until initialization is successful, or the operation times out.
+ *
+ */
+static int applesmc_init_smcreg(void)
+{
+ int ms, ret;
+
+ for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) {
+ ret = applesmc_init_smcreg_try();
+ if (!ret) {
+ if (ms)
+ pr_info("init_smcreg() took %d ms\n", ms);
+ return 0;
+ }
+ msleep(INIT_WAIT_MSECS);
+ }
+
+ kfree(smcreg.cache);
+ smcreg.cache = NULL;
+
+ return ret;
+}
+
+static void applesmc_destroy_smcreg(void)
+{
+ kfree(smcreg.cache);
+ smcreg.cache = NULL;
+ smcreg.init_complete = false;
}
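
applesmc_init_smcreg() above wraps the idempotent _try() helper in a poll-until-timeout loop so that a slowly booting SMC still gets initialized. A reduced sketch of that retry shape (try_init(), TIMEOUT_MS and WAIT_MS are stand-ins, not the applesmc symbols):

#include <linux/delay.h>
#include <linux/errno.h>

#define TIMEOUT_MS      5000
#define WAIT_MS         50

static int tries;

static int try_init(void)               /* stand-in: ready on the third attempt */
{
        return ++tries >= 3 ? 0 : -EIO;
}

static int init_with_retry(void)
{
        int ms, ret = -EIO;

        for (ms = 0; ms < TIMEOUT_MS; ms += WAIT_MS) {
                ret = try_init();
                if (!ret)
                        return 0;       /* initialized */
                msleep(WAIT_MS);        /* back off and try again */
        }
        return ret;                     /* give up, report the last error */
}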
/* Device model stuff */
@@ -514,30 +576,27 @@ static int applesmc_probe(struct platform_device *dev)
{
int ret;
- ret = applesmc_device_init();
+ ret = applesmc_init_smcreg();
if (ret)
return ret;
- printk(KERN_INFO "applesmc: device successfully initialized.\n");
+ applesmc_device_init();
+
return 0;
}
/* Synchronize device with memorized backlight state */
static int applesmc_pm_resume(struct device *dev)
{
- mutex_lock(&applesmc_lock);
- if (applesmc_light)
+ if (smcreg.has_key_backlight)
applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
- mutex_unlock(&applesmc_lock);
return 0;
}
/* Reinitialize device on resume from hibernation */
static int applesmc_pm_restore(struct device *dev)
{
- int ret = applesmc_device_init();
- if (ret)
- return ret;
+ applesmc_device_init();
return applesmc_pm_resume(dev);
}
@@ -571,20 +630,15 @@ static void applesmc_idev_poll(struct input_polled_dev *dev)
struct input_dev *idev = dev->input;
s16 x, y;
- mutex_lock(&applesmc_lock);
-
if (applesmc_read_motion_sensor(SENSOR_X, &x))
- goto out;
+ return;
if (applesmc_read_motion_sensor(SENSOR_Y, &y))
- goto out;
+ return;
x = -x;
input_report_abs(idev, ABS_X, x - rest_x);
input_report_abs(idev, ABS_Y, y - rest_y);
input_sync(idev);
-
-out:
- mutex_unlock(&applesmc_lock);
}
/* Sysfs Files */
@@ -601,8 +655,6 @@ static ssize_t applesmc_position_show(struct device *dev,
int ret;
s16 x, y, z;
- mutex_lock(&applesmc_lock);
-
ret = applesmc_read_motion_sensor(SENSOR_X, &x);
if (ret)
goto out;
@@ -614,7 +666,6 @@ static ssize_t applesmc_position_show(struct device *dev,
goto out;
out:
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -624,20 +675,20 @@ out:
static ssize_t applesmc_light_show(struct device *dev,
struct device_attribute *attr, char *sysfsbuf)
{
+ const struct applesmc_entry *entry;
static int data_length;
int ret;
u8 left = 0, right = 0;
- u8 buffer[10], query[6];
-
- mutex_lock(&applesmc_lock);
+ u8 buffer[10];
if (!data_length) {
- ret = applesmc_get_key_type(LIGHT_SENSOR_LEFT_KEY, query);
- if (ret)
- goto out;
- data_length = clamp_val(query[0], 0, 10);
- printk(KERN_INFO "applesmc: light sensor data length set to "
- "%d\n", data_length);
+ entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ if (entry->len > 10)
+ return -ENXIO;
+ data_length = entry->len;
+ pr_info("light sensor data length set to %d\n", data_length);
}
ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
@@ -653,7 +704,6 @@ static ssize_t applesmc_light_show(struct device *dev,
right = buffer[2];
out:
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -664,36 +714,44 @@ out:
static ssize_t applesmc_show_sensor_label(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- const char *key =
- temperature_sensors_sets[applesmc_temperature_set][attr->index];
+ int index = smcreg.temp_begin + to_index(devattr);
+ const struct applesmc_entry *entry;
+
+ entry = applesmc_get_entry_by_index(index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
- return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
}
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
+ int index = smcreg.temp_begin + to_index(devattr);
+ const struct applesmc_entry *entry;
int ret;
u8 buffer[2];
unsigned int temp;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- const char* key =
- temperature_sensors_sets[applesmc_temperature_set][attr->index];
-
- mutex_lock(&applesmc_lock);
- ret = applesmc_read_key(key, buffer, 2);
- temp = buffer[0]*1000;
- temp += (buffer[1] >> 6) * 250;
-
- mutex_unlock(&applesmc_lock);
+ entry = applesmc_get_entry_by_index(index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ if (entry->len > 2)
+ return -EINVAL;
+ ret = applesmc_read_entry(entry, buffer, entry->len);
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
+
+ if (entry->len == 2) {
+ temp = buffer[0] * 1000;
+ temp += (buffer[1] >> 6) * 250;
+ } else {
+ temp = buffer[0] * 4000;
+ }
+
+ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
}
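
The new temperature path above decodes the raw bytes directly: for two-byte (sp78-style) keys the integer part is buffer[0] and only the top two fraction bits are kept, i.e. quarter-degree steps reported in millidegrees C. A worked example of that arithmetic for a non-negative reading, matching the driver's formula:

#include <stdio.h>

int main(void)
{
        unsigned char buf[2] = { 0x3a, 0xc0 }; /* example raw sp78 reading */
        unsigned int temp;

        temp  = buf[0] * 1000;                 /* integer part: 58 C */
        temp += (buf[1] >> 6) * 250;           /* top 2 fraction bits: 0.75 C */
        printf("%u mC\n", temp);               /* prints 58750 */
        return 0;
}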
static ssize_t applesmc_show_fan_speed(struct device *dev,
@@ -703,21 +761,12 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
unsigned int speed = 0;
char newkey[5];
u8 buffer[2];
- struct sensor_device_attribute_2 *sensor_attr =
- to_sensor_dev_attr_2(attr);
-
- newkey[0] = fan_speed_keys[sensor_attr->nr][0];
- newkey[1] = '0' + sensor_attr->index;
- newkey[2] = fan_speed_keys[sensor_attr->nr][2];
- newkey[3] = fan_speed_keys[sensor_attr->nr][3];
- newkey[4] = 0;
- mutex_lock(&applesmc_lock);
+ sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
speed = ((buffer[0] << 8 | buffer[1]) >> 2);
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -729,30 +778,19 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
const char *sysfsbuf, size_t count)
{
int ret;
- u32 speed;
+ unsigned long speed;
char newkey[5];
u8 buffer[2];
- struct sensor_device_attribute_2 *sensor_attr =
- to_sensor_dev_attr_2(attr);
-
- speed = simple_strtoul(sysfsbuf, NULL, 10);
-
- if (speed > 0x4000) /* Bigger than a 14-bit value */
- return -EINVAL;
- newkey[0] = fan_speed_keys[sensor_attr->nr][0];
- newkey[1] = '0' + sensor_attr->index;
- newkey[2] = fan_speed_keys[sensor_attr->nr][2];
- newkey[3] = fan_speed_keys[sensor_attr->nr][3];
- newkey[4] = 0;
+ if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+ return -EINVAL; /* Bigger than a 14-bit value */
- mutex_lock(&applesmc_lock);
+ sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
buffer[0] = (speed >> 6) & 0xff;
buffer[1] = (speed << 2) & 0xff;
ret = applesmc_write_key(newkey, buffer, 2);
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -760,19 +798,15 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
}
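
Fan speeds use the fpe2 layout noted in the removed key comments, assumed here to be a 14-bit integer followed by two fraction bits: that is why the store path above shifts the rpm value by 6 and 2 into the two bytes, and the show path shifts the combined word right by 2. A small round-trip example of that encoding (values are arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long rpm = 2000;              /* value written via sysfs */
        unsigned char buf[2];
        unsigned int readback;

        buf[0] = (rpm >> 6) & 0xff;            /* upper 8 of the 14 integer bits */
        buf[1] = (rpm << 2) & 0xff;            /* lower 6 bits, 2 fraction bits = 0 */

        readback = ((buf[0] << 8) | buf[1]) >> 2;      /* drop the fraction */
        printf("%u rpm\n", readback);          /* prints 2000 */
        return 0;
}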
static ssize_t applesmc_show_fan_manual(struct device *dev,
- struct device_attribute *devattr, char *sysfsbuf)
+ struct device_attribute *attr, char *sysfsbuf)
{
int ret;
u16 manual = 0;
u8 buffer[2];
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- mutex_lock(&applesmc_lock);
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- manual = ((buffer[0] << 8 | buffer[1]) >> attr->index) & 0x01;
+ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -780,18 +814,16 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
}
static ssize_t applesmc_store_fan_manual(struct device *dev,
- struct device_attribute *devattr,
+ struct device_attribute *attr,
const char *sysfsbuf, size_t count)
{
int ret;
u8 buffer[2];
- u32 input;
+ unsigned long input;
u16 val;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- input = simple_strtoul(sysfsbuf, NULL, 10);
- mutex_lock(&applesmc_lock);
+ if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+ return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
val = (buffer[0] << 8 | buffer[1]);
@@ -799,9 +831,9 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
goto out;
if (input)
- val = val | (0x01 << attr->index);
+ val = val | (0x01 << to_index(attr));
else
- val = val & ~(0x01 << attr->index);
+ val = val & ~(0x01 << to_index(attr));
buffer[0] = (val >> 8) & 0xFF;
buffer[1] = val & 0xFF;
@@ -809,7 +841,6 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
ret = applesmc_write_key(FANS_MANUAL, buffer, 2);
out:
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -822,21 +853,12 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
int ret;
char newkey[5];
u8 buffer[17];
- struct sensor_device_attribute_2 *sensor_attr =
- to_sensor_dev_attr_2(attr);
-
- newkey[0] = FAN_POSITION[0];
- newkey[1] = '0' + sensor_attr->index;
- newkey[2] = FAN_POSITION[2];
- newkey[3] = FAN_POSITION[3];
- newkey[4] = 0;
- mutex_lock(&applesmc_lock);
+ sprintf(newkey, FAN_ID_FMT, to_index(attr));
ret = applesmc_read_key(newkey, buffer, 16);
buffer[16] = 0;
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -852,18 +874,14 @@ static ssize_t applesmc_calibrate_show(struct device *dev,
static ssize_t applesmc_calibrate_store(struct device *dev,
struct device_attribute *attr, const char *sysfsbuf, size_t count)
{
- mutex_lock(&applesmc_lock);
applesmc_calibrate();
- mutex_unlock(&applesmc_lock);
return count;
}
static void applesmc_backlight_set(struct work_struct *work)
{
- mutex_lock(&applesmc_lock);
applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
- mutex_unlock(&applesmc_lock);
}
static DECLARE_WORK(backlight_work, &applesmc_backlight_set);
@@ -886,13 +904,10 @@ static ssize_t applesmc_key_count_show(struct device *dev,
u8 buffer[4];
u32 count;
- mutex_lock(&applesmc_lock);
-
ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
((u32)buffer[2]<<8) + buffer[3];
- mutex_unlock(&applesmc_lock);
if (ret)
return ret;
else
@@ -902,113 +917,53 @@ static ssize_t applesmc_key_count_show(struct device *dev,
static ssize_t applesmc_key_at_index_read_show(struct device *dev,
struct device_attribute *attr, char *sysfsbuf)
{
- char key[5];
- char info[6];
+ const struct applesmc_entry *entry;
int ret;
- mutex_lock(&applesmc_lock);
-
- ret = applesmc_get_key_at_index(key_at_index, key);
-
- if (ret || !key[0]) {
- mutex_unlock(&applesmc_lock);
-
- return -EINVAL;
- }
-
- ret = applesmc_get_key_type(key, info);
-
- if (ret) {
- mutex_unlock(&applesmc_lock);
-
+ entry = applesmc_get_entry_by_index(key_at_index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ ret = applesmc_read_entry(entry, sysfsbuf, entry->len);
+ if (ret)
return ret;
- }
-
- /*
- * info[0] maximum value (APPLESMC_MAX_DATA_LENGTH) is much lower than
- * PAGE_SIZE, so we don't need any checks before writing to sysfsbuf.
- */
- ret = applesmc_read_key(key, sysfsbuf, info[0]);
-
- mutex_unlock(&applesmc_lock);
- if (!ret) {
- return info[0];
- } else {
- return ret;
- }
+ return entry->len;
}
static ssize_t applesmc_key_at_index_data_length_show(struct device *dev,
struct device_attribute *attr, char *sysfsbuf)
{
- char key[5];
- char info[6];
- int ret;
-
- mutex_lock(&applesmc_lock);
-
- ret = applesmc_get_key_at_index(key_at_index, key);
+ const struct applesmc_entry *entry;
- if (ret || !key[0]) {
- mutex_unlock(&applesmc_lock);
+ entry = applesmc_get_entry_by_index(key_at_index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
- return -EINVAL;
- }
-
- ret = applesmc_get_key_type(key, info);
-
- mutex_unlock(&applesmc_lock);
-
- if (!ret)
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", info[0]);
- else
- return ret;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", entry->len);
}
static ssize_t applesmc_key_at_index_type_show(struct device *dev,
struct device_attribute *attr, char *sysfsbuf)
{
- char key[5];
- char info[6];
- int ret;
-
- mutex_lock(&applesmc_lock);
-
- ret = applesmc_get_key_at_index(key_at_index, key);
-
- if (ret || !key[0]) {
- mutex_unlock(&applesmc_lock);
-
- return -EINVAL;
- }
-
- ret = applesmc_get_key_type(key, info);
+ const struct applesmc_entry *entry;
- mutex_unlock(&applesmc_lock);
+ entry = applesmc_get_entry_by_index(key_at_index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
- if (!ret)
- return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", info+1);
- else
- return ret;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->type);
}
static ssize_t applesmc_key_at_index_name_show(struct device *dev,
struct device_attribute *attr, char *sysfsbuf)
{
- char key[5];
- int ret;
+ const struct applesmc_entry *entry;
- mutex_lock(&applesmc_lock);
+ entry = applesmc_get_entry_by_index(key_at_index);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
- ret = applesmc_get_key_at_index(key_at_index, key);
-
- mutex_unlock(&applesmc_lock);
-
- if (!ret && key[0])
- return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
- else
- return -EINVAL;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
}
static ssize_t applesmc_key_at_index_show(struct device *dev,
@@ -1020,12 +975,13 @@ static ssize_t applesmc_key_at_index_show(struct device *dev,
static ssize_t applesmc_key_at_index_store(struct device *dev,
struct device_attribute *attr, const char *sysfsbuf, size_t count)
{
- mutex_lock(&applesmc_lock);
-
- key_at_index = simple_strtoul(sysfsbuf, NULL, 10);
+ unsigned long newkey;
- mutex_unlock(&applesmc_lock);
+ if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+ || newkey >= smcreg.key_count)
+ return -EINVAL;
+ key_at_index = newkey;
return count;
}
@@ -1035,387 +991,102 @@ static struct led_classdev applesmc_backlight = {
.brightness_set = applesmc_brightness_set,
};
-static DEVICE_ATTR(name, 0444, applesmc_name_show, NULL);
-
-static DEVICE_ATTR(position, 0444, applesmc_position_show, NULL);
-static DEVICE_ATTR(calibrate, 0644,
- applesmc_calibrate_show, applesmc_calibrate_store);
-
-static struct attribute *accelerometer_attributes[] = {
- &dev_attr_position.attr,
- &dev_attr_calibrate.attr,
- NULL
-};
-
-static const struct attribute_group accelerometer_attributes_group =
- { .attrs = accelerometer_attributes };
-
-static DEVICE_ATTR(light, 0444, applesmc_light_show, NULL);
-
-static DEVICE_ATTR(key_count, 0444, applesmc_key_count_show, NULL);
-static DEVICE_ATTR(key_at_index, 0644,
- applesmc_key_at_index_show, applesmc_key_at_index_store);
-static DEVICE_ATTR(key_at_index_name, 0444,
- applesmc_key_at_index_name_show, NULL);
-static DEVICE_ATTR(key_at_index_type, 0444,
- applesmc_key_at_index_type_show, NULL);
-static DEVICE_ATTR(key_at_index_data_length, 0444,
- applesmc_key_at_index_data_length_show, NULL);
-static DEVICE_ATTR(key_at_index_data, 0444,
- applesmc_key_at_index_read_show, NULL);
-
-static struct attribute *key_enumeration_attributes[] = {
- &dev_attr_key_count.attr,
- &dev_attr_key_at_index.attr,
- &dev_attr_key_at_index_name.attr,
- &dev_attr_key_at_index_type.attr,
- &dev_attr_key_at_index_data_length.attr,
- &dev_attr_key_at_index_data.attr,
- NULL
-};
-
-static const struct attribute_group key_enumeration_group =
- { .attrs = key_enumeration_attributes };
-
-/*
- * Macro defining SENSOR_DEVICE_ATTR for a fan sysfs entries.
- * - show actual speed
- * - show/store minimum speed
- * - show maximum speed
- * - show safe speed
- * - show/store target speed
- * - show/store manual mode
- */
-#define sysfs_fan_speeds_offset(offset) \
-static SENSOR_DEVICE_ATTR_2(fan##offset##_input, S_IRUGO, \
- applesmc_show_fan_speed, NULL, 0, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_min, S_IRUGO | S_IWUSR, \
- applesmc_show_fan_speed, applesmc_store_fan_speed, 1, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_max, S_IRUGO, \
- applesmc_show_fan_speed, NULL, 2, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_safe, S_IRUGO, \
- applesmc_show_fan_speed, NULL, 3, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_output, S_IRUGO | S_IWUSR, \
- applesmc_show_fan_speed, applesmc_store_fan_speed, 4, offset-1); \
-\
-static SENSOR_DEVICE_ATTR(fan##offset##_manual, S_IRUGO | S_IWUSR, \
- applesmc_show_fan_manual, applesmc_store_fan_manual, offset-1); \
-\
-static SENSOR_DEVICE_ATTR(fan##offset##_label, S_IRUGO, \
- applesmc_show_fan_position, NULL, offset-1); \
-\
-static struct attribute *fan##offset##_attributes[] = { \
- &sensor_dev_attr_fan##offset##_input.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_min.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_max.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_safe.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_output.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_manual.dev_attr.attr, \
- &sensor_dev_attr_fan##offset##_label.dev_attr.attr, \
- NULL \
+static struct applesmc_node_group info_group[] = {
+ { "name", applesmc_name_show },
+ { "key_count", applesmc_key_count_show },
+ { "key_at_index", applesmc_key_at_index_show, applesmc_key_at_index_store },
+ { "key_at_index_name", applesmc_key_at_index_name_show },
+ { "key_at_index_type", applesmc_key_at_index_type_show },
+ { "key_at_index_data_length", applesmc_key_at_index_data_length_show },
+ { "key_at_index_data", applesmc_key_at_index_read_show },
+ { }
};
-/*
- * Create the needed functions for each fan using the macro defined above
- * (4 fans are supported)
- */
-sysfs_fan_speeds_offset(1);
-sysfs_fan_speeds_offset(2);
-sysfs_fan_speeds_offset(3);
-sysfs_fan_speeds_offset(4);
-
-static const struct attribute_group fan_attribute_groups[] = {
- { .attrs = fan1_attributes },
- { .attrs = fan2_attributes },
- { .attrs = fan3_attributes },
- { .attrs = fan4_attributes },
+static struct applesmc_node_group accelerometer_group[] = {
+ { "position", applesmc_position_show },
+ { "calibrate", applesmc_calibrate_show, applesmc_calibrate_store },
+ { }
};
-/*
- * Temperature sensors sysfs entries.
- */
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 15);
-static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 16);
-static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 18);
-static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 19);
-static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 20);
-static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 21);
-static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 22);
-static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 24);
-static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 25);
-static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 26);
-static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 27);
-static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 28);
-static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 29);
-static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 30);
-static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 31);
-static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 32);
-static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 33);
-static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 34);
-static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 35);
-static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 36);
-static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 37);
-static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 38);
-static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
- applesmc_show_sensor_label, NULL, 39);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
- applesmc_show_temperature, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
- applesmc_show_temperature, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO,
- applesmc_show_temperature, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO,
- applesmc_show_temperature, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO,
- applesmc_show_temperature, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO,
- applesmc_show_temperature, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO,
- applesmc_show_temperature, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO,
- applesmc_show_temperature, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO,
- applesmc_show_temperature, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO,
- applesmc_show_temperature, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO,
- applesmc_show_temperature, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO,
- applesmc_show_temperature, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO,
- applesmc_show_temperature, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO,
- applesmc_show_temperature, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO,
- applesmc_show_temperature, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO,
- applesmc_show_temperature, NULL, 15);
-static SENSOR_DEVICE_ATTR(temp17_input, S_IRUGO,
- applesmc_show_temperature, NULL, 16);
-static SENSOR_DEVICE_ATTR(temp18_input, S_IRUGO,
- applesmc_show_temperature, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp19_input, S_IRUGO,
- applesmc_show_temperature, NULL, 18);
-static SENSOR_DEVICE_ATTR(temp20_input, S_IRUGO,
- applesmc_show_temperature, NULL, 19);
-static SENSOR_DEVICE_ATTR(temp21_input, S_IRUGO,
- applesmc_show_temperature, NULL, 20);
-static SENSOR_DEVICE_ATTR(temp22_input, S_IRUGO,
- applesmc_show_temperature, NULL, 21);
-static SENSOR_DEVICE_ATTR(temp23_input, S_IRUGO,
- applesmc_show_temperature, NULL, 22);
-static SENSOR_DEVICE_ATTR(temp24_input, S_IRUGO,
- applesmc_show_temperature, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp25_input, S_IRUGO,
- applesmc_show_temperature, NULL, 24);
-static SENSOR_DEVICE_ATTR(temp26_input, S_IRUGO,
- applesmc_show_temperature, NULL, 25);
-static SENSOR_DEVICE_ATTR(temp27_input, S_IRUGO,
- applesmc_show_temperature, NULL, 26);
-static SENSOR_DEVICE_ATTR(temp28_input, S_IRUGO,
- applesmc_show_temperature, NULL, 27);
-static SENSOR_DEVICE_ATTR(temp29_input, S_IRUGO,
- applesmc_show_temperature, NULL, 28);
-static SENSOR_DEVICE_ATTR(temp30_input, S_IRUGO,
- applesmc_show_temperature, NULL, 29);
-static SENSOR_DEVICE_ATTR(temp31_input, S_IRUGO,
- applesmc_show_temperature, NULL, 30);
-static SENSOR_DEVICE_ATTR(temp32_input, S_IRUGO,
- applesmc_show_temperature, NULL, 31);
-static SENSOR_DEVICE_ATTR(temp33_input, S_IRUGO,
- applesmc_show_temperature, NULL, 32);
-static SENSOR_DEVICE_ATTR(temp34_input, S_IRUGO,
- applesmc_show_temperature, NULL, 33);
-static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO,
- applesmc_show_temperature, NULL, 34);
-static SENSOR_DEVICE_ATTR(temp36_input, S_IRUGO,
- applesmc_show_temperature, NULL, 35);
-static SENSOR_DEVICE_ATTR(temp37_input, S_IRUGO,
- applesmc_show_temperature, NULL, 36);
-static SENSOR_DEVICE_ATTR(temp38_input, S_IRUGO,
- applesmc_show_temperature, NULL, 37);
-static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
- applesmc_show_temperature, NULL, 38);
-static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
- applesmc_show_temperature, NULL, 39);
-
-static struct attribute *label_attributes[] = {
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- &sensor_dev_attr_temp5_label.dev_attr.attr,
- &sensor_dev_attr_temp6_label.dev_attr.attr,
- &sensor_dev_attr_temp7_label.dev_attr.attr,
- &sensor_dev_attr_temp8_label.dev_attr.attr,
- &sensor_dev_attr_temp9_label.dev_attr.attr,
- &sensor_dev_attr_temp10_label.dev_attr.attr,
- &sensor_dev_attr_temp11_label.dev_attr.attr,
- &sensor_dev_attr_temp12_label.dev_attr.attr,
- &sensor_dev_attr_temp13_label.dev_attr.attr,
- &sensor_dev_attr_temp14_label.dev_attr.attr,
- &sensor_dev_attr_temp15_label.dev_attr.attr,
- &sensor_dev_attr_temp16_label.dev_attr.attr,
- &sensor_dev_attr_temp17_label.dev_attr.attr,
- &sensor_dev_attr_temp18_label.dev_attr.attr,
- &sensor_dev_attr_temp19_label.dev_attr.attr,
- &sensor_dev_attr_temp20_label.dev_attr.attr,
- &sensor_dev_attr_temp21_label.dev_attr.attr,
- &sensor_dev_attr_temp22_label.dev_attr.attr,
- &sensor_dev_attr_temp23_label.dev_attr.attr,
- &sensor_dev_attr_temp24_label.dev_attr.attr,
- &sensor_dev_attr_temp25_label.dev_attr.attr,
- &sensor_dev_attr_temp26_label.dev_attr.attr,
- &sensor_dev_attr_temp27_label.dev_attr.attr,
- &sensor_dev_attr_temp28_label.dev_attr.attr,
- &sensor_dev_attr_temp29_label.dev_attr.attr,
- &sensor_dev_attr_temp30_label.dev_attr.attr,
- &sensor_dev_attr_temp31_label.dev_attr.attr,
- &sensor_dev_attr_temp32_label.dev_attr.attr,
- &sensor_dev_attr_temp33_label.dev_attr.attr,
- &sensor_dev_attr_temp34_label.dev_attr.attr,
- &sensor_dev_attr_temp35_label.dev_attr.attr,
- &sensor_dev_attr_temp36_label.dev_attr.attr,
- &sensor_dev_attr_temp37_label.dev_attr.attr,
- &sensor_dev_attr_temp38_label.dev_attr.attr,
- &sensor_dev_attr_temp39_label.dev_attr.attr,
- &sensor_dev_attr_temp40_label.dev_attr.attr,
- NULL
+static struct applesmc_node_group light_sensor_group[] = {
+ { "light", applesmc_light_show },
+ { }
};
-static struct attribute *temperature_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp7_input.dev_attr.attr,
- &sensor_dev_attr_temp8_input.dev_attr.attr,
- &sensor_dev_attr_temp9_input.dev_attr.attr,
- &sensor_dev_attr_temp10_input.dev_attr.attr,
- &sensor_dev_attr_temp11_input.dev_attr.attr,
- &sensor_dev_attr_temp12_input.dev_attr.attr,
- &sensor_dev_attr_temp13_input.dev_attr.attr,
- &sensor_dev_attr_temp14_input.dev_attr.attr,
- &sensor_dev_attr_temp15_input.dev_attr.attr,
- &sensor_dev_attr_temp16_input.dev_attr.attr,
- &sensor_dev_attr_temp17_input.dev_attr.attr,
- &sensor_dev_attr_temp18_input.dev_attr.attr,
- &sensor_dev_attr_temp19_input.dev_attr.attr,
- &sensor_dev_attr_temp20_input.dev_attr.attr,
- &sensor_dev_attr_temp21_input.dev_attr.attr,
- &sensor_dev_attr_temp22_input.dev_attr.attr,
- &sensor_dev_attr_temp23_input.dev_attr.attr,
- &sensor_dev_attr_temp24_input.dev_attr.attr,
- &sensor_dev_attr_temp25_input.dev_attr.attr,
- &sensor_dev_attr_temp26_input.dev_attr.attr,
- &sensor_dev_attr_temp27_input.dev_attr.attr,
- &sensor_dev_attr_temp28_input.dev_attr.attr,
- &sensor_dev_attr_temp29_input.dev_attr.attr,
- &sensor_dev_attr_temp30_input.dev_attr.attr,
- &sensor_dev_attr_temp31_input.dev_attr.attr,
- &sensor_dev_attr_temp32_input.dev_attr.attr,
- &sensor_dev_attr_temp33_input.dev_attr.attr,
- &sensor_dev_attr_temp34_input.dev_attr.attr,
- &sensor_dev_attr_temp35_input.dev_attr.attr,
- &sensor_dev_attr_temp36_input.dev_attr.attr,
- &sensor_dev_attr_temp37_input.dev_attr.attr,
- &sensor_dev_attr_temp38_input.dev_attr.attr,
- &sensor_dev_attr_temp39_input.dev_attr.attr,
- &sensor_dev_attr_temp40_input.dev_attr.attr,
- NULL
+static struct applesmc_node_group fan_group[] = {
+ { "fan%d_label", applesmc_show_fan_position },
+ { "fan%d_input", applesmc_show_fan_speed, NULL, 0 },
+ { "fan%d_min", applesmc_show_fan_speed, applesmc_store_fan_speed, 1 },
+ { "fan%d_max", applesmc_show_fan_speed, NULL, 2 },
+ { "fan%d_safe", applesmc_show_fan_speed, NULL, 3 },
+ { "fan%d_output", applesmc_show_fan_speed, applesmc_store_fan_speed, 4 },
+ { "fan%d_manual", applesmc_show_fan_manual, applesmc_store_fan_manual },
+ { }
};
-static const struct attribute_group temperature_attributes_group =
- { .attrs = temperature_attributes };
-
-static const struct attribute_group label_attributes_group = {
- .attrs = label_attributes
+static struct applesmc_node_group temp_group[] = {
+ { "temp%d_label", applesmc_show_sensor_label },
+ { "temp%d_input", applesmc_show_temperature },
+ { }
};
/* Module stuff */
/*
- * applesmc_dmi_match - found a match. return one, short-circuiting the hunt.
+ * applesmc_destroy_nodes - remove files and free associated memory
*/
-static int applesmc_dmi_match(const struct dmi_system_id *id)
+static void applesmc_destroy_nodes(struct applesmc_node_group *groups)
{
- int i = 0;
- struct dmi_match_data* dmi_data = id->driver_data;
- printk(KERN_INFO "applesmc: %s detected:\n", id->ident);
- applesmc_accelerometer = dmi_data->accelerometer;
- printk(KERN_INFO "applesmc: - Model %s accelerometer\n",
- applesmc_accelerometer ? "with" : "without");
- applesmc_light = dmi_data->light;
- printk(KERN_INFO "applesmc: - Model %s light sensors and backlight\n",
- applesmc_light ? "with" : "without");
-
- applesmc_temperature_set = dmi_data->temperature_set;
- while (temperature_sensors_sets[applesmc_temperature_set][i] != NULL)
- i++;
- printk(KERN_INFO "applesmc: - Model with %d temperature sensors\n", i);
- return 1;
+ struct applesmc_node_group *grp;
+ struct applesmc_dev_attr *node;
+
+ for (grp = groups; grp->nodes; grp++) {
+ for (node = grp->nodes; node->sda.dev_attr.attr.name; node++)
+ sysfs_remove_file(&pdev->dev.kobj,
+ &node->sda.dev_attr.attr);
+ kfree(grp->nodes);
+ grp->nodes = NULL;
+ }
+}
+
+/*
+ * applesmc_create_nodes - create a two-dimensional group of sysfs files
+ */
+static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
+{
+ struct applesmc_node_group *grp;
+ struct applesmc_dev_attr *node;
+ struct attribute *attr;
+ int ret, i;
+
+ for (grp = groups; grp->format; grp++) {
+ grp->nodes = kcalloc(num + 1, sizeof(*node), GFP_KERNEL);
+ if (!grp->nodes) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < num; i++) {
+ node = &grp->nodes[i];
+ sprintf(node->name, grp->format, i + 1);
+ node->sda.index = (grp->option << 16) | (i & 0xffff);
+ node->sda.dev_attr.show = grp->show;
+ node->sda.dev_attr.store = grp->store;
+ attr = &node->sda.dev_attr.attr;
+ sysfs_attr_init(attr);
+ attr->name = node->name;
+ attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
+ ret = sysfs_create_file(&pdev->dev.kobj, attr);
+ if (ret) {
+ attr->name = NULL;
+ goto out;
+ }
+ }
+ }
+
+ return 0;
+out:
+ applesmc_destroy_nodes(groups);
+ return ret;
}
/* Create accelerometer resources */
@@ -1424,8 +1095,10 @@ static int applesmc_create_accelerometer(void)
struct input_dev *idev;
int ret;
- ret = sysfs_create_group(&pdev->dev.kobj,
- &accelerometer_attributes_group);
+ if (!smcreg.has_accelerometer)
+ return 0;
+
+ ret = applesmc_create_nodes(accelerometer_group, 1);
if (ret)
goto out;
@@ -1462,184 +1135,96 @@ out_idev:
input_free_polled_device(applesmc_idev);
out_sysfs:
- sysfs_remove_group(&pdev->dev.kobj, &accelerometer_attributes_group);
+ applesmc_destroy_nodes(accelerometer_group);
out:
- printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret);
+ pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
/* Release all resources used by the accelerometer */
static void applesmc_release_accelerometer(void)
{
+ if (!smcreg.has_accelerometer)
+ return;
input_unregister_polled_device(applesmc_idev);
input_free_polled_device(applesmc_idev);
- sysfs_remove_group(&pdev->dev.kobj, &accelerometer_attributes_group);
+ applesmc_destroy_nodes(accelerometer_group);
}
-static __initdata struct dmi_match_data applesmc_dmi_data[] = {
-/* MacBook Pro: accelerometer, backlight and temperature set 0 */
- { .accelerometer = 1, .light = 1, .temperature_set = 0 },
-/* MacBook2: accelerometer and temperature set 1 */
- { .accelerometer = 1, .light = 0, .temperature_set = 1 },
-/* MacBook: accelerometer and temperature set 2 */
- { .accelerometer = 1, .light = 0, .temperature_set = 2 },
-/* MacMini: temperature set 3 */
- { .accelerometer = 0, .light = 0, .temperature_set = 3 },
-/* MacPro: temperature set 4 */
- { .accelerometer = 0, .light = 0, .temperature_set = 4 },
-/* iMac: temperature set 5 */
- { .accelerometer = 0, .light = 0, .temperature_set = 5 },
-/* MacBook3, MacBook4: accelerometer and temperature set 6 */
- { .accelerometer = 1, .light = 0, .temperature_set = 6 },
-/* MacBook Air: accelerometer, backlight and temperature set 7 */
- { .accelerometer = 1, .light = 1, .temperature_set = 7 },
-/* MacBook Pro 4: accelerometer, backlight and temperature set 8 */
- { .accelerometer = 1, .light = 1, .temperature_set = 8 },
-/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */
- { .accelerometer = 1, .light = 1, .temperature_set = 9 },
-/* iMac 5: light sensor only, temperature set 10 */
- { .accelerometer = 0, .light = 0, .temperature_set = 10 },
-/* MacBook 5: accelerometer, backlight and temperature set 11 */
- { .accelerometer = 1, .light = 1, .temperature_set = 11 },
-/* MacBook Pro 5: accelerometer, backlight and temperature set 12 */
- { .accelerometer = 1, .light = 1, .temperature_set = 12 },
-/* iMac 8: light sensor only, temperature set 13 */
- { .accelerometer = 0, .light = 0, .temperature_set = 13 },
-/* iMac 6: light sensor only, temperature set 14 */
- { .accelerometer = 0, .light = 0, .temperature_set = 14 },
-/* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */
- { .accelerometer = 1, .light = 1, .temperature_set = 15 },
-/* MacPro3,1: temperature set 16 */
- { .accelerometer = 0, .light = 0, .temperature_set = 16 },
-/* iMac 9,1: light sensor only, temperature set 17 */
- { .accelerometer = 0, .light = 0, .temperature_set = 17 },
-/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
- { .accelerometer = 1, .light = 1, .temperature_set = 18 },
-/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
- { .accelerometer = 1, .light = 1, .temperature_set = 19 },
-/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
- { .accelerometer = 1, .light = 1, .temperature_set = 20 },
-/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
- { .accelerometer = 1, .light = 1, .temperature_set = 21 },
-/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
- { .accelerometer = 1, .light = 1, .temperature_set = 22 },
-};
+static int applesmc_create_light_sensor(void)
+{
+ if (!smcreg.num_light_sensors)
+ return 0;
+ return applesmc_create_nodes(light_sensor_group, 1);
+}
+
+static void applesmc_release_light_sensor(void)
+{
+ if (!smcreg.num_light_sensors)
+ return;
+ applesmc_destroy_nodes(light_sensor_group);
+}
+
+static int applesmc_create_key_backlight(void)
+{
+ if (!smcreg.has_key_backlight)
+ return 0;
+ applesmc_led_wq = create_singlethread_workqueue("applesmc-led");
+ if (!applesmc_led_wq)
+ return -ENOMEM;
+ return led_classdev_register(&pdev->dev, &applesmc_backlight);
+}
+
+static void applesmc_release_key_backlight(void)
+{
+ if (!smcreg.has_key_backlight)
+ return;
+ led_classdev_unregister(&applesmc_backlight);
+ destroy_workqueue(applesmc_led_wq);
+}
+
+static int applesmc_dmi_match(const struct dmi_system_id *id)
+{
+ return 1;
+}
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
* So we need to put "Apple MacBook Pro" before "Apple MacBook". */
static __initdata struct dmi_system_id applesmc_whitelist[] = {
- { applesmc_dmi_match, "Apple MacBook Air 2", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
- &applesmc_dmi_data[15]},
{ applesmc_dmi_match, "Apple MacBook Air", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
- &applesmc_dmi_data[7]},
- { applesmc_dmi_match, "Apple MacBook Pro 7", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
- &applesmc_dmi_data[22]},
- { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
- &applesmc_dmi_data[20]},
- { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
- &applesmc_dmi_data[19]},
- { applesmc_dmi_match, "Apple MacBook Pro 6", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
- &applesmc_dmi_data[21]},
- { applesmc_dmi_match, "Apple MacBook Pro 5", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
- &applesmc_dmi_data[12]},
- { applesmc_dmi_match, "Apple MacBook Pro 4", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") },
- &applesmc_dmi_data[8]},
- { applesmc_dmi_match, "Apple MacBook Pro 3", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
- &applesmc_dmi_data[9]},
- { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
- &applesmc_dmi_data[18]},
+ },
{ applesmc_dmi_match, "Apple MacBook Pro", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
- &applesmc_dmi_data[0]},
- { applesmc_dmi_match, "Apple MacBook (v2)", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
- &applesmc_dmi_data[1]},
- { applesmc_dmi_match, "Apple MacBook (v3)", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
- &applesmc_dmi_data[6]},
- { applesmc_dmi_match, "Apple MacBook 4", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4") },
- &applesmc_dmi_data[6]},
- { applesmc_dmi_match, "Apple MacBook 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5") },
- &applesmc_dmi_data[11]},
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro") },
+ },
{ applesmc_dmi_match, "Apple MacBook", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
- &applesmc_dmi_data[2]},
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+ },
{ applesmc_dmi_match, "Apple Macmini", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"Macmini") },
- &applesmc_dmi_data[3]},
- { applesmc_dmi_match, "Apple MacPro2", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
- &applesmc_dmi_data[4]},
- { applesmc_dmi_match, "Apple MacPro3", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacPro3") },
- &applesmc_dmi_data[16]},
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") },
+ },
{ applesmc_dmi_match, "Apple MacPro", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
- &applesmc_dmi_data[4]},
- { applesmc_dmi_match, "Apple iMac 9,1", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
- &applesmc_dmi_data[17]},
- { applesmc_dmi_match, "Apple iMac 8", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
- &applesmc_dmi_data[13]},
- { applesmc_dmi_match, "Apple iMac 6", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
- &applesmc_dmi_data[14]},
- { applesmc_dmi_match, "Apple iMac 5", {
- DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
- &applesmc_dmi_data[10]},
+ },
{ applesmc_dmi_match, "Apple iMac", {
- DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
- DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
- &applesmc_dmi_data[5]},
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac") },
+ },
{ .ident = NULL }
};
static int __init applesmc_init(void)
{
int ret;
- int count;
- int i;
if (!dmi_check_system(applesmc_whitelist)) {
- printk(KERN_WARNING "applesmc: supported laptop not found!\n");
+ pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
@@ -1661,83 +1246,34 @@ static int __init applesmc_init(void)
goto out_driver;
}
- ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_name.attr);
+ /* create register cache */
+ ret = applesmc_init_smcreg();
if (ret)
goto out_device;
- /* Create key enumeration sysfs files */
- ret = sysfs_create_group(&pdev->dev.kobj, &key_enumeration_group);
+ ret = applesmc_create_nodes(info_group, 1);
if (ret)
- goto out_name;
-
- /* create fan files */
- count = applesmc_get_fan_count();
- if (count < 0)
- printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
- else
- printk(KERN_INFO "applesmc: %d fans found.\n", count);
+ goto out_smcreg;
- if (count > 4) {
- count = 4;
- printk(KERN_WARNING "applesmc: More than 4 fans found,"
- " but at most 4 fans are supported"
- " by the driver.\n");
- }
-
- while (fans_handled < count) {
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[fans_handled]);
- if (ret)
- goto out_fans;
- fans_handled++;
- }
-
- for (i = 0;
- temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
- i++) {
- if (temperature_attributes[i] == NULL ||
- label_attributes[i] == NULL) {
- printk(KERN_ERR "applesmc: More temperature sensors "
- "in temperature_sensors_sets (at least %i)"
- "than available sysfs files in "
- "temperature_attributes (%i), please report "
- "this bug.\n", i, i-1);
- goto out_temperature;
- }
- ret = sysfs_create_file(&pdev->dev.kobj,
- temperature_attributes[i]);
- if (ret)
- goto out_temperature;
- ret = sysfs_create_file(&pdev->dev.kobj,
- label_attributes[i]);
- if (ret)
- goto out_temperature;
- }
+ ret = applesmc_create_nodes(fan_group, smcreg.fan_count);
+ if (ret)
+ goto out_info;
- if (applesmc_accelerometer) {
- ret = applesmc_create_accelerometer();
- if (ret)
- goto out_temperature;
- }
+ ret = applesmc_create_nodes(temp_group, smcreg.temp_count);
+ if (ret)
+ goto out_fans;
- if (applesmc_light) {
- /* Add light sensor file */
- ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_light.attr);
- if (ret)
- goto out_accelerometer;
+ ret = applesmc_create_accelerometer();
+ if (ret)
+ goto out_temperature;
- /* Create the workqueue */
- applesmc_led_wq = create_singlethread_workqueue("applesmc-led");
- if (!applesmc_led_wq) {
- ret = -ENOMEM;
- goto out_light_sysfs;
- }
+ ret = applesmc_create_light_sensor();
+ if (ret)
+ goto out_accelerometer;
- /* register as a led device */
- ret = led_classdev_register(&pdev->dev, &applesmc_backlight);
- if (ret < 0)
- goto out_light_wq;
- }
+ ret = applesmc_create_key_backlight();
+ if (ret)
+ goto out_light_sysfs;
hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(hwmon_dev)) {
@@ -1745,32 +1281,22 @@ static int __init applesmc_init(void)
goto out_light_ledclass;
}
- printk(KERN_INFO "applesmc: driver successfully loaded.\n");
-
return 0;
out_light_ledclass:
- if (applesmc_light)
- led_classdev_unregister(&applesmc_backlight);
-out_light_wq:
- if (applesmc_light)
- destroy_workqueue(applesmc_led_wq);
+ applesmc_release_key_backlight();
out_light_sysfs:
- if (applesmc_light)
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_light.attr);
+ applesmc_release_light_sensor();
out_accelerometer:
- if (applesmc_accelerometer)
- applesmc_release_accelerometer();
+ applesmc_release_accelerometer();
out_temperature:
- sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
+ applesmc_destroy_nodes(temp_group);
out_fans:
- while (fans_handled)
- sysfs_remove_group(&pdev->dev.kobj,
- &fan_attribute_groups[--fans_handled]);
- sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
-out_name:
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
+ applesmc_destroy_nodes(fan_group);
+out_info:
+ applesmc_destroy_nodes(info_group);
+out_smcreg:
+ applesmc_destroy_smcreg();
out_device:
platform_device_unregister(pdev);
out_driver:
@@ -1778,32 +1304,23 @@ out_driver:
out_region:
release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS);
out:
- printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret);
+ pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
static void __exit applesmc_exit(void)
{
hwmon_device_unregister(hwmon_dev);
- if (applesmc_light) {
- led_classdev_unregister(&applesmc_backlight);
- destroy_workqueue(applesmc_led_wq);
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_light.attr);
- }
- if (applesmc_accelerometer)
- applesmc_release_accelerometer();
- sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
- while (fans_handled)
- sysfs_remove_group(&pdev->dev.kobj,
- &fan_attribute_groups[--fans_handled]);
- sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
- sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
+ applesmc_release_key_backlight();
+ applesmc_release_light_sensor();
+ applesmc_release_accelerometer();
+ applesmc_destroy_nodes(temp_group);
+ applesmc_destroy_nodes(fan_group);
+ applesmc_destroy_nodes(info_group);
+ applesmc_destroy_smcreg();
platform_device_unregister(pdev);
platform_driver_unregister(&applesmc_driver);
release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS);
-
- printk(KERN_INFO "applesmc: driver unloaded.\n");
}
module_init(applesmc_init);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 7dada559b3a1..c02a052d3085 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -36,6 +36,8 @@
asb100 7 3 1 4 0x31 0x0694 yes no
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -701,8 +703,7 @@ static int asb100_detect(struct i2c_client *client,
int val1, val2;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- pr_debug("asb100.o: detect failed, "
- "smbus byte data not supported!\n");
+ pr_debug("detect failed, smbus byte data not supported!\n");
return -ENODEV;
}
@@ -715,7 +716,7 @@ static int asb100_detect(struct i2c_client *client,
(((!(val1 & 0x80)) && (val2 != 0x94)) ||
/* Check for ASB100 ID (high byte ) */
((val1 & 0x80) && (val2 != 0x06)))) {
- pr_debug("asb100: detect failed, bad chip id 0x%02x!\n", val2);
+ pr_debug("detect failed, bad chip id 0x%02x!\n", val2);
return -ENODEV;
}
@@ -744,7 +745,7 @@ static int asb100_probe(struct i2c_client *client,
data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL);
if (!data) {
- pr_debug("asb100.o: probe failed, kzalloc failed!\n");
+ pr_debug("probe failed, kzalloc failed!\n");
err = -ENOMEM;
goto ERROR0;
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 23b8555215d2..b5e892017e0c 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -5,12 +5,15 @@
* See COPYING in the top level directory of the kernel tree.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <acpi/acpi.h>
#include <acpi/acpixf.h>
@@ -20,6 +23,21 @@
#define ATK_HID "ATK0110"
+static bool new_if;
+module_param(new_if, bool, 0);
+MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
+
+static const struct dmi_system_id __initconst atk_force_new_if[] = {
+ {
+ /* Old interface has broken MCH temp monitoring */
+ .ident = "Asus Sabertooth X58",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+ }
+ },
+ { }
+};
+
/* Minimum time between readings, enforced in order to avoid
* hogging the CPU.
*/
@@ -1300,7 +1318,9 @@ static int atk_probe_if(struct atk_data *data)
* analysis of multiple DSDTs indicates that when both interfaces
* are present the new one (GGRP/GITM) is not functional.
*/
- if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+ if (new_if)
+ dev_info(dev, "Overriding interface detection\n");
+ if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle && !new_if)
data->old_interface = true;
else if (data->enumerate_handle && data->read_handle &&
data->write_handle)
@@ -1414,14 +1434,16 @@ static int __init atk0110_init(void)
/* Make sure it's safe to access the device through ACPI */
if (!acpi_resources_are_enforced()) {
- pr_err("atk: Resources not safely usable due to "
- "acpi_enforce_resources kernel parameter\n");
+ pr_err("Resources not safely usable due to acpi_enforce_resources kernel parameter\n");
return -EBUSY;
}
+ if (dmi_check_system(atk_force_new_if))
+ new_if = true;
+
ret = acpi_bus_register_driver(&atk_driver);
if (ret)
- pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
+ pr_info("acpi_bus_register_driver failed: %d\n", ret);
return ret;
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 42de98d73ff5..194ca0aa8b0c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -20,6 +20,8 @@
* 02110-1301 USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -445,8 +447,8 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
* without thermal sensors will be filtered out.
*/
if (!cpu_has(c, X86_FEATURE_DTS)) {
- printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
- " has no thermal sensor.\n", c->x86_model);
+ pr_info("CPU (model=0x%x) has no thermal sensor\n",
+ c->x86_model);
return 0;
}
@@ -466,7 +468,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
pdev = platform_device_alloc(DRVNAME, cpu);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
@@ -478,8 +480,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_free;
}
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 980c17d5eeae..d9c592713919 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -25,6 +25,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -75,12 +77,14 @@ enum chips { dme1737, sch5027, sch311x, sch5127 };
* in4 +12V
* in5 VTR (+3.3V stby)
* in6 Vbat
+ * in7 Vtrip (sch5127 only)
*
* --------------------------------------------------------------------- */
-/* Voltages (in) numbered 0-6 (ix) */
-#define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) \
- : 0x94 + (ix))
+/* Voltages (in) numbered 0-7 (ix) */
+#define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) : \
+ (ix) < 7 ? 0x94 + (ix) : \
+ 0x1f)
#define DME1737_REG_IN_MIN(ix) ((ix) < 5 ? 0x44 + (ix) * 2 \
: 0x91 + (ix) * 2)
#define DME1737_REG_IN_MAX(ix) ((ix) < 5 ? 0x45 + (ix) * 2 \
@@ -99,10 +103,11 @@ enum chips { dme1737, sch5027, sch311x, sch5127 };
* IN_TEMP_LSB(1) = [temp3, temp1]
* IN_TEMP_LSB(2) = [in4, temp2]
* IN_TEMP_LSB(3) = [in3, in0]
- * IN_TEMP_LSB(4) = [in2, in1] */
+ * IN_TEMP_LSB(4) = [in2, in1]
+ * IN_TEMP_LSB(5) = [res, in7] */
#define DME1737_REG_IN_TEMP_LSB(ix) (0x84 + (ix))
-static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
-static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0, 5};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4, 4};
static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
@@ -143,7 +148,7 @@ static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
#define DME1737_REG_ALARM1 0x41
#define DME1737_REG_ALARM2 0x42
#define DME1737_REG_ALARM3 0x83
-static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17, 18};
static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
@@ -188,6 +193,7 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
#define HAS_PWM_MIN (1 << 4) /* bit 4 */
#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */
#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */
+#define HAS_IN7 (1 << 17) /* bit 17 */
/* ---------------------------------------------------------------------
* Data structures and manipulation thereof
@@ -211,9 +217,9 @@ struct dme1737_data {
u32 has_features;
/* Register values */
- u16 in[7];
- u8 in_min[7];
- u8 in_max[7];
+ u16 in[8];
+ u8 in_min[8];
+ u8 in_max[8];
s16 temp[3];
s8 temp_min[3];
s8 temp_max[3];
@@ -245,7 +251,7 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
3300};
static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
- 3300};
+ 3300, 1500};
#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
(type) == sch5027 ? IN_NOMINAL_SCH5027 : \
(type) == sch5127 ? IN_NOMINAL_SCH5127 : \
@@ -578,7 +584,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
{
struct dme1737_data *data = dev_get_drvdata(dev);
int ix;
- u8 lsb[5];
+ u8 lsb[6];
mutex_lock(&data->update_lock);
@@ -601,6 +607,9 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Voltage inputs are stored as 16 bit values even
* though they have only 12 bits resolution. This is
* to make it consistent with the temp inputs. */
+ if (ix == 7 && !(data->has_features & HAS_IN7)) {
+ continue;
+ }
data->in[ix] = dme1737_read(data,
DME1737_REG_IN(ix)) << 8;
data->in_min[ix] = dme1737_read(data,
@@ -633,10 +642,16 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
* which the registers are read (MSB first, then LSB) is
* important! */
for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+ if (ix == 5 && !(data->has_features & HAS_IN7)) {
+ continue;
+ }
lsb[ix] = dme1737_read(data,
DME1737_REG_IN_TEMP_LSB(ix));
}
for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+ if (ix == 7 && !(data->has_features & HAS_IN7)) {
+ continue;
+ }
data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
}
@@ -760,7 +775,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* ---------------------------------------------------------------------
* Voltage sysfs attributes
- * ix = [0-5]
+ * ix = [0-7]
* --------------------------------------------------------------------- */
#define SYS_IN_INPUT 0
@@ -1437,7 +1452,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
* Sysfs device attribute defines and structs
* --------------------------------------------------------------------- */
-/* Voltages 0-6 */
+/* Voltages 0-7 */
#define SENSOR_DEVICE_ATTR_IN(ix) \
static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
@@ -1456,6 +1471,7 @@ SENSOR_DEVICE_ATTR_IN(3);
SENSOR_DEVICE_ATTR_IN(4);
SENSOR_DEVICE_ATTR_IN(5);
SENSOR_DEVICE_ATTR_IN(6);
+SENSOR_DEVICE_ATTR_IN(7);
/* Temperatures 1-3 */
@@ -1574,7 +1590,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* for ISA devices */
* created unconditionally. The attributes that need modification of their
* permissions are created read-only and write permissions are added or removed
* on the fly when required */
-static struct attribute *dme1737_attr[] ={
+static struct attribute *dme1737_attr[] = {
/* Voltages */
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1679,7 +1695,7 @@ static const struct attribute_group dme1737_zone3_group = {
};
-/* The following struct holds temp zone hysteresis related attributes, which
+/* The following struct holds temp zone hysteresis related attributes, which
* are not available in all chips. The following chips support them:
* DME1737, SCH311x */
static struct attribute *dme1737_zone_hyst_attr[] = {
@@ -1693,6 +1709,21 @@ static const struct attribute_group dme1737_zone_hyst_group = {
.attrs = dme1737_zone_hyst_attr,
};
+/* The following struct holds voltage in7 related attributes, which
+ * are not available in all chips. The following chips support them:
+ * SCH5127 */
+static struct attribute *dme1737_in7_attr[] = {
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in7_min.dev_attr.attr,
+ &sensor_dev_attr_in7_max.dev_attr.attr,
+ &sensor_dev_attr_in7_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_in7_group = {
+ .attrs = dme1737_in7_attr,
+};
+
/* The following structs hold the PWM attributes, some of which are optional.
* Their creation depends on the chip configuration which is determined during
* module load. */
@@ -1984,6 +2015,9 @@ static void dme1737_remove_files(struct device *dev)
if (data->has_features & HAS_ZONE_HYST) {
sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
}
+ if (data->has_features & HAS_IN7) {
+ sysfs_remove_group(&dev->kobj, &dme1737_in7_group);
+ }
sysfs_remove_group(&dev->kobj, &dme1737_group);
if (!data->client) {
@@ -1997,43 +2031,58 @@ static int dme1737_create_files(struct device *dev)
int err, ix;
/* Create a name attribute for ISA devices */
- if (!data->client &&
- (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) {
- goto exit;
+ if (!data->client) {
+ err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr);
+ if (err) {
+ goto exit;
+ }
}
/* Create standard sysfs attributes */
- if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) {
+ err = sysfs_create_group(&dev->kobj, &dme1737_group);
+ if (err) {
goto exit_remove;
}
/* Create chip-dependent sysfs attributes */
- if ((data->has_features & HAS_TEMP_OFFSET) &&
- (err = sysfs_create_group(&dev->kobj,
- &dme1737_temp_offset_group))) {
- goto exit_remove;
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ err = sysfs_create_group(&dev->kobj,
+ &dme1737_temp_offset_group);
+ if (err) {
+ goto exit_remove;
+ }
}
- if ((data->has_features & HAS_VID) &&
- (err = sysfs_create_group(&dev->kobj,
- &dme1737_vid_group))) {
- goto exit_remove;
+ if (data->has_features & HAS_VID) {
+ err = sysfs_create_group(&dev->kobj, &dme1737_vid_group);
+ if (err) {
+ goto exit_remove;
+ }
}
- if ((data->has_features & HAS_ZONE3) &&
- (err = sysfs_create_group(&dev->kobj,
- &dme1737_zone3_group))) {
- goto exit_remove;
+ if (data->has_features & HAS_ZONE3) {
+ err = sysfs_create_group(&dev->kobj, &dme1737_zone3_group);
+ if (err) {
+ goto exit_remove;
+ }
}
- if ((data->has_features & HAS_ZONE_HYST) &&
- (err = sysfs_create_group(&dev->kobj,
- &dme1737_zone_hyst_group))) {
- goto exit_remove;
+ if (data->has_features & HAS_ZONE_HYST) {
+ err = sysfs_create_group(&dev->kobj, &dme1737_zone_hyst_group);
+ if (err) {
+ goto exit_remove;
+ }
+ }
+ if (data->has_features & HAS_IN7) {
+ err = sysfs_create_group(&dev->kobj, &dme1737_in7_group);
+ if (err) {
+ goto exit_remove;
+ }
}
/* Create fan sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
if (data->has_features & HAS_FAN(ix)) {
- if ((err = sysfs_create_group(&dev->kobj,
- &dme1737_fan_group[ix]))) {
+ err = sysfs_create_group(&dev->kobj,
+ &dme1737_fan_group[ix]);
+ if (err) {
goto exit_remove;
}
}
@@ -2042,14 +2091,17 @@ static int dme1737_create_files(struct device *dev)
/* Create PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
if (data->has_features & HAS_PWM(ix)) {
- if ((err = sysfs_create_group(&dev->kobj,
- &dme1737_pwm_group[ix]))) {
+ err = sysfs_create_group(&dev->kobj,
+ &dme1737_pwm_group[ix]);
+ if (err) {
goto exit_remove;
}
- if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
- (err = sysfs_create_file(&dev->kobj,
- dme1737_auto_pwm_min_attr[ix]))) {
- goto exit_remove;
+ if ((data->has_features & HAS_PWM_MIN) && (ix < 3)) {
+ err = sysfs_create_file(&dev->kobj,
+ dme1737_auto_pwm_min_attr[ix]);
+ if (err) {
+ goto exit_remove;
+ }
}
}
}
@@ -2186,7 +2238,7 @@ static int dme1737_init_device(struct device *dev)
data->has_features |= HAS_ZONE3;
break;
case sch5127:
- data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+ data->has_features |= HAS_FAN(2) | HAS_PWM(2) | HAS_IN7;
break;
default:
break;
@@ -2279,8 +2331,9 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
dme1737_sio_outb(sio_cip, 0x07, 0x0a);
/* Get the base address of the runtime registers */
- if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
- dme1737_sio_inb(sio_cip, 0x61))) {
+ addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+ dme1737_sio_inb(sio_cip, 0x61);
+ if (!addr) {
err = -ENODEV;
goto exit;
}
@@ -2361,13 +2414,15 @@ static int dme1737_i2c_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
/* Initialize the DME1737 chip */
- if ((err = dme1737_init_device(dev))) {
+ err = dme1737_init_device(dev);
+ if (err) {
dev_err(dev, "Failed to initialize device.\n");
goto exit_kfree;
}
/* Create sysfs files */
- if ((err = dme1737_create_files(dev))) {
+ err = dme1737_create_files(dev);
+ if (err) {
dev_err(dev, "Failed to create sysfs files.\n");
goto exit_kfree;
}
@@ -2444,9 +2499,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr)
dme1737_sio_outb(sio_cip, 0x07, 0x0a);
/* Get the base address of the runtime registers */
- if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
- dme1737_sio_inb(sio_cip, 0x61))) {
- printk(KERN_ERR "dme1737: Base address not set.\n");
+ base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+ dme1737_sio_inb(sio_cip, 0x61);
+ if (!base_addr) {
+ pr_err("Base address not set\n");
err = -ENODEV;
goto exit;
}
@@ -2474,21 +2530,22 @@ static int __init dme1737_isa_device_add(unsigned short addr)
if (err)
goto exit;
- if (!(pdev = platform_device_alloc("dme1737", addr))) {
- printk(KERN_ERR "dme1737: Failed to allocate device.\n");
+ pdev = platform_device_alloc("dme1737", addr);
+ if (!pdev) {
+ pr_err("Failed to allocate device\n");
err = -ENOMEM;
goto exit;
}
- if ((err = platform_device_add_resources(pdev, &res, 1))) {
- printk(KERN_ERR "dme1737: Failed to add device resource "
- "(err = %d).\n", err);
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ pr_err("Failed to add device resource (err = %d)\n", err);
goto exit_device_put;
}
- if ((err = platform_device_add(pdev))) {
- printk(KERN_ERR "dme1737: Failed to add device (err = %d).\n",
- err);
+ err = platform_device_add(pdev);
+ if (err) {
+ pr_err("Failed to add device (err = %d)\n", err);
goto exit_device_put;
}
@@ -2514,11 +2571,12 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
(unsigned short)res->start,
(unsigned short)res->start + DME1737_EXTENT - 1);
- err = -EBUSY;
- goto exit;
- }
+ err = -EBUSY;
+ goto exit;
+ }
- if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto exit_release_region;
}
@@ -2565,13 +2623,15 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
/* Initialize the chip */
- if ((err = dme1737_init_device(dev))) {
+ err = dme1737_init_device(dev);
+ if (err) {
dev_err(dev, "Failed to initialize device.\n");
goto exit_kfree;
}
/* Create sysfs files */
- if ((err = dme1737_create_files(dev))) {
+ err = dme1737_create_files(dev);
+ if (err) {
dev_err(dev, "Failed to create sysfs files.\n");
goto exit_kfree;
}
@@ -2628,7 +2688,8 @@ static int __init dme1737_init(void)
int err;
unsigned short addr;
- if ((err = i2c_add_driver(&dme1737_i2c_driver))) {
+ err = i2c_add_driver(&dme1737_i2c_driver);
+ if (err) {
goto exit;
}
@@ -2641,12 +2702,14 @@ static int __init dme1737_init(void)
return 0;
}
- if ((err = platform_driver_register(&dme1737_isa_driver))) {
+ err = platform_driver_register(&dme1737_isa_driver);
+ if (err) {
goto exit_del_i2c_driver;
}
/* Sets global pdev as a side effect */
- if ((err = dme1737_isa_device_add(addr))) {
+ err = dme1737_isa_device_add(addr);
+ if (err) {
goto exit_del_isa_driver;
}
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
new file mode 100644
index 000000000000..257957c69d92
--- /dev/null
+++ b/drivers/hwmon/ds620.c
@@ -0,0 +1,337 @@
+/*
+ * ds620.c - Support for temperature sensor and thermostat DS620
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * based on ds1621.c by Christian W. Zuckschwerdt <zany@triq.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/i2c/ds620.h>
+
+/*
+ * DS620 configuration register layout (the constants below name these bits)
+ * 15 14 13 12 11 10 09 08
+ * |Done|NVB |THF |TLF |R1 |R0 |AUTOC|1SHOT|
+ *
+ * 07 06 05 04 03 02 01 00
+ * |PO2 |PO1 |A2 |A1 |A0 | | | |
+ */
+#define DS620_REG_CONFIG_DONE 0x8000
+#define DS620_REG_CONFIG_NVB 0x4000
+#define DS620_REG_CONFIG_THF 0x2000
+#define DS620_REG_CONFIG_TLF 0x1000
+#define DS620_REG_CONFIG_R1 0x0800
+#define DS620_REG_CONFIG_R0 0x0400
+#define DS620_REG_CONFIG_AUTOC 0x0200
+#define DS620_REG_CONFIG_1SHOT 0x0100
+#define DS620_REG_CONFIG_PO2 0x0080
+#define DS620_REG_CONFIG_PO1 0x0040
+#define DS620_REG_CONFIG_A2 0x0020
+#define DS620_REG_CONFIG_A1 0x0010
+#define DS620_REG_CONFIG_A0 0x0008
+
+/* The DS620 registers */
+static const u8 DS620_REG_TEMP[3] = {
+ 0xAA, /* input, word, RO */
+ 0xA2, /* min, word, RW */
+ 0xA0, /* max, word, RW */
+};
+
+#define DS620_REG_CONF 0xAC /* word, RW */
+#define DS620_COM_START 0x51 /* no data */
+#define DS620_COM_STOP 0x22 /* no data */
+
+/* Each client has this additional data */
+struct ds620_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ s16 temp[3]; /* Register values, word, signed (two's complement) */
+};
+
+/*
+ * Temperature registers are word-sized.
+ * DS620 uses a high-byte first convention, which is exactly opposite to
+ * the SMBus standard.
+ */
+static int ds620_read_temp(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+ return swab16(ret);
+}
+
+static int ds620_write_temp(struct i2c_client *client, u8 reg, u16 value)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(value));
+}
+
+static void ds620_init_client(struct i2c_client *client)
+{
+ struct ds620_platform_data *ds620_info = client->dev.platform_data;
+ u16 conf, new_conf;
+
+ new_conf = conf =
+ swab16(i2c_smbus_read_word_data(client, DS620_REG_CONF));
+
+ /* switch to continuous conversion mode */
+ new_conf &= ~DS620_REG_CONFIG_1SHOT;
+ /* already high at power-on, but don't trust the BIOS! */
+ new_conf |= DS620_REG_CONFIG_PO2;
+ /* thermostat mode according to platform data */
+ if (ds620_info && ds620_info->pomode == 1)
+ new_conf &= ~DS620_REG_CONFIG_PO1; /* PO_LOW */
+ else if (ds620_info && ds620_info->pomode == 2)
+ new_conf |= DS620_REG_CONFIG_PO1; /* PO_HIGH */
+ else
+ new_conf &= ~DS620_REG_CONFIG_PO2; /* always low */
+ /* with highest precision */
+ new_conf |= DS620_REG_CONFIG_R1 | DS620_REG_CONFIG_R0;
+
+ if (conf != new_conf)
+ i2c_smbus_write_word_data(client, DS620_REG_CONF,
+ swab16(new_conf));
+
+ /* start conversion */
+ i2c_smbus_write_byte(client, DS620_COM_START);
+}
+
+static struct ds620_data *ds620_update_client(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds620_data *data = i2c_get_clientdata(client);
+ struct ds620_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ int i;
+ int res;
+
+ dev_dbg(&client->dev, "Starting ds620 update\n");
+
+ for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+ res = ds620_read_temp(client,
+ DS620_REG_TEMP[i]);
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+
+ data->temp[i] = res;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ds620_data *data = ds620_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", ((data->temp[attr->index] / 8) * 625) / 10);
+}
+
+static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ int res;
+ long val;
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds620_data *data = i2c_get_clientdata(client);
+
+ res = strict_strtol(buf, 10, &val);
+
+ if (res)
+ return res;
+
+ val = (val * 10 / 625) * 8;
+
+ mutex_lock(&data->update_lock);
+ data->temp[attr->index] = val;
+ ds620_write_temp(client, DS620_REG_TEMP[attr->index],
+ data->temp[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ds620_data *data = ds620_update_client(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ u16 conf, new_conf;
+ int res;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* reset alarms if necessary */
+ res = i2c_smbus_read_word_data(client, DS620_REG_CONF);
+ if (res < 0)
+ return res;
+
+ conf = swab16(res);
+ new_conf = conf;
+ new_conf &= ~attr->index;
+ if (conf != new_conf) {
+ res = i2c_smbus_write_word_data(client, DS620_REG_CONF,
+ swab16(new_conf));
+ if (res < 0)
+ return res;
+ }
+
+ return sprintf(buf, "%d\n", !!(conf & attr->index));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
+ DS620_REG_CONFIG_TLF);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
+ DS620_REG_CONFIG_THF);
+
+static struct attribute *ds620_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ds620_group = {
+ .attrs = ds620_attributes,
+};
+
+static int ds620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ds620_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct ds620_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the DS620 chip */
+ ds620_init_client(client);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &ds620_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_files;
+ }
+
+ dev_info(&client->dev, "temperature sensor found\n");
+
+ return 0;
+
+exit_remove_files:
+ sysfs_remove_group(&client->dev.kobj, &ds620_group);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int ds620_remove(struct i2c_client *client)
+{
+ struct ds620_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ds620_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ds620_id[] = {
+ {"ds620", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ds620_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ds620_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "ds620",
+ },
+ .probe = ds620_probe,
+ .remove = ds620_remove,
+ .id_table = ds620_id,
+};
+
+static int __init ds620_init(void)
+{
+ return i2c_add_driver(&ds620_driver);
+}
+
+static void __exit ds620_exit(void)
+{
+ i2c_del_driver(&ds620_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("DS620 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ds620_init);
+module_exit(ds620_exit);
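Illustrative sketch, not part of the patch: a standalone userspace program showing the register-to-millidegree conversion that show_temp() above performs, and a reminder of why ds620_read_temp() byte-swaps. The 0x0C80 sample value is an assumed example, not taken from the driver.

	#include <stdio.h>

	/*
	 * The DS620 stores its 13-bit reading left-justified in a 16-bit word,
	 * so dropping the low 3 bits leaves an LSB of 0.0625 degC (62.5 mdegC),
	 * which is what the ((reg / 8) * 625) / 10 expression computes.
	 */
	static int ds620_reg_to_mdegc(short reg)
	{
		return ((reg / 8) * 625) / 10;
	}

	int main(void)
	{
		/* assumed sample: 0x0C80 corresponds to 25.0 degC */
		printf("%d\n", ds620_reg_to_mdegc(0x0C80));	/* prints 25000 */
		/*
		 * The chip sends the high byte first, while SMBus word reads are
		 * little-endian, hence the swab16() in ds620_read_temp().
		 */
		return 0;
	}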
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 8dee3f38fdfb..cd2a6e437aec 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -269,23 +269,30 @@ static int emc1403_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int id;
- /* Check if thermal chip is SMSC and EMC1403 */
+ /* Check if thermal chip is SMSC and EMC1403 or EMC1423 */
id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
if (id != 0x5d)
return -ENODEV;
+ id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+ switch (id) {
+ case 0x21:
+ strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ break;
+ case 0x23:
+ strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+ break;
/* Note: 0x25 is the 1404 which is very similar and this
driver could be extended */
- id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
- if (id != 0x21)
+ default:
return -ENODEV;
+ }
id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
if (id != 0x01)
return -ENODEV;
- strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
return 0;
}
@@ -337,11 +344,12 @@ static int emc1403_remove(struct i2c_client *client)
}
static const unsigned short emc1403_address_list[] = {
- 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+ 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
};
static const struct i2c_device_id emc1403_idtable[] = {
{ "emc1403", 0 },
+ { "emc1423", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 525a00bd70b1..92f949767ece 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -28,6 +28,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1309,7 +1311,7 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
if (!(data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL))) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Out of memory\n");
+ pr_err("Out of memory\n");
goto exit;
}
@@ -1451,7 +1453,7 @@ static int __init f71805f_device_add(unsigned short address,
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
@@ -1462,22 +1464,20 @@ static int __init f71805f_device_add(unsigned short address,
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add_data(pdev, sio_data,
sizeof(struct f71805f_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
@@ -1516,30 +1516,27 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
sio_data->fnsel1 = superio_inb(sioaddr, SIO_REG_FNSEL1);
break;
default:
- printk(KERN_INFO DRVNAME ": Unsupported Fintek device, "
- "skipping\n");
+ pr_info("Unsupported Fintek device, skipping\n");
goto exit;
}
superio_select(sioaddr, F71805F_LD_HWM);
if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
- printk(KERN_WARNING DRVNAME ": Device not activated, "
- "skipping\n");
+ pr_warn("Device not activated, skipping\n");
goto exit;
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
if (*address == 0) {
- printk(KERN_WARNING DRVNAME ": Base address not set, "
- "skipping\n");
+ pr_warn("Base address not set, skipping\n");
goto exit;
}
*address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
err = 0;
- printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %u\n",
- names[sio_data->kind], *address,
- superio_inb(sioaddr, SIO_REG_DEVREV));
+ pr_info("Found %s chip at %#x, revision %u\n",
+ names[sio_data->kind], *address,
+ superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
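A minimal userspace stand-in (not kernel code) for the pr_fmt()/pr_err() conversion in the f71805f hunks above, repeated for several drivers below: because pr_fmt() is expanded inside the pr_* helpers, every message gets the module-name prefix automatically and the explicit DRVNAME/KERN_* strings can be dropped. fprintf stands in for printk here.

	#include <stdio.h>

	#define KBUILD_MODNAME	"f71805f"		/* normally provided by Kbuild */
	#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
	/* stand-in for the kernel's pr_err(), which wraps printk(KERN_ERR ...) */
	#define pr_err(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		pr_err("Device allocation failed\n");
		/* prints "f71805f: Device allocation failed" */
		return 0;
	}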
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 75afb3b0e076..3f49dd376f02 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -18,6 +18,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -865,8 +867,7 @@ static inline int superio_enter(int base)
{
/* Don't step on other drivers' I/O space by accident */
if (!request_muxed_region(base, 2, DRVNAME)) {
- printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
- base);
+ pr_err("I/O address 0x%04x already in use\n", base);
return -EBUSY;
}
@@ -2192,7 +2193,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID) {
- pr_debug(DRVNAME ": Not a Fintek device\n");
+ pr_debug("Not a Fintek device\n");
err = -ENODEV;
goto exit;
}
@@ -2215,8 +2216,8 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
sio_data->type = f8000;
break;
default:
- printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
- (unsigned int)devid);
+ pr_info("Unsupported Fintek device: %04x\n",
+ (unsigned int)devid);
err = -ENODEV;
goto exit;
}
@@ -2227,21 +2228,21 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
superio_select(sioaddr, SIO_F71882FG_LD_HWM);
if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
- printk(KERN_WARNING DRVNAME ": Device not activated\n");
+ pr_warn("Device not activated\n");
err = -ENODEV;
goto exit;
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
if (*address == 0) {
- printk(KERN_WARNING DRVNAME ": Base address not set\n");
+ pr_warn("Base address not set\n");
err = -ENODEV;
goto exit;
}
*address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
err = 0;
- printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %d\n",
+ pr_info("Found %s chip at %#x, revision %d\n",
f71882fg_names[sio_data->type], (unsigned int)*address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
@@ -2270,20 +2271,20 @@ static int __init f71882fg_device_add(unsigned short address,
err = platform_device_add_resources(f71882fg_pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed\n");
+ pr_err("Device resource addition failed\n");
goto exit_device_put;
}
err = platform_device_add_data(f71882fg_pdev, sio_data,
sizeof(struct f71882fg_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(f71882fg_pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed\n");
+ pr_err("Device addition failed\n");
goto exit_device_put;
}
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index d4d4ca65d371..aa6d8b686f82 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -49,7 +49,6 @@
#include <linux/kref.h>
/* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
/* Insmod parameters */
@@ -850,7 +849,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- static struct watchdog_info ident = {
+ struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_CARDRESET,
.identity = "FSC watchdog"
@@ -858,7 +857,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar
int i, ret = 0;
struct fschmd_data *data = filp->private_data;
- mutex_lock(&watchdog_mutex);
switch (cmd) {
case WDIOC_GETSUPPORT:
ident.firmware_version = data->revision;
@@ -915,7 +913,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar
default:
ret = -ENOTTY;
}
- mutex_unlock(&watchdog_mutex);
return ret;
}
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index a56a78412fcb..3d21fa2b97cd 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
@@ -147,7 +149,7 @@ int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
{
lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
- printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
+ pr_info("hardware type %s found\n", dmi->ident);
return 1;
}
@@ -303,11 +305,10 @@ static int lis3lv02d_add(struct acpi_device *device)
/* If possible use a "standard" axes order */
if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
- printk(KERN_INFO DRIVER_NAME ": Using custom axes %d,%d,%d\n",
- lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
+ pr_info("Using custom axes %d,%d,%d\n",
+ lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
} else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
- printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
- "using default axes configuration\n");
+ pr_info("laptop model unknown, using default axes configuration\n");
lis3_dev.ac = lis3lv02d_axis_normal;
}
@@ -385,7 +386,7 @@ static int __init lis3lv02d_init_module(void)
if (ret < 0)
return ret;
- printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
+ pr_info("driver loaded\n");
return 0;
}
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2b2ca1694f95..2582bfef6ccb 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hwmon-vid.h>
@@ -146,8 +148,8 @@ int vid_from_reg(int val, u8 vrm)
return(val > 0x77 ? 0 : (1500000 - (val * 12500) + 500) / 1000);
default: /* report 0 for unknown */
if (vrm)
- printk(KERN_WARNING "hwmon-vid: Requested unsupported "
- "VRM version (%u)\n", (unsigned int)vrm);
+ pr_warn("Requested unsupported VRM version (%u)\n",
+ (unsigned int)vrm);
return 0;
}
}
@@ -246,8 +248,7 @@ u8 vid_which_vrm(void)
}
vrm_ret = find_vrm(eff_family, eff_model, eff_stepping, c->x86_vendor);
if (vrm_ret == 0)
- printk(KERN_INFO "hwmon-vid: Unknown VRM version of your "
- "x86 CPU\n");
+ pr_info("Unknown VRM version of your x86 CPU\n");
return vrm_ret;
}
@@ -255,7 +256,7 @@ u8 vid_which_vrm(void)
#else
u8 vid_which_vrm(void)
{
- printk(KERN_INFO "hwmon-vid: Unknown VRM version of your CPU\n");
+ pr_info("Unknown VRM version of your CPU\n");
return 0;
}
#endif
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 29ea6753f3bb..a61e7815a2a9 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -10,6 +10,8 @@
the Free Software Foundation; version 2 of the License.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -119,7 +121,7 @@ static int __init hwmon_init(void)
hwmon_class = class_create(THIS_MODULE, "hwmon");
if (IS_ERR(hwmon_class)) {
- printk(KERN_ERR "hwmon.c: couldn't create sysfs class\n");
+ pr_err("couldn't create sysfs class\n");
return PTR_ERR(hwmon_class);
}
return 0;
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index eaee546af19a..bc6e2ab3a361 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ipmi.h>
#include <linux/module.h>
#include <linux/hwmon.h>
@@ -1090,7 +1092,7 @@ static int __init aem_init(void)
res = driver_register(&aem_driver.driver);
if (res) {
- printk(KERN_ERR "Can't register aem driver\n");
+ pr_err("Can't register aem driver\n");
return res;
}
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a428a9264195..316b64823f7b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -38,6 +38,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1570,26 +1572,25 @@ static int __init it87_find(unsigned short *address,
case 0xffff: /* No device at all */
goto exit;
default:
- pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
- chip_type);
+ pr_debug("Unsupported chip (DEVID=0x%x)\n", chip_type);
goto exit;
}
superio_select(PME);
if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
- pr_info("it87: Device not activated, skipping\n");
+ pr_info("Device not activated, skipping\n");
goto exit;
}
*address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
if (*address == 0) {
- pr_info("it87: Base address not set, skipping\n");
+ pr_info("Base address not set, skipping\n");
goto exit;
}
err = 0;
sio_data->revision = superio_inb(DEVREV) & 0x0f;
- pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
+ pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
chip_type, *address, sio_data->revision);
/* in8 (Vbat) is always internal */
@@ -1615,7 +1616,7 @@ static int __init it87_find(unsigned short *address,
} else {
/* We need at least 4 VID pins */
if (reg & 0x0f) {
- pr_info("it87: VID is disabled (pins used for GPIO)\n");
+ pr_info("VID is disabled (pins used for GPIO)\n");
sio_data->skip_vid = 1;
}
}
@@ -1651,7 +1652,7 @@ static int __init it87_find(unsigned short *address,
if (sio_data->type == it8720 && !(reg & (1 << 1))) {
reg |= (1 << 1);
superio_outb(IT87_SIO_PINX2_REG, reg);
- pr_notice("it87: Routing internal VCCH to in7\n");
+ pr_notice("Routing internal VCCH to in7\n");
}
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
@@ -1661,7 +1662,7 @@ static int __init it87_find(unsigned short *address,
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
if (sio_data->beep_pin)
- pr_info("it87: Beeping is supported\n");
+ pr_info("Beeping is supported\n");
/* Disable specific features based on DMI strings */
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -1675,8 +1676,7 @@ static int __init it87_find(unsigned short *address,
the PWM2 duty cycle, so we disable it.
I use the board name string as the trigger in case
the same board is ever used in other systems. */
- pr_info("it87: Disabling pwm2 due to "
- "hardware constraints\n");
+ pr_info("Disabling pwm2 due to hardware constraints\n");
sio_data->skip_pwm = (1 << 1);
}
}
@@ -2189,28 +2189,26 @@ static int __init it87_device_add(unsigned short address,
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add_data(pdev, sio_data,
sizeof(struct it87_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78c8dde..934991237061 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
/* Configuration register defines */
#define JC42_CFG_CRIT_ONLY (1 << 2)
+#define JC42_CFG_TCRIT_LOCK (1 << 6)
+#define JC42_CFG_EVENT_LOCK (1 << 7)
#define JC42_CFG_SHUTDOWN (1 << 8)
#define JC42_CFG_HYST_SHIFT 9
#define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct jc42_data *data = i2c_get_clientdata(client);
- long val;
+ unsigned long val;
int diff, hyst;
int err;
int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
static DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
show_temp_max, set_temp_max);
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
show_temp_crit_hyst, set_temp_crit_hyst);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
NULL
};
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+ unsigned int config = data->config;
+ bool readonly;
+
+ if (attr == &dev_attr_temp1_crit.attr)
+ readonly = config & JC42_CFG_TCRIT_LOCK;
+ else if (attr == &dev_attr_temp1_min.attr ||
+ attr == &dev_attr_temp1_max.attr)
+ readonly = config & JC42_CFG_EVENT_LOCK;
+ else if (attr == &dev_attr_temp1_crit_hyst.attr)
+ readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+ else
+ readonly = true;
+
+ return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
+ .is_visible = jc42_attribute_mode,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a2404cd3e..82bf65aa2968 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
/*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
*
@@ -25,7 +25,7 @@
#include <linux/pci.h>
#include <asm/processor.h>
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 0cee73a6124e..d805e8e57967 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
@@ -860,8 +862,7 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
(p->irq_flags2 & IRQF_TRIGGER_MASK),
DRIVER_NAME, &lis3_dev);
if (err < 0)
- printk(KERN_ERR DRIVER_NAME
- "No second IRQ. Limited functionality\n");
+ pr_err("No second IRQ. Limited functionality\n");
}
}
@@ -879,7 +880,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
switch (dev->whoami) {
case WAI_12B:
- printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n");
+ pr_info("12 bits sensor found\n");
dev->read_data = lis3lv02d_read_12;
dev->mdps_max_val = 2048;
dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
@@ -890,7 +891,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
dev->regs_size = ARRAY_SIZE(lis3_wai12_regs);
break;
case WAI_8B:
- printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
+ pr_info("8 bits sensor found\n");
dev->read_data = lis3lv02d_read_8;
dev->mdps_max_val = 128;
dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
@@ -901,7 +902,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
dev->regs_size = ARRAY_SIZE(lis3_wai8_regs);
break;
case WAI_3DC:
- printk(KERN_INFO DRIVER_NAME ": 8 bits 3DC sensor found\n");
+ pr_info("8 bits 3DC sensor found\n");
dev->read_data = lis3lv02d_read_8;
dev->mdps_max_val = 128;
dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
@@ -910,8 +911,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
dev->scale = LIS3_SENSITIVITY_8B;
break;
default:
- printk(KERN_ERR DRIVER_NAME
- ": unknown sensor type 0x%X\n", dev->whoami);
+ pr_err("unknown sensor type 0x%X\n", dev->whoami);
return -EINVAL;
}
@@ -935,7 +935,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
}
if (lis3lv02d_joystick_enable())
- printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
+ pr_err("joystick initialization failed\n");
/* passing in platform specific data is purely optional and only
* used by the SPI transport layer at the moment */
@@ -957,8 +957,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
/* bail if we did not get an IRQ from the bus layer */
if (!dev->irq) {
- printk(KERN_ERR DRIVER_NAME
- ": No IRQ. Disabling /dev/freefall\n");
+ pr_debug("No IRQ. Disabling /dev/freefall\n");
goto out;
}
@@ -985,12 +984,12 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
DRIVER_NAME, &lis3_dev);
if (err < 0) {
- printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n");
+ pr_err("Cannot get IRQ\n");
goto out;
}
if (misc_register(&lis3lv02d_misc_device))
- printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
+ pr_err("misc_register failed\n");
out:
return 0;
}
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3019d2..508cb291f71b 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
* value, it uses signed 8-bit values with LSB = 1 degree Celsius.
* For remote temperature, low and high limits, it uses signed 11-bit values
* with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
+ * For LM64 the actual remote diode temperature is 16 degrees Celsius higher
+ * than the register reading. Remote temperature setpoints have to be
+ * adapted accordingly.
*/
#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@ struct lm63_data {
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
+ int kind;
+ int temp2_offset;
/* registers values */
u8 config, config_fan;
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
}
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
- char *buf)
+/*
+ * There are 8-bit registers for both the local (temp1) and the remote
+ * (temp2) sensor. temp2_offset has to be applied to the remote sensor
+ * registers but not to the local ones, so we need separate 8-bit
+ * accessors for the local and the remote sensor.
+ */
+static ssize_t show_local_temp8(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
}
-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t show_remote_temp8(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct lm63_data *data = lm63_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+ + data->temp2_offset);
+}
+
+static ssize_t set_local_temp8(struct device *dev,
+ struct device_attribute *dummy,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
+ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
+ + data->temp2_offset);
}
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
int nr = attr->index;
mutex_lock(&data->update_lock);
- data->temp11[nr] = TEMP11_TO_REG(val);
+ data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
data->temp11[nr] >> 8);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+ + data->temp2_offset
- TEMP8_FROM_REG(data->temp2_crit_hyst));
}
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
long hyst;
mutex_lock(&data->update_lock);
- hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
+ hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
HYST_TO_REG(hyst));
mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
- set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
+ set_local_temp8, 1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
+/*
+ * On LM63, temp2_crit can be set only once, which should be the job
+ * of the bootloader.
+ */
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
+ NULL, 2);
static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
set_temp2_crit_hyst);
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
data->valid = 0;
mutex_init(&data->update_lock);
- /* Initialize the LM63 chip */
+ /* Set the device type */
+ data->kind = id->driver_data;
+ if (data->kind == lm64)
+ data->temp2_offset = 16000;
+
+ /* Initialize chip */
lm63_init_client(new_client);
/* Register sysfs hooks */
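Illustrative sketch, not part of the patch: the LM64 offset handling described in the lm63 comments above, reduced to a standalone program. Readings gain the 16 degC diode offset on the way to user space and set-points lose it on the way back. The 44 degC sample value is assumed for illustration.

	#include <stdio.h>

	#define LM64_TEMP2_OFFSET	16000	/* mdegC, set in lm63_probe() for LM64 */

	/* register value (already converted to mdegC) -> value reported via sysfs */
	static long lm64_reg_to_user(long reg_mc)
	{
		return reg_mc + LM64_TEMP2_OFFSET;
	}

	/* value written via sysfs -> value programmed into the register */
	static long lm64_user_to_reg(long user_mc)
	{
		return user_mc - LM64_TEMP2_OFFSET;
	}

	int main(void)
	{
		printf("%ld\n", lm64_reg_to_user(44000));	/* 60000 */
		printf("%ld\n", lm64_user_to_reg(60000));	/* 44000 */
		return 0;
	}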
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index fd108cfc05c7..3b84fb503053 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -67,8 +69,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
*/
status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
if (status < 0) {
- printk(KERN_WARNING
- "spi_write_then_read failed with status %d\n", status);
+ pr_warn("spi_write_then_read failed with status %d\n", status);
goto out;
}
raw = (rxbuf[0] << 8) + rxbuf[1];
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 72ff2c4e757d..4cb24eafe318 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -19,6 +19,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -858,7 +860,7 @@ static int __init lm78_isa_found(unsigned short address)
* individually for the probing phase. */
for (port = address; port < address + LM78_EXTENT; port++) {
if (!request_region(port, 1, "lm78")) {
- pr_debug("lm78: Failed to request port 0x%x\n", port);
+ pr_debug("Failed to request port 0x%x\n", port);
goto release;
}
}
@@ -920,7 +922,7 @@ static int __init lm78_isa_found(unsigned short address)
found = 1;
if (found)
- pr_info("lm78: Found an %s chip at %#x\n",
+ pr_info("Found an %s chip at %#x\n",
val & 0x80 ? "LM79" : "LM78", (int)address);
release:
@@ -942,21 +944,19 @@ static int __init lm78_isa_device_add(unsigned short address)
pdev = platform_device_alloc("lm78", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "lm78: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR "lm78: Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "lm78: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e229847f37a..d2cc28660816 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102
+ emc6d100, emc6d102, emc6d103
};
/* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
#define LM85_VERSTEP_EMC6D100_A0 0x60
#define LM85_VERSTEP_EMC6D100_A1 0x61
#define LM85_VERSTEP_EMC6D102 0x65
+#define LM85_VERSTEP_EMC6D103_A0 0x68
+#define LM85_VERSTEP_EMC6D103_A1 0x69
+#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
#define LM85_REG_CONFIG 0x40
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
+ { "emc6d103", emc6d103 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D102:
type_name = "emc6d102";
break;
+ case LM85_VERSTEP_EMC6D103_A0:
+ case LM85_VERSTEP_EMC6D103_A1:
+ type_name = "emc6d103";
+ break;
+ /*
+ * Registers apparently missing in EMC6D103S/EMC6D103:A2
+ * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+ * (according to the data sheets), but used unconditionally
+ * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+ * So skip EMC6D103S for now.
+ case LM85_VERSTEP_EMC6D103S:
+ type_name = "emc6d103s";
+ break;
+ */
}
} else {
dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
case adt7468:
case emc6d100:
case emc6d102:
+ case emc6d103:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102) {
+ } else if (data->type == emc6d102 || data->type == emc6d103) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index c9ed14eba5a6..3b43df418613 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -135,6 +135,11 @@
#define LM93_MFR_ID 0x73
#define LM93_MFR_ID_PROTOTYPE 0x72
+/* LM94 REGISTER VALUES */
+#define LM94_MFR_ID_2 0x7a
+#define LM94_MFR_ID 0x79
+#define LM94_MFR_ID_PROTOTYPE 0x78
+
/* SMBus capabilities */
#define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA)
@@ -2504,6 +2509,7 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int mfr, ver;
+ const char *name;
if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN))
return -ENODEV;
@@ -2517,13 +2523,23 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
}
ver = lm93_read_byte(client, LM93_REG_VER);
- if (ver != LM93_MFR_ID && ver != LM93_MFR_ID_PROTOTYPE) {
+ switch (ver) {
+ case LM93_MFR_ID:
+ case LM93_MFR_ID_PROTOTYPE:
+ name = "lm93";
+ break;
+ case LM94_MFR_ID_2:
+ case LM94_MFR_ID:
+ case LM94_MFR_ID_PROTOTYPE:
+ name = "lm94";
+ break;
+ default:
dev_dbg(&adapter->dev,
"detect failed, bad version id 0x%02x!\n", ver);
return -ENODEV;
}
- strlcpy(info->type, "lm93", I2C_NAME_SIZE);
+ strlcpy(info->type, name, I2C_NAME_SIZE);
dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
client->name, i2c_adapter_id(client->adapter),
client->addr);
@@ -2602,6 +2618,7 @@ static int lm93_remove(struct i2c_client *client)
static const struct i2c_device_id lm93_id[] = {
{ "lm93", 0 },
+ { "lm94", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm93_id);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 4546d82f024a..1a6dfb6df1e7 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -1,13 +1,9 @@
/*
- * lm95241.c - Part of lm_sensors, Linux kernel modules for hardware
- * monitoring
- * Copyright (C) 2008 Davide Rizzo <elpa-rizzo@gmail.com>
+ * Copyright (C) 2008, 2010 Davide Rizzo <elpa.rizzo@gmail.com>
*
- * Based on the max1619 driver. The LM95241 is a sensor chip made by National
- * Semiconductors.
- * It reports up to three temperatures (its own plus up to
- * two external ones). Complete datasheet can be
- * obtained from National's website at:
+ * The LM95241 is a sensor chip made by National Semiconductors.
+ * It reports up to three temperatures (its own plus up to two external ones).
+ * Complete datasheet can be obtained from National's website at:
* http://www.national.com/ds.cgi/LM/LM95241.pdf
*
* This program is free software; you can redistribute it and/or modify
@@ -36,8 +32,10 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#define DEVNAME "lm95241"
+
static const unsigned short normal_i2c[] = {
- 0x19, 0x2a, 0x2b, I2C_CLIENT_END};
+ 0x19, 0x2a, 0x2b, I2C_CLIENT_END };
/* LM95241 registers */
#define LM95241_REG_R_MAN_ID 0xFE
@@ -46,7 +44,7 @@ static const unsigned short normal_i2c[] = {
#define LM95241_REG_RW_CONFIG 0x03
#define LM95241_REG_RW_REM_FILTER 0x06
#define LM95241_REG_RW_TRUTHERM 0x07
-#define LM95241_REG_W_ONE_SHOT 0x0F
+#define LM95241_REG_W_ONE_SHOT 0x0F
#define LM95241_REG_R_LOCAL_TEMPH 0x10
#define LM95241_REG_R_REMOTE1_TEMPH 0x11
#define LM95241_REG_R_REMOTE2_TEMPH 0x12
@@ -79,235 +77,246 @@ static const unsigned short normal_i2c[] = {
#define MANUFACTURER_ID 0x01
#define DEFAULT_REVISION 0xA4
-/* Conversions and various macros */
-#define TEMP_FROM_REG(val_h, val_l) (((val_h) & 0x80 ? (val_h) - 0x100 : \
- (val_h)) * 1000 + (val_l) * 1000 / 256)
-
-/* Functions declaration */
-static void lm95241_init_client(struct i2c_client *client);
-static struct lm95241_data *lm95241_update_device(struct device *dev);
+static const u8 lm95241_reg_address[] = {
+ LM95241_REG_R_LOCAL_TEMPH,
+ LM95241_REG_R_LOCAL_TEMPL,
+ LM95241_REG_R_REMOTE1_TEMPH,
+ LM95241_REG_R_REMOTE1_TEMPL,
+ LM95241_REG_R_REMOTE2_TEMPH,
+ LM95241_REG_R_REMOTE2_TEMPL
+};
/* Client data (each client gets its own) */
struct lm95241_data {
struct device *hwmon_dev;
struct mutex update_lock;
- unsigned long last_updated, interval; /* in jiffies */
- char valid; /* zero until following fields are valid */
+ unsigned long last_updated, interval; /* in jiffies */
+ char valid; /* zero until following fields are valid */
/* registers values */
- u8 local_h, local_l; /* local */
- u8 remote1_h, remote1_l; /* remote1 */
- u8 remote2_h, remote2_l; /* remote2 */
+ u8 temp[ARRAY_SIZE(lm95241_reg_address)];
u8 config, model, trutherm;
};
+/* Conversions */
+static int TempFromReg(u8 val_h, u8 val_l)
+{
+ if (val_h & 0x80)
+ return (val_h - 0x100) * 1000 + val_l * 1000 / 256;
+ return val_h * 1000 + val_l * 1000 / 256;
+}
+
+static struct lm95241_data *lm95241_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + data->interval) ||
+ !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Updating lm95241 data.\n");
+ for (i = 0; i < ARRAY_SIZE(lm95241_reg_address); i++)
+ data->temp[i]
+ = i2c_smbus_read_byte_data(client,
+ lm95241_reg_address[i]);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
/* Sysfs stuff */
-#define show_temp(value) \
-static ssize_t show_##value(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct lm95241_data *data = lm95241_update_device(dev); \
- snprintf(buf, PAGE_SIZE - 1, "%d\n", \
- TEMP_FROM_REG(data->value##_h, data->value##_l)); \
- return strlen(buf); \
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95241_data *data = lm95241_update_device(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
+ data->temp[to_sensor_dev_attr(attr)->index + 1]));
}
-show_temp(local);
-show_temp(remote1);
-show_temp(remote2);
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct lm95241_data *data = lm95241_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
- snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
- return strlen(buf);
+ return snprintf(buf, PAGE_SIZE - 1,
+ data->model & to_sensor_dev_attr(attr)->index ? "1\n" : "2\n");
}
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm95241_data *data = i2c_get_clientdata(client);
unsigned long val;
+ int shift;
+ u8 mask = to_sensor_dev_attr(attr)->index;
if (strict_strtoul(buf, 10, &val) < 0)
return -EINVAL;
+ if (val != 1 && val != 2)
+ return -EINVAL;
- data->interval = val * HZ / 1000;
+ shift = mask == R1MS_MASK ? TT1_SHIFT : TT2_SHIFT;
+
+ mutex_lock(&data->update_lock);
+
+ data->trutherm &= ~(TT_MASK << shift);
+ if (val == 1) {
+ data->model |= mask;
+ data->trutherm |= (TT_ON << shift);
+ } else {
+ data->model &= ~mask;
+ data->trutherm |= (TT_OFF << shift);
+ }
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
+ data->model);
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
+ data->trutherm);
+
+ mutex_unlock(&data->update_lock);
return count;
}
-#define show_type(flag) \
-static ssize_t show_type##flag(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- snprintf(buf, PAGE_SIZE - 1, \
- data->model & R##flag##MS_MASK ? "1\n" : "2\n"); \
- return strlen(buf); \
+static ssize_t show_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE - 1,
+ data->config & to_sensor_dev_attr(attr)->index ?
+ "-127000\n" : "0\n");
}
-show_type(1);
-show_type(2);
-
-#define show_min(flag) \
-static ssize_t show_min##flag(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- snprintf(buf, PAGE_SIZE - 1, \
- data->config & R##flag##DF_MASK ? \
- "-127000\n" : "0\n"); \
- return strlen(buf); \
+
+static ssize_t set_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val < -128000)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val < 0)
+ data->config |= to_sensor_dev_attr(attr)->index;
+ else
+ data->config &= ~to_sensor_dev_attr(attr)->index;
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
}
-show_min(1);
-show_min(2);
-
-#define show_max(flag) \
-static ssize_t show_max##flag(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- snprintf(buf, PAGE_SIZE - 1, \
- data->config & R##flag##DF_MASK ? \
- "127000\n" : "255000\n"); \
- return strlen(buf); \
+
+static ssize_t show_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE - 1,
+ data->config & to_sensor_dev_attr(attr)->index ?
+ "127000\n" : "255000\n");
}
-show_max(1);
-show_max(2);
-
-#define set_type(flag) \
-static ssize_t set_type##flag(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- long val; \
-\
- if (strict_strtol(buf, 10, &val) < 0) \
- return -EINVAL; \
-\
- if ((val == 1) || (val == 2)) { \
-\
- mutex_lock(&data->update_lock); \
-\
- data->trutherm &= ~(TT_MASK << TT##flag##_SHIFT); \
- if (val == 1) { \
- data->model |= R##flag##MS_MASK; \
- data->trutherm |= (TT_ON << TT##flag##_SHIFT); \
- } \
- else { \
- data->model &= ~R##flag##MS_MASK; \
- data->trutherm |= (TT_OFF << TT##flag##_SHIFT); \
- } \
-\
- data->valid = 0; \
-\
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL, \
- data->model); \
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM, \
- data->trutherm); \
-\
- mutex_unlock(&data->update_lock); \
-\
- } \
- return count; \
+
+static ssize_t set_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val >= 256000)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val <= 127000)
+ data->config |= to_sensor_dev_attr(attr)->index;
+ else
+ data->config &= ~to_sensor_dev_attr(attr)->index;
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
}
-set_type(1);
-set_type(2);
-
-#define set_min(flag) \
-static ssize_t set_min##flag(struct device *dev, \
- struct device_attribute *devattr, const char *buf, size_t count) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- long val; \
-\
- if (strict_strtol(buf, 10, &val) < 0) \
- return -EINVAL;\
-\
- mutex_lock(&data->update_lock); \
-\
- if (val < 0) \
- data->config |= R##flag##DF_MASK; \
- else \
- data->config &= ~R##flag##DF_MASK; \
-\
- data->valid = 0; \
-\
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, \
- data->config); \
-\
- mutex_unlock(&data->update_lock); \
-\
- return count; \
+
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95241_data *data = lm95241_update_device(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval
+ / HZ);
}
-set_min(1);
-set_min(2);
-
-#define set_max(flag) \
-static ssize_t set_max##flag(struct device *dev, \
- struct device_attribute *devattr, const char *buf, size_t count) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct lm95241_data *data = i2c_get_clientdata(client); \
-\
- long val; \
-\
- if (strict_strtol(buf, 10, &val) < 0) \
- return -EINVAL; \
-\
- mutex_lock(&data->update_lock); \
-\
- if (val <= 127000) \
- data->config |= R##flag##DF_MASK; \
- else \
- data->config &= ~R##flag##DF_MASK; \
-\
- data->valid = 0; \
-\
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, \
- data->config); \
-\
- mutex_unlock(&data->update_lock); \
-\
- return count; \
+
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95241_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ data->interval = val * HZ / 1000;
+
+ return count;
}
-set_max(1);
-set_max(2);
-
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_local, NULL);
-static DEVICE_ATTR(temp2_input, S_IRUGO, show_remote1, NULL);
-static DEVICE_ATTR(temp3_input, S_IRUGO, show_remote2, NULL);
-static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type1, set_type1);
-static DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type2, set_type2);
-static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
-static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
-static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
-static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type,
+ R1MS_MASK);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type, set_type,
+ R2MS_MASK);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min,
+ R1DF_MASK);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min,
+ R2DF_MASK);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max,
+ R1DF_MASK);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max,
+ R2DF_MASK);
static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
set_interval);
static struct attribute *lm95241_attributes[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp2_input.attr,
- &dev_attr_temp3_input.attr,
- &dev_attr_temp2_type.attr,
- &dev_attr_temp3_type.attr,
- &dev_attr_temp2_min.attr,
- &dev_attr_temp3_min.attr,
- &dev_attr_temp2_max.attr,
- &dev_attr_temp3_max.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_type.dev_attr.attr,
+ &sensor_dev_attr_temp3_type.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
&dev_attr_update_interval.attr,
NULL
};
@@ -329,9 +338,9 @@ static int lm95241_detect(struct i2c_client *new_client,
if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
== MANUFACTURER_ID)
- && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
- >= DEFAULT_REVISION)) {
- name = "lm95241";
+ && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
+ >= DEFAULT_REVISION)) {
+ name = DEVNAME;
} else {
dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
address);
@@ -343,6 +352,25 @@ static int lm95241_detect(struct i2c_client *new_client,
return 0;
}
+static void lm95241_init_client(struct i2c_client *client)
+{
+ struct lm95241_data *data = i2c_get_clientdata(client);
+
+ data->interval = HZ; /* 1 sec default */
+ data->valid = 0;
+ data->config = CFG_CR0076;
+ data->model = 0;
+ data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
+
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_REM_FILTER,
+ R1FE_MASK | R2FE_MASK);
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
+ data->trutherm);
+ i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
+ data->model);
+}
+
static int lm95241_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
@@ -382,26 +410,6 @@ exit:
return err;
}
-static void lm95241_init_client(struct i2c_client *client)
-{
- struct lm95241_data *data = i2c_get_clientdata(client);
-
- data->interval = HZ; /* 1 sec default */
- data->valid = 0;
- data->config = CFG_CR0076;
- data->model = 0;
- data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
-
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
- data->config);
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_REM_FILTER,
- R1FE_MASK | R2FE_MASK);
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
- data->trutherm);
- i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
- data->model);
-}
-
static int lm95241_remove(struct i2c_client *client)
{
struct lm95241_data *data = i2c_get_clientdata(client);
@@ -413,46 +421,9 @@ static int lm95241_remove(struct i2c_client *client)
return 0;
}
-static struct lm95241_data *lm95241_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct lm95241_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + data->interval) ||
- !data->valid) {
- dev_dbg(&client->dev, "Updating lm95241 data.\n");
- data->local_h =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_LOCAL_TEMPH);
- data->local_l =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_LOCAL_TEMPL);
- data->remote1_h =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_REMOTE1_TEMPH);
- data->remote1_l =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_REMOTE1_TEMPL);
- data->remote2_h =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_REMOTE2_TEMPH);
- data->remote2_l =
- i2c_smbus_read_byte_data(client,
- LM95241_REG_R_REMOTE2_TEMPL);
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { "lm95241", 0 },
+ { DEVNAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
@@ -460,7 +431,7 @@ MODULE_DEVICE_TABLE(i2c, lm95241_id);
static struct i2c_driver lm95241_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
- .name = "lm95241",
+ .name = DEVNAME,
},
.probe = lm95241_probe,
.remove = lm95241_remove,
@@ -479,7 +450,7 @@ static void __exit sensors_lm95241_exit(void)
i2c_del_driver(&lm95241_driver);
}
-MODULE_AUTHOR("Davide Rizzo <elpa-rizzo@gmail.com>");
+MODULE_AUTHOR("Davide Rizzo <elpa.rizzo@gmail.com>");
MODULE_DESCRIPTION("LM95241 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 68e69a49633c..3d99b8854d7c 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -33,6 +33,8 @@
* the standard Super-I/O addresses is used (0x2E/0x2F or 0x4E/0x4F).
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
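+/*
+ * Illustrative expansion (a sketch assuming the stock pr_err()/pr_info()
+ * definitions from <linux/printk.h>, which prepend pr_fmt(fmt)): with the
+ * line above, pr_err("Device allocation failed\n") becomes
+ * printk(KERN_ERR KBUILD_MODNAME ": " "Device allocation failed\n"),
+ * i.e. "pc87360: Device allocation failed", which is why the hand-written
+ * "pc87360: " prefixes dropped throughout this file are no longer needed.
+ */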
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1031,16 +1033,15 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses
val = superio_inb(sioaddr, ACT);
if (!(val & 0x01)) {
- printk(KERN_INFO "pc87360: Device 0x%02x not "
- "activated\n", logdev[i]);
+ pr_info("Device 0x%02x not activated\n", logdev[i]);
continue;
}
val = (superio_inb(sioaddr, BASE) << 8)
| superio_inb(sioaddr, BASE + 1);
if (!val) {
- printk(KERN_INFO "pc87360: Base address not set for "
- "device 0x%02x\n", logdev[i]);
+ pr_info("Base address not set for device 0x%02x\n",
+ logdev[i]);
continue;
}
@@ -1050,17 +1051,15 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses
confreg[0] = superio_inb(sioaddr, 0xF0);
confreg[1] = superio_inb(sioaddr, 0xF1);
-#ifdef DEBUG
- printk(KERN_DEBUG "pc87360: Fan 1: mon=%d "
- "ctrl=%d inv=%d\n", (confreg[0]>>2)&1,
- (confreg[0]>>3)&1, (confreg[0]>>4)&1);
- printk(KERN_DEBUG "pc87360: Fan 2: mon=%d "
- "ctrl=%d inv=%d\n", (confreg[0]>>5)&1,
- (confreg[0]>>6)&1, (confreg[0]>>7)&1);
- printk(KERN_DEBUG "pc87360: Fan 3: mon=%d "
- "ctrl=%d inv=%d\n", confreg[1]&1,
- (confreg[1]>>1)&1, (confreg[1]>>2)&1);
-#endif
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+ (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+ (confreg[0] >> 4) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+ (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+ (confreg[0] >> 7) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+ confreg[1] & 1, (confreg[1] >> 1) & 1,
+ (confreg[1] >> 2) & 1);
} else if (i==1) { /* Voltages */
/* Are we using thermistors? */
if (*devid == 0xE9) { /* PC87366 */
@@ -1071,14 +1070,12 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses
confreg[3] = superio_inb(sioaddr, 0x25);
if (confreg[2] & 0x40) {
- printk(KERN_INFO "pc87360: Using "
- "thermistors for temperature "
- "monitoring\n");
+ pr_info("Using thermistors for "
+ "temperature monitoring\n");
}
if (confreg[3] & 0xE0) {
- printk(KERN_INFO "pc87360: VID "
- "inputs routed (mode %u)\n",
- confreg[3] >> 5);
+ pr_info("VID inputs routed (mode %u)\n",
+ confreg[3] >> 5);
}
}
}
@@ -1616,7 +1613,7 @@ static int __init pc87360_device_add(unsigned short address)
pdev = platform_device_alloc("pc87360", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "pc87360: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
@@ -1639,15 +1636,13 @@ static int __init pc87360_device_add(unsigned short address)
err = platform_device_add_resources(pdev, res, res_count);
if (err) {
- printk(KERN_ERR "pc87360: Device resources addition failed "
- "(%d)\n", err);
+ pr_err("Device resources addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
@@ -1666,8 +1661,7 @@ static int __init pc87360_init(void)
if (pc87360_find(0x2e, &devid, extra_isa)
&& pc87360_find(0x4e, &devid, extra_isa)) {
- printk(KERN_WARNING "pc87360: PC8736x not detected, "
- "module not inserted.\n");
+ pr_warn("PC8736x not detected, module not inserted\n");
return -ENODEV;
}
@@ -1680,8 +1674,7 @@ static int __init pc87360_init(void)
}
if (address == 0x0000) {
- printk(KERN_WARNING "pc87360: No active logical device, "
- "module not inserted.\n");
+ pr_warn("No active logical device, module not inserted\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 9ec4daaf6ca6..8da2181630b1 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -22,6 +22,8 @@
* mode, and voltages aren't supported at all.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1077,7 +1079,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Out of memory\n");
+ pr_err("Out of memory\n");
goto exit;
}
@@ -1196,28 +1198,26 @@ static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data)
pdev = platform_device_alloc(DRVNAME, res[0].start);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, res, res_count);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add_data(pdev, sio_data,
sizeof(struct pc87427_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
@@ -1249,23 +1249,23 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
val = superio_inb(sioaddr, SIOREG_ACT);
if (!(val & 0x01)) {
- printk(KERN_INFO DRVNAME ": Logical device 0x%02x "
- "not activated\n", logdev[i]);
+ pr_info("Logical device 0x%02x not activated\n",
+ logdev[i]);
continue;
}
val = superio_inb(sioaddr, SIOREG_MAP);
if (val & 0x01) {
- printk(KERN_WARNING DRVNAME ": Logical device 0x%02x "
- "is memory-mapped, can't use\n", logdev[i]);
+ pr_warn("Logical device 0x%02x is memory-mapped, "
+ "can't use\n", logdev[i]);
continue;
}
val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8)
| superio_inb(sioaddr, SIOREG_IOBASE + 1);
if (!val) {
- printk(KERN_INFO DRVNAME ": I/O base address not set "
- "for logical device 0x%02x\n", logdev[i]);
+ pr_info("I/O base address not set for logical device "
+ "0x%02x\n", logdev[i]);
continue;
}
sio_data->address[i] = val;
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index dc7259d69812..731b09af76b9 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -290,8 +292,7 @@ static struct i2c_driver pcf8591_driver = {
static int __init pcf8591_init(void)
{
if (input_mode < 0 || input_mode > 3) {
- printk(KERN_WARNING "pcf8591: invalid input_mode (%d)\n",
- input_mode);
+ pr_warn("invalid input_mode (%d)\n", input_mode);
input_mode = 0;
}
return i2c_add_driver(&pcf8591_driver);
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 0798210590bc..21c817d98123 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -20,6 +20,8 @@
* 02110-1301 USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -303,7 +305,7 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
pdev = platform_device_alloc(DRVNAME, cpu);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
@@ -315,8 +317,7 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_free;
}
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
new file mode 100644
index 000000000000..1c8c9812f244
--- /dev/null
+++ b/drivers/hwmon/sht21.c
@@ -0,0 +1,307 @@
+/* Sensirion SHT21 humidity and temperature sensor driver
+ *
+ * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Data sheet available (5/2010) at
+ * http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+/* I2C command bytes */
+#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3
+#define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5
+
+/**
+ * struct sht21 - SHT21 device specific data
+ * @hwmon_dev: device registered with hwmon
+ * @lock: mutex to protect measurement values
+ * @valid: only 0 before first measurement is taken
+ * @last_update: time of last update (jiffies)
+ * @temperature: cached temperature measurement value
+ * @humidity: cached humidity measurement value
+ */
+struct sht21 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ char valid;
+ unsigned long last_update;
+ int temperature;
+ int humidity;
+};
+
+/**
+ * sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to
+ * milli celsius
+ * @ticks: temperature ticks value received from sensor
+ */
+static inline int sht21_temp_ticks_to_millicelsius(int ticks)
+{
+ ticks &= ~0x0003; /* clear status bits */
+ /*
+ * Formula T = -46.85 + 175.72 * ST / 2^16 from data sheet 6.2,
+ * optimized for integer fixed point (3 digits) arithmetic
+ */
+ return ((21965 * ticks) >> 13) - 46850;
+}
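+
+/*
+ * Worked example of the scaling above (an illustrative sketch, not used by
+ * the driver): 21965 / 2^13 equals 175720 / 2^16 exactly (21965 * 8 ==
+ * 175720), so ((21965 * ticks) >> 13) - 46850 reproduces the data sheet
+ * formula in millidegrees, T = -46850 + 175720 * ST / 65536, without a
+ * 64-bit division. The hypothetical helper below exists only to make that
+ * comparison explicit.
+ */
+static inline int sht21_temp_reference_millicelsius(int ticks)
+{
+ ticks &= ~0x0003; /* clear status bits, as above */
+ return (int)((175720LL * ticks) / 65536) - 46850;
+}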
+
+/**
+ * sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
+ * one-thousandths of a percent relative humidity
+ * @ticks: humidity ticks value received from sensor
+ */
+static inline int sht21_rh_ticks_to_per_cent_mille(int ticks)
+{
+ ticks &= ~0x0003; /* clear status bits */
+ /*
+ * Formula RH = -6 + 125 * SRH / 2^16 from data sheet 6.1,
+ * optimized for integer fixed point (3 digits) arithmetic
+ */
+ return ((15625 * ticks) >> 13) - 6000;
+}
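+
+/*
+ * The same exactness holds for the humidity scaling above (illustrative):
+ * 15625 / 2^13 equals 125000 / 2^16 (15625 * 8 == 125000), so
+ * ((15625 * ticks) >> 13) - 6000 matches the data sheet formula in
+ * milli-percent, RH = -6000 + 125000 * SRH / 65536.
+ */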
+
+/**
+ * sht21_read_word_data() - read word from register
+ * @client: I2C client device
+ * @reg: I2C command byte
+ *
+ * Returns value, negative errno on error.
+ */
+static inline int sht21_read_word_data(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * SMBus specifies low byte first, but the SHT21 returns MSB
+ * first, so we have to swab16 the values
+ */
+ return swab16(ret);
+}
+
+/**
+ * sht21_update_measurements() - get updated measurements from device
+ * @client: I2C client device
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int sht21_update_measurements(struct i2c_client *client)
+{
+ int ret = 0;
+ struct sht21 *sht21 = i2c_get_clientdata(client);
+
+ mutex_lock(&sht21->lock);
+ /*
+ * Data sheet 2.4:
+ * the SHT2x should not be active for more than 10% of the time, e.g.
+ * at most two measurements per second at 12 bit accuracy.
+ */
+ if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) {
+ ret = sht21_read_word_data(client, SHT21_TRIG_T_MEASUREMENT_HM);
+ if (ret < 0)
+ goto out;
+ sht21->temperature = sht21_temp_ticks_to_millicelsius(ret);
+ ret = sht21_read_word_data(client,
+ SHT21_TRIG_RH_MEASUREMENT_HM);
+ if (ret < 0)
+ goto out;
+ sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
+ sht21->last_update = jiffies;
+ sht21->valid = 1;
+ }
+out:
+ mutex_unlock(&sht21->lock);
+
+ return ret >= 0 ? 0 : ret;
+}
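+
+/*
+ * Rate-limit example (illustrative): with HZ == 100, "HZ / 2" above is
+ * 50 jiffies, i.e. 500 ms, so a new pair of temperature and humidity
+ * conversions is started at most once every half second no matter how
+ * often the sysfs files are read.
+ */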
+
+/**
+ * sht21_show_temperature() - show temperature measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to temp1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t sht21_show_temperature(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sht21 *sht21 = i2c_get_clientdata(client);
+ int ret = sht21_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", sht21->temperature);
+}
+
+/**
+ * sht21_show_humidity() - show humidity measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to humidity1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t sht21_show_humidity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sht21 *sht21 = i2c_get_clientdata(client);
+ int ret = sht21_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", sht21->humidity);
+}
+
+/* sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
+ NULL, 0);
+
+static struct attribute *sht21_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group sht21_attr_group = {
+ .attrs = sht21_attributes,
+};
+
+/**
+ * sht21_probe() - probe device
+ * @client: I2C client device
+ * @id: device ID
+ *
+ * Called by the I2C core when an entry in the ID table matches a
+ * device's name.
+ * Returns 0 on success.
+ */
+static int __devinit sht21_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sht21 *sht21;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev,
+ "adapter does not support SMBus word transactions\n");
+ return -ENODEV;
+ }
+
+ sht21 = kzalloc(sizeof(*sht21), GFP_KERNEL);
+ if (!sht21) {
+ dev_dbg(&client->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, sht21);
+
+ mutex_init(&sht21->lock);
+
+ err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group);
+ if (err) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ goto fail_free;
+ }
+ sht21->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(sht21->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ err = PTR_ERR(sht21->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ dev_info(&client->dev, "initialized\n");
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
+fail_free:
+ kfree(sht21);
+
+ return err;
+}
+
+/**
+ * sht21_remove() - remove device
+ * @client: I2C client device
+ */
+static int __devexit sht21_remove(struct i2c_client *client)
+{
+ struct sht21 *sht21 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(sht21->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
+ kfree(sht21);
+
+ return 0;
+}
+
+/* Device ID table */
+static const struct i2c_device_id sht21_id[] = {
+ { "sht21", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sht21_id);
+
+static struct i2c_driver sht21_driver = {
+ .driver.name = "sht21",
+ .probe = sht21_probe,
+ .remove = __devexit_p(sht21_remove),
+ .id_table = sht21_id,
+};
+
+/**
+ * sht21_init() - initialize driver
+ *
+ * Called when kernel is booted or module is inserted.
+ * Returns 0 on success.
+ */
+static int __init sht21_init(void)
+{
+ return i2c_add_driver(&sht21_driver);
+}
+module_init(sht21_init);
+
+/**
+ * sht21_exit() - clean up driver
+ *
+ * Called when module is removed.
+ */
+static void __exit sht21_exit(void)
+{
+ i2c_del_driver(&sht21_driver);
+}
+module_exit(sht21_exit);
+
+MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 79c2931e3008..47d7ce9af8fb 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -50,6 +50,8 @@
735 0008 0735
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
@@ -735,21 +737,19 @@ static int __devinit sis5595_device_add(unsigned short address)
pdev = platform_device_alloc("sis5595", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "sis5595: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR "sis5595: Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "sis5595: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index f46d936c12da..9fb7516e6f45 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -26,6 +26,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
@@ -311,21 +313,19 @@ static int __init smsc47b397_device_add(unsigned short address)
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
@@ -367,8 +367,7 @@ static int __init smsc47b397_find(unsigned short *addr)
*addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
| superio_inb(SUPERIO_REG_BASE_LSB);
- printk(KERN_INFO DRVNAME ": found SMSC %s "
- "(base address 0x%04x, revision %u)\n",
+ pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
name, *addr, rev);
superio_exit();
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8fa462f2b570..f44a89aac381 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -26,6 +26,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
@@ -435,30 +437,29 @@ static int __init smsc47m1_find(unsigned short *addr,
*/
switch (val) {
case 0x51:
- pr_info(DRVNAME ": Found SMSC LPC47B27x\n");
+ pr_info("Found SMSC LPC47B27x\n");
sio_data->type = smsc47m1;
break;
case 0x59:
- pr_info(DRVNAME ": Found SMSC LPC47M10x/LPC47M112/LPC47M13x\n");
+ pr_info("Found SMSC LPC47M10x/LPC47M112/LPC47M13x\n");
sio_data->type = smsc47m1;
break;
case 0x5F:
- pr_info(DRVNAME ": Found SMSC LPC47M14x\n");
+ pr_info("Found SMSC LPC47M14x\n");
sio_data->type = smsc47m1;
break;
case 0x60:
- pr_info(DRVNAME ": Found SMSC LPC47M15x/LPC47M192/LPC47M997\n");
+ pr_info("Found SMSC LPC47M15x/LPC47M192/LPC47M997\n");
sio_data->type = smsc47m1;
break;
case 0x6B:
if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
- pr_debug(DRVNAME ": "
- "Found SMSC LPC47M233, unsupported\n");
+ pr_debug("Found SMSC LPC47M233, unsupported\n");
superio_exit();
return -ENODEV;
}
- pr_info(DRVNAME ": Found SMSC LPC47M292\n");
+ pr_info("Found SMSC LPC47M292\n");
sio_data->type = smsc47m2;
break;
default:
@@ -470,7 +471,7 @@ static int __init smsc47m1_find(unsigned short *addr,
*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
| superio_inb(SUPERIO_REG_BASE + 1);
if (*addr == 0) {
- pr_info(DRVNAME ": Device address not set, will not use\n");
+ pr_info("Device address not set, will not use\n");
superio_exit();
return -ENODEV;
}
@@ -479,7 +480,7 @@ static int __init smsc47m1_find(unsigned short *addr,
* Compaq Presario S4000NX) */
sio_data->activate = superio_inb(SUPERIO_REG_ACT);
if ((sio_data->activate & 0x01) == 0) {
- pr_info(DRVNAME ": Enabling device\n");
+ pr_info("Enabling device\n");
superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
}
@@ -494,7 +495,7 @@ static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
superio_enter();
superio_select();
- pr_info(DRVNAME ": Disabling device\n");
+ pr_info("Disabling device\n");
superio_outb(SUPERIO_REG_ACT, sio_data->activate);
superio_exit();
@@ -823,28 +824,26 @@ static int __init smsc47m1_device_add(unsigned short address,
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add_data(pdev, sio_data,
sizeof(struct smsc47m1_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ec7fad747adc..0d18de424c66 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -21,6 +21,8 @@
* 02110-1301 USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -202,7 +204,7 @@ static int __cpuinit via_cputemp_device_add(unsigned int cpu)
pdev = platform_device_alloc(DRVNAME, cpu);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
@@ -214,8 +216,7 @@ static int __cpuinit via_cputemp_device_add(unsigned int cpu)
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_free;
}
@@ -237,13 +238,16 @@ exit:
static void __cpuinit via_cputemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
+
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
+ list_for_each_entry(p, &pdev_list, list) {
if (p->cpu == cpu) {
platform_device_unregister(p->pdev);
list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
kfree(p);
+ return;
}
}
mutex_unlock(&pdev_list_mutex);
@@ -273,7 +277,6 @@ static struct notifier_block via_cputemp_cpu_notifier __refdata = {
static int __init via_cputemp_init(void)
{
int i, err;
- struct pdev_entry *p, *n;
if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
@@ -295,33 +298,27 @@ static int __init via_cputemp_init(void)
continue;
if (c->x86_model > 0x0f) {
- printk(KERN_WARNING DRVNAME ": Unknown CPU "
- "model 0x%x\n", c->x86_model);
+ pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
continue;
}
- err = via_cputemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
+ via_cputemp_device_add(i);
}
+
+#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
err = -ENODEV;
goto exit_driver_unreg;
}
+#endif
register_hotcpu_notifier(&via_cputemp_cpu_notifier);
return 0;
-exit_devices_unreg:
- mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
- mutex_unlock(&pdev_list_mutex);
+#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
platform_driver_unregister(&via_cputemp_driver);
+#endif
exit:
return err;
}
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index f397ce7ad598..25e91665a0a2 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -30,6 +30,8 @@
Warning - only supports a single device.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
@@ -687,6 +689,13 @@ static int __devexit via686a_remove(struct platform_device *pdev)
return 0;
}
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+ int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = reg >> 6;
+}
+
static void __devinit via686a_init_device(struct via686a_data *data)
{
u8 reg;
@@ -700,6 +709,9 @@ static void __devinit via686a_init_device(struct via686a_data *data)
via686a_write_value(data, VIA686A_REG_TEMP_MODE,
(reg & ~VIA686A_TEMP_MODE_MASK)
| VIA686A_TEMP_MODE_CONTINUOUS);
+
+ /* Pre-read fan clock divisor values */
+ via686a_update_fan_div(data);
}
static struct via686a_data *via686a_update_device(struct device *dev)
@@ -751,9 +763,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
(via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
0xc0) >> 6;
- i = via686a_read_value(data, VIA686A_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
+ via686a_update_fan_div(data);
data->alarms =
via686a_read_value(data,
VIA686A_REG_ALARM1) |
@@ -791,21 +801,19 @@ static int __devinit via686a_device_add(unsigned short address)
pdev = platform_device_alloc("via686a", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "via686a: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR "via686a: Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "via686a: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index ae33bbb577c7..49163d48e966 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -21,6 +21,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1254,8 +1256,7 @@ static int __init vt1211_device_add(unsigned short address)
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed (%d)\n",
- err);
+ pr_err("Device allocation failed (%d)\n", err);
goto EXIT;
}
@@ -1266,15 +1267,13 @@ static int __init vt1211_device_add(unsigned short address)
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto EXIT_DEV_PUT;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto EXIT_DEV_PUT;
}
@@ -1301,23 +1300,20 @@ static int __init vt1211_find(int sio_cip, unsigned short *address)
superio_select(sio_cip, SIO_VT1211_LDN_HWMON);
if ((superio_inb(sio_cip, SIO_VT1211_ACTIVE) & 1) == 0) {
- printk(KERN_WARNING DRVNAME ": HW monitor is disabled, "
- "skipping\n");
+ pr_warn("HW monitor is disabled, skipping\n");
goto EXIT;
}
*address = ((superio_inb(sio_cip, SIO_VT1211_BADDR) << 8) |
(superio_inb(sio_cip, SIO_VT1211_BADDR + 1))) & 0xff00;
if (*address == 0) {
- printk(KERN_WARNING DRVNAME ": Base address is not set, "
- "skipping\n");
+ pr_warn("Base address is not set, skipping\n");
goto EXIT;
}
err = 0;
- printk(KERN_INFO DRVNAME ": Found VT1211 chip at 0x%04x, "
- "revision %u\n", *address,
- superio_inb(sio_cip, SIO_VT1211_DEVREV));
+ pr_info("Found VT1211 chip at 0x%04x, revision %u\n",
+ *address, superio_inb(sio_cip, SIO_VT1211_DEVREV));
EXIT:
superio_exit(sio_cip);
@@ -1336,15 +1332,15 @@ static int __init vt1211_init(void)
if ((uch_config < -1) || (uch_config > 31)) {
err = -EINVAL;
- printk(KERN_WARNING DRVNAME ": Invalid UCH configuration %d. "
- "Choose a value between 0 and 31.\n", uch_config);
+ pr_warn("Invalid UCH configuration %d. "
+ "Choose a value between 0 and 31.\n", uch_config);
goto EXIT;
}
if ((int_mode < -1) || (int_mode > 0)) {
err = -EINVAL;
- printk(KERN_WARNING DRVNAME ": Invalid interrupt mode %d. "
- "Only mode 0 is supported.\n", int_mode);
+ pr_warn("Invalid interrupt mode %d. "
+ "Only mode 0 is supported.\n", int_mode);
goto EXIT;
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index e6078c9f0e27..db3b2e8d2a67 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -24,6 +24,8 @@
/* Supports VIA VT8231 South Bridge embedded sensors
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -902,21 +904,19 @@ static int __devinit vt8231_device_add(unsigned short address)
pdev = platform_device_alloc("vt8231", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "vt8231: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR "vt8231: Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "vt8231: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 072c58008a63..073eabedc432 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -42,6 +42,8 @@
w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1668,8 +1670,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
break;
default:
if (val != 0xffff)
- pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n",
- val);
+ pr_debug("unsupported chip ID: 0x%04x\n", val);
superio_exit(sioaddr);
return -ENODEV;
}
@@ -1680,8 +1681,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
| superio_inb(sioaddr, SIO_REG_ADDR + 1);
*addr = val & IOREGION_ALIGNMENT;
if (*addr == 0) {
- printk(KERN_ERR DRVNAME ": Refusing to enable a Super-I/O "
- "device with a base I/O port 0.\n");
+ pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
superio_exit(sioaddr);
return -ENODEV;
}
@@ -1689,13 +1689,12 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
/* Activate logical device if needed */
val = superio_inb(sioaddr, SIO_REG_ENABLE);
if (!(val & 0x01)) {
- printk(KERN_WARNING DRVNAME ": Forcibly enabling Super-I/O. "
- "Sensor is probably unusable.\n");
+ pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
superio_exit(sioaddr);
- pr_info(DRVNAME ": Found %s chip at %#x\n", sio_name, *addr);
+ pr_info("Found %s chip at %#x\n", sio_name, *addr);
sio_data->sioreg = sioaddr;
return 0;
@@ -1729,14 +1728,14 @@ static int __init sensors_w83627ehf_init(void)
if (!(pdev = platform_device_alloc(DRVNAME, address))) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit_unregister;
}
err = platform_device_add_data(pdev, &sio_data,
sizeof(struct w83627ehf_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
@@ -1752,16 +1751,14 @@ static int __init sensors_w83627ehf_init(void)
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
/* platform_device_add calls probe() */
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 38e280523071..bde50e34d013 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -39,6 +39,8 @@
supported yet.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1166,14 +1168,13 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
superio_inb(sio_data, WINB_BASE_REG + 1);
*addr = val & WINB_ALIGNMENT;
if (*addr == 0) {
- printk(KERN_WARNING DRVNAME ": Base address not set, "
- "skipping\n");
+ pr_warn("Base address not set, skipping\n");
goto exit;
}
val = superio_inb(sio_data, WINB_ACT_REG);
if (!(val & 0x01)) {
- printk(KERN_WARNING DRVNAME ": Enabling HWM logical device\n");
+ pr_warn("Enabling HWM logical device\n");
superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
}
@@ -1789,28 +1790,26 @@ static int __init w83627hf_device_add(unsigned short address,
pdev = platform_device_alloc(DRVNAME, address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR DRVNAME ": Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add_data(pdev, sio_data,
sizeof(struct w83627hf_sio_data));
if (err) {
- printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index c84b9b4e6960..eed43a008be1 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -33,6 +33,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1798,8 +1800,7 @@ w83781d_isa_found(unsigned short address)
* individually for the probing phase. */
for (port = address; port < address + W83781D_EXTENT; port++) {
if (!request_region(port, 1, "w83781d")) {
- pr_debug("w83781d: Failed to request port 0x%x\n",
- port);
+ pr_debug("Failed to request port 0x%x\n", port);
goto release;
}
}
@@ -1811,7 +1812,7 @@ w83781d_isa_found(unsigned short address)
if (inb_p(address + 2) != val
|| inb_p(address + 3) != val
|| inb_p(address + 7) != val) {
- pr_debug("w83781d: Detection failed at step 1\n");
+ pr_debug("Detection failed at step %d\n", 1);
goto release;
}
#undef REALLY_SLOW_IO
@@ -1820,14 +1821,14 @@ w83781d_isa_found(unsigned short address)
MSB (busy flag) should be clear initially, set after the write. */
save = inb_p(address + W83781D_ADDR_REG_OFFSET);
if (save & 0x80) {
- pr_debug("w83781d: Detection failed at step 2\n");
+ pr_debug("Detection failed at step %d\n", 2);
goto release;
}
val = ~save & 0x7f;
outb_p(val, address + W83781D_ADDR_REG_OFFSET);
if (inb_p(address + W83781D_ADDR_REG_OFFSET) != (val | 0x80)) {
outb_p(save, address + W83781D_ADDR_REG_OFFSET);
- pr_debug("w83781d: Detection failed at step 3\n");
+ pr_debug("Detection failed at step %d\n", 3);
goto release;
}
@@ -1835,7 +1836,7 @@ w83781d_isa_found(unsigned short address)
outb_p(W83781D_REG_CONFIG, address + W83781D_ADDR_REG_OFFSET);
val = inb_p(address + W83781D_DATA_REG_OFFSET);
if (val & 0x80) {
- pr_debug("w83781d: Detection failed at step 4\n");
+ pr_debug("Detection failed at step %d\n", 4);
goto release;
}
outb_p(W83781D_REG_BANK, address + W83781D_ADDR_REG_OFFSET);
@@ -1844,19 +1845,19 @@ w83781d_isa_found(unsigned short address)
val = inb_p(address + W83781D_DATA_REG_OFFSET);
if ((!(save & 0x80) && (val != 0xa3))
|| ((save & 0x80) && (val != 0x5c))) {
- pr_debug("w83781d: Detection failed at step 5\n");
+ pr_debug("Detection failed at step %d\n", 5);
goto release;
}
outb_p(W83781D_REG_I2C_ADDR, address + W83781D_ADDR_REG_OFFSET);
val = inb_p(address + W83781D_DATA_REG_OFFSET);
if (val < 0x03 || val > 0x77) { /* Not a valid I2C address */
- pr_debug("w83781d: Detection failed at step 6\n");
+ pr_debug("Detection failed at step %d\n", 6);
goto release;
}
/* The busy flag should be clear again */
if (inb_p(address + W83781D_ADDR_REG_OFFSET) & 0x80) {
- pr_debug("w83781d: Detection failed at step 7\n");
+ pr_debug("Detection failed at step %d\n", 7);
goto release;
}
@@ -1871,7 +1872,7 @@ w83781d_isa_found(unsigned short address)
found = 1;
if (found)
- pr_info("w83781d: Found a %s chip at %#x\n",
+ pr_info("Found a %s chip at %#x\n",
val == 0x30 ? "W83782D" : "W83781D", (int)address);
release:
@@ -1894,21 +1895,19 @@ w83781d_isa_device_add(unsigned short address)
pdev = platform_device_alloc("w83781d", address);
if (!pdev) {
err = -ENOMEM;
- printk(KERN_ERR "w83781d: Device allocation failed\n");
+ pr_err("Device allocation failed\n");
goto exit;
}
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
- printk(KERN_ERR "w83781d: Device resource addition failed "
- "(%d)\n", err);
+ pr_err("Device resource addition failed (%d)\n", err);
goto exit_device_put;
}
err = platform_device_add(pdev);
if (err) {
- printk(KERN_ERR "w83781d: Device addition failed (%d)\n",
- err);
+ pr_err("Device addition failed (%d)\n", err);
goto exit_device_put;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 679718e6b017..63841f8cec07 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -691,7 +691,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-show_regs_chassis(struct device *dev, struct device_attribute *attr,
+show_chassis(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
@@ -699,6 +699,16 @@ show_regs_chassis(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+show_regs_chassis(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ dev_warn(dev,
+ "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+ "chassis");
+ return show_chassis(dev, attr, buf);
+}
+
+static ssize_t
show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83792d_data *data = w83792d_update_device(dev);
@@ -706,7 +716,7 @@ show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
+store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -714,6 +724,10 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr,
u32 val;
u8 temp1 = 0, temp2 = 0;
+ dev_warn(dev,
+ "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+ "chassis_clear");
+
val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->chassis_clear = SENSORS_LIMIT(val, 0 ,1);
@@ -726,6 +740,27 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr,
return count;
}
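+/* Write 0 to clear the chassis intrusion alarm (any other value is rejected) */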
+static ssize_t
+store_chassis_clear(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83792d_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ u8 reg;
+
+ if (strict_strtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
+ w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
+ data->valid = 0; /* Force cache refresh */
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* For Smart Fan I / Thermal Cruise */
static ssize_t
show_thermal_cruise(struct device *dev, struct device_attribute *attr,
@@ -1012,7 +1047,9 @@ static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
- show_chassis_clear, store_chassis_clear);
+ show_chassis_clear, store_chassis_clear_legacy);
+static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
+ show_chassis, store_chassis_clear);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
@@ -1214,6 +1251,7 @@ static struct attribute *w83792d_attributes[] = {
&dev_attr_alarms.attr,
&dev_attr_chassis.attr,
&dev_attr_chassis_clear.attr,
+ &dev_attr_intrusion0_alarm.attr,
&sensor_dev_attr_tolerance1.dev_attr.attr,
&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
&sensor_dev_attr_tolerance2.dev_attr.attr,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 8e540ada47d2..e3bdedfb5347 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -51,7 +51,6 @@
#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
/* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
@@ -421,14 +420,17 @@ store_beep_enable(struct device *dev, struct device_attribute *attr,
/* Write any value to clear chassis alarm */
static ssize_t
-store_chassis_clear(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+store_chassis_clear_legacy(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u8 val;
+ dev_warn(dev, "Attribute chassis is deprecated, "
+ "use intrusion0_alarm instead\n");
+
mutex_lock(&data->update_lock);
val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
val |= 0x80;
@@ -437,6 +439,28 @@ store_chassis_clear(struct device *dev,
return count;
}
+/* Write 0 to clear chassis alarm */
+static ssize_t
+store_chassis_clear(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct w83793_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ u8 reg;
+
+ if (strict_strtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+ w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
+ data->valid = 0; /* Force cache refresh */
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
#define FAN_INPUT 0
#define FAN_MIN 1
static ssize_t
@@ -1102,6 +1126,8 @@ static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
static struct sensor_device_attribute_2 sda_single_files[] = {
SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
+ store_chassis_clear_legacy, ALARM_STATUS, 30),
+ SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear, ALARM_STATUS, 30),
SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
store_beep_enable, NOT_USED, NOT_USED),
@@ -1323,7 +1349,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
static long watchdog_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- static struct watchdog_info ident = {
+ struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_CARDRESET,
@@ -1333,7 +1359,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
int val, ret = 0;
struct w83793_data *data = filp->private_data;
- mutex_lock(&watchdog_mutex);
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
@@ -1387,7 +1412,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
default:
ret = -ENOTTY;
}
- mutex_unlock(&watchdog_mutex);
return ret;
}
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index cdbc7448491e..845232d7f611 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -458,6 +458,7 @@ static void w83795_update_limits(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
int i, limit;
+ u8 lsb;
/* Read the voltage limits */
for (i = 0; i < ARRAY_SIZE(data->in); i++) {
@@ -479,9 +480,8 @@ static void w83795_update_limits(struct i2c_client *client)
}
/* Read the fan limits */
+ lsb = 0; /* Silence false gcc warning */
for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
- u8 lsb;
-
/* Each register contains LSB for 2 fans, but we want to
* read it only once to save time */
if ((i & 1) == 0 && (data->has_fan & (3 << i)))
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index a39e6cff86e7..38319a69bd0a 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -600,12 +600,14 @@ static const struct i2c_algorithm i2c_bit_algo = {
/*
* registering functions to load algorithms at runtime
*/
-static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
+static int __i2c_bit_add_bus(struct i2c_adapter *adap,
+ int (*add_adapter)(struct i2c_adapter *))
{
struct i2c_algo_bit_data *bit_adap = adap->algo_data;
+ int ret;
if (bit_test) {
- int ret = test_bus(bit_adap, adap->name);
+ ret = test_bus(bit_adap, adap->name);
if (ret < 0)
return -ENODEV;
}
@@ -614,30 +616,27 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
adap->algo = &i2c_bit_algo;
adap->retries = 3;
+ ret = add_adapter(adap);
+ if (ret < 0)
+ return ret;
+
+ /* Complain if SCL can't be read */
+ if (bit_adap->getscl == NULL) {
+ dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
+ dev_warn(&adap->dev, "Bus may be unreliable\n");
+ }
return 0;
}
int i2c_bit_add_bus(struct i2c_adapter *adap)
{
- int err;
-
- err = i2c_bit_prepare_bus(adap);
- if (err)
- return err;
-
- return i2c_add_adapter(adap);
+ return __i2c_bit_add_bus(adap, i2c_add_adapter);
}
EXPORT_SYMBOL(i2c_bit_add_bus);
int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
{
- int err;
-
- err = i2c_bit_prepare_bus(adap);
- if (err)
- return err;
-
- return i2c_add_numbered_adapter(adap);
+ return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter);
}
EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
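+
+/*
+ * Minimal usage sketch (hypothetical "foo" driver and line accessors are
+ * assumed; not taken from this file): a bit-banging bus driver fills in
+ * struct i2c_algo_bit_data with its SDA/SCL handlers and calls
+ * i2c_bit_add_bus(), which after this change also registers the adapter
+ * and warns when SCL cannot be read back.
+ */
+static void foo_setsda(void *data, int state) { /* drive or release SDA */ }
+static void foo_setscl(void *data, int state) { /* drive or release SCL */ }
+static int foo_getsda(void *data) { return 1; /* sample SDA level */ }
+static int foo_getscl(void *data) { return 1; /* sample SCL level */ }
+
+static struct i2c_algo_bit_data foo_bit_data = {
+ .setsda = foo_setsda,
+ .setscl = foo_setscl,
+ .getsda = foo_getsda,
+ .getscl = foo_getscl,
+ .udelay = 5, /* half clock period in us, ~100 kHz */
+ .timeout = HZ / 10, /* bus-busy timeout in jiffies */
+};
+
+static struct i2c_adapter foo_adapter = {
+ .owner = THIS_MODULE,
+ .name = "foo bit-bang adapter",
+ .algo_data = &foo_bit_data,
+};
+
+/* in the foo driver's probe(): return i2c_bit_add_bus(&foo_adapter); */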
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3a6321cb8030..113505a6434e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -638,6 +638,14 @@ config I2C_XILINX
This driver can also be built as a module. If so, the module
will be called xilinx_i2c.
+config I2C_EG20T
+ tristate "PCH I2C of Intel EG20T"
+ depends on PCI
+ help
+ This driver is for PCH(Platform controller Hub) I2C of EG20T which
+ is an IOH(Input/Output Hub) for x86 embedded processor.
+ This driver can access PCH I2C bus device.
+
comment "External I2C/SMBus adapter drivers"
config I2C_PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 84cb16ae6f9e..9d2d0ec7fb23 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
+obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fb26e5c67515..52b545a795f2 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/portmux.h>
@@ -159,6 +160,27 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
if (mast_stat & BUFWRERR)
dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
+ /* Faulty slave devices may drive SDA low after a transfer
+ * finishes. To release the bus, this code generates up to 9
+ * extra clocks (enough for a stuck slave to finish shifting out
+ * its byte plus the ACK bit) until SDA is released.
+ */
+
+ if (read_MASTER_STAT(iface) & SDASEN) {
+ int cnt = 9;
+ do {
+ write_MASTER_CTL(iface, SCLOVR);
+ udelay(6);
+ write_MASTER_CTL(iface, 0);
+ udelay(6);
+ } while ((read_MASTER_STAT(iface) & SDASEN) && cnt--);
+
+ write_MASTER_CTL(iface, SDAOVR | SCLOVR);
+ udelay(6);
+ write_MASTER_CTL(iface, SDAOVR);
+ udelay(6);
+ write_MASTER_CTL(iface, 0);
+ }
+
/* If it is a quick transfer, only address without data,
* not an err, return 1.
*/
@@ -760,7 +782,7 @@ static void __exit i2c_bfin_twi_exit(void)
platform_driver_unregister(&i2c_bfin_twi_driver);
}
-module_init(i2c_bfin_twi_init);
+subsys_initcall(i2c_bfin_twi_init);
module_exit(i2c_bfin_twi_exit);
MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
new file mode 100644
index 000000000000..2e067dd2ee51
--- /dev/null
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/ktime.h>
+
+#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */
+#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
+#define PCH_MAX_CLK 100000 /* Maximum Clock speed in MHz */
+#define PCH_BUFFER_MODE_ENABLE 0x0002 /* flag for Buffer mode enable */
+#define PCH_EEPROM_SW_RST_MODE_ENABLE 0x0008 /* EEPROM SW RST enable flag */
+
+#define PCH_I2CSADR 0x00 /* I2C slave address register */
+#define PCH_I2CCTL 0x04 /* I2C control register */
+#define PCH_I2CSR 0x08 /* I2C status register */
+#define PCH_I2CDR 0x0C /* I2C data register */
+#define PCH_I2CMON 0x10 /* I2C bus monitor register */
+#define PCH_I2CBC 0x14 /* I2C bus transfer rate setup counter */
+#define PCH_I2CMOD 0x18 /* I2C mode register */
+#define PCH_I2CBUFSLV 0x1C /* I2C buffer mode slave address register */
+#define PCH_I2CBUFSUB 0x20 /* I2C buffer mode subaddress register */
+#define PCH_I2CBUFFOR 0x24 /* I2C buffer mode format register */
+#define PCH_I2CBUFCTL 0x28 /* I2C buffer mode control register */
+#define PCH_I2CBUFMSK 0x2C /* I2C buffer mode interrupt mask register */
+#define PCH_I2CBUFSTA 0x30 /* I2C buffer mode status register */
+#define PCH_I2CBUFLEV 0x34 /* I2C buffer mode level register */
+#define PCH_I2CESRFOR 0x38 /* EEPROM software reset mode format register */
+#define PCH_I2CESRCTL 0x3C /* EEPROM software reset mode ctrl register */
+#define PCH_I2CESRMSK 0x40 /* EEPROM software reset mode */
+#define PCH_I2CESRSTA 0x44 /* EEPROM software reset mode status register */
+#define PCH_I2CTMR 0x48 /* I2C timer register */
+#define PCH_I2CSRST 0xFC /* I2C reset register */
+#define PCH_I2CNF 0xF8 /* I2C noise filter register */
+
+#define BUS_IDLE_TIMEOUT 20
+#define PCH_I2CCTL_I2CMEN 0x0080
+#define TEN_BIT_ADDR_DEFAULT 0xF000
+#define TEN_BIT_ADDR_MASK 0xF0
+#define PCH_START 0x0020
+#define PCH_ESR_START 0x0001
+#define PCH_BUFF_START 0x1
+#define PCH_REPSTART 0x0004
+#define PCH_ACK 0x0008
+#define PCH_GETACK 0x0001
+#define CLR_REG 0x0
+#define I2C_RD 0x1
+#define I2CMCF_BIT 0x0080
+#define I2CMIF_BIT 0x0002
+#define I2CMAL_BIT 0x0010
+#define I2CBMFI_BIT 0x0001
+#define I2CBMAL_BIT 0x0002
+#define I2CBMNA_BIT 0x0004
+#define I2CBMTO_BIT 0x0008
+#define I2CBMIS_BIT 0x0010
+#define I2CESRFI_BIT 0x0001
+#define I2CESRTO_BIT 0x0002
+#define I2CESRFIIE_BIT 0x1
+#define I2CESRTOIE_BIT 0x2
+#define I2CBMDZ_BIT 0x0040
+#define I2CBMAG_BIT 0x0020
+#define I2CMBB_BIT 0x0020
+#define BUFFER_MODE_MASK (I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \
+ I2CBMTO_BIT | I2CBMIS_BIT)
+#define I2C_ADDR_MSK 0xFF
+#define I2C_MSB_2B_MSK 0x300
+#define FAST_MODE_CLK 400
+#define FAST_MODE_EN 0x0001
+#define SUB_ADDR_LEN_MAX 4
+#define BUF_LEN_MAX 32
+#define PCH_BUFFER_MODE 0x1
+#define EEPROM_SW_RST_MODE 0x0002
+#define NORMAL_INTR_ENBL 0x0300
+#define EEPROM_RST_INTR_ENBL (I2CESRFIIE_BIT | I2CESRTOIE_BIT)
+#define EEPROM_RST_INTR_DISBL 0x0
+#define BUFFER_MODE_INTR_ENBL 0x001F
+#define BUFFER_MODE_INTR_DISBL 0x0
+#define NORMAL_MODE 0x0
+#define BUFFER_MODE 0x1
+#define EEPROM_SR_MODE 0x2
+#define I2C_TX_MODE 0x0010
+#define PCH_BUF_TX 0xFFF7
+#define PCH_BUF_RD 0x0008
+#define I2C_ERROR_MASK (I2CESRTO_EVENT | I2CBMIS_EVENT | I2CBMTO_EVENT | \
+ I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT)
+#define I2CMAL_EVENT 0x0001
+#define I2CMCF_EVENT 0x0002
+#define I2CBMFI_EVENT 0x0004
+#define I2CBMAL_EVENT 0x0008
+#define I2CBMNA_EVENT 0x0010
+#define I2CBMTO_EVENT 0x0020
+#define I2CBMIS_EVENT 0x0040
+#define I2CESRFI_EVENT 0x0080
+#define I2CESRTO_EVENT 0x0100
+#define PCI_DEVICE_ID_PCH_I2C 0x8817
+
+#define pch_dbg(adap, fmt, arg...) \
+ dev_dbg(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_err(adap, fmt, arg...) \
+ dev_err(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_err(pdev, fmt, arg...) \
+ dev_err(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_dbg(pdev, fmt, arg...) \
+ dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+/**
+ * struct i2c_algo_pch_data - for I2C driver functionalities
+ * @pch_adapter: stores the reference to i2c_adapter structure
+ * @p_adapter_info: stores the reference to adapter_info structure
+ * @pch_base_address: specifies the remapped base address
+ * @pch_buff_mode_en: specifies if buffer mode is enabled
+ * @pch_event_flag: specifies occurrence of interrupt events
+ * @pch_i2c_xfer_in_progress: specifies whether a transfer is currently in progress
+ */
+struct i2c_algo_pch_data {
+ struct i2c_adapter pch_adapter;
+ struct adapter_info *p_adapter_info;
+ void __iomem *pch_base_address;
+ int pch_buff_mode_en;
+ u32 pch_event_flag;
+ bool pch_i2c_xfer_in_progress;
+};
+
+/**
+ * struct adapter_info - holds the adapter information for the PCH I2C controller
+ * @pch_data: stores the i2c_algo_pch_data for the channel
+ * @pch_i2c_suspended: specifies whether the adapter is suspended
+ */
+struct adapter_info {
+ struct i2c_algo_pch_data pch_data;
+ bool pch_i2c_suspended;
+};
+
+
+static int pch_i2c_speed = 100; /* I2C bus speed in Kbps */
+static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
+static wait_queue_head_t pch_event;
+static DEFINE_MUTEX(pch_mutex);
+
+static struct pci_device_id __devinitdata pch_pcidev_id[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+ {0,}
+};
+
+static irqreturn_t pch_i2c_handler(int irq, void *pData);
+
+static inline void pch_setbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+ u32 val;
+ val = ioread32(addr + offset);
+ val |= bitmask;
+ iowrite32(val, addr + offset);
+}
+
+static inline void pch_clrbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+ u32 val;
+ val = ioread32(addr + offset);
+ val &= (~bitmask);
+ iowrite32(val, addr + offset);
+}
+
+/**
+ * pch_i2c_init() - hardware initialization of I2C module
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ u32 pch_i2cbc;
+ u32 pch_i2ctmr;
+ u32 reg_value;
+
+ /* reset I2C controller */
+ iowrite32(0x01, p + PCH_I2CSRST);
+ msleep(20);
+ iowrite32(0x0, p + PCH_I2CSRST);
+
+ /* Initialize I2C registers */
+ iowrite32(0x21, p + PCH_I2CNF);
+
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL,
+ PCH_I2CCTL_I2CMEN);
+
+ if (pch_i2c_speed != 400)
+ pch_i2c_speed = 100;
+
+ reg_value = PCH_I2CCTL_I2CMEN;
+ if (pch_i2c_speed == FAST_MODE_CLK) {
+ reg_value |= FAST_MODE_EN;
+ pch_dbg(adap, "Fast mode enabled\n");
+ }
+
+ if (pch_clk > PCH_MAX_CLK)
+ pch_clk = 62500;
+
+ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
+ /* Set transfer speed in I2CBC */
+ iowrite32(pch_i2cbc, p + PCH_I2CBC);
+
+ pch_i2ctmr = (pch_clk) / 8;
+ iowrite32(pch_i2ctmr, p + PCH_I2CTMR);
+
+ reg_value |= NORMAL_INTR_ENBL; /* Enable interrupts in normal mode */
+ iowrite32(reg_value, p + PCH_I2CCTL);
+
+ pch_dbg(adap,
+ "I2CCTL=%x pch_i2cbc=%x pch_i2ctmr=%x Enable interrupts\n",
+ ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr);
+
+ init_waitqueue_head(&pch_event);
+}
+
+static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
+{
+ return cmp1.tv64 < cmp2.tv64;
+}
+
+/**
+ * pch_i2c_wait_for_bus_idle() - wait for the bus to become idle
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ * @timeout: waiting time counter (ms).
+ */
+static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
+ s32 timeout)
+{
+ void __iomem *p = adap->pch_base_address;
+
+ /* Wait for at most 'timeout' ms (timeout * 1000 * 1000 ns) */
+ ktime_t ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
+ do {
+ if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
+ break;
+ msleep(20);
+ } while (ktime_lt(ktime_get(), ns_val));
+
+ pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+ if (ioread32(p + PCH_I2CSR) & I2CMBB_BIT) {
+ pch_err(adap, "%s: Timeout Error. return %d\n", __func__, -ETIME);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+/**
+ * pch_i2c_start() - Generate I2C start condition in normal mode.
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ *
+ * Generate I2C start condition in normal mode by setting I2CCTL.I2CMSTA to 1.
+ */
+static void pch_i2c_start(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
+{
+ s32 ret;
+ ret = wait_event_timeout(pch_event,
+ (adap->pch_event_flag != 0), msecs_to_jiffies(50));
+ if (ret < 0) {
+ pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+ return ret;
+ }
+
+ if (ret == 0) {
+ pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+ return -ETIMEDOUT;
+ }
+
+ if (adap->pch_event_flag & I2C_ERROR_MASK) {
+ pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
+ return -EIO;
+ }
+
+ adap->pch_event_flag = 0;
+
+ return 0;
+}
+
+/**
+ * pch_i2c_getack() - to confirm ACK/NACK
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap)
+{
+ u32 reg_val;
+ void __iomem *p = adap->pch_base_address;
+ reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK;
+
+ if (reg_val != 0) {
+ pch_err(adap, "return%d\n", -EPROTO);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * pch_i2c_stop() - generate stop condition in normal mode.
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ /* clear the start bit */
+ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_repstart() - generate repeated start condition in normal mode
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_REPSTART);
+}
+
+/**
+ * pch_i2c_writebytes() - write data to I2C bus in normal mode
+ * @i2c_adap: Pointer to the struct i2c_adapter.
+ * @msgs: Pointer to the i2c_msg structure to be written.
+ * @last: specifies whether last message or not.
+ * In the case of compound mode it will be 1 for last message,
+ * otherwise 0.
+ * @first: specifies whether first message or not.
+ * 1 for first message otherwise 0.
+ */
+static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, u32 last, u32 first)
+{
+ struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+ u8 *buf;
+ u32 length;
+ u32 addr;
+ u32 addr_2_msb;
+ u32 addr_8_lsb;
+ s32 wrcount;
+ void __iomem *p = adap->pch_base_address;
+
+ length = msgs->len;
+ buf = msgs->buf;
+ addr = msgs->addr;
+
+ /* enable master tx */
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+ pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL),
+ length);
+
+ if (first) {
+ if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+ return -ETIME;
+ }
+
+ if (msgs->flags & I2C_M_TEN) {
+ addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+ if (first)
+ pch_i2c_start(adap);
+ if (pch_i2c_wait_for_xfer_complete(adap) == 0 &&
+ pch_i2c_getack(adap) == 0) {
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
+ } else {
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
+ } else {
+ /* set 7 bit slave address and R/W bit as 0 */
+ iowrite32(addr << 1, p + PCH_I2CDR);
+ if (first)
+ pch_i2c_start(adap);
+ }
+
+ if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+ (pch_i2c_getack(adap) == 0)) {
+ for (wrcount = 0; wrcount < length; ++wrcount) {
+ /* write buffer value to I2C data register */
+ iowrite32(buf[wrcount], p + PCH_I2CDR);
+ pch_dbg(adap, "writing %x to Data register\n",
+ buf[wrcount]);
+
+ if (pch_i2c_wait_for_xfer_complete(adap) != 0)
+ return -ETIME;
+
+ if (pch_i2c_getack(adap))
+ return -EIO;
+ }
+
+ /* check if this is the last message */
+ if (last)
+ pch_i2c_stop(adap);
+ else
+ pch_i2c_repstart(adap);
+ } else {
+ pch_i2c_stop(adap);
+ return -EIO;
+ }
+
+ pch_dbg(adap, "return=%d\n", wrcount);
+
+ return wrcount;
+}
+
+/**
+ * pch_i2c_sendack() - send ACK
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendack(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_sendnack() - send NACK
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+ pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+ pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_readbytes() - read data from I2C bus in normal mode.
+ * @i2c_adap: Pointer to the struct i2c_adapter.
+ * @msgs: Pointer to i2c_msg structure.
+ * @last: specifies whether last message or not.
+ * @first: specifies whether first message or not.
+ */
+static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+ u32 last, u32 first)
+{
+ struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+ u8 *buf;
+ u32 count;
+ u32 length;
+ u32 addr;
+ u32 addr_2_msb;
+ void __iomem *p = adap->pch_base_address;
+
+ length = msgs->len;
+ buf = msgs->buf;
+ addr = msgs->addr;
+
+ /* enable master reception */
+ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+ if (first) {
+ if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+ return -ETIME;
+ }
+
+ if (msgs->flags & I2C_M_TEN) {
+ addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD));
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+
+ } else {
+ /* 7 address bits + R/W bit */
+ addr = (((addr) << 1) | (I2C_RD));
+ iowrite32(addr, p + PCH_I2CDR);
+ }
+
+ /* check if it is the first message */
+ if (first)
+ pch_i2c_start(adap);
+
+ if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+ (pch_i2c_getack(adap) == 0)) {
+ pch_dbg(adap, "return %d\n", 0);
+
+ if (length == 0) {
+ pch_i2c_stop(adap);
+ ioread32(p + PCH_I2CDR); /* dummy read needed */
+
+ count = length;
+ } else {
+ int read_index;
+ int loop;
+ pch_i2c_sendack(adap);
+
+ /* the first read is a dummy; its value is overwritten below */
+ for (loop = 1, read_index = 0; loop < length; loop++) {
+ buf[read_index] = ioread32(p + PCH_I2CDR);
+
+ if (loop != 1)
+ read_index++;
+
+ if (pch_i2c_wait_for_xfer_complete(adap) != 0) {
+ pch_i2c_stop(adap);
+ return -ETIME;
+ }
+ } /* end for */
+
+ pch_i2c_sendnack(adap);
+
+ buf[read_index] = ioread32(p + PCH_I2CDR);
+
+ if (length != 1)
+ read_index++;
+
+ if (pch_i2c_wait_for_xfer_complete(adap) == 0) {
+ if (last)
+ pch_i2c_stop(adap);
+ else
+ pch_i2c_repstart(adap);
+
+ buf[read_index++] = ioread32(p + PCH_I2CDR);
+ count = read_index;
+ } else {
+ count = -ETIME;
+ }
+
+ }
+ } else {
+ count = -ETIME;
+ pch_i2c_stop(adap);
+ }
+
+ return count;
+}
+
+/**
+ * pch_i2c_cb_ch0() - Interrupt handler Call back function
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+{
+ u32 sts;
+ void __iomem *p = adap->pch_base_address;
+
+ sts = ioread32(p + PCH_I2CSR);
+ sts &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT);
+ if (sts & I2CMAL_BIT)
+ adap->pch_event_flag |= I2CMAL_EVENT;
+
+ if (sts & I2CMCF_BIT)
+ adap->pch_event_flag |= I2CMCF_EVENT;
+
+ /* clear the applicable bits */
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, sts);
+
+ pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+ wake_up(&pch_event);
+}
+
+/**
+ * pch_i2c_handler() - interrupt handler for the PCH I2C controller
+ * @irq: irq number.
+ * @pData: cookie passed back to the handler function.
+ */
+static irqreturn_t pch_i2c_handler(int irq, void *pData)
+{
+ s32 reg_val;
+
+ struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
+ void __iomem *p = adap_data->pch_base_address;
+ u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
+
+ if (mode != NORMAL_MODE) {
+ pch_err(adap_data, "I2C mode is not supported\n");
+ return IRQ_NONE;
+ }
+
+ reg_val = ioread32(p + PCH_I2CSR);
+ if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
+ pch_i2c_cb_ch0(adap_data);
+ else
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pch_i2c_xfer() - Read and write data through the I2C bus
+ * @i2c_adap: Pointer to the struct i2c_adapter.
+ * @msgs: Pointer to i2c_msg structure.
+ * @num: number of messages.
+ */
+static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, s32 num)
+{
+ struct i2c_msg *pmsg;
+ u32 i = 0;
+ u32 status;
+ u32 msglen;
+ u32 subaddrlen;
+ s32 ret;
+
+ struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+ ret = mutex_lock_interruptible(&pch_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (adap->p_adapter_info->pch_i2c_suspended) {
+ mutex_unlock(&pch_mutex);
+ return -EBUSY;
+ }
+
+ pch_dbg(adap, "adap->p_adapter_info->pch_i2c_suspended is %d\n",
+ adap->p_adapter_info->pch_i2c_suspended);
+ /* transfer not completed */
+ adap->pch_i2c_xfer_in_progress = true;
+
+ pmsg = &msgs[0];
+ pmsg->flags |= adap->pch_buff_mode_en;
+ status = pmsg->flags;
+ pch_dbg(adap,
+ "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
+ /* calculate sub address length and message length */
+ /* these are applicable only for buffer mode */
+ subaddrlen = pmsg->buf[0];
+ /* calculate actual message length excluding
+ * the sub address fields */
+ msglen = (pmsg->len) - (subaddrlen + 1);
+ if (status & (I2C_M_RD)) {
+ pch_dbg(adap, "invoking pch_i2c_readbytes\n");
+ ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ } else {
+ pch_dbg(adap, "invoking pch_i2c_writebytes\n");
+ ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ }
+
+ adap->pch_i2c_xfer_in_progress = false; /* transfer completed */
+
+ mutex_unlock(&pch_mutex);
+
+ return ret;
+}
+
+/**
+ * pch_i2c_func() - return the functionality of the I2C driver
+ * @adap: Pointer to struct i2c_adapter.
+ */
+static u32 pch_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+}
+
+static struct i2c_algorithm pch_algorithm = {
+ .master_xfer = pch_i2c_xfer,
+ .functionality = pch_i2c_func
+};
+
+/**
+ * pch_i2c_disbl_int() - Disable PCH I2C interrupts
+ * @adap: Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
+{
+ void __iomem *p = adap->pch_base_address;
+
+ pch_clrbit(adap->pch_base_address, PCH_I2CCTL, NORMAL_INTR_ENBL);
+
+ iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK);
+
+ iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
+}
+
+static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem *base_addr;
+ s32 ret;
+ struct adapter_info *adap_info;
+
+ pch_pci_dbg(pdev, "Entered.\n");
+
+ adap_info = kzalloc((sizeof(struct adapter_info)), GFP_KERNEL);
+ if (adap_info == NULL) {
+ pch_pci_err(pdev, "Memory allocation FAILED\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pch_pci_err(pdev, "pci_enable_device FAILED\n");
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ pch_pci_err(pdev, "pci_request_regions FAILED\n");
+ goto err_pci_req;
+ }
+
+ base_addr = pci_iomap(pdev, 1, 0);
+
+ if (base_addr == NULL) {
+ pch_pci_err(pdev, "pci_iomap FAILED\n");
+ ret = -ENOMEM;
+ goto err_pci_iomap;
+ }
+
+ adap_info->pch_i2c_suspended = false;
+
+ adap_info->pch_data.p_adapter_info = adap_info;
+
+ adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
+ adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
+ strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
+ adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
+ adap_info->pch_data.pch_adapter.algo_data =
+ &adap_info->pch_data;
+
+ /* (i * 0x80) + base_addr; */
+ adap_info->pch_data.pch_base_address = base_addr;
+
+ adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+
+ ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+
+ if (ret) {
+ pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
+ goto err_i2c_add_adapter;
+ }
+
+ pch_i2c_init(&adap_info->pch_data);
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, &adap_info->pch_data);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
+ pci_set_drvdata(pdev, adap_info);
+ pch_pci_dbg(pdev, "returns %d.\n", ret);
+ return 0;
+
+err_request_irq:
+ i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+err_i2c_add_adapter:
+ pci_iounmap(pdev, base_addr);
+err_pci_iomap:
+ pci_release_regions(pdev);
+err_pci_req:
+ pci_disable_device(pdev);
+err_pci_enable:
+ kfree(adap_info);
+ return ret;
+}
+
+static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+{
+ struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+ pch_i2c_disbl_int(&adap_info->pch_data);
+ free_irq(pdev->irq, &adap_info->pch_data);
+ i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+
+ if (adap_info->pch_data.pch_base_address) {
+ pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
+ adap_info->pch_data.pch_base_address = NULL;
+ }
+
+ pci_set_drvdata(pdev, NULL);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ kfree(adap_info);
+}
+
+#ifdef CONFIG_PM
+static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int ret;
+ struct adapter_info *adap_info = pci_get_drvdata(pdev);
+ void __iomem *p = adap_info->pch_data.pch_base_address;
+
+ adap_info->pch_i2c_suspended = true;
+
+ while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
+ /* Wait until all channel transfers are completed */
+ msleep(20);
+ }
+ /* Disable the i2c interrupts */
+ pch_i2c_disbl_int(&adap_info->pch_data);
+
+ pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
+ "invoked function pch_i2c_disbl_int successfully\n",
+ ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
+ ioread32(p + PCH_I2CESRSTA));
+
+ ret = pci_save_state(pdev);
+
+ if (ret) {
+ pch_pci_err(pdev, "pci_save_state\n");
+ return ret;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int pch_i2c_resume(struct pci_dev *pdev)
+{
+ struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (pci_enable_device(pdev) < 0) {
+ pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n");
+ return -EIO;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ pch_i2c_init(&adap_info->pch_data);
+
+ adap_info->pch_i2c_suspended = false;
+
+ return 0;
+}
+#else
+#define pch_i2c_suspend NULL
+#define pch_i2c_resume NULL
+#endif
+
+static struct pci_driver pch_pcidriver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_pcidev_id,
+ .probe = pch_i2c_probe,
+ .remove = __devexit_p(pch_i2c_remove),
+ .suspend = pch_i2c_suspend,
+ .resume = pch_i2c_resume
+};
+
+static int __init pch_pci_init(void)
+{
+ return pci_register_driver(&pch_pcidriver);
+}
+module_init(pch_pci_init);
+
+static void __exit pch_pci_exit(void)
+{
+ pci_unregister_driver(&pch_pcidriver);
+}
+module_exit(pch_pci_exit);
+
+MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
+module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
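For reference, the I2CBC/I2CTMR setup in pch_i2c_init() above reduces to simple integer arithmetic. The sketch below is a hypothetical, stand-alone helper (not part of the driver) that only mirrors that calculation for the default module parameters; the printed values are what the driver would program.

#include <stdio.h>

/* Hypothetical helper mirroring pch_i2c_init(): derives the transfer-rate
 * counter (I2CBC) and timer (I2CTMR) values from the peripheral clock in
 * kHz and the requested bus speed in kbps.
 */
static void pch_show_divisors(unsigned int clk_khz, unsigned int speed_kbps)
{
	/* adding half of (speed * 8) before dividing rounds to nearest */
	unsigned int i2cbc = (clk_khz + (speed_kbps * 4)) / (speed_kbps * 8);
	unsigned int i2ctmr = clk_khz / 8;

	printf("I2CBC=%u I2CTMR=%u\n", i2cbc, i2ctmr);
}

int main(void)
{
	pch_show_divisors(50000, 100);	/* defaults: I2CBC=63, I2CTMR=6250 */
	pch_show_divisors(62500, 400);	/* fast mode on a 62.5 MHz clock */
	return 0;
}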
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 02835ce7ff4b..7979aef7ee7b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/dmi.h>
+#include <linux/slab.h>
/* I801 SMBus address offsets */
#define SMBHSTSTS(p) (0 + (p)->smba)
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 112c61f7b8cd..f09c9319a2ba 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -409,7 +409,7 @@ iop3xx_i2c_remove(struct platform_device *pdev)
IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE);
__raw_writel(cr, adapter_data->ioaddr + CR_OFFSET);
- iounmap((void __iomem*)adapter_data->ioaddr);
+ iounmap(adapter_data->ioaddr);
release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
kfree(adapter_data);
kfree(padapter);
@@ -453,7 +453,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
/* set the adapter enumeration # */
adapter_data->id = i2c_id++;
- adapter_data->ioaddr = (u32)ioremap(res->start, IOP3XX_I2C_IO_SIZE);
+ adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE);
if (!adapter_data->ioaddr) {
ret = -ENOMEM;
goto release_region;
@@ -498,7 +498,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
return 0;
unmap:
- iounmap((void __iomem*)adapter_data->ioaddr);
+ iounmap(adapter_data->ioaddr);
release_region:
release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h
index 8485861f6a36..097e270955d0 100644
--- a/drivers/i2c/busses/i2c-iop3xx.h
+++ b/drivers/i2c/busses/i2c-iop3xx.h
@@ -97,7 +97,7 @@
#define IOP3XX_I2C_IO_SIZE 0x18
struct i2c_algo_iop3xx_data {
- u32 ioaddr;
+ void __iomem *ioaddr;
wait_queue_head_t waitq;
spinlock_t lock;
u32 SR_enabled, SR_received;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 16242063144f..a9941c65f226 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -59,6 +59,7 @@ enum {
MV64XXX_I2C_STATE_INVALID,
MV64XXX_I2C_STATE_IDLE,
MV64XXX_I2C_STATE_WAITING_FOR_START_COND,
+ MV64XXX_I2C_STATE_WAITING_FOR_RESTART,
MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK,
MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK,
MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK,
@@ -70,6 +71,7 @@ enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
MV64XXX_I2C_ACTION_SEND_START,
+ MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_SEND_ADDR_1,
MV64XXX_I2C_ACTION_SEND_ADDR_2,
MV64XXX_I2C_ACTION_SEND_DATA,
@@ -91,6 +93,7 @@ struct mv64xxx_i2c_data {
u32 addr2;
u32 bytes_left;
u32 byte_posn;
+ u32 send_stop;
u32 block;
int rc;
u32 freq_m;
@@ -159,8 +162,15 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
if ((drv_data->bytes_left == 0)
|| (drv_data->aborting
&& (drv_data->byte_posn != 0))) {
- drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
- drv_data->state = MV64XXX_I2C_STATE_IDLE;
+ if (drv_data->send_stop) {
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
+ drv_data->state = MV64XXX_I2C_STATE_IDLE;
+ } else {
+ drv_data->action =
+ MV64XXX_I2C_ACTION_SEND_RESTART;
+ drv_data->state =
+ MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
+ }
} else {
drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA;
drv_data->state =
@@ -228,6 +238,15 @@ static void
mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
{
switch(drv_data->action) {
+ case MV64XXX_I2C_ACTION_SEND_RESTART:
+ drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+ drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
+ writel(drv_data->cntl_bits,
+ drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+ drv_data->block = 0;
+ wake_up_interruptible(&drv_data->waitq);
+ break;
+
case MV64XXX_I2C_ACTION_CONTINUE:
writel(drv_data->cntl_bits,
drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
@@ -386,7 +405,8 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
}
static int
-mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg)
+mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
+ int is_first, int is_last)
{
unsigned long flags;
@@ -406,10 +426,18 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg)
drv_data->bytes_left--;
}
} else {
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+ if (is_first) {
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state =
+ MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+ } else {
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1;
+ drv_data->state =
+ MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK;
+ }
}
+ drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -437,9 +465,12 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
int i, rc;
- for (i=0; i<num; i++)
- if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) < 0)
+ for (i = 0; i < num; i++) {
+ rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i],
+ i == 0, i + 1 == num);
+ if (rc < 0)
return rc;
+ }
return num;
}
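The repeated-start support added above is exercised by any transaction with more than one message. A minimal client-side sketch of a register read follows; the slave address (0x50) and one-byte register layout are invented for illustration, and only i2c_transfer() and struct i2c_msg are taken as given.

#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical helper: write the register number, then read one byte back.
 * With the change above, mv64xxx keeps the bus between the two messages and
 * issues a repeated start instead of a stop followed by a new start.
 */
static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	int ret = i2c_transfer(adap, msgs, 2);

	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}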
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index a605a5029cfe..ff1e127dfea8 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -432,7 +432,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
static void __devexit nforce2_remove(struct pci_dev *dev)
{
- struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev);
+ struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
nforce2_set_reference(NULL);
if (smbuses[0].base) {
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index c9fffd0389fe..594ed5059c4a 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -434,7 +434,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
}
if (timeout == 0) {
- /* controler has timedout, re-init the h/w */
+ /* controller has timedout, re-init the h/w */
dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
(void) init_hw(dev);
status = -ETIMEDOUT;
@@ -498,7 +498,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
}
if (timeout == 0) {
- /* controler has timedout, re-init the h/w */
+ /* controller has timedout, re-init the h/w */
dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
(void) init_hw(dev);
status = -ETIMEDOUT;
@@ -872,6 +872,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &nmk_i2c_algo;
+ snprintf(adap->name, sizeof(adap->name),
+ "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
/* fetch the controller id */
adap->nr = pdev->id;
@@ -891,8 +893,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
goto err_init_hw;
}
- dev_dbg(&pdev->dev, "initialize I2C%d bus on virtual "
- "base %p\n", pdev->id, dev->virtbase);
+ dev_info(&pdev->dev, "initialize %s on virtual "
+ "base %p\n", adap->name, dev->virtbase);
ret = i2c_add_numbered_adapter(adap);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 0070371b29f3..ef3bcb1ce864 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -9,6 +9,41 @@
* kind, whether express or implied.
*/
+/*
+ * Device tree configuration:
+ *
+ * Required properties:
+ * - compatible : "opencores,i2c-ocores"
+ * - reg : bus address start and address range size of device
+ * - interrupts : interrupt number
+ * - regstep : size of device registers in bytes
+ * - clock-frequency : frequency of bus clock in Hz
+ *
+ * Example:
+ *
+ * i2c0: ocores@a0000000 {
+ * compatible = "opencores,i2c-ocores";
+ * reg = <0xa0000000 0x8>;
+ * interrupts = <10>;
+ *
+ * regstep = <1>;
+ * clock-frequency = <20000000>;
+ *
+ * -- Devices connected on this I2C bus get
+ * -- defined here; address- and size-cells
+ * -- apply to these child devices
+ *
+ * #address-cells = <1>;
+ * #size-cells = <0>;
+ *
+ * dummy@60 {
+ * compatible = "dummy";
+ * reg = <60>;
+ * };
+ * };
+ *
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -210,6 +245,32 @@ static struct i2c_adapter ocores_adapter = {
.algo = &ocores_algorithm,
};
+#ifdef CONFIG_OF
+static int ocores_i2c_of_probe(struct platform_device *pdev,
+ struct ocores_i2c *i2c)
+{
+ __be32 *val;
+
+ val = of_get_property(pdev->dev.of_node, "regstep", NULL);
+ if (!val) {
+ dev_err(&pdev->dev, "Missing required parameter 'regstep'");
+ return -ENODEV;
+ }
+ i2c->regstep = be32_to_cpup(val);
+
+ val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL);
+ if (!val) {
+ dev_err(&pdev->dev,
+ "Missing required parameter 'clock-frequency'");
+ return -ENODEV;
+ }
+ i2c->clock_khz = be32_to_cpup(val) / 1000;
+
+ return 0;
+}
+#else
+#define ocores_i2c_of_probe(pdev,i2c) -ENODEV
+#endif
static int __devinit ocores_i2c_probe(struct platform_device *pdev)
{
@@ -227,37 +288,41 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (!res2)
return -ENODEV;
- pdata = (struct ocores_i2c_platform_data*) pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- if (!request_mem_region(res->start, resource_size(res),
- pdev->name)) {
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
- ret = -EBUSY;
- goto request_mem_failed;
+ return -EBUSY;
}
- i2c->base = ioremap(res->start, resource_size(res));
+ i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
if (!i2c->base) {
dev_err(&pdev->dev, "Unable to map registers\n");
- ret = -EIO;
- goto map_failed;
+ return -EIO;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (pdata) {
+ i2c->regstep = pdata->regstep;
+ i2c->clock_khz = pdata->clock_khz;
+ } else {
+ ret = ocores_i2c_of_probe(pdev, i2c);
+ if (ret)
+ return ret;
}
- i2c->regstep = pdata->regstep;
- i2c->clock_khz = pdata->clock_khz;
ocores_init(i2c);
init_waitqueue_head(&i2c->wait);
- ret = request_irq(res2->start, ocores_isr, 0, pdev->name, i2c);
+ ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+ pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto request_irq_failed;
+ return ret;
}
/* hook up driver to tree */
@@ -265,36 +330,29 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
i2c->adap = ocores_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
+#ifdef CONFIG_OF
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+#endif
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
- goto add_adapter_failed;
+ return ret;
}
/* add in known devices to the bus */
- for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ if (pdata) {
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+ }
return 0;
-
-add_adapter_failed:
- free_irq(res2->start, i2c);
-request_irq_failed:
- iounmap(i2c->base);
-map_failed:
- release_mem_region(res->start, resource_size(res));
-request_mem_failed:
- kfree(i2c);
-
- return ret;
}
static int __devexit ocores_i2c_remove(struct platform_device* pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
- struct resource *res;
/* disable i2c logic */
oc_setreg(i2c, OCI2C_CONTROL, oc_getreg(i2c, OCI2C_CONTROL)
@@ -304,18 +362,6 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev)
i2c_del_adapter(&i2c->adap);
platform_set_drvdata(pdev, NULL);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res)
- free_irq(res->start, i2c);
-
- iounmap(i2c->base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
- kfree(i2c);
-
return 0;
}
@@ -344,6 +390,16 @@ static int ocores_i2c_resume(struct platform_device *pdev)
#define ocores_i2c_resume NULL
#endif
+#ifdef CONFIG_OF
+static struct of_device_id ocores_i2c_match[] = {
+ {
+ .compatible = "opencores,i2c-ocores",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ocores-i2c");
@@ -355,6 +411,9 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
+#ifdef CONFIG_OF
+ .of_match_table = ocores_i2c_match,
+#endif
},
};
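Since platform data is still honoured when no device tree node is present, a board file can keep describing the controller the old way. The sketch below is hypothetical board wiring (base address, IRQ and the attached EEPROM are invented); the ocores_i2c_platform_data fields match those read in the probe path above.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>

static struct i2c_board_info example_ocores_devices[] = {
	{ I2C_BOARD_INFO("24c02", 0x50) },	/* example EEPROM on the bus */
};

static struct ocores_i2c_platform_data example_ocores_pdata = {
	.regstep	= 1,		/* byte-wide, byte-aligned registers */
	.clock_khz	= 20000,	/* 20 MHz input clock */
	.devices	= example_ocores_devices,
	.num_devices	= ARRAY_SIZE(example_ocores_devices),
};

static struct resource example_ocores_resources[] = {
	{ .start = 0xa0000000, .end = 0xa0000007, .flags = IORESOURCE_MEM },
	{ .start = 10,         .end = 10,         .flags = IORESOURCE_IRQ },
};

static struct platform_device example_ocores_device = {
	.name		= "ocores-i2c",
	.id		= 0,
	.dev		= { .platform_data = &example_ocores_pdata },
	.resource	= example_ocores_resources,
	.num_resources	= ARRAY_SIZE(example_ocores_resources),
};
/* registered from board init code with platform_device_register() */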
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 9d090833e245..829a2a1029f7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -598,12 +598,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, dev->latency);
r = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
dev->buf_len = 0;
if (r < 0)
return r;
@@ -654,12 +650,18 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
if (r < 0)
goto out;
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
if (r != 0)
break;
}
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
+
if (r == 0)
r = num;
@@ -845,11 +847,15 @@ complete:
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
}
+ /*
+ * ProDB0017052: Clear ARDY bit twice
+ */
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat &
(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
- OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+ OMAP_I2C_STAT_ARDY));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -1135,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+ dev->bus->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+ dev->bus->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+ .suspend = omap_i2c_suspend,
+ .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
+ .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be451d326..266135ddf7fa 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
adap->class = I2C_CLASS_DDC;
- strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
adap->algo = &stu300_algo;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 53fab518b3da..986e5f62debe 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -40,6 +41,7 @@
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver");
+MODULE_ALIAS("platform:cs5535-smb");
MODULE_LICENSE("GPL");
#define MAX_DEVICES 4
@@ -84,10 +86,6 @@ struct scx200_acb_iface {
u8 *ptr;
char needs_reset;
unsigned len;
-
- /* PCI device info */
- struct pci_dev *pdev;
- int bar;
};
/* Register Definitions */
@@ -391,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
static struct scx200_acb_iface *scx200_acb_list;
static DEFINE_MUTEX(scx200_acb_list_mutex);
-static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
+static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
{
u8 val;
@@ -427,7 +425,7 @@ static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
return 0;
}
-static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
+static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
struct device *dev, int index)
{
struct scx200_acb_iface *iface;
@@ -452,7 +450,7 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
return iface;
}
-static int __init scx200_acb_create(struct scx200_acb_iface *iface)
+static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
{
struct i2c_adapter *adapter;
int rc;
@@ -472,183 +470,145 @@ static int __init scx200_acb_create(struct scx200_acb_iface *iface)
return -ENODEV;
}
- mutex_lock(&scx200_acb_list_mutex);
- iface->next = scx200_acb_list;
- scx200_acb_list = iface;
- mutex_unlock(&scx200_acb_list_mutex);
+ if (!adapter->dev.parent) {
+ /* If there's no dev, we're tracking (ISA) ifaces manually */
+ mutex_lock(&scx200_acb_list_mutex);
+ iface->next = scx200_acb_list;
+ scx200_acb_list = iface;
+ mutex_unlock(&scx200_acb_list_mutex);
+ }
return 0;
}
-static __init int scx200_create_pci(const char *text, struct pci_dev *pdev,
- int bar)
+static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+ unsigned long base, int index, struct device *dev)
{
struct scx200_acb_iface *iface;
int rc;
- iface = scx200_create_iface(text, &pdev->dev, 0);
+ iface = scx200_create_iface(text, dev, index);
if (iface == NULL)
- return -ENOMEM;
-
- iface->pdev = pdev;
- iface->bar = bar;
-
- rc = pci_enable_device_io(iface->pdev);
- if (rc)
- goto errout_free;
+ return NULL;
- rc = pci_request_region(iface->pdev, iface->bar, iface->adapter.name);
- if (rc) {
- printk(KERN_ERR NAME ": can't allocate PCI BAR %d\n",
- iface->bar);
+ if (!request_region(base, 8, iface->adapter.name)) {
+ printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
+ base, base + 8 - 1);
goto errout_free;
}
- iface->base = pci_resource_start(iface->pdev, iface->bar);
+ iface->base = base;
rc = scx200_acb_create(iface);
if (rc == 0)
- return 0;
+ return iface;
- pci_release_region(iface->pdev, iface->bar);
- pci_dev_put(iface->pdev);
+ release_region(base, 8);
errout_free:
kfree(iface);
- return rc;
+ return NULL;
}
-static int __init scx200_create_isa(const char *text, unsigned long base,
- int index)
+static int __devinit scx200_probe(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
- int rc;
-
- iface = scx200_create_iface(text, NULL, index);
-
- if (iface == NULL)
- return -ENOMEM;
+ struct resource *res;
- if (!request_region(base, 8, iface->adapter.name)) {
- printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
- base, base + 8 - 1);
- rc = -EBUSY;
- goto errout_free;
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -ENODEV;
}
- iface->base = base;
- rc = scx200_acb_create(iface);
+ iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
+ if (!iface)
+ return -EIO;
- if (rc == 0)
- return 0;
+ dev_info(&pdev->dev, "SCx200 device '%s' registered\n",
+ iface->adapter.name);
+ platform_set_drvdata(pdev, iface);
- release_region(base, 8);
- errout_free:
- kfree(iface);
- return rc;
+ return 0;
}
-/* Driver data is an index into the scx200_data array that indicates
- * the name and the BAR where the I/O address resource is located. ISA
- * devices are flagged with a bar value of -1 */
-
-static const struct pci_device_id scx200_pci[] __initconst = {
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
- .driver_data = 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
- .driver_data = 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
- .driver_data = 1 },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
- .driver_data = 2 },
- { 0, }
-};
-
-static struct {
- const char *name;
- int bar;
-} scx200_data[] = {
- { "SCx200", -1 },
- { "CS5535", 0 },
- { "CS5536", 0 }
-};
+static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+{
+ i2c_del_adapter(&iface->adapter);
+ release_region(iface->base, 8);
+ kfree(iface);
+}
-static __init int scx200_scan_pci(void)
+static int __devexit scx200_remove(struct platform_device *pdev)
{
- int data, dev;
- int rc = -ENODEV;
- struct pci_dev *pdev;
+ struct scx200_acb_iface *iface;
- for(dev = 0; dev < ARRAY_SIZE(scx200_pci); dev++) {
- pdev = pci_get_device(scx200_pci[dev].vendor,
- scx200_pci[dev].device, NULL);
+ iface = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+ scx200_cleanup_iface(iface);
- if (pdev == NULL)
- continue;
+ return 0;
+}
- data = scx200_pci[dev].driver_data;
+static struct platform_driver scx200_pci_drv = {
+ .driver = {
+ .name = "cs5535-smb",
+ .owner = THIS_MODULE,
+ },
+ .probe = scx200_probe,
+ .remove = __devexit_p(scx200_remove),
+};
- /* if .bar is greater or equal to zero, this is a
- * PCI device - otherwise, we assume
- that the ports are ISA based
- */
+static const struct pci_device_id scx200_isa[] __initconst = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
+ { 0, }
+};
- if (scx200_data[data].bar >= 0)
- rc = scx200_create_pci(scx200_data[data].name, pdev,
- scx200_data[data].bar);
- else {
- int i;
+static __init void scx200_scan_isa(void)
+{
+ int i;
- pci_dev_put(pdev);
- for (i = 0; i < MAX_DEVICES; ++i) {
- if (base[i] == 0)
- continue;
+ if (!pci_dev_present(scx200_isa))
+ return;
- rc = scx200_create_isa(scx200_data[data].name,
- base[i],
- i);
- }
- }
+ for (i = 0; i < MAX_DEVICES; ++i) {
+ if (base[i] == 0)
+ continue;
- break;
+ /* XXX: should we care about failures? */
+ scx200_create_dev("SCx200", base[i], i, NULL);
}
-
- return rc;
}
static int __init scx200_acb_init(void)
{
- int rc;
-
pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n");
- rc = scx200_scan_pci();
+ /* First scan for ISA-based devices */
+ scx200_scan_isa(); /* XXX: should we care about errors? */
/* If at least one bus was created, init must succeed */
if (scx200_acb_list)
return 0;
- return rc;
+
+ /* No ISA devices; register the platform driver for PCI-based devices */
+ return platform_driver_register(&scx200_pci_drv);
}
static void __exit scx200_acb_cleanup(void)
{
struct scx200_acb_iface *iface;
+ platform_driver_unregister(&scx200_pci_drv);
+
mutex_lock(&scx200_acb_list_mutex);
while ((iface = scx200_acb_list) != NULL) {
scx200_acb_list = iface->next;
mutex_unlock(&scx200_acb_list_mutex);
- i2c_del_adapter(&iface->adapter);
-
- if (iface->pdev) {
- pci_release_region(iface->pdev, iface->bar);
- pci_dev_put(iface->pdev);
- }
- else
- release_region(iface->base, 8);
+ scx200_cleanup_iface(iface);
- kfree(iface);
mutex_lock(&scx200_acb_list_mutex);
}
mutex_unlock(&scx200_acb_list_mutex);
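With the conversion to a platform driver, something else has to create the "cs5535-smb" device that scx200_probe() binds to, typically the southbridge/MFD support code. A hypothetical registration is sketched below with an invented I/O base; the driver only assumes an 8-byte IORESOURCE_IO region, as requested in scx200_create_dev().

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_cs5535_smb_res = {
	.start	= 0x6000,		/* invented I/O base */
	.end	= 0x6000 + 8 - 1,
	.flags	= IORESOURCE_IO,
};

static struct platform_device example_cs5535_smb_dev = {
	.name		= "cs5535-smb",	/* matches the new MODULE_ALIAS */
	.id		= 0,
	.resource	= &example_cs5535_smb_res,
	.num_resources	= 1,
};
/* platform_device_register(&example_cs5535_smb_dev); */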
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b4cc567645b..f0bd5bcdf563 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@ static int i2c_device_pm_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->suspend ? pm->suspend(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_SUSPEND);
+ if (pm)
+ return pm_generic_suspend(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
static int i2c_device_pm_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
if (pm)
- ret = pm->resume ? pm->resume(dev) : 0;
+ return pm_generic_resume(dev);
else
- ret = i2c_legacy_resume(dev);
-
- return ret;
+ return i2c_legacy_resume(dev);
}
static int i2c_device_pm_freeze(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->freeze ? pm->freeze(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_FREEZE);
+ if (pm)
+ return pm_generic_freeze(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
static int i2c_device_pm_thaw(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->thaw ? pm->thaw(dev) : 0;
- }
-
- return i2c_legacy_resume(dev);
+ if (pm)
+ return pm_generic_thaw(dev);
+ else
+ return i2c_legacy_resume(dev);
}
static int i2c_device_pm_poweroff(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm) {
- if (pm_runtime_suspended(dev))
- return 0;
- else
- return pm->poweroff ? pm->poweroff(dev) : 0;
- }
-
- return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
+ if (pm)
+ return pm_generic_poweroff(dev);
+ else
+ return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
static int i2c_device_pm_restore(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
if (pm)
- ret = pm->restore ? pm->restore(dev) : 0;
+ return pm_generic_restore(dev);
else
- ret = i2c_legacy_resume(dev);
-
- if (!ret) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
+ return i2c_legacy_resume(dev);
}
#else /* !CONFIG_PM_SLEEP */
#define i2c_device_pm_suspend NULL
@@ -1021,6 +993,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
+ if (client && strcmp(client->name, "dummy"))
+ i2c_unregister_device(client);
+ return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
if (client)
i2c_unregister_device(client);
return 0;
@@ -1075,8 +1055,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
- checking the returned value. */
+ * check the returned value. This is a two-pass process, because
+ * we can't remove the dummy devices during the first pass: they
+ * could have been instantiated by real devices wishing to clean
+ * them up properly, so we give them a chance to do that first. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+ res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1140,6 +1124,14 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
if (res)
return res;
+ /* Drivers should switch to dev_pm_ops instead. */
+ if (driver->suspend)
+ pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
+ driver->driver.name);
+ if (driver->resume)
+ pr_warn("i2c-core: driver [%s] using legacy resume method\n",
+ driver->driver.name);
+
pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
INIT_LIST_HEAD(&driver->clients);
@@ -1362,7 +1354,7 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client, const char *buf, int count)
+int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
{
int ret;
struct i2c_adapter *adap = client->adapter;
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf, int count)
+int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
{
struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
@@ -1679,7 +1671,7 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
* This executes the SMBus "receive byte" protocol, returning negative errno
* else the byte received from the device.
*/
-s32 i2c_smbus_read_byte(struct i2c_client *client)
+s32 i2c_smbus_read_byte(const struct i2c_client *client)
{
union i2c_smbus_data data;
int status;
@@ -1699,7 +1691,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
* This executes the SMBus "send byte" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value)
{
return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
@@ -1714,7 +1706,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte);
* This executes the SMBus "read byte" protocol, returning negative errno
* else a data byte received from the device.
*/
-s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command)
{
union i2c_smbus_data data;
int status;
@@ -1735,7 +1727,8 @@ EXPORT_SYMBOL(i2c_smbus_read_byte_data);
* This executes the SMBus "write byte" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command,
+ u8 value)
{
union i2c_smbus_data data;
data.byte = value;
@@ -1753,7 +1746,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte_data);
* This executes the SMBus "read word" protocol, returning negative errno
* else a 16-bit unsigned "word" received from the device.
*/
-s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command)
{
union i2c_smbus_data data;
int status;
@@ -1774,7 +1767,8 @@ EXPORT_SYMBOL(i2c_smbus_read_word_data);
* This executes the SMBus "write word" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
+ u16 value)
{
union i2c_smbus_data data;
data.word = value;
@@ -1793,7 +1787,8 @@ EXPORT_SYMBOL(i2c_smbus_write_word_data);
* This executes the SMBus "process call" protocol, returning negative errno
* else a 16-bit unsigned "word" received from the device.
*/
-s32 i2c_smbus_process_call(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
+ u16 value)
{
union i2c_smbus_data data;
int status;
@@ -1821,7 +1816,7 @@ EXPORT_SYMBOL(i2c_smbus_process_call);
* support this; its emulation through I2C messaging relies on a specific
* mechanism (I2C_M_RECV_LEN) which may not be implemented.
*/
-s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command,
u8 *values)
{
union i2c_smbus_data data;
@@ -1848,7 +1843,7 @@ EXPORT_SYMBOL(i2c_smbus_read_block_data);
* This executes the SMBus "block write" protocol, returning negative errno
* else zero on success.
*/
-s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
union i2c_smbus_data data;
@@ -1864,7 +1859,7 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
EXPORT_SYMBOL(i2c_smbus_write_block_data);
/* Returns the number of read bytes */
-s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command,
u8 length, u8 *values)
{
union i2c_smbus_data data;
@@ -1884,7 +1879,7 @@ s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
}
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
-s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
union i2c_smbus_data data;
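Given the new warnings about the legacy suspend()/resume() callbacks, client drivers are expected to publish dev_pm_ops through the driver core instead. A minimal sketch of such a client driver follows; all example_* names are hypothetical, and only SIMPLE_DEV_PM_OPS and the i2c_driver layout are taken as given.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* device-specific power-down would go here */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* device-specific power-up would go here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,	/* instead of .suspend/.resume */
	},
	/* .probe, .remove and .id_table omitted; registered with i2c_add_driver() */
};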
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4d91d80bfd23..90b7a0163899 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
menu "Multiplexer I2C Chip support"
depends on I2C_MUX
+config I2C_MUX_GPIO
+ tristate "GPIO-based I2C multiplexer"
+ depends on GENERIC_GPIO
+ help
+ If you say yes to this option, support will be included for a
+ GPIO based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ through GPIO pins.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-i2cmux.
+
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
depends on EXPERIMENTAL
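The help text above describes a mux whose channel is selected by GPIO lines; a board communicates the routing through gpio_i2cmux_platform_data. The sketch below is hypothetical wiring for a 4-way mux behind bus 0 (GPIO numbers, bus numbers and channel values are invented); the field names are the ones consumed by the gpio-i2cmux driver added below.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio-i2cmux.h>

static const unsigned example_mux_gpios[] = { 100, 101 };	/* select lines */
static const unsigned example_mux_values[] = { 0, 1, 2, 3 };	/* per channel */

static struct gpio_i2cmux_platform_data example_mux_pdata = {
	.parent		= 0,		/* bus number of the parent adapter */
	.base_nr	= 10,		/* children become i2c-10 .. i2c-13 */
	.values		= example_mux_values,
	.n_values	= ARRAY_SIZE(example_mux_values),
	.gpios		= example_mux_gpios,
	.n_gpios	= ARRAY_SIZE(example_mux_gpios),
	.idle		= GPIO_I2CMUX_NO_IDLE,	/* keep last channel selected */
};

static struct platform_device example_mux_dev = {
	.name	= "gpio-i2cmux",
	.id	= 0,
	.dev	= { .platform_data = &example_mux_pdata },
};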
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index d743806d9b42..4640436ea61f 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for multiplexer I2C chip drivers.
+obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
new file mode 100644
index 000000000000..7b6ce624cd6e
--- /dev/null
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -0,0 +1,184 @@
+/*
+ * I2C multiplexer using GPIO API
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+struct gpiomux {
+ struct i2c_adapter *parent;
+ struct i2c_adapter **adap; /* child busses */
+ struct gpio_i2cmux_platform_data data;
+};
+
+static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+{
+ int i;
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpio_set_value(mux->data.gpios[i], val & (1 << i));
+}
+
+static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct gpiomux *mux = data;
+
+ gpiomux_set(mux, mux->data.values[chan]);
+
+ return 0;
+}
+
+static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct gpiomux *mux = data;
+
+ gpiomux_set(mux, mux->data.idle);
+
+ return 0;
+}
+
+static int __devinit gpiomux_probe(struct platform_device *pdev)
+{
+ struct gpiomux *mux;
+ struct gpio_i2cmux_platform_data *pdata;
+ struct i2c_adapter *parent;
+ int (*deselect) (struct i2c_adapter *, void *, u32);
+ unsigned initial_state;
+ int i, ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ return -ENODEV;
+ }
+
+ parent = i2c_get_adapter(pdata->parent);
+ if (!parent) {
+ dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+ pdata->parent);
+ return -ENODEV;
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ mux->parent = parent;
+ mux->data = *pdata;
+ mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
+ GFP_KERNEL);
+ if (!mux->adap) {
+ ret = -ENOMEM;
+ goto alloc_failed2;
+ }
+
+ if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+ initial_state = pdata->idle;
+ deselect = gpiomux_deselect;
+ } else {
+ initial_state = pdata->values[0];
+ deselect = NULL;
+ }
+
+ for (i = 0; i < pdata->n_gpios; i++) {
+ ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+ if (ret)
+ goto err_request_gpio;
+ gpio_direction_output(pdata->gpios[i],
+ initial_state & (1 << i));
+ }
+
+ for (i = 0; i < pdata->n_values; i++) {
+ u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
+
+ mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
+ gpiomux_select, deselect);
+ if (!mux->adap[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto add_adapter_failed;
+ }
+ }
+
+ dev_info(&pdev->dev, "%d port mux on %s adapter\n",
+ pdata->n_values, parent->name);
+
+ platform_set_drvdata(pdev, mux);
+
+ return 0;
+
+add_adapter_failed:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->adap[i - 1]);
+ i = pdata->n_gpios;
+err_request_gpio:
+ for (; i > 0; i--)
+ gpio_free(pdata->gpios[i - 1]);
+ kfree(mux->adap);
+alloc_failed2:
+ kfree(mux);
+alloc_failed:
+ i2c_put_adapter(parent);
+
+ return ret;
+}
+
+static int __devexit gpiomux_remove(struct platform_device *pdev)
+{
+ struct gpiomux *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->data.n_values; i++)
+ i2c_del_mux_adapter(mux->adap[i]);
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpio_free(mux->data.gpios[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_put_adapter(mux->parent);
+ kfree(mux->adap);
+ kfree(mux);
+
+ return 0;
+}
+
+static struct platform_driver gpiomux_driver = {
+ .probe = gpiomux_probe,
+ .remove = __devexit_p(gpiomux_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gpio-i2cmux",
+ },
+};
+
+static int __init gpiomux_init(void)
+{
+ return platform_driver_register(&gpiomux_driver);
+}
+
+static void __exit gpiomux_exit(void)
+{
+ platform_driver_unregister(&gpiomux_driver);
+}
+
+module_init(gpiomux_init);
+module_exit(gpiomux_exit);
+
+MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
+MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-i2cmux");
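
For readers wondering how the new driver above gets wired up: it expects platform data describing the parent bus, the selector GPIOs, and the per-channel values. Below is a hedged sketch of board-code registration; the GPIO numbers, bus numbers, and channel values are invented, and the field layout is inferred from how gpio-i2cmux.c dereferences struct gpio_i2cmux_platform_data above (the header itself is outside this drivers/ diff):

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/gpio-i2cmux.h>
#include <linux/platform_device.h>

/* Two GPIO lines select one of four channels behind i2c-0 (all numbers invented). */
static const unsigned example_mux_gpios[]  = { 23, 24 };
static const unsigned example_mux_values[] = { 0, 1, 2, 3 };

static struct gpio_i2cmux_platform_data example_mux_data = {
	.parent		= 0,			/* adapter number of the upstream bus */
	.base_nr	= 10,			/* children become i2c-10 .. i2c-13 */
	.values		= example_mux_values,
	.n_values	= ARRAY_SIZE(example_mux_values),
	.gpios		= example_mux_gpios,
	.n_gpios	= ARRAY_SIZE(example_mux_gpios),
	.idle		= GPIO_I2CMUX_NO_IDLE,	/* leave the last channel selected */
};

static struct platform_device example_mux_device = {
	.name	= "gpio-i2cmux",		/* must match the driver name above */
	.id	= 0,
	.dev	= {
		.platform_data = &example_mux_data,
	},
};

With that in place, a platform_device_register(&example_mux_device) call from board init would bind the device to the driver registered above.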
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 98ccfeb3f5aa..9827c5e686cb 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -134,7 +134,7 @@ config BLK_DEV_IDECD
module will be called ide-cd.
config BLK_DEV_IDECD_VERBOSE_ERRORS
- bool "Verbose error logging for IDE/ATAPI CDROM driver" if EMBEDDED
+ bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
depends on BLK_DEV_IDECD
default y
help
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56ac09d6c930..4a5c4a44ffb1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -59,7 +59,10 @@
#include <linux/hrtimer.h> /* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include <asm/mwait.h>
+#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
@@ -73,6 +76,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
static unsigned int mwait_substates;
+#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
@@ -82,6 +86,20 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
+ * Set this flag for states where the HW flushes the TLB for us,
+ * so we don't need cross-calls to keep it consistent.
+ * When the flag is set the TLB is also flushed in SW, so it is
+ * safe to use even where the HW does not do the flushing.
+ */
+#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
+
+/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
@@ -122,7 +140,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
- .target_residency = 4,
+ .target_residency = 1,
.enter = &intel_idle },
{ /* MWAIT C2 */
.name = "SNB-C3",
@@ -130,7 +148,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
- .target_residency = 160,
+ .target_residency = 211,
.enter = &intel_idle },
{ /* MWAIT C3 */
.name = "SNB-C6",
@@ -138,7 +156,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
- .target_residency = 208,
+ .target_residency = 345,
.enter = &intel_idle },
{ /* MWAIT C4 */
.name = "SNB-C7",
@@ -146,7 +164,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
- .target_residency = 300,
+ .target_residency = 345,
.enter = &intel_idle },
};
@@ -220,8 +238,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
kt_before = ktime_get_real();
stop_critical_timings();
- trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
- trace_cpu_idle((eax >> 4) + 1, cpu);
if (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -243,6 +259,44 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
return usec_delta;
}
+static void __setup_broadcast_timer(void *arg)
+{
+ unsigned long reason = (unsigned long)arg;
+ int cpu = smp_processor_id();
+
+ reason = reason ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &cpu);
+}
+
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
+ unsigned long action, void *hcpu)
+{
+ int hotcpu = (unsigned long)hcpu;
+
+ switch (action & 0xf) {
+ case CPU_ONLINE:
+ smp_call_function_single(hotcpu, __setup_broadcast_timer,
+ (void *)true, 1);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block setup_broadcast_notifier = {
+ .notifier_call = setup_broadcast_cpuhp_notify,
+};
+
+static void auto_demotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ msr_bits &= ~auto_demotion_disable_flags;
+ wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
/*
* intel_idle_probe()
*/
@@ -286,11 +340,17 @@ static int intel_idle_probe(void)
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
+ auto_demotion_disable_flags =
+ (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
break;
case 0x1C: /* 28 - Atom Processor */
+ cpuidle_state_table = atom_cstates;
+ break;
+
case 0x26: /* 38 - Lincroft Atom Processor */
cpuidle_state_table = atom_cstates;
+ auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
break;
case 0x2A: /* SNB */
@@ -305,7 +365,11 @@ static int intel_idle_probe(void)
}
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = 0xFFFFFFFF;
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ else {
+ smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+ register_cpu_notifier(&setup_broadcast_notifier);
+ }
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -394,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
+ if (auto_demotion_disable_flags)
+ smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
@@ -403,6 +469,10 @@ static int __init intel_idle_init(void)
{
int retval;
+ /* Do not load intel_idle at all for now if idle= is passed */
+ if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
retval = intel_idle_probe();
if (retval)
return retval;
@@ -428,6 +498,11 @@ static void __exit intel_idle_exit(void)
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+ smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+ unregister_cpu_notifier(&setup_broadcast_notifier);
+ }
+
return;
}
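
Context for the retuned SNB target_residency values above: the cpuidle menu governor only picks a state whose target residency fits within the predicted idle interval (and whose exit latency respects the PM QoS bound). A simplified sketch of that selection rule, not the governor's verbatim code:

#include <linux/cpuidle.h>

/* Simplified selection rule (sketch): deeper states are only chosen when the
 * predicted idle time covers their target_residency and their exit_latency
 * stays within the latency budget. */
static int example_pick_state(const struct cpuidle_state *states, int nstates,
			      unsigned int predicted_us, unsigned int latency_req_us)
{
	int i, best = 0;

	for (i = 1; i < nstates; i++) {
		if (states[i].target_residency > predicted_us)
			continue;
		if (states[i].exit_latency > latency_req_us)
			continue;
		best = i;
	}
	return best;
}

So raising SNB-C6's target_residency from 208 us to 345 us simply means the governor needs a longer predicted idle period before it is willing to pay C6's entry and exit cost.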
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 68883565b725..f9ba7d74dfc0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
- schedule_work(&work->work);
+ queue_work(ib_wq, &work->work);
}
}
}
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
int p;
ib_unregister_event_handler(&device->cache.event_handler);
- flush_scheduled_work();
+ flush_workqueue(ib_wq);
for (p = 0; p <= end_port(device) - start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effad0811..f793bf2f5da7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include "core_priv.h"
@@ -52,6 +51,9 @@ struct ib_client_data {
void * data;
};
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
{
int ret;
+ ib_wq = alloc_workqueue("infiniband", 0, 0);
+ if (!ib_wq)
+ return -ENOMEM;
+
ret = ib_sysfs_setup();
if (ret)
printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
if (ret) {
printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
ib_sysfs_cleanup();
+ destroy_workqueue(ib_wq);
}
return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
ib_cache_cleanup();
ib_sysfs_cleanup();
/* Make sure that any pending umem accounting work is done. */
- flush_scheduled_work();
+ destroy_workqueue(ib_wq);
}
module_init(ib_core_init);
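
The ib_wq workqueue defined above is also used from the other files converted in this series (cache.c, sa_query.c, umem.c, ipath), so a matching declaration presumably lands in a shared RDMA header outside the drivers/ tree shown here. As an assumption, that declaration would look like:

/* Assumed companion declaration in a shared header (e.g. include/rdma/ib_verbs.h,
 * not part of this drivers/ diffstat): */
#include <linux/workqueue.h>

extern struct workqueue_struct *ib_wq;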
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a660310b7c..fbbfa24cf572 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
port->sm_ah = NULL;
spin_unlock_irqrestore(&port->ah_lock, flags);
- schedule_work(&sa_dev->port[event->element.port_num -
+ queue_work(ib_wq, &sa_dev->port[event->element.port_num -
sa_dev->start_port].update_task);
}
}
@@ -1079,7 +1079,7 @@ static void ib_sa_remove_one(struct ib_device *device)
ib_unregister_event_handler(&sa_dev->event_handler);
- flush_scheduled_work();
+ flush_workqueue(ib_wq);
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf38379..ec1e9da1488b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
}
}
+static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
+ struct rdma_route *route)
+{
+ struct rdma_dev_addr *dev_addr;
+
+ dev_addr = &route->addr.dev_addr;
+ rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
+ rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
+}
+
static ssize_t ucma_query_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -670,8 +680,10 @@ static ssize_t ucma_query_route(struct ucma_file *file,
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
- if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
- switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+ switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ switch (rdma_port_get_link_layer(ctx->cm_id->device,
+ ctx->cm_id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
ucma_copy_ib_route(&resp, &ctx->cm_id->route);
break;
@@ -681,6 +693,12 @@ static ssize_t ucma_query_route(struct ucma_file *file,
default:
break;
}
+ break;
+ case RDMA_TRANSPORT_IWARP:
+ ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+ break;
+ default:
+ break;
}
out:
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186eee32..b645e558876f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
umem->mm = mm;
umem->diff = diff;
- schedule_work(&umem->work);
+ queue_work(ib_wq, &umem->work);
return;
}
} else
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 85cfae4cad71..8c81992fa6db 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,13 +459,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
IB_DEVICE_MEM_WINDOW);
/* Allocate the qptr_array */
- c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+ c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
if (!c2dev->qptr_array) {
return -ENOMEM;
}
- /* Inialize the qptr_array */
- memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+ /* Initialize the qptr_array */
c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
c2dev->qptr_array[2] = (void *) &c2dev->aeq;
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b7b2e..2ec716fb2edb 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
if (r) {
init_waitqueue_head(&r->wait_object);
- r->reply_msg = (u64) NULL;
+ r->reply_msg = 0;
r->event = 0;
r->cm_id = NULL;
r->qp = NULL;
@@ -123,7 +123,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
*/
void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
{
- r->reply_msg = (u64) NULL;
+ r->reply_msg = 0;
if (atomic_dec_and_test(&r->refcnt)) {
kfree(r);
}
@@ -151,7 +151,7 @@ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
{
if (atomic_dec_and_test(&r->refcnt)) {
- if (r->reply_msg != (u64) NULL)
+ if (r->reply_msg != 0)
vq_repbuf_free(c2dev,
(void *) (unsigned long) r->reply_msg);
kfree(r);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
+#ifdef notyet
int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
+#endif
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 4bb997aa39d0..83d2e19d31ae 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -689,7 +689,7 @@ struct t3_swrq {
* A T3 WQ implements both the SQ and RQ.
*/
struct t3_wq {
- union t3_wr *queue; /* DMA accessable memory */
+ union t3_wr *queue; /* DMA accessible memory */
dma_addr_t dma_addr; /* DMA address for HW */
DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
u32 error; /* 1 once we go to ERROR */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..c5406da3f4cd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_post_zb_read(struct iwch_qp *qhp);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d7..1b4cd09f74dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
- spin_lock_irq(&qhp->lock);
- iwch_quiesce_tid(qhp->ep);
- qhp->flags |= QP_QUIESCED;
- spin_unlock_irq(&qhp->lock);
- return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
- spin_lock_irq(&qhp->lock);
- iwch_resume_tid(qhp->ep);
- qhp->flags &= ~QP_QUIESCED;
- spin_unlock_irq(&qhp->lock);
- return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
- int i;
- struct iwch_qp *qhp;
-
- for (i=0; i < T3_MAX_NUM_QP; i++) {
- qhp = get_qhp(chp->rhp, i);
- if (!qhp)
- continue;
- if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
- quiesce_qp(qhp);
- continue;
- }
- if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
- quiesce_qp(qhp);
- }
- return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
- int i;
- struct iwch_qp *qhp;
-
- for (i=0; i < T3_MAX_NUM_QP; i++) {
- qhp = get_qhp(chp->rhp, i);
- if (!qhp)
- continue;
- if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
- resume_qp(qhp);
- continue;
- }
- if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
- resume_qp(qhp);
- }
- return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1438be..8b00e6c46f01 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
16)) | FW_WR_FLOWID(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
- flowc->mnemval[0].val = cpu_to_be32(0);
+ flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb4337..2fe19ec9ba60 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,7 +46,6 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/kfifo.h>
-#include <linux/mutex.h>
#include <asm/byteorder.h>
@@ -760,7 +759,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea1..4f0be25cab1a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_FBMAX(2) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(2) |
- V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_FBMAX(2) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
}
}
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
- union t4_wr *wqe;
- struct sk_buff *skb;
- u8 len16;
-
- PDBG("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
- wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
- memset(wqe, 0, sizeof wqe->read);
- wqe->read.r2 = cpu_to_be64(0);
- wqe->read.stag_sink = cpu_to_be32(1);
- wqe->read.to_sink_hi = cpu_to_be32(0);
- wqe->read.to_sink_lo = cpu_to_be32(1);
- wqe->read.stag_src = cpu_to_be32(1);
- wqe->read.plen = cpu_to_be32(0);
- wqe->read.to_src_hi = cpu_to_be32(0);
- wqe->read.to_src_lo = cpu_to_be32(1);
- len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
- init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
- return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
gfp_t gfp)
{
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- c4iw_init_wr_wait(&ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1596e3085344..1898d6e7cce5 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,15 +222,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
queue->small_page = NULL;
/* allocate queue page pointers */
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+ queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
- queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+ queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
if (!queue->queue_pages) {
ehca_gen_err("Couldn't allocate queue page list");
return 0;
}
}
- memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
/* allocate actual queue pages */
if (is_small) {
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da76..47db4bf34628 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,12 +199,11 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
goto bail;
}
- dd = vmalloc(sizeof(*dd));
+ dd = vzalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
- memset(dd, 0, sizeof(*dd));
dd->ipath_unit = -1;
spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -530,9 +529,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
for (j = 0; j < 6; j++) {
if (!pdev->resource[j].start)
continue;
- ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
- j, (unsigned long long)pdev->resource[j].start,
- (unsigned long long)pdev->resource[j].end,
+ ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+ j, &pdev->resource[j],
(unsigned long long)pci_resource_len(pdev, j));
}
@@ -757,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
*/
ipath_shutdown_device(dd);
- flush_scheduled_work();
+ flush_workqueue(ib_wq);
if (dd->verbs_dev)
ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 9292a15ad7c4..6d4b29c4cd89 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd,
}
num_subports = uinfo->spu_subport_cnt;
- pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+ pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
if (!pd->subport_uregbase) {
ret = -ENOMEM;
goto bail;
@@ -1538,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd,
/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
sizeof(u32), PAGE_SIZE) * num_subports;
- pd->subport_rcvhdr_base = vmalloc(size);
+ pd->subport_rcvhdr_base = vzalloc(size);
if (!pd->subport_rcvhdr_base) {
ret = -ENOMEM;
goto bail_ureg;
}
- pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+ pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
pd->port_rcvegrbuf_size *
num_subports);
if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@ static int init_subports(struct ipath_devdata *dd,
pd->port_subport_id = uinfo->spu_subport_id;
pd->active_slaves = 1;
set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
- memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
- memset(pd->subport_rcvhdr_base, 0, size);
- memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
- pd->port_rcvegrbuf_size *
- num_subports);
goto bail;
bail_rhdr:
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 776938299e4c..fef0f4201257 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
struct page **pages;
dma_addr_t *addrs;
- pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+ pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
sizeof(struct page *));
if (!pages) {
ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,9 +461,6 @@ static void init_shadow_tids(struct ipath_devdata *dd)
return;
}
- memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
- sizeof(struct page *));
-
dd->ipath_pageshadow = pages;
dd->ipath_physshadow = addrs;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73eba2a..bab9f74c0665 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
work->mm = mm;
work->num_pages = num_pages;
- schedule_work(&work->work);
+ queue_work(ib_wq, &work->work);
return;
bail_mm:
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf16..e8df155bc3b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cq->resize_buf = NULL;
cq->resize_umem = NULL;
} else {
+ struct mlx4_ib_cq_buf tmp_buf;
+ int tmp_cqe = 0;
+
spin_lock_irq(&cq->lock);
if (cq->resize_buf) {
mlx4_ib_cq_resize_copy_cqes(cq);
- mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+ tmp_buf = cq->buf;
+ tmp_cqe = cq->ibcq.cqe;
cq->buf = cq->resize_buf->buf;
cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cq->resize_buf = NULL;
}
spin_unlock_irq(&cq->lock);
+
+ if (tmp_cqe)
+ mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
}
goto out;
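
The tmp_buf/tmp_cqe juggling above exists so that mlx4_ib_free_cq_buf() is no longer called with the CQ spinlock held. The general shape of that fix, as a self-contained hypothetical helper (not mlx4 code):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_resizable {
	spinlock_t lock;
	void *buf;
};

/* Swap in the new buffer under the lock, but defer releasing the old one
 * until the lock has been dropped. */
static void example_swap_buf(struct example_resizable *r, void *new_buf)
{
	void *old;

	spin_lock_irq(&r->lock);
	old = r->buf;		/* remember what must be released */
	r->buf = new_buf;	/* publish the replacement while holding the lock */
	spin_unlock_irq(&r->lock);

	kfree(old);		/* safe here: no spinlock held */
}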
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e2..57ffa50f509e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4c85224aeaa7..c7a6213c6996 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -623,8 +623,9 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
- err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
- MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+ !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ MLX4_PROTOCOL_IB);
if (err)
return err;
@@ -635,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return 0;
err_add:
- mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
return err;
}
@@ -665,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_gid_entry *ge;
err = mlx4_multicast_detach(mdev->dev,
- &mqp->mqp, gid->raw);
+ &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
if (err)
return err;
@@ -1005,7 +1006,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
goto err_pd;
- ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+ PAGE_SIZE);
if (!ibdev->uar_map)
goto err_uar;
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
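
The (phys_addr_t) cast above matters on 32-bit kernels with PAE, where a page frame number above the 4 GiB boundary overflows unsigned long once shifted. A small worked illustration (numbers invented, assuming the usual PAGE_SHIFT of 12):

#include <linux/types.h>
#include <asm/page.h>

/* Illustration only: pfn 0x110000 corresponds to physical address
 * 0x1_1000_0000, which is above 4 GiB. */
static inline void example_pfn_shift(void)
{
	unsigned long pfn = 0x110000;

	unsigned long truncated = pfn << PAGE_SHIFT;		   /* 0x10000000 on 32-bit */
	phys_addr_t   correct   = (phys_addr_t)pfn << PAGE_SHIFT; /* 0x110000000 */

	(void)truncated;
	(void)correct;
}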
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 03efc074967e..da314c3fec23 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,7 +7,7 @@ config INFINIBAND_MTHCA
("Tavor") and the MT25208 PCI Express HCA ("Arbel").
config INFINIBAND_MTHCA_DEBUG
- bool "Verbose debugging output" if EMBEDDED
+ bool "Verbose debugging output" if EXPERT
depends on INFINIBAND_MTHCA
default y
---help---
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 0aa0110e4b6c..e4a08c2819e4 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -146,7 +146,7 @@ static void poll_catas(unsigned long dev_ptr)
void mthca_start_catas_poll(struct mthca_dev *dev)
{
- unsigned long addr;
+ phys_addr_t addr;
init_timer(&dev->catas_err.timer);
dev->catas_err.map = NULL;
@@ -158,7 +158,8 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
if (!dev->catas_err.map) {
mthca_warn(dev, "couldn't map catastrophic error region "
- "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+ "at 0x%llx/0x%x\n", (unsigned long long) addr,
+ dev->catas_err.size * 4);
return;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f4ceecd9684b..7bfa2a164955 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -713,7 +713,7 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
{
- unsigned long addr;
+ phys_addr_t addr;
u16 max_off = 0;
int i;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8e8c728aff88..76785c653c13 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -653,7 +653,7 @@ static int mthca_map_reg(struct mthca_dev *dev,
unsigned long offset, unsigned long size,
void __iomem **map)
{
- unsigned long base = pci_resource_start(dev->pdev, 0);
+ phys_addr_t base = pci_resource_start(dev->pdev, 0);
*map = ioremap(base + offset, size);
if (!*map)
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b0..03a59534f59e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5eee6665919a..8a40cd539ab1 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -790,7 +790,7 @@ static int mthca_setup_hca(struct mthca_dev *dev)
goto err_uar_table_free;
}
- dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!dev->kar) {
mthca_err(dev, "Couldn't map kernel access region, "
"aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 065b20899876..44045c8846db 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -853,7 +853,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
int mthca_init_mr_table(struct mthca_dev *dev)
{
- unsigned long addr;
+ phys_addr_t addr;
int mpts, mtts, err, i;
err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 0c9f0aa5d4ea..3b4ec3238ceb 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -144,6 +144,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
struct nes_device *nesdev;
struct net_device *netdev;
struct nes_vnic *nesvnic;
+ unsigned int is_bonded;
nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
&ifa->ifa_address, &ifa->ifa_mask);
@@ -152,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
- if (netdev == event_netdev) {
+ is_bonded = (netdev->master == event_netdev);
+ if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
" RDMA is not enabled.\n",
@@ -169,7 +171,10 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nes_manage_arp_cache(netdev, netdev->dev_addr,
ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
nesvnic->local_ipaddr = 0;
- return NOTIFY_OK;
+ if (is_bonded)
+ continue;
+ else
+ return NOTIFY_OK;
break;
case NETDEV_UP:
nes_debug(NES_DBG_NETDEV, "event:UP\n");
@@ -178,15 +183,24 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
return NOTIFY_OK;
}
+ /* fall through */
+ case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
- nesvnic->local_ipaddr = ifa->ifa_address;
+ if (netdev->master)
+ nesvnic->local_ipaddr =
+ ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+ else
+ nesvnic->local_ipaddr = ifa->ifa_address;
nes_write_indexed(nesdev,
NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
- ntohl(ifa->ifa_address));
+ ntohl(nesvnic->local_ipaddr));
nes_manage_arp_cache(netdev, netdev->dev_addr,
ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
- return NOTIFY_OK;
+ if (is_bonded)
+ continue;
+ else
+ return NOTIFY_OK;
break;
default:
break;
@@ -660,6 +674,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
}
nes_notifiers_registered++;
+ INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
+
/* Initialize network devices */
if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
goto bail7;
@@ -742,6 +758,7 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
struct nes_device *nesdev = pci_get_drvdata(pcidev);
struct net_device *netdev;
int netdev_index = 0;
+ unsigned long flags;
if (nesdev->netdev_count) {
netdev = nesdev->netdev[netdev_index];
@@ -768,6 +785,14 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
free_irq(pcidev->irq, nesdev);
tasklet_kill(&nesdev->dpc_tasklet);
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+ if (nesdev->link_recheck) {
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+ cancel_delayed_work_sync(&nesdev->work);
+ } else {
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+ }
+
/* Deallocate the Adapter Structure */
nes_destroy_adapter(nesdev->nesadapter);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index b3d145e82b4c..6fe79876009e 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -268,6 +268,9 @@ struct nes_device {
u8 napi_isr_ran;
u8 disable_rx_flow_control;
u8 disable_tx_flow_control;
+
+ struct delayed_work work;
+ u8 link_recheck;
};
@@ -507,6 +510,7 @@ void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+void nes_recheck_link_status(struct work_struct *work);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 25ad0f9944c0..009ec814d517 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1107,6 +1107,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
struct flowi fl;
struct neighbour *neigh;
int rc = arpindex;
+ struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
memset(&fl, 0, sizeof fl);
@@ -1117,7 +1118,12 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+ if (nesvnic->netdev->master)
+ netdev = nesvnic->netdev->master;
+ else
+ netdev = nesvnic->netdev;
+
+ neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
if (neigh) {
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1980a461c499..08c194861af5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2608,6 +2608,15 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_start_queue(nesvnic->netdev);
nesvnic->linkup = 1;
netif_carrier_on(nesvnic->netdev);
+
+ spin_lock(&nesvnic->port_ibevent_lock);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
+ }
+ spin_unlock(&nesvnic->port_ibevent_lock);
}
}
} else {
@@ -2633,9 +2642,25 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_stop_queue(nesvnic->netdev);
nesvnic->linkup = 0;
netif_carrier_off(nesvnic->netdev);
+
+ spin_lock(&nesvnic->port_ibevent_lock);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
+ }
+ spin_unlock(&nesvnic->port_ibevent_lock);
}
}
}
+ if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
+ if (nesdev->link_recheck)
+ cancel_delayed_work(&nesdev->work);
+ nesdev->link_recheck = 1;
+ schedule_delayed_work(&nesdev->work,
+ NES_LINK_RECHECK_DELAY);
+ }
}
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
@@ -2643,6 +2668,84 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
}
+void nes_recheck_link_status(struct work_struct *work)
+{
+ unsigned long flags;
+ struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_vnic *nesvnic;
+ u32 mac_index = nesdev->mac_index;
+ u16 phy_data;
+ u16 temp_phy_data;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+ /* check link status */
+ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+ nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+ phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+ phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+
+ nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+ __func__, phy_data,
+ nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+
+ if (phy_data & 0x0004) {
+ nesadapter->mac_link_down[mac_index] = 0;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ if (nesvnic->linkup == 0) {
+ printk(PFX "The Link is now up for port %s, netdev %p.\n",
+ nesvnic->netdev->name, nesvnic->netdev);
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_start_queue(nesvnic->netdev);
+ nesvnic->linkup = 1;
+ netif_carrier_on(nesvnic->netdev);
+
+ spin_lock(&nesvnic->port_ibevent_lock);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
+ }
+ spin_unlock(&nesvnic->port_ibevent_lock);
+ }
+ }
+
+ } else {
+ nesadapter->mac_link_down[mac_index] = 1;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ if (nesvnic->linkup == 1) {
+ printk(PFX "The Link is now down for port %s, netdev %p.\n",
+ nesvnic->netdev->name, nesvnic->netdev);
+ if (!(netif_queue_stopped(nesvnic->netdev)))
+ netif_stop_queue(nesvnic->netdev);
+ nesvnic->linkup = 0;
+ netif_carrier_off(nesvnic->netdev);
+
+ spin_lock(&nesvnic->port_ibevent_lock);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
+ }
+ spin_unlock(&nesvnic->port_ibevent_lock);
+ }
+ }
+ }
+ if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
+ schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+ else
+ nesdev->link_recheck = 0;
+
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1204c3432b63..d2abe07133a5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1193,6 +1193,8 @@ struct nes_listener {
struct nes_ib_device;
+#define NES_EVENT_DELAY msecs_to_jiffies(100)
+
struct nes_vnic {
struct nes_ib_device *nesibdev;
u64 sq_full;
@@ -1247,6 +1249,10 @@ struct nes_vnic {
u32 lro_max_aggr;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+ struct timer_list event_timer;
+ enum ib_event_type delayed_event;
+ enum ib_event_type last_dispatched_event;
+ spinlock_t port_ibevent_lock;
};
struct nes_ib_device {
@@ -1348,6 +1354,10 @@ struct nes_terminate_hdr {
#define BAD_FRAME_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
+/* Used for link status recheck after interrupt processing */
+#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
+#define NES_LINK_RECHECK_MAX 60
+
#define nes_vlan_rx vlan_hwaccel_receive_skb
#define nes_netif_rx netif_receive_skb
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95a..2c9c1933bbe3 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -144,6 +144,7 @@ static int nes_netdev_open(struct net_device *netdev)
u32 nic_active_bit;
u32 nic_active;
struct list_head *list_pos, *list_temp;
+ unsigned long flags;
assert(nesdev != NULL);
@@ -233,18 +234,36 @@ static int nes_netdev_open(struct net_device *netdev)
first_nesvnic = nesvnic;
}
- if (nesvnic->of_device_registered) {
- nesdev->iw_status = 1;
- nesdev->nesadapter->send_term_ok = 1;
- nes_port_ibevent(nesvnic);
- }
-
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
netif_start_queue(netdev);
netif_carrier_on(netdev);
}
+
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+ if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
+ if (nesdev->link_recheck)
+ cancel_delayed_work(&nesdev->work);
+ nesdev->link_recheck = 1;
+ schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+ }
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+ spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
+ if (nesvnic->of_device_registered) {
+ nesdev->nesadapter->send_term_ok = 1;
+ if (nesvnic->linkup == 1) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
+ } else {
+ nesdev->iw_status = 0;
+ }
+ }
+ spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
napi_enable(&nesvnic->napi);
nesvnic->netdev_open = 1;
@@ -263,6 +282,7 @@ static int nes_netdev_stop(struct net_device *netdev)
u32 nic_active;
struct nes_vnic *first_nesvnic = NULL;
struct list_head *list_pos, *list_temp;
+ unsigned long flags;
nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
nesvnic, nesdev, netdev, netdev->name);
@@ -315,12 +335,17 @@ static int nes_netdev_stop(struct net_device *netdev)
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
-
+ spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
if (nesvnic->of_device_registered) {
nesdev->nesadapter->send_term_ok = 0;
nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->linkup == 1)
+ nes_port_ibevent(nesvnic);
}
+ del_timer_sync(&nesvnic->event_timer);
+ nesvnic->event_timer.function = NULL;
+ spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
nes_destroy_nic_qp(nesvnic);
nesvnic->netdev_open = 0;
@@ -908,8 +933,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
nes_debug(NES_DBG_NIC_RX,
- "mc_index=%d skipping nic_index=%d,\
- used for=%d \n", mc_index,
+ "mc_index=%d skipping nic_index=%d, "
+ "used for=%d \n", mc_index,
nesvnic->nic_index,
nesadapter->pft_mcast_map[mc_index]);
mc_index++;
@@ -1750,7 +1775,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+ init_timer(&nesvnic->event_timer);
+ nesvnic->event_timer.function = NULL;
spin_lock_init(&nesvnic->tx_lock);
+ spin_lock_init(&nesvnic->port_ibevent_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
@@ -1763,8 +1791,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
u32 u32temp;
- u32 link_mask;
- u32 link_val;
+ u32 link_mask = 0;
+ u32 link_val = 0;
+ u16 temp_phy_data;
+ u16 phy_data = 0;
+ unsigned long flags;
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
@@ -1786,6 +1817,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
link_val = 0x02020000;
}
break;
+ case NES_PHY_TYPE_SFP_D:
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+ nes_read_10G_phy_reg(nesdev,
+ nesdev->nesadapter->phy_index[nesdev->mac_index],
+ 1, 0x9003);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ nes_read_10G_phy_reg(nesdev,
+ nesdev->nesadapter->phy_index[nesdev->mac_index],
+ 3, 0x0021);
+ nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ nes_read_10G_phy_reg(nesdev,
+ nesdev->nesadapter->phy_index[nesdev->mac_index],
+ 3, 0x0021);
+ phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+ phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+ break;
default:
link_mask = 0x0f1f0000;
link_val = 0x0f0f0000;
@@ -1795,8 +1843,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
u32temp = nes_read_indexed(nesdev,
NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
- if ((u32temp & link_mask) == link_val)
- nesvnic->linkup = 1;
+
+ if (phy_type == NES_PHY_TYPE_SFP_D) {
+ if (phy_data & 0x0004)
+ nesvnic->linkup = 1;
+ } else {
+ if ((u32temp & link_mask) == link_val)
+ nesvnic->linkup = 1;
+ }
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 99933e4e48ff..26d8018c0a7c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3936,6 +3936,30 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
return nesibdev;
}
+
+/**
+ * nes_handle_delayed_event
+ */
+static void nes_handle_delayed_event(unsigned long data)
+{
+ struct nes_vnic *nesvnic = (void *) data;
+
+ if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
+ struct ib_event event;
+
+ event.device = &nesvnic->nesibdev->ibdev;
+ if (!event.device)
+ goto stop_timer;
+ event.event = nesvnic->delayed_event;
+ event.element.port_num = nesvnic->logical_port + 1;
+ ib_dispatch_event(&event);
+ }
+
+stop_timer:
+ nesvnic->event_timer.function = NULL;
+}
+
+
void nes_port_ibevent(struct nes_vnic *nesvnic)
{
struct nes_ib_device *nesibdev = nesvnic->nesibdev;
@@ -3944,7 +3968,18 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
event.device = &nesibdev->ibdev;
event.element.port_num = nesvnic->logical_port + 1;
event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
- ib_dispatch_event(&event);
+
+ if (!nesvnic->event_timer.function) {
+ ib_dispatch_event(&event);
+ nesvnic->last_dispatched_event = event.event;
+ nesvnic->event_timer.function = nes_handle_delayed_event;
+ nesvnic->event_timer.data = (unsigned long) nesvnic;
+ nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
+ add_timer(&nesvnic->event_timer);
+ } else {
+ mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
+ }
+ nesvnic->delayed_event = event.event;
}
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d02d4a..73225eee3cc6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
void (*f_sdma_hw_start_up)(struct qib_pportdata *);
void (*f_sdma_init_early)(struct qib_pportdata *);
void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
- void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+ void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
u32 (*f_hdrqempty)(struct qib_ctxtdata *);
u64 (*f_portcntr)(struct qib_pportdata *, u32);
u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f98..5246aa486bbe 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ (cq->notify == IB_CQ_SOLICITED &&
+ (solicited || entry->status != IB_WC_SUCCESS))) {
cq->notify = IB_CQ_NONE;
cq->triggered++;
/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb1..23e584f4c36c 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
*/
#define QIB_PIO_MAXIBHDR 128
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
struct qlogic_ib_stats qib_stats;
const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
* Returns 1 if error was a CRC, else 0.
* Needed for some chip's synthesized error counters.
*/
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
- u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
- struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+ u32 ctxt, u32 eflags, u32 l, u32 etail,
+ __le32 *rhf_addr, struct qib_message_header *rhdr)
{
u32 ret = 0;
if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
ret = 1;
+ else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+ /* For TIDERR and RC QPs, preemptively schedule a NAK */
+ struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+ struct qib_other_headers *ohdr = NULL;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct qib_qp *qp = NULL;
+ u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ u16 lid = be16_to_cpu(hdr->lrh[1]);
+ int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ u32 qp_num;
+ u32 opcode;
+ u32 psn;
+ int diff;
+ unsigned long flags;
+
+ /* Sanity check packet */
+ if (tlen < 24)
+ goto drop;
+
+ if (lid < QIB_MULTICAST_LID_BASE) {
+ lid &= ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid != ppd->lid))
+ goto drop;
+ }
+
+ /* Check for GRH */
+ if (lnh == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == QIB_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else
+ goto drop;
+
+ /* Get opcode and PSN from packet */
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ opcode >>= 24;
+ psn = be32_to_cpu(ohdr->bth[2]);
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ if (qp_num != QIB_MULTICAST_QPN) {
+ int ruc_res;
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+
+ /*
+ * Handle only RC QPs - for other QP types drop error
+ * packet.
+ */
+ spin_lock(&qp->r_lock);
+
+ /* Check for valid receive state. */
+ if (!(ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto unlock;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ spin_lock_irqsave(&qp->s_lock, flags);
+ ruc_res =
+ qib_ruc_check_hdr(
+ ibp, hdr,
+ lnh == QIB_LRH_GRH,
+ qp,
+ be32_to_cpu(ohdr->bth[0]));
+ if (ruc_res) {
+ spin_unlock_irqrestore(&qp->s_lock,
+ flags);
+ goto unlock;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Only deal with RDMA Writes for now */
+ if (opcode <
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+ diff = qib_cmp24(psn, qp->r_psn);
+ if (!qp->r_nak_state && diff >= 0) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state =
+ IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence
+ * NAK until all packets
+ * in the receive queue have
+ * been processed.
+ * Otherwise, we end up
+ * propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |=
+ QIB_R_RSP_NAK;
+ atomic_inc(
+ &qp->refcount);
+ list_add_tail(
+ &qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ } /* Out of sequence NAK */
+ } /* QP Request NAKs */
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ case IB_QPT_UC:
+ default:
+ /* For now don't handle any other QP types */
+ break;
+ }
+
+unlock:
+ spin_unlock(&qp->r_lock);
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } /* Unicast QP */
+ } /* Valid packet with TIDErr */
+
+drop:
return ret;
}
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}
- for (last = 0, i = 1; !last && i <= 64; i += !last) {
+ for (last = 0, i = 1; !last; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
* packets; only qibhdrerr should be set.
*/
if (unlikely(eflags))
- crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
etail, rhf_addr, hdr);
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
l += rsize;
if (l >= maxcnt)
l = 0;
+ if (i == QIB_MAX_PKT_RECV)
+ last = 1;
+
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
if (dd->flags & QIB_NODMA_RTAIL) {
u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
*/
lval = l;
if (!last && !(i & 0xf)) {
- dd->f_update_usrhead(rcd, lval, updegr, etail);
+ dd->f_update_usrhead(rcd, lval, updegr, etail, i);
updegr = 0;
}
}
@@ -444,7 +585,7 @@ bail:
* if no packets were processed.
*/
lval = (u64)rcd->head | dd->rhdrhead_intr_off;
- dd->f_update_usrhead(rcd, lval, updegr, etail);
+ dd->f_update_usrhead(rcd, lval, updegr, etail, i);
return crcs;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971aff1f..75bfad16c114 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
- unsigned cused = 0, cfree = 0;
+ unsigned cused = 0, cfree = 0, pusable = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
usable(dd->pport + port - 1))
- dusable = 1;
+ pusable = 1;
else
for (i = 0; i < dd->num_pports; i++)
if (usable(dd->pport + i))
- dusable++;
- if (!dusable)
+ pusable++;
+ if (!pusable)
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
cused++;
else
cfree++;
- if (cfree && cused < inuse) {
+ if (pusable && cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb9537..774dea897e9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
}
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e7392..de799f17cb9e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work(&ppd->cpspec->autoneg_work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
shutdown_7220_relock_poll(ppd->dd);
val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -2297,7 +2296,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
nchipctxts = qib_read_kreg32(dd, kr_portcnt);
dd->cspec->numctxts = nchipctxts;
if (qib_n_krcv_queues > 1) {
- dd->qpn_mask = 0x3f;
+ dd->qpn_mask = 0x3e;
dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
if (dd->first_user_ctxt > nchipctxts)
dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2702,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
}
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
toggle_7220_rclkrls(ppd->dd);
/* 2 msec is minimum length of a poll cycle */
- schedule_delayed_work(&ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
+ queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b5335..b01809a82cb0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
@@ -111,6 +114,21 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -314,7 +332,7 @@ MODULE_PARM_DESC(txselect, \
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
/*
- * Per-context kernel registers. Acess only with qib_read_kreg_ctxt()
+ * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
* or qib_write_kreg_ctxt()
*/
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
#define H1_FORCE_VAL 8
@@ -1677,6 +1696,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
+ if (!ppd->dd->cspec->r1)
+ serdes_7322_los_enable(ppd, 0);
} else if (ppd->cpspec->qdr_reforce &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
(ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1713,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
adj_tx_serdes(ppd);
- if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
- ppd->cpspec->qdr_dfe_on = 1;
- ppd->cpspec->qdr_dfe_time = 0;
- /* On link down, reenable QDR adaptation */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 :
- QDR_STATIC_ADAPT_DOWN);
+ if (ibclt != IB_7322_LT_STATE_LINKUP) {
+ u8 ltstate = qib_7322_phys_portstate(ibcst);
+ u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+ LinkTrainingState);
+ if (!ppd->dd->cspec->r1 &&
+ pibclt == IB_7322_LT_STATE_LINKUP &&
+ ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+ /* If the link went down (but not into recovery),
+ * turn LOS back on */
+ serdes_7322_los_enable(ppd, 1);
+ if (!ppd->cpspec->qdr_dfe_on &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+ ppd->cpspec->qdr_dfe_on = 1;
+ ppd->cpspec->qdr_dfe_time = 0;
+ /* On link down, reenable QDR adaptation */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 :
+ QDR_STATIC_ADAPT_DOWN);
+ printk(KERN_INFO QIB_DRV_NAME
+ " IB%u:%u re-enabled QDR adaptation "
+ "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+ }
}
}
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
/*
* This is per-pport error handling.
* will likely get it's own MSIx interrupt (one for each port,
@@ -2323,6 +2363,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+ /* Hold the link state machine for mezz boards */
+ if (IS_QMH(dd) || IS_QME(dd))
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
/* Also enable IBSTATUSCHG interrupt. */
val = qib_read_kreg_port(ppd, krp_errmask);
qib_write_kreg_port(ppd, krp_errmask,
@@ -2348,10 +2393,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
if (ppd->dd->cspec->r1)
- cancel_delayed_work(&ppd->cpspec->ipg_work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
ppd->cpspec->chase_end = 0;
if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2648,7 +2692,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
if (!(pins & mask)) {
++handled;
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
}
}
}
@@ -2785,7 +2829,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
ctxtrbits &= ~rmask;
if (dd->rcd[i]) {
qib_kreceive(dd->rcd[i], NULL, &npkts);
- adjust_rcv_timeout(dd->rcd[i], npkts);
}
}
rmask <<= 1;
@@ -2835,7 +2878,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
(1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
qib_kreceive(rcd, NULL, &npkts);
- adjust_rcv_timeout(rcd, npkts);
return IRQ_HANDLED;
}
@@ -3157,6 +3199,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
case BOARD_QME7342:
n = "InfiniPath_QME7342";
break;
+ case 8:
+ n = "InfiniPath_QME7362";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
case 15:
n = "InfiniPath_QLE7342_TEST";
dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3521,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
dd->cspec->numctxts = nchipctxts;
if (qib_n_krcv_queues > 1 && dd->num_pports) {
- /*
- * Set the mask for which bits from the QPN are used
- * to select a context number.
- */
- dd->qpn_mask = 0x3f;
dd->first_user_ctxt = NUM_IB_PORTS +
(qib_n_krcv_queues - 1) * dd->num_pports;
if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3571,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
/* kr_rcvegrcnt changes based on the number of contexts enabled */
dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
- dd->num_pports > 1 ? 1024U : 2048U);
+ if (qib_rcvhdrcnt)
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+ else
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->num_pports > 1 ? 1024U : 2048U);
}
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4046,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
}
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd)
+ u32 updegr, u32 egrhd, u32 npkts)
{
+ /*
+ * Need to write timeout register before updating rcvhdrhead to ensure
+ * that the timer is enabled on reception of a packet.
+ */
+ if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+ adjust_rcv_timeout(rcd, npkts);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
@@ -4926,8 +4976,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
qib_7322_mini_pcs_reset(ppd);
/* 2 msec is minimum length of a poll cycle */
- schedule_delayed_work(&ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
+ queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
}
/*
@@ -5057,7 +5107,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
ib_free_send_mad(send_buf);
retry:
delay = 2 << ppd->cpspec->ipg_tries;
- schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+ queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+ msecs_to_jiffies(delay));
}
/*
@@ -5522,7 +5573,7 @@ static void qsfp_7322_event(struct work_struct *work)
u64 now = get_jiffies_64();
if (time_after64(now, pwrup))
break;
- msleep(1);
+ msleep(20);
}
ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
/*
@@ -5579,6 +5630,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
u32 pidx, unit, port, deflt, h1;
unsigned long val;
int any = 0, seth1;
+ int txdds_size;
str = txselect_list;
@@ -5587,6 +5639,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd->pport[pidx].cpspec->no_eep = deflt;
+ txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+ if (IS_QME(dd) || IS_QMH(dd))
+ txdds_size += TXDDS_MFG_SZ;
+
while (*nxt && nxt[1]) {
str = ++nxt;
unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5665,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
;
continue;
}
- if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+ if (val >= txdds_size)
continue;
seth1 = 0;
h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5633,6 +5689,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
ppd->cpspec->h1_val = h1;
/* now change the IBC and serdes, overriding generic */
init_txdds_table(ppd, 1);
+ /* Re-enable the physical state machine on mezz boards
+ * now that the correct settings have been set. */
+ if (IS_QMH(dd) || IS_QME(dd))
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
any++;
}
if (*nxt == '\n')
@@ -5661,10 +5722,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
return -ENOSPC;
}
val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ TXDDS_MFG_SZ)) {
printk(KERN_INFO QIB_DRV_NAME
"txselect_values must start with a number < %d\n",
- TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
return -EINVAL;
}
strcpy(txselect_list, str);
@@ -5810,7 +5872,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
unsigned n, regno;
unsigned long flags;
- if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+ if (dd->n_krcv_queues < 2 ||
+ !dd->pport[pidx].link_speed_supported)
continue;
ppd = &dd->pport[pidx];
@@ -6097,8 +6160,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
ppd++;
}
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rcvhdrentsize = qib_rcvhdrentsize ?
+ qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = qib_rcvhdrsize ?
+ qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6560,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
/* make sure we see an updated copy next time around */
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
sleeps++;
- msleep(1);
+ msleep(20);
}
switch (which) {
@@ -6993,6 +7058,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
{ 0, 1, 0, 12 }, /* QMH7342 backplane settings */
};
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 0 }, /* QME7342 mfg settings */
+ { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
+};
+
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
unsigned atten)
{
@@ -7066,6 +7137,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
*sdr_dds = &txdds_extra_sdr[idx];
*ddr_dds = &txdds_extra_ddr[idx];
*qdr_dds = &txdds_extra_qdr[idx];
+ } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+ ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ TXDDS_MFG_SZ)) {
+ idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ printk(KERN_INFO QIB_DRV_NAME
+ " IB%u:%u use idx %u into txdds_mfg\n",
+ ppd->dd->unit, ppd->port, idx);
+ *sdr_dds = &txdds_extra_mfg[idx];
+ *ddr_dds = &txdds_extra_mfg[idx];
+ *qdr_dds = &txdds_extra_mfg[idx];
} else {
/* this shouldn't happen, it's range checked */
*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7291,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
}
}
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+ u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+ ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+ if (enable)
+ data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ else
+ data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
static int serdes_7322_init(struct qib_pportdata *ppd)
{
- u64 data;
+ int ret = 0;
+ if (ppd->dd->cspec->r1)
+ ret = serdes_7322_init_old(ppd);
+ else
+ ret = serdes_7322_init_new(ppd);
+ return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
u32 le_val;
/*
@@ -7270,11 +7372,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
- data = qib_read_kreg_port(ppd, krp_serdesctrl);
- /* Turn off IB latency mode */
- data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
- qib_write_kreg_port(ppd, krp_serdesctrl, data |
- SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+ serdes_7322_los_enable(ppd, 1);
/* rxbistena; set 0 to avoid effects of it switch later */
ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7412,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
return 0;
}
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+ u64 tstart;
+ u32 le_val, rxcaldone;
+ int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
+ /* Clear cmode-override, may be set from older driver */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+ /* ensure no tx overrides from earlier driver loads */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
+ /* START OF LSI SUGGESTED SERDES BRINGUP */
+ /* Reset - Calibration Setup */
+ /* Stop DFE adaptation */
+ ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+ /* Disable LE1 */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+ /* Disable autoadapt for LE1 */
+ ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+ /* Disable LE2 */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+ /* Disable VGA */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+ /* Disable AFE Offset Cancel */
+ ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+ /* Disable Timing Loop */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+ /* Disable Frequency Loop */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+ /* Disable Baseline Wander Correction */
+ ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+ /* Disable RX Calibration */
+ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+ /* Disable RX Offset Calibration */
+ ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+ /* Select BB CDR */
+ ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+ /* CDR Step Size */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+ /* Enable phase Calibration */
+ ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+ /* DFE Bandwidth [2:14-12] */
+ ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+ /* DFE Config (4 taps only) */
+ ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+ /* Gain Loop Bandwidth */
+ if (!ppd->dd->cspec->r1) {
+ ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+ ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+ } else {
+ ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+ }
+ /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
+ /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
+ /* Data Rate Select [5:7-6] (leave as default) */
+ /* RX Parallel Word Width [3:10-8] (leave as default) */
+
+ /* RX RESET */
+ /* Single- or Multi-channel reset */
+ /* RX Analog reset */
+ /* RX Digital reset */
+ ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+ msleep(20);
+ /* RX Analog reset */
+ ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+ msleep(20);
+ /* RX Digital reset */
+ ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+ msleep(20);
+
+ /* setup LoS params; these are subsystem, so chan == 5 */
+ /* LoS filter threshold_count on, ch 0-3, set to 8 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+ /* LoS filter threshold_count off, ch 0-3, set to 4 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+ /* LoS filter select enabled */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+ /* LoS target data: SDR=4, DDR=2, QDR=1 */
+ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+ /* Turn on LOS on initial SERDES init */
+ serdes_7322_los_enable(ppd, 1);
+ /* FLoop LOS gate: PPM filter enabled */
+ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+ /* RX LATCH CALIBRATION */
+ /* Enable Eyefinder Phase Calibration latch */
+ ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+ /* Enable RX Offset Calibration latch */
+ ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+ msleep(20);
+ /* Start Calibration */
+ ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+ tstart = get_jiffies_64();
+ while (chan_done &&
+ !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+ msleep(20);
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+ (chan + (chan >> 1)),
+ 25, 0, 0);
+ if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+ (~chan_done & (1 << chan)) == 0)
+ chan_done &= ~(1 << chan);
+ }
+ }
+ if (chan_done) {
+ printk(KERN_INFO QIB_DRV_NAME
+ " Serdes %d calibration not done after .5 sec: 0x%x\n",
+ IBSD(ppd->hw_pidx), chan_done);
+ } else {
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+ (chan + (chan >> 1)),
+ 25, 0, 0);
+ if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+ printk(KERN_INFO QIB_DRV_NAME
+ " Serdes %d chan %d calibration "
+ "failed\n", IBSD(ppd->hw_pidx), chan);
+ }
+ }
+
+ /* Turn off Calibration */
+ ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+ msleep(20);
+
+ /* BRING RX UP */
+ /* Set LE2 value (May be overridden in qsfp_7322_event) */
+ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+ /* Set LE2 Loop bandwidth */
+ ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+ /* Enable LE2 */
+ ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+ msleep(20);
+ /* Enable H0 only */
+ ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+ /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+ ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+ /* Enable VGA */
+ ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+ msleep(20);
+ /* Set Frequency Loop Bandwidth */
+ ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+ /* Enable Frequency Loop */
+ ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+ /* Set Timing Loop Bandwidth */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+ /* Enable Timing Loop */
+ ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+ msleep(50);
+ /* Enable DFE
+ * Set receive adaptation mode. SDR and DDR adaptation are
+ * always on, and QDR is initially enabled; later disabled.
+ */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+ ppd->cpspec->qdr_dfe_on = 1;
+ /* Disable LE1 */
+ ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+ /* Disable auto adapt for LE1 */
+ ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+ msleep(20);
+ /* Enable AFE Offset Cancel */
+ ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+ /* Enable Baseline Wander Correction */
+ ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+ /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+ /* VGA output common mode */
+ ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+ return 0;
+}
+
/* start adjust QMH serdes parameters */
static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b503936043..ffefb78b8949 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
-struct workqueue_struct *qib_wq;
struct workqueue_struct *qib_cq_wq;
static void verify_interrupt(unsigned long);
@@ -92,9 +91,11 @@ unsigned long *qib_cpulist;
/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
- if (!qib_cfgctxts)
+ if (!qib_cfgctxts) {
dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
- else if (qib_cfgctxts < dd->num_pports)
+ if (dd->cfgctxts > dd->ctxtcnt)
+ dd->cfgctxts = dd->ctxtcnt;
+ } else if (qib_cfgctxts < dd->num_pports)
dd->cfgctxts = dd->ctxtcnt;
else if (qib_cfgctxts <= dd->ctxtcnt)
dd->cfgctxts = qib_cfgctxts;
@@ -268,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
struct page **pages;
dma_addr_t *addrs;
- pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
if (!pages) {
qib_dev_err(dd, "failed to allocate shadow page * "
"array, no expected sends!\n");
goto bail;
}
- addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+ addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
if (!addrs) {
qib_dev_err(dd, "failed to allocate shadow dma handle "
"array, no expected sends!\n");
goto bail_free;
}
- memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
- memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
dd->pageshadow = pages;
dd->physshadow = addrs;
return;
@@ -1045,24 +1043,10 @@ static int __init qlogic_ib_init(void)
if (ret)
goto bail;
- /*
- * We create our own workqueue mainly because we want to be
- * able to flush it when devices are being removed. We can't
- * use schedule_work()/flush_scheduled_work() because both
- * unregister_netdev() and linkwatch_event take the rtnl lock,
- * so flush_scheduled_work() can deadlock during device
- * removal.
- */
- qib_wq = create_workqueue("qib");
- if (!qib_wq) {
- ret = -ENOMEM;
- goto bail_dev;
- }
-
qib_cq_wq = create_singlethread_workqueue("qib_cq");
if (!qib_cq_wq) {
ret = -ENOMEM;
- goto bail_wq;
+ goto bail_dev;
}
/*
@@ -1092,8 +1076,6 @@ bail_unit:
idr_destroy(&qib_unit_table);
bail_cq_wq:
destroy_workqueue(qib_cq_wq);
-bail_wq:
- destroy_workqueue(qib_wq);
bail_dev:
qib_dev_cleanup();
bail:
@@ -1117,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
pci_unregister_driver(&qib_driver);
- destroy_workqueue(qib_wq);
destroy_workqueue(qib_cq_wq);
qib_cpulist_count = 0;
@@ -1290,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
if (qib_mini_init || initfail || ret) {
qib_stop_timers(dd);
- flush_scheduled_work();
+ flush_workqueue(ib_wq);
for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd->f_quiet_serdes(dd->pport + pidx);
if (qib_mini_init)
@@ -1339,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
qib_stop_timers(dd);
- /* wait until all of our (qsfp) schedule_work() calls complete */
- flush_scheduled_work();
+ /* wait until all of our (qsfp) queue_work() calls complete */
+ flush_workqueue(ib_wq);
ret = qibfs_remove(dd);
if (ret)
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828a106..a693c56ec8a6 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
/* start a 75msec timer to clear symbol errors */
mod_timer(&ppd->symerr_clear_timer,
msecs_to_jiffies(75));
- } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
/* active, but not active deferred */
qib_hol_up(ppd); /* useful only for 6120 now */
*ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d57..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- int ret = 0;
unsigned long flags;
/*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
if (!dev->dma_mr)
goto bail;
atomic_inc(&dev->dma_mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
isge->mr = dev->dma_mr;
isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc))
goto bail;
+ atomic_inc(&mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off/QIB_SEGSZ;
+ n = entries_spanned_by_off%QIB_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
}
}
- atomic_inc(&mr->refcount);
isge->mr = mr;
isge->vaddr = mr->map[m]->segs[n].vaddr + off;
isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
isge->m = m;
isge->n = n;
ok:
- ret = 1;
+ return 1;
bail:
spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
+ return 0;
}
/**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- int ret = 0;
unsigned long flags;
/*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
if (!dev->dma_mr)
goto bail;
atomic_inc(&dev->dma_mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
sge->mr = dev->dma_mr;
sge->vaddr = (void *) vaddr;
sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
+ atomic_inc(&mr->refcount);
+ spin_unlock_irqrestore(&rkt->lock, flags);
off += mr->offset;
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off/QIB_SEGSZ;
+ n = entries_spanned_by_off%QIB_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
}
}
- atomic_inc(&mr->refcount);
sge->mr = mr;
sge->vaddr = mr->map[m]->segs[n].vaddr + off;
sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
sge->m = m;
sge->n = n;
ok:
- ret = 1;
+ return 1;
bail:
spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f0..5ad224e4a38b 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lid = be16_to_cpu(pip->lid);
/* Must be a valid unicast LID address. */
if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
- goto err;
- if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
if (ppd->lid != lid)
qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
msl = pip->neighbormtu_mastersmsl & 0xF;
/* Must be a valid unicast LID address. */
if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
- goto err;
- if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
spin_lock_irqsave(&ibp->lock, flags);
if (ibp->sm_ah) {
if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (lwe == 0xFF)
lwe = ppd->link_width_supported;
else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
- goto err;
- set_link_width_enabled(ppd, lwe);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (lwe != ppd->link_width_enabled)
+ set_link_width_enabled(ppd, lwe);
}
lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (lse == 15)
lse = ppd->link_speed_supported;
else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
- goto err;
- set_link_speed_enabled(ppd, lse);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else if (lse != ppd->link_speed_enabled)
+ set_link_speed_enabled(ppd, lse);
}
/* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
IB_LINKINITCMD_POLL);
break;
default:
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
}
ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
if (mtu == -1)
- goto err;
- qib_set_mtu(ppd, mtu);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ qib_set_mtu(ppd, mtu);
/* Set operational VLs */
vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
if (vls) {
if (vls > ppd->vls_supported)
- goto err;
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
}
if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
ore = pip->localphyerrors_overrunerrors;
if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
if (set_overrunthreshold(ppd, (ore & 0xF)))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
state = pip->linkspeed_portstate & 0xF;
lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
/*
* Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lstate = QIB_IB_LINKDOWN;
else if (lstate == 3)
lstate = QIB_IB_LINKDOWN_DISABLE;
- else
- goto err;
+ else {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ break;
+ }
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags &= ~QIBL_LINKV;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
break;
default:
- /* XXX We have already partially updated our state! */
- goto err;
+ smp->status |= IB_SMP_INVALID_FIELD;
}
ret = subn_get_portinfo(smp, ibdev, port);
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f6385d..08944e2ee334 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
/* Fast memory region */
struct qib_fmr {
struct ib_fmr ibfmr;
- u8 page_shift;
struct qib_mregion mr; /* must be last */
};
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
goto bail;
}
mr->mr.mapsz = m;
+ mr->mr.page_shift = 0;
mr->mr.max_segs = count;
/*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
+ if (is_power_of_2(umem->page_size))
+ mr->mr.page_shift = ilog2(umem->page_size);
m = 0;
n = 0;
list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
fmr->mr.offset = 0;
fmr->mr.access_flags = mr_access_flags;
fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->page_shift = fmr_attr->page_shift;
+ fmr->mr.page_shift = fmr_attr->page_shift;
atomic_set(&fmr->mr.refcount, 0);
ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
spin_lock_irqsave(&rkt->lock, flags);
fmr->mr.user_base = iova;
fmr->mr.iova = iova;
- ps = 1 << fmr->page_shift;
+ ps = 1 << fmr->mr.page_shift;
fmr->mr.length = list_len * ps;
m = 0;
n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851d2ded..e16751f8639e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
struct qpn_map *map, unsigned off,
- unsigned r)
+ unsigned n)
{
if (qpt->mask) {
off++;
- if ((off & qpt->mask) >> 1 != r)
- off = ((off & qpt->mask) ?
- (off | qpt->mask) + 1 : off) | (r << 1);
+ if (((off & qpt->mask) >> 1) >= n)
+ off = (off | qpt->mask) + 2;
} else
off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
u32 i, offset, max_scan, qpn;
struct qpn_map *map;
u32 ret;
- int r;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
goto bail;
}
- r = smp_processor_id();
- if (r >= dd->n_krcv_queues)
- r %= dd->n_krcv_queues;
- qpn = qpt->last + 1;
+ qpn = qpt->last + 2;
if (qpn >= QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
- qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
- (r << 1);
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+ qpn = (qpn | qpt->mask) + 2;
offset = qpn & BITS_PER_PAGE_MASK;
map = &qpt->map[qpn / BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
ret = qpn;
goto bail;
}
- offset = find_next_offset(qpt, map, offset, r);
+ offset = find_next_offset(qpt, map, offset,
+ dd->n_krcv_queues);
qpn = mk_qpn(qpt, map, offset);
/*
* This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
if (qpt->nmaps == QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
- offset = qpt->mask ? (r << 1) : 0;
+ offset = 0;
} else if (map < &qpt->map[qpt->nmaps]) {
++map;
- offset = qpt->mask ? (r << 1) : 0;
+ offset = 0;
} else {
map = &qpt->map[0];
- offset = qpt->mask ? (r << 1) : 2;
+ offset = 2;
}
qpn = mk_qpn(qpt, map, offset);
}
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
+
+ if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
}
qp->ibqp.qp_num = err;
qp->port_num = init_attr->port_num;
- qp->processor_id = smp_processor_id();
qib_reset_qp(qp, init_attr->qp_type);
break;
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604b691d..3374a52232c1 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
goto bail;
/* We see a module, but it may be unwise to look yet. Just schedule */
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
bail:
return;
}
@@ -493,10 +493,9 @@ bail:
void qib_qsfp_deinit(struct qib_qsfp_data *qd)
{
/*
- * There is nothing to do here for now. our
- * work is scheduled with schedule_work(), and
- * flush_scheduled_work() from remove_one will
- * block until all work ssetup with schedule_work()
+ * There is nothing to do here for now. Our work is scheduled
+ * with queue_work(), and flush_workqueue() from remove_one
+ * will block until all work set up with queue_work()
* completes.
*/
}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb7157793..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+ (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
@@ -1407,6 +1408,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
struct qib_ctxtdata *rcd)
{
struct qib_swqe *wqe;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
enum ib_wc_status status;
unsigned long flags;
int diff;
@@ -1414,7 +1416,32 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
u32 aeth;
u64 val;
+ if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+ /*
+ * If ACK'd PSN on SDMA busy list try to make progress to
+ * reclaim SDMA credits.
+ */
+ if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+ /*
+ * If send tasklet not running attempt to progress
+ * SDMA queue.
+ */
+ if (!(qp->s_flags & QIB_S_BUSY)) {
+ /* Acquire SDMA Lock */
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /* Invoke sdma make progress */
+ qib_sdma_make_progress(ppd);
+ /* Release SDMA Lock */
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+ }
+
spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2a1f85..4a51fd1e9cb7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
- /* Get the number of bytes the message was padded by. */
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
+ */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4))) {
- /* Drop incomplete packets. */
- ibp->n_pkt_drops++;
- goto bail;
- }
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+
tlen -= hdrsize + pad + 4;
/*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
*/
if (qp->ibqp.qp_num) {
if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE)) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ hdr->lrh[3] == IB_LID_PERMISSIVE))
+ goto drop;
if (qp->ibqp.qp_num > 1) {
u16 pkey1, pkey2;
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
- goto bail;
+ return;
}
}
if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
src_qp, qp->ibqp.qp_num,
hdr->lrh[3], hdr->lrh[1]);
- goto bail;
+ return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+ goto drop;
} else {
struct ib_smp *smp;
/* Drop invalid MAD packets (see 13.5.3.1). */
- if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+ goto drop;
smp = (struct ib_smp *) data;
if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ goto drop;
}
/*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- hdrsize += sizeof(u32);
+ tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
- } else {
- ibp->n_pkt_drops++;
- goto bail;
- }
+ } else
+ goto drop;
/*
* A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= QIB_R_REUSE_SGE;
- ibp->n_pkt_drops++;
- return;
+ goto drop;
}
if (has_grh) {
qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06b5e85..66208bcd7c13 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
kmem_cache_free(pq->pkt_slab, pkt);
}
+ INIT_LIST_HEAD(list);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c1273225..95e5b47223b3 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@ struct qib_mregion {
int access_flags;
u32 max_segs; /* number of qib_segs in all the arrays */
u32 mapsz; /* size of the map array */
+ u8 page_shift; /* 0 - non-uniform/non-power-of-2 sizes */
atomic_t refcount;
struct qib_segarray *map[0]; /* the segments */
};
@@ -435,7 +436,6 @@ struct qib_qp {
spinlock_t r_lock; /* used for APM */
spinlock_t s_lock;
atomic_t s_dma_busy;
- unsigned processor_id; /* Processor ID QP is bound to */
u32 s_flags;
u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
!(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}
-extern struct workqueue_struct *qib_wq;
extern struct workqueue_struct *qib_cq_wq;
/*
@@ -813,13 +812,8 @@ extern struct workqueue_struct *qib_cq_wq;
*/
static inline void qib_schedule_send(struct qib_qp *qp)
{
- if (qib_send_ok(qp)) {
- if (qp->processor_id == smp_processor_id())
- queue_work(qib_wq, &qp->s_work);
- else
- queue_work_on(qp->processor_id,
- qib_wq, &qp->s_work);
- }
+ if (qib_send_ok(qp))
+ queue_work(ib_wq, &qp->s_work);
}
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f18..cda8eac55fff 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
config INFINIBAND_IPOIB
tristate "IP-over-InfiniBand"
depends on NETDEVICES && INET && (IPV6 || IPV6=n)
- select INET_LRO
---help---
Support for the IP-over-InfiniBand protocol (IPoIB). This
transports IP packets over InfiniBand so you can use your IB
@@ -25,7 +24,7 @@ config INFINIBAND_IPOIB_CM
unless you limit mtu for these destinations to 2044.
config INFINIBAND_IPOIB_DEBUG
- bool "IP-over-InfiniBand debugging" if EMBEDDED
+ bool "IP-over-InfiniBand debugging" if EXPERT
depends on INFINIBAND_IPOIB
default y
---help---
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fdc..ab97f92fc257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
/* constants */
@@ -100,9 +100,6 @@ enum {
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MAX_LRO_DESCRIPTORS = 8,
- IPOIB_LRO_MAX_AGGR = 64,
-
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
};
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
u16 max_coalesced_frames;
};
-struct ipoib_lro {
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
int hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
-
- struct ipoib_lro lro;
};
struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114dec..93d55806b967 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,15 +352,13 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
int ret;
int i;
- rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+ rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
if (!rx->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
return -ENOMEM;
}
- memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
-
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
@@ -1097,13 +1095,12 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
int ret;
- p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+ p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
if (!p->tx_ring) {
ipoib_warn(priv, "failed to allocate tx ring\n");
ret = -ENOMEM;
goto err_tx;
}
- memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
if (IS_ERR(p->qp)) {
@@ -1480,6 +1477,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ priv->dev->features |= NETIF_F_GRO;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
dev->features |= NETIF_F_TSO;
}
@@ -1520,7 +1518,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
return;
}
- priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+ priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
@@ -1529,7 +1527,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
return;
}
- memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82edd..19f7f5206f78 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
- "LRO aggregated", "LRO flushed",
- "LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
- break;
- }
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ipoib_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, uint64_t *data)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- int index = 0;
-
- /* Get LRO statistics */
- data[index++] = priv->lro.lro_mgr.stats.aggregated;
- data[index++] = priv->lro.lro_mgr.stats.flushed;
- if (priv->lro.lro_mgr.stats.flushed)
- data[index++] = priv->lro.lro_mgr.stats.aggregated /
- priv->lro.lro_mgr.stats.flushed;
- else
- data[index++] = 0;
- data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_rx_csum = ipoib_get_rx_csum,
.set_tso = ipoib_set_tso,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ipoib_set_flags,
- .get_strings = ipoib_get_strings,
- .get_sset_count = ipoib_get_sset_count,
- .get_ethtool_stats = ipoib_get_ethtool_stats,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e4..806d0292dc39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (dev->features & NETIF_F_LRO)
- lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
- else
- netif_receive_skb(skb);
+ napi_gro_receive(&priv->napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
}
if (done < budget) {
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&priv->lro.lro_mgr);
-
napi_complete(napi);
if (unlikely(ib_req_notify_cq(priv->recv_cq,
IB_CQ_NEXT_COMP |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed95..aca3b44f7aed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
- "(default = 64)");
-
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;
@@ -925,13 +916,12 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
goto out;
}
- priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+ priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
- memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
@@ -976,54 +966,6 @@ static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- unsigned int ip_len;
- struct iphdr *iph;
-
- if (unlikely(skb->protocol != htons(ETH_P_IP)))
- return -1;
-
- /*
- * In the future we may add an else clause that verifies the
- * checksum and allows devices which do not calculate checksum
- * to use LRO.
- */
- if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
- return -1;
-
- /* Check for non-TCP packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if IP header and TCP header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
- priv->lro.lro_mgr.max_aggr = lro_max_aggr;
- priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
- priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
- priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
- priv->lro.lro_mgr.features = LRO_F_NAPI;
- priv->lro.lro_mgr.dev = priv->dev;
- priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
static const struct net_device_ops ipoib_netdev_ops = {
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
@@ -1067,8 +1009,6 @@ static void ipoib_setup(struct net_device *dev)
priv->dev = dev;
- ipoib_lro_setup(priv);
-
spin_lock_init(&priv->lock);
mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1158,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
}
- if (lro)
- priv->dev->features |= NETIF_F_LRO;
+ priv->dev->features |= NETIF_F_GRO;
if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
priv->dev->features |= NETIF_F_TSO;
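
The IPoIB hunks above drop the driver-private LRO machinery in favor of the stack's GRO: received skbs go straight to napi_gro_receive() and no explicit flush is needed before napi_complete(). A minimal sketch of the resulting receive flow follows; only napi_gro_receive() and napi_complete() are real kernel APIs here, the other names are illustrative stand-ins for the driver's private data and completion handling.

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... driver-private receive state ... */
};

/* illustrative helper: returns the next completed receive skb, or NULL */
static struct sk_buff *example_next_rx_skb(struct example_priv *priv);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_next_rx_skb(priv)) != NULL) {
		napi_gro_receive(napi, skb);	/* was lro_receive_skb() */
		done++;
	}

	if (done < budget)
		napi_complete(napi);		/* no lro_flush_all() needed */

	return done;
}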
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..83664ed2804f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
wait_for_completion(&target->done);
}
+static bool srp_change_state(struct srp_target_port *target,
+ enum srp_target_state old,
+ enum srp_target_state new)
+{
+ bool changed = false;
+
+ spin_lock_irq(&target->lock);
+ if (target->state == old) {
+ target->state = new;
+ changed = true;
+ }
+ spin_unlock_irq(&target->lock);
+ return changed;
+}
+
static void srp_remove_work(struct work_struct *work)
{
struct srp_target_port *target =
container_of(work, struct srp_target_port, work);
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state != SRP_TARGET_DEAD) {
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
return;
- }
- target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(target->scsi_host->host_lock);
spin_lock(&target->srp_host->target_lock);
list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scsi_sg_count(scmnd), scmnd->sc_data_direction);
}
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+ struct srp_request *req, s32 req_lim_delta)
{
+ unsigned long flags;
+
srp_unmap_data(req->scmnd, target, req);
- list_move_tail(&req->list, &target->free_reqs);
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += req_lim_delta;
+ req->scmnd = NULL;
+ list_add_tail(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
req->scmnd->result = DID_RESET << 16;
req->scmnd->scsi_done(req->scmnd);
- srp_remove_req(target, req);
+ srp_remove_req(target, req, 0);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_qp_attr qp_attr;
- struct srp_request *req, *tmp;
struct ib_wc wc;
- int ret;
+ int i, ret;
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state != SRP_TARGET_LIVE) {
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
return -EAGAIN;
- }
- target->state = SRP_TARGET_CONNECTING;
- spin_unlock_irq(target->scsi_host->host_lock);
srp_disconnect_target(target);
/*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
; /* nothing */
- spin_lock_irq(target->scsi_host->host_lock);
- list_for_each_entry_safe(req, tmp, &target->req_queue, list)
- srp_reset_req(target, req);
- spin_unlock_irq(target->scsi_host->host_lock);
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ if (req->scmnd)
+ srp_reset_req(target, req);
+ }
- target->rx_head = 0;
- target->tx_head = 0;
- target->tx_tail = 0;
+ INIT_LIST_HEAD(&target->free_tx);
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
+ list_add(&target->tx_ring[i]->list, &target->free_tx);
target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret)
goto err;
- spin_lock_irq(target->scsi_host->host_lock);
- if (target->state == SRP_TARGET_CONNECTING) {
- ret = 0;
- target->state = SRP_TARGET_LIVE;
- } else
+ if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
ret = -EAGAIN;
- spin_unlock_irq(target->scsi_host->host_lock);
return ret;
@@ -620,17 +627,20 @@ err:
/*
* We couldn't reconnect, so kill our target port off.
- * However, we have to defer the real removal because we might
- * be in the context of the SCSI error handler now, which
- * would deadlock if we call scsi_remove_host().
+ * However, we have to defer the real removal because we
+ * are in the context of the SCSI error handler now, which
+ * will deadlock if we call scsi_remove_host().
+ *
+ * Schedule our work inside the lock to avoid a race with
+ * the flush_scheduled_work() in srp_remove_one().
*/
- spin_lock_irq(target->scsi_host->host_lock);
+ spin_lock_irq(&target->lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
INIT_WORK(&target->work, srp_remove_work);
- schedule_work(&target->work);
+ queue_work(ib_wq, &target->work);
}
- spin_unlock_irq(target->scsi_host->host_lock);
+ spin_unlock_irq(&target->lock);
return ret;
}
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(dev->mr->rkey);
+ buf->key = cpu_to_be32(target->rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->desc_list[i].va =
cpu_to_be64(ib_sg_dma_address(ibdev, sg));
buf->desc_list[i].key =
- cpu_to_be32(dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->desc_list[i].len = cpu_to_be32(dma_len);
datalen += dma_len;
}
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
buf->table_desc.va =
cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
buf->table_desc.key =
- cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+ cpu_to_be32(target->rkey);
buf->table_desc.len =
cpu_to_be32(count * sizeof (struct srp_direct_buf));
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
}
/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+ enum srp_iu_type iu_type)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&target->lock, flags);
+ list_add(&iu->list, &target->free_tx);
+ if (iu_type != SRP_IU_RSP)
+ ++target->req_lim;
+ spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
*
* Note:
* An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
srp_send_completion(target->send_cq, target);
- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ if (list_empty(&target->free_tx))
return NULL;
/* Initiator responses to target requests do not consume credits */
- if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
- ++target->zero_req_lim;
- return NULL;
+ if (iu_type != SRP_IU_RSP) {
+ if (target->req_lim <= rsv) {
+ ++target->zero_req_lim;
+ return NULL;
+ }
+
+ --target->req_lim;
}
- iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
- iu->type = iu_type;
+ iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+ list_del(&iu->list);
return iu;
}
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
- struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+ struct srp_iu *iu, int len)
{
struct ib_sge list;
struct ib_send_wr wr, *bad_wr;
- int ret = 0;
list.addr = iu->dma;
list.length = len;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
- wr.wr_id = target->tx_head & SRP_SQ_MASK;
+ wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(target->qp, &wr, &bad_wr);
-
- if (!ret) {
- ++target->tx_head;
- if (iu->type != SRP_IU_RSP)
- --target->req_lim;
- }
-
- return ret;
+ return ib_post_send(target->qp, &wr, &bad_wr);
}
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
- unsigned long flags;
- struct srp_iu *iu;
- struct ib_sge list;
struct ib_recv_wr wr, *bad_wr;
- unsigned int next;
- int ret;
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
- next = target->rx_head & SRP_RQ_MASK;
- wr.wr_id = next;
- iu = target->rx_ring[next];
+ struct ib_sge list;
list.addr = iu->dma;
list.length = iu->size;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
+ list.lkey = target->lkey;
wr.next = NULL;
+ wr.wr_id = (uintptr_t) iu;
wr.sg_list = &list;
wr.num_sge = 1;
- ret = ib_post_recv(target->qp, &wr, &bad_wr);
- if (!ret)
- ++target->rx_head;
-
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
- return ret;
+ return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
struct srp_request *req;
struct scsi_cmnd *scmnd;
unsigned long flags;
- s32 delta;
-
- delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
- target->req_lim += delta;
-
- req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
- if (be32_to_cpu(rsp->resp_data_len) < 4)
- req->tsk_status = -1;
- else
- req->tsk_status = rsp->data[3];
- complete(&req->done);
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ target->tsk_mgmt_status = -1;
+ if (be32_to_cpu(rsp->resp_data_len) >= 4)
+ target->tsk_mgmt_status = rsp->data[3];
+ complete(&target->tsk_mgmt_done);
} else {
+ req = &target->req_ring[rsp->tag];
scmnd = req->scmnd;
if (!scmnd)
shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- if (!req->tsk_mgmt) {
- scmnd->host_scribble = (void *) -1L;
- scmnd->scsi_done(scmnd);
-
- srp_remove_req(target, req);
- } else
- req->cmd_done = 1;
+ srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done(scmnd);
}
-
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
void *rsp, int len)
{
- struct ib_device *dev;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
unsigned long flags;
struct srp_iu *iu;
- int err = 1;
-
- dev = target->srp_host->srp_dev->dev;
+ int err;
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ spin_lock_irqsave(&target->lock, flags);
target->req_lim += req_delta;
-
iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+ spin_unlock_irqrestore(&target->lock, flags);
+
if (!iu) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"no IU available to send response\n");
- goto out;
+ return 1;
}
ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
memcpy(iu->buf, rsp, len);
ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
- err = __srp_post_send(target, iu, len);
- if (err)
+ err = srp_post_send(target, iu, len);
+ if (err) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"unable to post response: %d\n", err);
+ srp_put_tx_iu(target, iu, SRP_IU_RSP);
+ }
-out:
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
return err;
}
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
- struct ib_device *dev;
- struct srp_iu *iu;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
int res;
u8 opcode;
- iu = target->rx_ring[wc->wr_id];
-
- dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
DMA_FROM_DEVICE);
- res = srp_post_recv(target);
+ res = srp_post_recv(target, iu);
if (res != 0)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
+ struct srp_iu *iu;
while (ib_poll_cq(cq, 1, &wc) > 0) {
if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
break;
}
- ++target->tx_tail;
+ iu = (struct srp_iu *) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
}
}
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
- void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
- struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_target_port *target = host_to_target(shost);
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
+ unsigned long flags;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,23 +1126,25 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
if (target->state == SRP_TARGET_DEAD ||
target->state == SRP_TARGET_REMOVED) {
scmnd->result = DID_BAD_TARGET << 16;
- done(scmnd);
+ scmnd->scsi_done(scmnd);
return 0;
}
+ spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
- goto err;
+ goto err_unlock;
+
+ req = list_first_entry(&target->free_reqs, struct srp_request, list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&target->lock, flags);
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
- scmnd->scsi_done = done;
scmnd->result = 0;
- scmnd->host_scribble = (void *) (long) req->index;
+ scmnd->host_scribble = (void *) req;
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1156,40 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
req->scmnd = scmnd;
req->cmd = iu;
- req->cmd_done = 0;
- req->tsk_mgmt = NULL;
len = srp_map_data(scmnd, target, req);
if (len < 0) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "Failed to map data\n");
- goto err;
+ goto err_iu;
}
ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- if (__srp_post_send(target, iu, len)) {
+ if (srp_post_send(target, iu, len)) {
shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
goto err_unmap;
}
- list_move_tail(&req->list, &target->req_queue);
-
return 0;
err_unmap:
srp_unmap_data(scmnd, target, req);
+err_iu:
+ srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+ spin_lock_irqsave(&target->lock, flags);
+ list_add(&req->list, &target->free_reqs);
+
+err_unlock:
+ spin_unlock_irqrestore(&target->lock, flags);
+
err:
return SCSI_MLQUEUE_HOST_BUSY;
}
-static DEF_SCSI_QCMD(srp_queuecommand)
-
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;
@@ -1216,6 +1208,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
GFP_KERNEL, DMA_TO_DEVICE);
if (!target->tx_ring[i])
goto err;
+
+ list_add(&target->tx_ring[i]->list, &target->free_tx);
}
return 0;
@@ -1377,7 +1371,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
break;
for (i = 0; i < SRP_RQ_SIZE; i++) {
- target->status = srp_post_recv(target);
+ struct srp_iu *iu = target->rx_ring[i];
+ target->status = srp_post_recv(target, iu);
if (target->status)
break;
}
@@ -1442,25 +1437,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
- struct srp_request *req, u8 func)
+ u64 req_tag, unsigned int lun, u8 func)
{
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- spin_lock_irq(target->scsi_host->host_lock);
-
if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED) {
- req->scmnd->result = DID_BAD_TARGET << 16;
- goto out;
- }
+ target->state == SRP_TARGET_REMOVED)
+ return -1;
- init_completion(&req->done);
+ init_completion(&target->tsk_mgmt_done);
+ spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+ spin_unlock_irq(&target->lock);
+
if (!iu)
- goto out;
+ return -1;
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1468,70 +1462,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
- tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
+ tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
tsk_mgmt->tsk_mgmt_func = func;
- tsk_mgmt->task_tag = req->index;
+ tsk_mgmt->task_tag = req_tag;
ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
- if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
- goto out;
-
- req->tsk_mgmt = iu;
-
- spin_unlock_irq(target->scsi_host->host_lock);
-
- if (!wait_for_completion_timeout(&req->done,
- msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+ if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+ srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
return -1;
+ }
- return 0;
-
-out:
- spin_unlock_irq(target->scsi_host->host_lock);
- return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
- struct scsi_cmnd *scmnd,
- struct srp_request **req)
-{
- if (scmnd->host_scribble == (void *) -1L)
+ if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
+ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
return -1;
- *req = &target->req_ring[(long) scmnd->host_scribble];
-
return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req;
+ struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
int ret = SUCCESS;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (target->qp_in_error)
+ if (!req || target->qp_in_error)
return FAILED;
- if (srp_find_req(target, scmnd, &req))
+ if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK))
return FAILED;
- if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
- return FAILED;
-
- spin_lock_irq(target->scsi_host->host_lock);
- if (req->cmd_done) {
- srp_remove_req(target, req);
- scmnd->scsi_done(scmnd);
- } else if (!req->tsk_status) {
- srp_remove_req(target, req);
- scmnd->result = DID_ABORT << 16;
- } else
- ret = FAILED;
-
- spin_unlock_irq(target->scsi_host->host_lock);
+ if (req->scmnd) {
+ if (!target->tsk_mgmt_status) {
+ srp_remove_req(target, req, 0);
+ scmnd->result = DID_ABORT << 16;
+ } else
+ ret = FAILED;
+ }
return ret;
}
@@ -1539,26 +1509,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req, *tmp;
+ int i;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
if (target->qp_in_error)
return FAILED;
- if (srp_find_req(target, scmnd, &req))
+ if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+ SRP_TSK_LUN_RESET))
return FAILED;
- if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+ if (target->tsk_mgmt_status)
return FAILED;
- if (req->tsk_status)
- return FAILED;
-
- spin_lock_irq(target->scsi_host->host_lock);
- list_for_each_entry_safe(req, tmp, &target->req_queue, list)
- if (req->scmnd->device == scmnd->device)
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ struct srp_request *req = &target->req_ring[i];
+ if (req->scmnd && req->scmnd->device == scmnd->device)
srp_reset_req(target, req);
-
- spin_unlock_irq(target->scsi_host->host_lock);
+ }
return SUCCESS;
}
@@ -1987,9 +1954,12 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
+ target->lkey = host->srp_dev->mr->lkey;
+ target->rkey = host->srp_dev->mr->rkey;
+ spin_lock_init(&target->lock);
+ INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
- INIT_LIST_HEAD(&target->req_queue);
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
target->req_ring[i].index = i;
list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2187,9 @@ static void srp_remove_one(struct ib_device *device)
*/
spin_lock(&host->target_lock);
list_for_each_entry(target, &host->target_list, list) {
- spin_lock_irq(target->scsi_host->host_lock);
+ spin_lock_irq(&target->lock);
target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(target->scsi_host->host_lock);
+ spin_unlock_irq(&target->lock);
}
spin_unlock(&host->target_lock);
@@ -2228,7 +2198,7 @@ static void srp_remove_one(struct ib_device *device)
* started before we marked our target ports as
* removed, and any target port removal tasks.
*/
- flush_scheduled_work();
+ flush_workqueue(ib_wq);
list_for_each_entry_safe(target, tmp_target,
&host->target_list, list) {
@@ -2258,8 +2228,7 @@ static int __init srp_init_module(void)
{
int ret;
- BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
- BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
if (srp_sg_tablesize > 255) {
printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..9dc6fc3fd894 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
- SRP_RQ_MASK = SRP_RQ_SIZE - 1,
SRP_SQ_SIZE = SRP_RQ_SIZE,
- SRP_SQ_MASK = SRP_SQ_SIZE - 1,
SRP_RSP_SQ_SIZE = 1,
SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
- SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
+ SRP_TAG_NO_REQ = ~0U,
+ SRP_TAG_TSK_MGMT = 1U << 31,
SRP_FMR_SIZE = 256,
SRP_FMR_POOL_SIZE = 1024,
@@ -113,15 +112,29 @@ struct srp_request {
struct list_head list;
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- struct srp_iu *tsk_mgmt;
struct ib_pool_fmr *fmr;
- struct completion done;
short index;
- u8 cmd_done;
- u8 tsk_status;
};
struct srp_target_port {
+ /* These are RW in the hot path, and commonly used together */
+ struct list_head free_tx;
+ struct list_head free_reqs;
+ spinlock_t lock;
+ s32 req_lim;
+
+ /* These are read-only in the hot path */
+ struct ib_cq *send_cq ____cacheline_aligned_in_smp;
+ struct ib_cq *recv_cq;
+ struct ib_qp *qp;
+ u32 lkey;
+ u32 rkey;
+ enum srp_target_state state;
+
+ /* Everything above this point is used in the hot path of
+ * command processing. Try to keep them packed into cachelines.
+ */
+
__be64 id_ext;
__be64 ioc_guid;
__be64 service_id;
@@ -138,24 +151,13 @@ struct srp_target_port {
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *recv_cq;
- struct ib_cq *send_cq;
- struct ib_qp *qp;
int max_ti_iu_len;
- s32 req_lim;
int zero_req_lim;
- unsigned rx_head;
- struct srp_iu *rx_ring[SRP_RQ_SIZE];
-
- unsigned tx_head;
- unsigned tx_tail;
struct srp_iu *tx_ring[SRP_SQ_SIZE];
-
- struct list_head free_reqs;
- struct list_head req_queue;
+ struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct work_struct work;
@@ -163,16 +165,18 @@ struct srp_target_port {
struct list_head list;
struct completion done;
int status;
- enum srp_target_state state;
int qp_in_error;
+
+ struct completion tsk_mgmt_done;
+ u8 tsk_mgmt_status;
};
struct srp_iu {
+ struct list_head list;
u64 dma;
void *buf;
size_t size;
enum dma_data_direction direction;
- enum srp_iu_type type;
};
#endif /* IB_SRP_H */
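
The ib_srp.c/ib_srp.h hunks above move the transmit IUs onto a free_tx list guarded by target->lock and make wr_id carry the IU pointer, so the send path becomes a get/post/put-on-failure discipline. A minimal sketch built only from the helpers introduced in this patch; srp_example_send() itself is illustrative and omits the DMA sync and request bookkeeping that the real callers do.

static int srp_example_send(struct srp_target_port *target,
			    const void *buf, int len)
{
	unsigned long flags;
	struct srp_iu *iu;

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);	/* consumes a credit */
	spin_unlock_irqrestore(&target->lock, flags);
	if (!iu)
		return -ENOMEM;

	/* real callers also ib_dma_sync the IU buffer around this copy */
	memcpy(iu->buf, buf, len);

	if (srp_post_send(target, iu, len)) {
		/* not posted: return the IU and its credit to the pool */
		srp_put_tx_iu(target, iu, SRP_IU_CMD);
		return -EIO;
	}

	/* on success the IU comes back via srp_send_completion() -> free_tx */
	return 0;
}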
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 07c2cd43109c..1903c0f5b925 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -6,7 +6,7 @@ menu "Input device support"
depends on !S390
config INPUT
- tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
+ tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
default y
help
Say Y here if you have any input device (mouse, keyboard, tablet,
@@ -67,7 +67,7 @@ config INPUT_SPARSEKMAP
comment "Userland interfaces"
config INPUT_MOUSEDEV
- tristate "Mouse interface" if EMBEDDED
+ tristate "Mouse interface" if EXPERT
default y
help
Say Y here if you want your mouse to be accessible as char devices
@@ -150,7 +150,7 @@ config INPUT_EVBUG
module will be called evbug.
config INPUT_APMPOWER
- tristate "Input Power Event -> APM Bridge" if EMBEDDED
+ tristate "Input Power Event -> APM Bridge" if EXPERT
depends on INPUT && APM_EMULATION
help
Say Y here if you want suspend key events to trigger a user
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc933ec..5b8f59d6c3e8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &gameport_event_list);
- schedule_work(&gameport_event_work);
+ queue_work(system_long_wq, &gameport_event_work);
out:
spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114beac7..11905b6a3023 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
* dev->event_lock held and interrupts disabled.
*/
static void input_pass_event(struct input_dev *dev,
- struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
struct input_handler *handler;
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev,
continue;
handler = handle->handler;
-
- /*
- * If this is the handler that injected this
- * particular event we want to skip it to avoid
- * filters firing again and again.
- */
- if (handler == src_handler)
- continue;
-
if (!handler->filter) {
if (filtered)
break;
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data)
if (test_bit(dev->repeat_key, dev->key) &&
is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+ input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
if (dev->sync) {
/*
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data)
* Otherwise assume that the driver will send
* SYN_REPORT once it's done.
*/
- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
}
if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
static int input_handle_abs_event(struct input_dev *dev,
- struct input_handler *src_handler,
unsigned int code, int *pval)
{
bool is_mt_event;
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev,
/* Flush pending "slot" event */
if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
- input_pass_event(dev, src_handler,
- EV_ABS, ABS_MT_SLOT, dev->slot);
+ input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
}
return INPUT_PASS_TO_HANDLERS;
}
static void input_handle_event(struct input_dev *dev,
- struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev,
case EV_ABS:
if (is_event_supported(code, dev->absbit, ABS_MAX))
- disposition = input_handle_abs_event(dev, src_handler,
- code, &value);
+ disposition = input_handle_abs_event(dev, code, &value);
break;
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev,
dev->event(dev, type, code, value);
if (disposition & INPUT_PASS_TO_HANDLERS)
- input_pass_event(dev, src_handler, type, code, value);
+ input_pass_event(dev, type, code, value);
}
/**
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
add_input_randomness(type, code, value);
- input_handle_event(dev, NULL, type, code, value);
+ input_handle_event(dev, type, code, value);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle,
rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
- input_handle_event(dev, handle->handler,
- type, code, value);
+ input_handle_event(dev, type, code, value);
rcu_read_unlock();
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev)
for (code = 0; code <= KEY_MAX; code++) {
if (is_event_supported(code, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(code, dev->key)) {
- input_pass_event(dev, NULL, EV_KEY, code, 0);
+ input_pass_event(dev, EV_KEY, code, 0);
}
}
- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
}
}
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev,
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+ input_pass_event(dev, EV_KEY, old_keycode, 0);
if (dev->sync)
- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
}
out:
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 5b596165b571..56eb471b5576 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -255,6 +255,16 @@ config JOYSTICK_AMIGA
To compile this driver as a module, choose M here: the
module will be called amijoy.
+config JOYSTICK_AS5011
+ tristate "Austria Microsystem AS5011 joystick"
+ depends on I2C
+ help
+ Say Y here if you have an AS5011 digital joystick connected to your
+ system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called as5011.
+
config JOYSTICK_JOYDUMP
tristate "Gameport data dumper"
select GAMEPORT
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index f3a8cbe2abb6..92dc0de9dfed 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -7,6 +7,7 @@
obj-$(CONFIG_JOYSTICK_A3D) += a3d.o
obj-$(CONFIG_JOYSTICK_ADI) += adi.o
obj-$(CONFIG_JOYSTICK_AMIGA) += amijoy.o
+obj-$(CONFIG_JOYSTICK_AS5011) += as5011.o
obj-$(CONFIG_JOYSTICK_ANALOG) += analog.o
obj-$(CONFIG_JOYSTICK_COBRA) += cobra.o
obj-$(CONFIG_JOYSTICK_DB9) += db9.o
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
new file mode 100644
index 000000000000..f6732b57ca07
--- /dev/null
+++ b/drivers/input/joystick/as5011.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ * Sponsored by ARMadeus Systems
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver for Austria Microsystems joysticks AS5011
+ *
+ * TODO:
+ * - Power on the chip when open() and power down when close()
+ * - Manage power mode
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/as5011.h>
+#include <linux/slab.h>
+
+#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
+#define MODULE_DEVICE_ALIAS "as5011"
+
+MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/* registers */
+#define AS5011_CTRL1 0x76
+#define AS5011_CTRL2 0x75
+#define AS5011_XP 0x43
+#define AS5011_XN 0x44
+#define AS5011_YP 0x53
+#define AS5011_YN 0x54
+#define AS5011_X_REG 0x41
+#define AS5011_Y_REG 0x42
+#define AS5011_X_RES_INT 0x51
+#define AS5011_Y_RES_INT 0x52
+
+/* CTRL1 bits */
+#define AS5011_CTRL1_LP_PULSED 0x80
+#define AS5011_CTRL1_LP_ACTIVE 0x40
+#define AS5011_CTRL1_LP_CONTINUE 0x20
+#define AS5011_CTRL1_INT_WUP_EN 0x10
+#define AS5011_CTRL1_INT_ACT_EN 0x08
+#define AS5011_CTRL1_EXT_CLK_EN 0x04
+#define AS5011_CTRL1_SOFT_RST 0x02
+#define AS5011_CTRL1_DATA_VALID 0x01
+
+/* CTRL2 bits */
+#define AS5011_CTRL2_EXT_SAMPLE_EN 0x08
+#define AS5011_CTRL2_RC_BIAS_ON 0x04
+#define AS5011_CTRL2_INV_SPINNING 0x02
+
+#define AS5011_MAX_AXIS 80
+#define AS5011_MIN_AXIS (-80)
+#define AS5011_FUZZ 8
+#define AS5011_FLAT 40
+
+struct as5011_device {
+ struct input_dev *input_dev;
+ struct i2c_client *i2c_client;
+ unsigned int button_gpio;
+ unsigned int button_irq;
+ unsigned int axis_irq;
+};
+
+static int as5011_i2c_write(struct i2c_client *client,
+ uint8_t aregaddr,
+ uint8_t avalue)
+{
+ uint8_t data[2] = { aregaddr, avalue };
+ struct i2c_msg msg = {
+ client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+ };
+ int error;
+
+ error = i2c_transfer(client->adapter, &msg, 1);
+ return error < 0 ? error : 0;
+}
+
+static int as5011_i2c_read(struct i2c_client *client,
+ uint8_t aregaddr, signed char *value)
+{
+ uint8_t data[2] = { aregaddr };
+ struct i2c_msg msg_set[2] = {
+ { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
+ { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+ };
+ int error;
+
+ error = i2c_transfer(client->adapter, msg_set, 2);
+ if (error < 0)
+ return error;
+
+ *value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
+ return 0;
+}
+
+static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
+{
+ struct as5011_device *as5011 = dev_id;
+ int val = gpio_get_value_cansleep(as5011->button_gpio);
+
+ input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
+ input_sync(as5011->input_dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
+{
+ struct as5011_device *as5011 = dev_id;
+ int error;
+ signed char x, y;
+
+ error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x);
+ if (error < 0)
+ goto out;
+
+ error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y);
+ if (error < 0)
+ goto out;
+
+ input_report_abs(as5011->input_dev, ABS_X, x);
+ input_report_abs(as5011->input_dev, ABS_Y, y);
+ input_sync(as5011->input_dev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+ const struct as5011_platform_data *plat_dat)
+{
+ struct i2c_client *client = as5011->i2c_client;
+ int error;
+ signed char value;
+
+ /* chip soft reset */
+ error = as5011_i2c_write(client, AS5011_CTRL1,
+ AS5011_CTRL1_SOFT_RST);
+ if (error < 0) {
+ dev_err(&client->dev, "Soft reset failed\n");
+ return error;
+ }
+
+ mdelay(10);
+
+ error = as5011_i2c_write(client, AS5011_CTRL1,
+ AS5011_CTRL1_LP_PULSED |
+ AS5011_CTRL1_LP_ACTIVE |
+ AS5011_CTRL1_INT_ACT_EN);
+ if (error < 0) {
+ dev_err(&client->dev, "Power config failed\n");
+ return error;
+ }
+
+ error = as5011_i2c_write(client, AS5011_CTRL2,
+ AS5011_CTRL2_INV_SPINNING);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't invert spinning\n");
+ return error;
+ }
+
+ /* write threshold */
+ error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't write threshold\n");
+ return error;
+ }
+
+ error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't write threshold\n");
+ return error;
+ }
+
+ error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't write threshold\n");
+ return error;
+ }
+
+ error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't write threshold\n");
+ return error;
+ }
+
+ /* to free irq gpio in chip */
+ error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
+ if (error < 0) {
+ dev_err(&client->dev, "Can't read i2c X resolution value\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static int __devinit as5011_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct as5011_platform_data *plat_data;
+ struct as5011_device *as5011;
+ struct input_dev *input_dev;
+ int irq;
+ int error;
+
+ plat_data = client->dev.platform_data;
+ if (!plat_data)
+ return -EINVAL;
+
+ if (!plat_data->axis_irq) {
+ dev_err(&client->dev, "No axis IRQ?\n");
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_PROTOCOL_MANGLING)) {
+ dev_err(&client->dev,
+ "need i2c bus that supports protocol mangling\n");
+ return -ENODEV;
+ }
+
+ as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!as5011 || !input_dev) {
+ dev_err(&client->dev,
+ "Can't allocate memory for device structure\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ as5011->i2c_client = client;
+ as5011->input_dev = input_dev;
+ as5011->button_gpio = plat_data->button_gpio;
+ as5011->axis_irq = plat_data->axis_irq;
+
+ input_dev->name = "Austria Microsystem as5011 joystick";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(BTN_JOYSTICK, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X,
+ AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+ input_set_abs_params(as5011->input_dev, ABS_Y,
+ AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+
+ error = gpio_request(as5011->button_gpio, "AS5011 button");
+ if (error < 0) {
+ dev_err(&client->dev, "Failed to request button gpio\n");
+ goto err_free_mem;
+ }
+
+ irq = gpio_to_irq(as5011->button_gpio);
+ if (irq < 0) {
+ dev_err(&client->dev,
+ "Failed to get irq number for button gpio\n");
+ goto err_free_button_gpio;
+ }
+
+ as5011->button_irq = irq;
+
+ error = request_threaded_irq(as5011->button_irq,
+ NULL, as5011_button_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "as5011_button", as5011);
+ if (error < 0) {
+ dev_err(&client->dev,
+ "Can't allocate button irq %d\n", as5011->button_irq);
+ goto err_free_button_gpio;
+ }
+
+ error = as5011_configure_chip(as5011, plat_data);
+ if (error)
+ goto err_free_button_irq;
+
+ error = request_threaded_irq(as5011->axis_irq, NULL,
+ as5011_axis_interrupt,
+ plat_data->axis_irqflags,
+ "as5011_joystick", as5011);
+ if (error) {
+ dev_err(&client->dev,
+ "Can't allocate axis irq %d\n", plat_data->axis_irq);
+ goto err_free_button_irq;
+ }
+
+ error = input_register_device(as5011->input_dev);
+ if (error) {
+ dev_err(&client->dev, "Failed to register input device\n");
+ goto err_free_axis_irq;
+ }
+
+ i2c_set_clientdata(client, as5011);
+
+ return 0;
+
+err_free_axis_irq:
+ free_irq(as5011->axis_irq, as5011);
+err_free_button_irq:
+ free_irq(as5011->button_irq, as5011);
+err_free_button_gpio:
+ gpio_free(as5011->button_gpio);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(as5011);
+
+ return error;
+}
+
+static int __devexit as5011_remove(struct i2c_client *client)
+{
+ struct as5011_device *as5011 = i2c_get_clientdata(client);
+
+ free_irq(as5011->axis_irq, as5011);
+ free_irq(as5011->button_irq, as5011);
+ gpio_free(as5011->button_gpio);
+
+ input_unregister_device(as5011->input_dev);
+ kfree(as5011);
+
+ return 0;
+}
+
+static const struct i2c_device_id as5011_id[] = {
+ { MODULE_DEVICE_ALIAS, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, as5011_id);
+
+static struct i2c_driver as5011_driver = {
+ .driver = {
+ .name = "as5011",
+ },
+ .probe = as5011_probe,
+ .remove = __devexit_p(as5011_remove),
+ .id_table = as5011_id,
+};
+
+static int __init as5011_init(void)
+{
+ return i2c_add_driver(&as5011_driver);
+}
+module_init(as5011_init);
+
+static void __exit as5011_exit(void)
+{
+ i2c_del_driver(&as5011_driver);
+}
+module_exit(as5011_exit);
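
The new as5011 driver expects board code to supply platform data (declared in <linux/input/as5011.h>, which the driver includes) plus an I2C device entry matching the "as5011" id table. A rough board-file sketch; the field names follow their usage in as5011_probe() and as5011_configure_chip(), while every number (GPIO, IRQ, I2C address, thresholds) is a placeholder.

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input/as5011.h>

static struct as5011_platform_data example_as5011_pdata = {
	.button_gpio	= 42,			/* placeholder GPIO number */
	.axis_irq	= 100,			/* placeholder IRQ line */
	.axis_irqflags	= IRQF_TRIGGER_FALLING,	/* placeholder trigger */
	.xp = 0x50, .xn = 0xb0,			/* placeholder thresholds */
	.yp = 0x50, .yn = 0xb0,
};

static struct i2c_board_info example_board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("as5011", 0x40),	/* placeholder address */
		.platform_data = &example_as5011_pdata,
	},
};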
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f829998fabe6..c7a92028f450 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
menuconfig INPUT_KEYBOARD
- bool "Keyboards" if EMBEDDED || !X86
+ bool "Keyboards" if EXPERT || !X86
default y
help
Say Y here, and a list of supported keyboards will be displayed.
@@ -12,18 +12,6 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
-config KEYBOARD_AAED2000
- tristate "AAED-2000 keyboard"
- depends on MACH_AAED2000
- select INPUT_POLLDEV
- default y
- help
- Say Y here to enable the keyboard on the Agilent AAED-2000
- development board.
-
- To compile this driver as a module, choose M here: the
- module will be called aaed2000_kbd.
-
config KEYBOARD_ADP5520
tristate "Keypad Support for ADP5520 PMIC"
depends on PMIC_ADP5520
@@ -69,7 +57,7 @@ config KEYBOARD_ATARI
module will be called atakbd.
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EMBEDDED || !X86
+ tristate "AT keyboard" if EXPERT || !X86
default y
select SERIO
select SERIO_LIBPS2
@@ -355,6 +343,16 @@ config KEYBOARD_NOMADIK
To compile this driver as a module, choose M here: the
module will be called nmk-ske-keypad.
+config KEYBOARD_TEGRA
+ tristate "NVIDIA Tegra internal matrix keyboard controller support"
+ depends on ARCH_TEGRA
+ help
+ Say Y here if you want to use a matrix keyboard connected directly
+ to the internal keyboard controller on Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tegra-kbc.
+
config KEYBOARD_OPENCORES
tristate "OpenCores Keyboard Controller"
help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8933e9ca938d..468c627a2844 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,7 +4,6 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
@@ -43,6 +42,7 @@ obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o
+obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o
obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c
deleted file mode 100644
index 18222a689a03..000000000000
--- a/drivers/input/keyboard/aaed2000_kbd.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Keyboard driver for the AAED-2000 dev board
- *
- * Copyright (c) 2006 Nicolas Bellido Y Ortega
- *
- * Based on corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input-polldev.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>
-#include <mach/aaed2000.h>
-
-#define KB_ROWS 12
-#define KB_COLS 8
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) (((c) * KB_ROWS) + (r))
-#define NR_SCANCODES (KB_COLS * KB_ROWS)
-
-#define SCAN_INTERVAL (50) /* ms */
-#define KB_ACTIVATE_DELAY (20) /* us */
-
-static unsigned char aaedkbd_keycode[NR_SCANCODES] = {
- KEY_9, KEY_0, KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, 0, KEY_SPACE, KEY_KP6, 0, KEY_KPDOT, 0, 0,
- KEY_K, KEY_M, KEY_O, KEY_DOT, KEY_SLASH, 0, KEY_F, 0, 0, 0, KEY_LEFTSHIFT, 0,
- KEY_I, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, 0, 0, 0, 0, KEY_RIGHTSHIFT, 0,
- KEY_8, KEY_L, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_ENTER, 0, 0, 0, 0, 0, 0, 0,
- KEY_J, KEY_H, KEY_B, KEY_KP8, KEY_KP4, 0, KEY_C, KEY_D, KEY_S, KEY_A, 0, KEY_CAPSLOCK,
- KEY_Y, KEY_U, KEY_N, KEY_T, 0, 0, KEY_R, KEY_E, KEY_W, KEY_Q, 0, KEY_TAB,
- KEY_7, KEY_6, KEY_G, 0, KEY_5, 0, KEY_4, KEY_3, KEY_2, KEY_1, 0, KEY_GRAVE,
- 0, 0, KEY_COMMA, 0, KEY_KP2, 0, KEY_V, KEY_LEFTALT, KEY_X, KEY_Z, 0, KEY_LEFTCTRL
-};
-
-struct aaedkbd {
- unsigned char keycode[ARRAY_SIZE(aaedkbd_keycode)];
- struct input_polled_dev *poll_dev;
- int kbdscan_state[KB_COLS];
- int kbdscan_count[KB_COLS];
-};
-
-#define KBDSCAN_STABLE_COUNT 2
-
-static void aaedkbd_report_col(struct aaedkbd *aaedkbd,
- unsigned int col, unsigned int rowd)
-{
- unsigned int scancode, pressed;
- unsigned int row;
-
- for (row = 0; row < KB_ROWS; row++) {
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(aaedkbd->poll_dev->input,
- aaedkbd->keycode[scancode], pressed);
- }
-}
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void aaedkbd_poll(struct input_polled_dev *dev)
-{
- struct aaedkbd *aaedkbd = dev->private;
- unsigned int col, rowd;
-
- col = 0;
- do {
- AAEC_GPIO_KSCAN = col + 8;
- udelay(KB_ACTIVATE_DELAY);
- rowd = AAED_EXT_GPIO & AAED_EGPIO_KBD_SCAN;
-
- if (rowd != aaedkbd->kbdscan_state[col]) {
- aaedkbd->kbdscan_count[col] = 0;
- aaedkbd->kbdscan_state[col] = rowd;
- } else if (++aaedkbd->kbdscan_count[col] >= KBDSCAN_STABLE_COUNT) {
- aaedkbd_report_col(aaedkbd, col, rowd);
- col++;
- }
- } while (col < KB_COLS);
-
- AAEC_GPIO_KSCAN = 0x07;
- input_sync(dev->input);
-}
-
-static int __devinit aaedkbd_probe(struct platform_device *pdev)
-{
- struct aaedkbd *aaedkbd;
- struct input_polled_dev *poll_dev;
- struct input_dev *input_dev;
- int i;
- int error;
-
- aaedkbd = kzalloc(sizeof(struct aaedkbd), GFP_KERNEL);
- poll_dev = input_allocate_polled_device();
- if (!aaedkbd || !poll_dev) {
- error = -ENOMEM;
- goto fail;
- }
-
- platform_set_drvdata(pdev, aaedkbd);
-
- aaedkbd->poll_dev = poll_dev;
- memcpy(aaedkbd->keycode, aaedkbd_keycode, sizeof(aaedkbd->keycode));
-
- poll_dev->private = aaedkbd;
- poll_dev->poll = aaedkbd_poll;
- poll_dev->poll_interval = SCAN_INTERVAL;
-
- input_dev = poll_dev->input;
- input_dev->name = "AAED-2000 Keyboard";
- input_dev->phys = "aaedkbd/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- input_dev->keycode = aaedkbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(aaedkbd_keycode);
-
- for (i = 0; i < ARRAY_SIZE(aaedkbd_keycode); i++)
- set_bit(aaedkbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
-
- error = input_register_polled_device(aaedkbd->poll_dev);
- if (error)
- goto fail;
-
- return 0;
-
- fail: kfree(aaedkbd);
- input_free_polled_device(poll_dev);
- return error;
-}
-
-static int __devexit aaedkbd_remove(struct platform_device *pdev)
-{
- struct aaedkbd *aaedkbd = platform_get_drvdata(pdev);
-
- input_unregister_polled_device(aaedkbd->poll_dev);
- input_free_polled_device(aaedkbd->poll_dev);
- kfree(aaedkbd);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:aaed2000-keyboard");
-
-static struct platform_driver aaedkbd_driver = {
- .probe = aaedkbd_probe,
- .remove = __devexit_p(aaedkbd_remove),
- .driver = {
- .name = "aaed2000-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init aaedkbd_init(void)
-{
- return platform_driver_register(&aaedkbd_driver);
-}
-
-static void __exit aaedkbd_exit(void)
-{
- platform_driver_unregister(&aaedkbd_driver);
-}
-
-module_init(aaedkbd_init);
-module_exit(aaedkbd_exit);
-
-MODULE_AUTHOR("Nicolas Bellido Y Ortega");
-MODULE_DESCRIPTION("AAED-2000 Keyboard Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6069abe31e42..eb3006361ee4 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -322,7 +322,7 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
- int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+ int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
input_event(input, type, button->code, !!state);
input_sync(input);
@@ -410,8 +410,8 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (!button->can_disable)
irqflags |= IRQF_SHARED;
- error = request_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
- if (error) {
+ error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
+ if (error < 0) {
dev_err(dev, "Unable to claim irq %d; error %d\n",
irq, error);
goto fail3;
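
The gpio_keys.c hunk switches to request_any_context_irq(), which installs either a hardirq or a threaded handler depending on the underlying irq_chip and returns a negative errno on failure, which is why the error check changes from "error" to "error < 0". A small sketch of the pattern; example_button_isr() and the cookie are illustrative.

#include <linux/interrupt.h>

static irqreturn_t example_button_isr(int irq, void *dev_id)
{
	/* illustrative handler body */
	return IRQ_HANDLED;
}

static int example_setup_button_irq(int irq, void *cookie)
{
	int ret;

	ret = request_any_context_irq(irq, example_button_isr,
				      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				      "example-button", cookie);
	if (ret < 0)
		return ret;	/* could not claim the line in any context */

	/* a non-negative return (IRQC_IS_HARDIRQ or IRQC_IS_NESTED) means success */
	return 0;
}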
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
new file mode 100644
index 000000000000..99ce9032d08c
--- /dev/null
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -0,0 +1,783 @@
+/*
+ * Keyboard class input driver for the NVIDIA Tegra SoC internal matrix
+ * keyboard controller
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/kbc.h>
+
+#define KBC_MAX_DEBOUNCE_CNT 0x3ffu
+
+/* KBC row scan time and delay for beginning the row scan. */
+#define KBC_ROW_SCAN_TIME 16
+#define KBC_ROW_SCAN_DLY 5
+
+/* KBC uses a 32KHz clock so a cycle = 1/32Khz */
+#define KBC_CYCLE_USEC 32
+
+/* KBC Registers */
+
+/* KBC Control Register */
+#define KBC_CONTROL_0 0x0
+#define KBC_FIFO_TH_CNT_SHIFT(cnt) (cnt << 14)
+#define KBC_DEBOUNCE_CNT_SHIFT(cnt) (cnt << 4)
+#define KBC_CONTROL_FIFO_CNT_INT_EN (1 << 3)
+#define KBC_CONTROL_KBC_EN (1 << 0)
+
+/* KBC Interrupt Register */
+#define KBC_INT_0 0x4
+#define KBC_INT_FIFO_CNT_INT_STATUS (1 << 2)
+
+#define KBC_ROW_CFG0_0 0x8
+#define KBC_COL_CFG0_0 0x18
+#define KBC_INIT_DLY_0 0x28
+#define KBC_RPT_DLY_0 0x2c
+#define KBC_KP_ENT0_0 0x30
+#define KBC_KP_ENT1_0 0x34
+#define KBC_ROW0_MASK_0 0x38
+
+#define KBC_ROW_SHIFT 3
+
+struct tegra_kbc {
+ void __iomem *mmio;
+ struct input_dev *idev;
+ unsigned int irq;
+ unsigned int wake_enable_rows;
+ unsigned int wake_enable_cols;
+ spinlock_t lock;
+ unsigned int repoll_dly;
+ unsigned long cp_dly_jiffies;
+ bool use_fn_map;
+ const struct tegra_kbc_platform_data *pdata;
+ unsigned short keycode[KBC_MAX_KEY * 2];
+ unsigned short current_keys[KBC_MAX_KPENT];
+ unsigned int num_pressed_keys;
+ struct timer_list timer;
+ struct clk *clk;
+};
+
+static const u32 tegra_kbc_default_keymap[] = {
+ KEY(0, 2, KEY_W),
+ KEY(0, 3, KEY_S),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Z),
+ KEY(0, 7, KEY_FN),
+
+ KEY(1, 7, KEY_LEFTMETA),
+
+ KEY(2, 6, KEY_RIGHTALT),
+ KEY(2, 7, KEY_LEFTALT),
+
+ KEY(3, 0, KEY_5),
+ KEY(3, 1, KEY_4),
+ KEY(3, 2, KEY_R),
+ KEY(3, 3, KEY_E),
+ KEY(3, 4, KEY_F),
+ KEY(3, 5, KEY_D),
+ KEY(3, 6, KEY_X),
+
+ KEY(4, 0, KEY_7),
+ KEY(4, 1, KEY_6),
+ KEY(4, 2, KEY_T),
+ KEY(4, 3, KEY_H),
+ KEY(4, 4, KEY_G),
+ KEY(4, 5, KEY_V),
+ KEY(4, 6, KEY_C),
+ KEY(4, 7, KEY_SPACE),
+
+ KEY(5, 0, KEY_9),
+ KEY(5, 1, KEY_8),
+ KEY(5, 2, KEY_U),
+ KEY(5, 3, KEY_Y),
+ KEY(5, 4, KEY_J),
+ KEY(5, 5, KEY_N),
+ KEY(5, 6, KEY_B),
+ KEY(5, 7, KEY_BACKSLASH),
+
+ KEY(6, 0, KEY_MINUS),
+ KEY(6, 1, KEY_0),
+ KEY(6, 2, KEY_O),
+ KEY(6, 3, KEY_I),
+ KEY(6, 4, KEY_L),
+ KEY(6, 5, KEY_K),
+ KEY(6, 6, KEY_COMMA),
+ KEY(6, 7, KEY_M),
+
+ KEY(7, 1, KEY_EQUAL),
+ KEY(7, 2, KEY_RIGHTBRACE),
+ KEY(7, 3, KEY_ENTER),
+ KEY(7, 7, KEY_MENU),
+
+ KEY(8, 4, KEY_RIGHTSHIFT),
+ KEY(8, 5, KEY_LEFTSHIFT),
+
+ KEY(9, 5, KEY_RIGHTCTRL),
+ KEY(9, 7, KEY_LEFTCTRL),
+
+ KEY(11, 0, KEY_LEFTBRACE),
+ KEY(11, 1, KEY_P),
+ KEY(11, 2, KEY_APOSTROPHE),
+ KEY(11, 3, KEY_SEMICOLON),
+ KEY(11, 4, KEY_SLASH),
+ KEY(11, 5, KEY_DOT),
+
+ KEY(12, 0, KEY_F10),
+ KEY(12, 1, KEY_F9),
+ KEY(12, 2, KEY_BACKSPACE),
+ KEY(12, 3, KEY_3),
+ KEY(12, 4, KEY_2),
+ KEY(12, 5, KEY_UP),
+ KEY(12, 6, KEY_PRINT),
+ KEY(12, 7, KEY_PAUSE),
+
+ KEY(13, 0, KEY_INSERT),
+ KEY(13, 1, KEY_DELETE),
+ KEY(13, 3, KEY_PAGEUP),
+ KEY(13, 4, KEY_PAGEDOWN),
+ KEY(13, 5, KEY_RIGHT),
+ KEY(13, 6, KEY_DOWN),
+ KEY(13, 7, KEY_LEFT),
+
+ KEY(14, 0, KEY_F11),
+ KEY(14, 1, KEY_F12),
+ KEY(14, 2, KEY_F8),
+ KEY(14, 3, KEY_Q),
+ KEY(14, 4, KEY_F4),
+ KEY(14, 5, KEY_F3),
+ KEY(14, 6, KEY_1),
+ KEY(14, 7, KEY_F7),
+
+ KEY(15, 0, KEY_ESC),
+ KEY(15, 1, KEY_GRAVE),
+ KEY(15, 2, KEY_F5),
+ KEY(15, 3, KEY_TAB),
+ KEY(15, 4, KEY_F1),
+ KEY(15, 5, KEY_F2),
+ KEY(15, 6, KEY_CAPSLOCK),
+ KEY(15, 7, KEY_F6),
+
+ /* Software Handled Function Keys */
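+ /*
+  * Entries with row >= 16 form the Fn layer: their scancodes sit
+  * KBC_MAX_KEY above the corresponding plain entries (assuming
+  * KBC_MAX_KEY == KBC_MAX_ROW * KBC_MAX_COL = 128), so row 20 here
+  * is plain row 4 with Fn held; see tegra_kbc_report_keys().
+  */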
+ KEY(20, 0, KEY_KP7),
+
+ KEY(21, 0, KEY_KP9),
+ KEY(21, 1, KEY_KP8),
+ KEY(21, 2, KEY_KP4),
+ KEY(21, 4, KEY_KP1),
+
+ KEY(22, 1, KEY_KPSLASH),
+ KEY(22, 2, KEY_KP6),
+ KEY(22, 3, KEY_KP5),
+ KEY(22, 4, KEY_KP3),
+ KEY(22, 5, KEY_KP2),
+ KEY(22, 7, KEY_KP0),
+
+ KEY(27, 1, KEY_KPASTERISK),
+ KEY(27, 3, KEY_KPMINUS),
+ KEY(27, 4, KEY_KPPLUS),
+ KEY(27, 5, KEY_KPDOT),
+
+ KEY(28, 5, KEY_VOLUMEUP),
+
+ KEY(29, 3, KEY_HOME),
+ KEY(29, 4, KEY_END),
+ KEY(29, 5, KEY_BRIGHTNESSDOWN),
+ KEY(29, 6, KEY_VOLUMEDOWN),
+ KEY(29, 7, KEY_BRIGHTNESSUP),
+
+ KEY(30, 0, KEY_NUMLOCK),
+ KEY(30, 1, KEY_SCROLLLOCK),
+ KEY(30, 2, KEY_MUTE),
+
+ KEY(31, 4, KEY_HELP),
+};
+
+static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+ .keymap = tegra_kbc_default_keymap,
+ .keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
+};
+
+static void tegra_kbc_report_released_keys(struct input_dev *input,
+ unsigned short old_keycodes[],
+ unsigned int old_num_keys,
+ unsigned short new_keycodes[],
+ unsigned int new_num_keys)
+{
+ unsigned int i, j;
+
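+ /* A key present in the previous scan but absent now has been released. */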
+ for (i = 0; i < old_num_keys; i++) {
+ for (j = 0; j < new_num_keys; j++)
+ if (old_keycodes[i] == new_keycodes[j])
+ break;
+
+ if (j == new_num_keys)
+ input_report_key(input, old_keycodes[i], 0);
+ }
+}
+
+static void tegra_kbc_report_pressed_keys(struct input_dev *input,
+ unsigned char scancodes[],
+ unsigned short keycodes[],
+ unsigned int num_pressed_keys)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_pressed_keys; i++) {
+ input_event(input, EV_MSC, MSC_SCAN, scancodes[i]);
+ input_report_key(input, keycodes[i], 1);
+ }
+}
+
+static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
+{
+ unsigned char scancodes[KBC_MAX_KPENT];
+ unsigned short keycodes[KBC_MAX_KPENT];
+ u32 val = 0;
+ unsigned int i;
+ unsigned int num_down = 0;
+ unsigned long flags;
+ bool fn_keypress = false;
+
+ spin_lock_irqsave(&kbc->lock, flags);
+ for (i = 0; i < KBC_MAX_KPENT; i++) {
+ if ((i % 4) == 0)
+ val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
+
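+ /*
+  * Each KP_ENT register packs four 8-bit key entries: bit 7 marks
+  * a valid entry, bits 6:3 give the row and bits 2:0 the column.
+  */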
+ if (val & 0x80) {
+ unsigned int col = val & 0x07;
+ unsigned int row = (val >> 3) & 0x0f;
+ unsigned char scancode =
+ MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
+
+ scancodes[num_down] = scancode;
+ keycodes[num_down] = kbc->keycode[scancode];
+ /* If driver uses Fn map, do not report the Fn key. */
+ if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+ fn_keypress = true;
+ else
+ num_down++;
+ }
+
+ val >>= 8;
+ }
+
+ /*
+ * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+ * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+ */
+ if (fn_keypress) {
+ for (i = 0; i < num_down; i++) {
+ scancodes[i] += KBC_MAX_KEY;
+ keycodes[i] = kbc->keycode[scancodes[i]];
+ }
+ }
+
+ spin_unlock_irqrestore(&kbc->lock, flags);
+
+ tegra_kbc_report_released_keys(kbc->idev,
+ kbc->current_keys, kbc->num_pressed_keys,
+ keycodes, num_down);
+ tegra_kbc_report_pressed_keys(kbc->idev, scancodes, keycodes, num_down);
+ input_sync(kbc->idev);
+
+ memcpy(kbc->current_keys, keycodes, sizeof(kbc->current_keys));
+ kbc->num_pressed_keys = num_down;
+}
+
+static void tegra_kbc_keypress_timer(unsigned long data)
+{
+ struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+ unsigned long flags;
+ u32 val;
+ unsigned int i;
+
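+ /* Bits [7:4] of KBC_INT_0 report how many entries are pending in the FIFO. */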
+ val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
+ if (val) {
+ unsigned long dly;
+
+ tegra_kbc_report_keys(kbc);
+
+ /*
+ * If more than one key is pressed, we need not wait
+ * for the repoll delay.
+ */
+ dly = (val == 1) ? kbc->repoll_dly : 1;
+ mod_timer(&kbc->timer, jiffies + msecs_to_jiffies(dly));
+ } else {
+ /* Release any pressed keys and exit the polling loop */
+ for (i = 0; i < kbc->num_pressed_keys; i++)
+ input_report_key(kbc->idev, kbc->current_keys[i], 0);
+ input_sync(kbc->idev);
+
+ kbc->num_pressed_keys = 0;
+
+ /* All keys are released so enable the keypress interrupt */
+ spin_lock_irqsave(&kbc->lock, flags);
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ val |= KBC_CONTROL_FIFO_CNT_INT_EN;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+ spin_unlock_irqrestore(&kbc->lock, flags);
+ }
+}
+
+static irqreturn_t tegra_kbc_isr(int irq, void *args)
+{
+ struct tegra_kbc *kbc = args;
+ u32 val, ctl;
+
+ /*
+ * Until all keys are released, defer further processing to
+ * the polling loop in tegra_kbc_keypress_timer
+ */
+ ctl = readl(kbc->mmio + KBC_CONTROL_0);
+ ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
+ writel(ctl, kbc->mmio + KBC_CONTROL_0);
+
+ /*
+ * Quickly bail out and re-enable interrupts if the FIFO threshold
+ * count interrupt wasn't the interrupt source
+ */
+ val = readl(kbc->mmio + KBC_INT_0);
+ writel(val, kbc->mmio + KBC_INT_0);
+
+ if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
+ /*
+ * Schedule timer to run when hardware is in continuous
+ * polling mode.
+ */
+ mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+ } else {
+ ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
+ writel(ctl, kbc->mmio + KBC_CONTROL_0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
+{
+ const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+ int i;
+ unsigned int rst_val;
+
+ BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
+ rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
+
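+ /*
+  * When filtering, mask every key first and then clear the mask bit
+  * for each configured wake key below, so that only those keys can
+  * wake the system.
+  */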
+ for (i = 0; i < KBC_MAX_ROW; i++)
+ writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
+
+ if (filter) {
+ for (i = 0; i < pdata->wake_cnt; i++) {
+ u32 val, addr;
+ addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
+ val = readl(kbc->mmio + addr);
+ val &= ~(1 << pdata->wake_cfg[i].col);
+ writel(val, kbc->mmio + addr);
+ }
+ }
+}
+
+static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
+{
+ const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+ int i;
+
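+ /*
+  * Each ROW_CFG register packs six 5-bit fields (an enable bit plus a
+  * 4-bit row number); each COL_CFG register packs eight 4-bit fields
+  * (an enable bit plus a 3-bit column number).
+  */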
+ for (i = 0; i < KBC_MAX_GPIO; i++) {
+ u32 r_shft = 5 * (i % 6);
+ u32 c_shft = 4 * (i % 8);
+ u32 r_mask = 0x1f << r_shft;
+ u32 c_mask = 0x0f << c_shft;
+ u32 r_offs = (i / 6) * 4 + KBC_ROW_CFG0_0;
+ u32 c_offs = (i / 8) * 4 + KBC_COL_CFG0_0;
+ u32 row_cfg = readl(kbc->mmio + r_offs);
+ u32 col_cfg = readl(kbc->mmio + c_offs);
+
+ row_cfg &= ~r_mask;
+ col_cfg &= ~c_mask;
+
+ if (pdata->pin_cfg[i].is_row)
+ row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+ else
+ col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+
+ writel(row_cfg, kbc->mmio + r_offs);
+ writel(col_cfg, kbc->mmio + c_offs);
+ }
+}
+
+static int tegra_kbc_start(struct tegra_kbc *kbc)
+{
+ const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+ unsigned long flags;
+ unsigned int debounce_cnt;
+ u32 val = 0;
+
+ clk_enable(kbc->clk);
+
+ /* Reset the KBC controller to clear all previous status. */
+ tegra_periph_reset_assert(kbc->clk);
+ udelay(100);
+ tegra_periph_reset_deassert(kbc->clk);
+ udelay(100);
+
+ tegra_kbc_config_pins(kbc);
+ tegra_kbc_setup_wakekeys(kbc, false);
+
+ writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+
+ /* Clamp the keyboard debounce count to KBC_MAX_DEBOUNCE_CNT. */
+ debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
+ val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
+ val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */
+ val |= KBC_CONTROL_KBC_EN; /* enable */
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+
+ /*
+ * Compute the delay from interrupt mode to continuous polling mode
+ * (the init delay is given in KBC cycles and converted to jiffies)
+ * so the timer routine is scheduled appropriately.
+ */
+ val = readl(kbc->mmio + KBC_INIT_DLY_0);
+ kbc->cp_dly_jiffies = usecs_to_jiffies((val & 0xfffff) * 32);
+
+ kbc->num_pressed_keys = 0;
+
+ /*
+ * Atomically clear out any remaining entries in the key FIFO
+ * and enable keyboard interrupts.
+ */
+ spin_lock_irqsave(&kbc->lock, flags);
+ while (1) {
+ val = readl(kbc->mmio + KBC_INT_0);
+ val >>= 4;
+ if (!val)
+ break;
+
+ val = readl(kbc->mmio + KBC_KP_ENT0_0);
+ val = readl(kbc->mmio + KBC_KP_ENT1_0);
+ }
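+ /* Acknowledge any stale interrupt status before re-enabling the IRQ. */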
+ writel(0x7, kbc->mmio + KBC_INT_0);
+ spin_unlock_irqrestore(&kbc->lock, flags);
+
+ enable_irq(kbc->irq);
+
+ return 0;
+}
+
+static void tegra_kbc_stop(struct tegra_kbc *kbc)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&kbc->lock, flags);
+ val = readl(kbc->mmio + KBC_CONTROL_0);
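+ /* Clear KBC_CONTROL_KBC_EN (bit 0) to stop the controller. */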
+ val &= ~1;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+ spin_unlock_irqrestore(&kbc->lock, flags);
+
+ disable_irq(kbc->irq);
+ del_timer_sync(&kbc->timer);
+
+ clk_disable(kbc->clk);
+}
+
+static int tegra_kbc_open(struct input_dev *dev)
+{
+ struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+ return tegra_kbc_start(kbc);
+}
+
+static void tegra_kbc_close(struct input_dev *dev)
+{
+ struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+ return tegra_kbc_stop(kbc);
+}
+
+static bool __devinit
+tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
+ struct device *dev, unsigned int *num_rows)
+{
+ int i;
+
+ *num_rows = 0;
+
+ for (i = 0; i < KBC_MAX_GPIO; i++) {
+ const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+
+ if (pin_cfg->is_row) {
+ if (pin_cfg->num >= KBC_MAX_ROW) {
+ dev_err(dev,
+ "pin_cfg[%d]: invalid row number %d\n",
+ i, pin_cfg->num);
+ return false;
+ }
+ (*num_rows)++;
+ } else {
+ if (pin_cfg->num >= KBC_MAX_COL) {
+ dev_err(dev,
+ "pin_cfg[%d]: invalid column number %d\n",
+ i, pin_cfg->num);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+{
+ const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
+ const struct matrix_keymap_data *keymap_data;
+ struct tegra_kbc *kbc;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int irq;
+ int err;
+ int i;
+ int num_rows = 0;
+ unsigned int debounce_cnt;
+ unsigned int scan_time_rows;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
+ return -ENXIO;
+ }
+
+ kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!kbc || !input_dev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ kbc->pdata = pdata;
+ kbc->idev = input_dev;
+ kbc->irq = irq;
+ spin_lock_init(&kbc->lock);
+ setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ err = -EBUSY;
+ goto err_free_mem;
+ }
+
+ kbc->mmio = ioremap(res->start, resource_size(res));
+ if (!kbc->mmio) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ err = -ENXIO;
+ goto err_free_mem_region;
+ }
+
+ kbc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kbc->clk)) {
+ dev_err(&pdev->dev, "failed to get keyboard clock\n");
+ err = PTR_ERR(kbc->clk);
+ goto err_iounmap;
+ }
+
+ kbc->wake_enable_rows = 0;
+ kbc->wake_enable_cols = 0;
+ for (i = 0; i < pdata->wake_cnt; i++) {
+ kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
+ kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
+ }
+
+ /*
+ * The time delay between two consecutive reads of the FIFO is
+ * the sum of the repeat time and the time taken for scanning
+ * the rows. There is an additional delay before the row scanning
+ * starts. The repoll delay is computed in milliseconds.
+ */
+ debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
+ kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+ kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
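+ /*
+  * For example, with hypothetical platform values debounce_cnt = 2,
+  * num_rows = 16 and repeat_cnt = 1024, this gives
+  * 5 + (16 + 2) * 16 + 1024 = 1317 cycles, i.e.
+  * (1317 * 32 + 999) / 1000 = 43 ms.
+  */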
+
+ input_dev->name = pdev->name;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->open = tegra_kbc_open;
+ input_dev->close = tegra_kbc_close;
+
+ input_set_drvdata(input_dev, kbc);
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+ input_dev->keycode = kbc->keycode;
+ input_dev->keycodesize = sizeof(kbc->keycode[0]);
+ input_dev->keycodemax = KBC_MAX_KEY;
+ if (pdata->use_fn_map)
+ input_dev->keycodemax *= 2;
+
+ kbc->use_fn_map = pdata->use_fn_map;
+ keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
+ matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
+ input_dev->keycode, input_dev->keybit);
+
+ err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
+ pdev->name, kbc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
+ goto err_put_clk;
+ }
+
+ disable_irq(kbc->irq);
+
+ err = input_register_device(kbc->idev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, kbc);
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+ return 0;
+
+err_free_irq:
+ free_irq(kbc->irq, pdev);
+err_put_clk:
+ clk_put(kbc->clk);
+err_iounmap:
+ iounmap(kbc->mmio);
+err_free_mem_region:
+ release_mem_region(res->start, resource_size(res));
+err_free_mem:
+ input_free_device(kbc->idev);
+ kfree(kbc);
+
+ return err;
+}
+
+static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+{
+ struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ free_irq(kbc->irq, pdev);
+ clk_put(kbc->clk);
+
+ input_unregister_device(kbc->idev);
+ iounmap(kbc->mmio);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(kbc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_kbc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev)) {
+ tegra_kbc_setup_wakekeys(kbc, true);
+ enable_irq_wake(kbc->irq);
+ /* Forcefully clear the interrupt status */
+ writel(0x7, kbc->mmio + KBC_INT_0);
+ msleep(30);
+ } else {
+ mutex_lock(&kbc->idev->mutex);
+ if (kbc->idev->users)
+ tegra_kbc_stop(kbc);
+ mutex_unlock(&kbc->idev->mutex);
+ }
+
+ return 0;
+}
+
+static int tegra_kbc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+ int err = 0;
+
+ if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(kbc->irq);
+ tegra_kbc_setup_wakekeys(kbc, false);
+ } else {
+ mutex_lock(&kbc->idev->mutex);
+ if (kbc->idev->users)
+ err = tegra_kbc_start(kbc);
+ mutex_unlock(&kbc->idev->mutex);
+ }
+
+ return err;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+
+static struct platform_driver tegra_kbc_driver = {
+ .probe = tegra_kbc_probe,
+ .remove = __devexit_p(tegra_kbc_remove),
+ .driver = {
+ .name = "tegra-kbc",
+ .owner = THIS_MODULE,
+ .pm = &tegra_kbc_pm_ops,
+ },
+};
+
+static void __exit tegra_kbc_exit(void)
+{
+ platform_driver_unregister(&tegra_kbc_driver);
+}
+module_exit(tegra_kbc_exit);
+
+static int __init tegra_kbc_init(void)
+{
+ return platform_driver_register(&tegra_kbc_driver);
+}
+module_init(tegra_kbc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
+MODULE_DESCRIPTION("Tegra matrix keyboard controller driver");
+MODULE_ALIAS("platform:tegra-kbc");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index b4a81ebfab92..c8f097a15d89 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -14,6 +14,7 @@
*/
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -219,9 +220,9 @@ static int __devinit keypad_probe(struct platform_device *pdev)
}
kp->clk = clk_get(dev, NULL);
- if (!kp->clk) {
+ if (IS_ERR(kp->clk)) {
dev_err(dev, "cannot claim device clock\n");
- error = -EINVAL;
+ error = PTR_ERR(kp->clk);
goto error_clk;
}
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9dfd6e5f786f..1f38302a5951 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -69,11 +69,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
}
if (value > 20 && value < 32767)
-#ifndef FREQ
- count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
-#else
- count = (FREQ / (value * 4)) - 1;
-#endif
+ count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
ixp4xx_spkr_control(pin, count);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e0108962e..7e64d01da2be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
/* request the IRQs */
err = request_irq(encoder->irq_a, &rotary_encoder_irq,
- IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
DRV_NAME, encoder);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
}
err = request_irq(encoder->irq_b, &rotary_encoder_irq,
- IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
DRV_NAME, encoder);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index bf5fd7f6a313..9c1e6ee83531 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -39,7 +39,7 @@ config MOUSE_PS2
module will be called psmouse.
config MOUSE_PS2_ALPS
- bool "ALPS PS/2 mouse protocol extension" if EMBEDDED
+ bool "ALPS PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -49,7 +49,7 @@ config MOUSE_PS2_ALPS
If unsure, say Y.
config MOUSE_PS2_LOGIPS2PP
- bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED
+ bool "Logitech PS/2++ mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -59,7 +59,7 @@ config MOUSE_PS2_LOGIPS2PP
If unsure, say Y.
config MOUSE_PS2_SYNAPTICS
- bool "Synaptics PS/2 mouse protocol extension" if EMBEDDED
+ bool "Synaptics PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
@@ -69,7 +69,7 @@ config MOUSE_PS2_SYNAPTICS
If unsure, say Y.
config MOUSE_PS2_LIFEBOOK
- bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
+ bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2 && X86 && DMI
help
@@ -79,7 +79,7 @@ config MOUSE_PS2_LIFEBOOK
If unsure, say Y.
config MOUSE_PS2_TRACKPOINT
- bool "IBM Trackpoint PS/2 mouse protocol extension" if EMBEDDED
+ bool "IBM Trackpoint PS/2 mouse protocol extension" if EXPERT
default y
depends on MOUSE_PS2
help
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b95231763911..ee82851afe3e 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -55,6 +55,14 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+/* MacbookAir3,2 (unibody), aka wellspring5 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
+/* MacbookAir3,1 (unibody), aka wellspring4 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
+ /* MacbookAir3,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+ /* MacbookAir3,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
/* Terminating entry */
{}
};
@@ -234,6 +250,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4616, 5112 },
+ { DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
+ },
{}
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index da392c22fc6c..aa186cf6c514 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -755,23 +755,26 @@ static int synaptics_reconnect(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
struct synaptics_data old_priv = *priv;
+ int retry = 0;
+ int error;
- psmouse_reset(psmouse);
+ do {
+ psmouse_reset(psmouse);
+ error = synaptics_detect(psmouse, 0);
+ } while (error && ++retry < 3);
- if (synaptics_detect(psmouse, 0))
+ if (error)
return -1;
+ if (retry > 1)
+ printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
+ retry);
+
if (synaptics_query_hardware(psmouse)) {
printk(KERN_ERR "Unable to query Synaptics hardware.\n");
return -1;
}
- if (old_priv.identity != priv->identity ||
- old_priv.model_id != priv->model_id ||
- old_priv.capabilities != priv->capabilities ||
- old_priv.ext_cap != priv->ext_cap)
- return -1;
-
if (synaptics_set_absolute_mode(psmouse)) {
printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
return -1;
@@ -782,6 +785,19 @@ static int synaptics_reconnect(struct psmouse *psmouse)
return -1;
}
+ if (old_priv.identity != priv->identity ||
+ old_priv.model_id != priv->model_id ||
+ old_priv.capabilities != priv->capabilities ||
+ old_priv.ext_cap != priv->ext_cap) {
+ printk(KERN_ERR "Synaptics hardware appears to be different: "
+ "id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
+ old_priv.identity, priv->identity,
+ old_priv.model_id, priv->model_id,
+ old_priv.capabilities, priv->capabilities,
+ old_priv.ext_cap, priv->ext_cap);
+ return -1;
+ }
+
return 0;
}
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d042a72c..7453938bf5ef 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte mask name meaning
+ * ---- ---- ------- ------------
+ * 1 0x01 adjustable threshold capacitive button sensitivity
+ * can be adjusted
+ * 1 0x02 report max query 0x0d gives max coord reported
+ * 1 0x04 clearpad sensor is ClearPad product
+ * 1 0x08 advanced gesture not particularly meaningful
+ * 1 0x10 clickpad bit 0 1-button ClickPad
+ * 1 0x60 multifinger mode identifies firmware finger counting
+ * (not reporting!) algorithm.
+ * Not particularly meaningful
+ * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
+ * 2 0x01 clickpad bit 1 2-button ClickPad
+ * 2 0x02 deluxe LED controls touchpad support LED commands
+ * a la the multimedia control bar
+ * 2 0x04 reduced filtering firmware does less filtering on
+ * position data, driver should watch
+ * for noise.
+ */
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index bcb1fdedb595..55f2c2293ec6 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
# Input core configuration
#
config SERIO
- tristate "Serial I/O support" if EMBEDDED || !X86
+ tristate "Serial I/O support" if EXPERT || !X86
default y
help
Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@ config SERIO
if SERIO
config SERIO_I8042
- tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
+ tristate "i8042 PC Keyboard controller" if EXPERT || !X86
default y
depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
(!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@ config SERIO_MACEPS2
module will be called maceps2.
config SERIO_LIBPS2
- tristate "PS/2 driver library" if EMBEDDED
+ tristate "PS/2 driver library" if EXPERT
depends on SERIO_I8042 || SERIO_I8042=n
help
Say Y here if you are using a driver for device connected
@@ -229,7 +229,7 @@ config SERIO_PS2MULT
tristate "TQC PS/2 multiplexer"
help
Say Y here if you have the PS/2 line multiplexer like the one
- present on TQC boads.
+ present on TQC boards.
To compile this driver as a module, choose M here: the
module will be called ps2mult.
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 448c7724beb9..852816567241 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -111,9 +111,11 @@ static void ct82c710_close(struct serio *serio)
static int ct82c710_open(struct serio *serio)
{
unsigned char status;
+ int err;
- if (request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL))
- return -1;
+ err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL);
+ if (err)
+ return err;
status = inb_p(CT82C710_STATUS);
@@ -131,7 +133,7 @@ static int ct82c710_open(struct serio *serio)
status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON);
outb_p(status, CT82C710_STATUS);
free_irq(CT82C710_IRQ, NULL);
- return -1;
+ return -EBUSY;
}
return 0;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5ae0fc4578fe..bb9f5d31f0d0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -424,6 +424,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
},
+ {
+ /* Dell Vostro V13 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ },
+ },
{ }
};
@@ -545,6 +552,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
};
#endif
+static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ {
+ /* Dell Vostro V13 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ },
+ },
+ { }
+};
+
/*
* Some Wistron based laptops need us to explicitly enable the 'Dritek
* keyboard extension' to make their extra keys start generating scancodes.
@@ -896,6 +914,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_nomux_table))
i8042_nomux = true;
+ if (dmi_check_system(i8042_dmi_notimeout_table))
+ i8042_notimeout = true;
+
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
#endif /* CONFIG_X86 */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c04ff00a3663..ac4c93689ab9 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -63,6 +63,10 @@ static bool i8042_noloop;
module_param_named(noloop, i8042_noloop, bool, 0);
MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
+static bool i8042_notimeout;
+module_param_named(notimeout, i8042_notimeout, bool, 0);
+MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -504,7 +508,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
} else {
dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
- ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
+ ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
port_no = (str & I8042_STR_AUXDATA) ?
I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bca1a1a..ba70058e2be3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event)
kfree(event);
}
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+ enum serio_event_type type)
{
struct serio_event *e, *next;
unsigned long flags;
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event)
spin_lock_irqsave(&serio_event_lock, flags);
list_for_each_entry_safe(e, next, &serio_event_list, node) {
- if (event->object == e->object) {
+ if (object == e->object) {
/*
* If this event is of different type we should not
* look further - we only suppress duplicate events
* that were sent back-to-back.
*/
- if (event->type != e->type)
+ if (type != e->type)
break;
list_del_init(&e->node);
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work)
break;
}
- serio_remove_duplicate_events(event);
+ serio_remove_duplicate_events(event->object, event->type);
serio_free_event(event);
}
@@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &serio_event_list);
- schedule_work(&serio_event_work);
+ queue_work(system_long_wq, &serio_event_work);
out:
spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
} else if (!strncmp(buf, "rescan", count)) {
serio_disconnect_port(serio);
serio_find_driver(serio);
+ serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
serio_disconnect_port(serio);
error = serio_bind_driver(serio, to_serio_driver(drv));
put_driver(drv);
+ serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
} else {
error = -EINVAL;
}
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 6e362de3f412..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -116,14 +116,15 @@ static void serport_ldisc_close(struct tty_struct *tty)
/*
* serport_ldisc_receive() is called by the low level tty driver when characters
- * are ready for us. We forward the characters, one by one to the 'interrupt'
- * routine.
+ * are ready for us. We forward the characters and flags, one by one to the
+ * 'interrupt' routine.
*/
static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
+ unsigned int ch_flags;
int i;
spin_lock_irqsave(&serport->lock, flags);
@@ -131,8 +132,23 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
- for (i = 0; i < count; i++)
- serio_interrupt(serport->serio, cp[i], 0);
+ for (i = 0; i < count; i++) {
+ switch (fp[i]) {
+ case TTY_FRAME:
+ ch_flags = SERIO_FRAME;
+ break;
+
+ case TTY_PARITY:
+ ch_flags = SERIO_PARITY;
+ break;
+
+ default:
+ ch_flags = 0;
+ break;
+ }
+
+ serio_interrupt(serport->serio, cp[i], ch_flags);
+ }
out:
spin_unlock_irqrestore(&serport->lock, flags);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a29a7812bd46..7729e547ba65 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -201,6 +201,7 @@ int sparse_keymap_setup(struct input_dev *dev,
break;
case KE_SW:
+ case KE_VSW:
__set_bit(EV_SW, dev->evbit);
__set_bit(entry->sw.code, dev->swbit);
break;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc381498b798..cf8fb9f5d4a8 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
/* Retrieve the physical and logical size for OEM devices */
error = wacom_retrieve_hid_descriptor(intf, features);
if (error)
- goto fail2;
+ goto fail3;
wacom_setup_device_quirks(features);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518782999fea..367fa82a607e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1101,6 +1101,13 @@ void wacom_setup_device_quirks(struct wacom_features *features)
}
}
+static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
+ unsigned int physical_max)
+{
+ /* Touch physical dimensions are in 100ths of a mm */
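+ /*
+  * E.g. (hypothetical values) logical_max = 4096 over a physical_max
+  * of 2048 (20.48 mm) yields 200 points per mm.
+  */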
+ return (logical_max * 100) / physical_max;
+}
+
void wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
@@ -1228,8 +1235,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case TABLETPC:
if (features->device_type == BTN_TOOL_DOUBLETAP ||
features->device_type == BTN_TOOL_TRIPLETAP) {
- input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+ input_abs_set_res(input_dev, ABS_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
}
@@ -1272,6 +1283,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_MT_PRESSURE,
0, features->pressure_max,
features->pressure_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
} else if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1426,6 +1443,10 @@ static struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
static const struct wacom_features wacom_features_0xD4 =
{ "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD6 =
+ { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD7 =
+ { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
static struct wacom_features wacom_features_0xD8 =
{ "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
static struct wacom_features wacom_features_0xDA =
@@ -1507,6 +1528,8 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xD2) },
{ USB_DEVICE_WACOM(0xD3) },
{ USB_DEVICE_WACOM(0xD4) },
+ { USB_DEVICE_WACOM(0xD6) },
+ { USB_DEVICE_WACOM(0xD7) },
{ USB_DEVICE_WACOM(0xD8) },
{ USB_DEVICE_WACOM(0xDA) },
{ USB_DEVICE_WACOM(0xDB) },
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 07ac77d393a4..61834ae282e1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -540,62 +540,62 @@ config TOUCHSCREEN_MC13783
config TOUCHSCREEN_USB_EGALAX
default y
- bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
+ bool "eGalax, eTurboTouch CT-410/510/700 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_PANJIT
default y
- bool "PanJit device support" if EMBEDDED
+ bool "PanJit device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_3M
default y
- bool "3M/Microtouch EX II series device support" if EMBEDDED
+ bool "3M/Microtouch EX II series device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ITM
default y
- bool "ITM device support" if EMBEDDED
+ bool "ITM device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ETURBO
default y
- bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED
+ bool "eTurboTouch (non-eGalax compatible) device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GUNZE
default y
- bool "Gunze AHL61 device support" if EMBEDDED
+ bool "Gunze AHL61 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_DMC_TSC10
default y
- bool "DMC TSC-10/25 device support" if EMBEDDED
+ bool "DMC TSC-10/25 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_IRTOUCH
default y
- bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED
+ bool "IRTOUCHSYSTEMS/UNITOP device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_IDEALTEK
default y
- bool "IdealTEK URTC1000 device support" if EMBEDDED
+ bool "IdealTEK URTC1000 device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GENERAL_TOUCH
default y
- bool "GeneralTouch Touchscreen device support" if EMBEDDED
+ bool "GeneralTouch Touchscreen device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_GOTOP
default y
- bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
+ bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_JASTEC
default y
- bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED
+ bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_E2I
@@ -605,17 +605,17 @@ config TOUCHSCREEN_USB_E2I
config TOUCHSCREEN_USB_ZYTRONIC
default y
- bool "Zytronic controller" if EMBEDDED
+ bool "Zytronic controller" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ETT_TC45USB
default y
- bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED
+ bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_NEXIO
default y
- bool "NEXIO/iNexio device support" if EMBEDDED
+ bool "NEXIO/iNexio device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d82a38ee9a3e..4e4e58cec6c8 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,14 +10,16 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pm.h>
#include "ad7879.h"
#define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */
#ifdef CONFIG_PM
-static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad7879_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct ad7879 *ts = i2c_get_clientdata(client);
ad7879_suspend(ts);
@@ -25,17 +27,17 @@ static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message)
return 0;
}
-static int ad7879_i2c_resume(struct i2c_client *client)
+static int ad7879_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct ad7879 *ts = i2c_get_clientdata(client);
ad7879_resume(ts);
return 0;
}
-#else
-# define ad7879_i2c_suspend NULL
-# define ad7879_i2c_resume NULL
+
+static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
#endif
/* All registers are word-sized.
@@ -117,11 +119,12 @@ static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &ad7879_i2c_pm,
+#endif
},
.probe = ad7879_i2c_probe,
.remove = __devexit_p(ad7879_i2c_remove),
- .suspend = ad7879_i2c_suspend,
- .resume = ad7879_i2c_resume,
.id_table = ad7879_id,
};
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b78e46..4bf2316e3284 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
struct ads7846_platform_data *pdata = spi->dev.platform_data;
int err;
- /* REVISIT when the irq can be triggered active-low, or if for some
+ /*
+ * REVISIT when the irq can be triggered active-low, or if for some
* reason the touchscreen isn't hooked up, we don't need to access
* the pendown state.
*/
- if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
- dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
- return -EINVAL;
- }
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
- return 0;
- }
+ } else if (gpio_is_valid(pdata->gpio_pendown)) {
- err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
- if (err) {
- dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
- pdata->gpio_pendown);
- return err;
- }
+ err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+ if (err) {
+ dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+ pdata->gpio_pendown);
+ return err;
+ }
- ts->gpio_pendown = pdata->gpio_pendown;
+ ts->gpio_pendown = pdata->gpio_pendown;
+
+ } else {
+ dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
err_put_regulator:
regulator_put(ts->reg);
err_free_gpio:
- if (ts->gpio_pendown != -1)
+ if (!ts->get_pendown_state)
gpio_free(ts->gpio_pendown);
err_cleanup_filter:
if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi)
regulator_disable(ts->reg);
regulator_put(ts->reg);
- if (ts->gpio_pendown != -1)
+ if (!ts->get_pendown_state) {
+ /*
+ * If we are not using a specialized pendown method, we must
+ * have been relying on the GPIO we set up ourselves.
+ */
gpio_free(ts->gpio_pendown);
+ }
if (ts->filter_cleanup)
ts->filter_cleanup(ts->filter_data);
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f7fa9ef4cd65..1507ce108d5b 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -12,6 +12,7 @@
#include <linux/input.h>
#include <linux/input/bu21013.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#define PEN_DOWN_INTR 0
#define MAX_FINGERS 2
@@ -139,6 +140,7 @@
* @chip: pointer to the touch panel controller
* @in_dev: pointer to the input device structure
* @intr_pin: interrupt pin value
+ * @regulator: pointer to the Regulator used for touch screen
*
* Touch panel device data structure
*/
@@ -149,6 +151,7 @@ struct bu21013_ts_data {
const struct bu21013_platform_device *chip;
struct input_dev *in_dev;
unsigned int intr_pin;
+ struct regulator *regulator;
};
/**
@@ -456,6 +459,20 @@ static int __devinit bu21013_probe(struct i2c_client *client,
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
+
+ bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ if (IS_ERR(bu21013_data->regulator)) {
+ dev_err(&client->dev, "regulator_get failed\n");
+ error = PTR_ERR(bu21013_data->regulator);
+ goto err_free_mem;
+ }
+
+ error = regulator_enable(bu21013_data->regulator);
+ if (error < 0) {
+ dev_err(&client->dev, "regulator enable failed\n");
+ goto err_put_regulator;
+ }
+
bu21013_data->touch_stopped = false;
init_waitqueue_head(&bu21013_data->wait);
@@ -464,7 +481,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
error = pdata->cs_en(pdata->cs_pin);
if (error < 0) {
dev_err(&client->dev, "chip init failed\n");
- goto err_free_mem;
+ goto err_disable_regulator;
}
}
@@ -485,9 +502,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
__set_bit(EV_ABS, in_dev->evbit);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
- pdata->x_max_res, 0, 0);
+ pdata->touch_x_max, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
- pdata->y_max_res, 0, 0);
+ pdata->touch_y_max, 0, 0);
input_set_drvdata(in_dev, bu21013_data);
error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
@@ -513,6 +530,10 @@ err_free_irq:
bu21013_free_irq(bu21013_data);
err_cs_disable:
pdata->cs_dis(pdata->cs_pin);
+err_disable_regulator:
+ regulator_disable(bu21013_data->regulator);
+err_put_regulator:
+ regulator_put(bu21013_data->regulator);
err_free_mem:
input_free_device(in_dev);
kfree(bu21013_data);
@@ -535,6 +556,10 @@ static int __devexit bu21013_remove(struct i2c_client *client)
bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
input_unregister_device(bu21013_data->in_dev);
+
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+
kfree(bu21013_data);
device_init_wakeup(&client->dev, false);
@@ -561,6 +586,8 @@ static int bu21013_suspend(struct device *dev)
else
disable_irq(bu21013_data->chip->irq);
+ regulator_disable(bu21013_data->regulator);
+
return 0;
}
@@ -577,6 +604,12 @@ static int bu21013_resume(struct device *dev)
struct i2c_client *client = bu21013_data->client;
int retval;
+ retval = regulator_enable(bu21013_data->regulator);
+ if (retval < 0) {
+ dev_err(&client->dev, "bu21013 regulator enable failed\n");
+ return retval;
+ }
+
retval = bu21013_init_chip(bu21013_data);
if (retval < 0) {
dev_err(&client->dev, "bu21013 controller config failed\n");
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index d0c3a7229adf..a93c5c26ab3f 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -280,8 +280,9 @@ err_free_mem:
}
#ifdef CONFIG_PM
-static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
+static int cy8ctmg110_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
@@ -293,8 +294,9 @@ static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int cy8ctmg110_resume(struct i2c_client *client)
+static int cy8ctmg110_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
@@ -305,6 +307,8 @@ static int cy8ctmg110_resume(struct i2c_client *client)
}
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
#endif
static int __devexit cy8ctmg110_remove(struct i2c_client *client)
@@ -335,14 +339,13 @@ static struct i2c_driver cy8ctmg110_driver = {
.driver = {
.owner = THIS_MODULE,
.name = CY8CTMG110_DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &cy8ctmg110_pm,
+#endif
},
.id_table = cy8ctmg110_idtable,
.probe = cy8ctmg110_probe,
.remove = __devexit_p(cy8ctmg110_remove),
-#ifdef CONFIG_PM
- .suspend = cy8ctmg110_suspend,
- .resume = cy8ctmg110_resume,
-#endif
};
static int __init cy8ctmg110_init(void)
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7a3a916f84a8..7f8f538a9806 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -261,8 +261,9 @@ static int __devexit eeti_ts_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int eeti_ts_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
struct input_dev *input_dev = priv->input;
@@ -279,8 +280,9 @@ static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int eeti_ts_resume(struct i2c_client *client)
+static int eeti_ts_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
struct input_dev *input_dev = priv->input;
@@ -296,9 +298,8 @@ static int eeti_ts_resume(struct i2c_client *client)
return 0;
}
-#else
-#define eeti_ts_suspend NULL
-#define eeti_ts_resume NULL
+
+static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
#endif
static const struct i2c_device_id eeti_ts_id[] = {
@@ -310,11 +311,12 @@ MODULE_DEVICE_TABLE(i2c, eeti_ts_id);
static struct i2c_driver eeti_ts_driver = {
.driver = {
.name = "eeti_ts",
+#ifdef CONFIG_PM
+ .pm = &eeti_ts_pm,
+#endif
},
.probe = eeti_ts_probe,
.remove = __devexit_p(eeti_ts_remove),
- .suspend = eeti_ts_suspend,
- .resume = eeti_ts_resume,
.id_table = eeti_ts_id,
};
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 6ee9940aaf5b..2d84c80ceb66 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -261,25 +261,27 @@ static int __devexit mcs5000_ts_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
-static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int mcs5000_ts_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
+
/* Touch sleep mode */
i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
return 0;
}
-static int mcs5000_ts_resume(struct i2c_client *client)
+static int mcs5000_ts_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
mcs5000_ts_phys_init(data);
return 0;
}
-#else
-#define mcs5000_ts_suspend NULL
-#define mcs5000_ts_resume NULL
+
+static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
#endif
static const struct i2c_device_id mcs5000_ts_id[] = {
@@ -291,10 +293,11 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
static struct i2c_driver mcs5000_ts_driver = {
.probe = mcs5000_ts_probe,
.remove = __devexit_p(mcs5000_ts_remove),
- .suspend = mcs5000_ts_suspend,
- .resume = mcs5000_ts_resume,
.driver = {
.name = "mcs5000_ts",
+#ifdef CONFIG_PM
+ .pm = &mcs5000_ts_pm,
+#endif
},
.id_table = mcs5000_ts_id,
};
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index defe5dd3627c..5803bd0c1cca 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/i2c.h>
@@ -226,8 +227,9 @@ static int migor_ts_remove(struct i2c_client *client)
return 0;
}
-static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int migor_ts_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
if (device_may_wakeup(&client->dev))
@@ -236,8 +238,9 @@ static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg)
return 0;
}
-static int migor_ts_resume(struct i2c_client *client)
+static int migor_ts_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
if (device_may_wakeup(&client->dev))
@@ -246,6 +249,8 @@ static int migor_ts_resume(struct i2c_client *client)
return 0;
}
+static SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume);
+
static const struct i2c_device_id migor_ts_id[] = {
{ "migor_ts", 0 },
{ }
@@ -255,11 +260,10 @@ MODULE_DEVICE_TABLE(i2c, migor_ts);
static struct i2c_driver migor_ts_driver = {
.driver = {
.name = "migor_ts",
+ .pm = &migor_ts_pm,
},
.probe = migor_ts_probe,
.remove = migor_ts_remove,
- .suspend = migor_ts_suspend,
- .resume = migor_ts_resume,
.id_table = migor_ts_id,
};
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index cf1dba2e267c..22a3411e93c5 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -14,6 +14,7 @@
*/
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -289,9 +290,9 @@ static int __devinit tsc_probe(struct platform_device *pdev)
}
ts->clk = clk_get(dev, NULL);
- if (!ts->clk) {
+ if (IS_ERR(ts->clk)) {
dev_err(dev, "cannot claim device clock\n");
- error = -EINVAL;
+ error = PTR_ERR(ts->clk);
goto error_clk;
}
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 8ed53aded2d3..c14412ef4648 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2008 Jaya Kumar
* Copyright (c) 2010 Red Hat, Inc.
+ * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <pingc@wacom.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
@@ -50,6 +51,10 @@ MODULE_LICENSE("GPL");
#define W8001_PKTLEN_TPCCTL 11 /* control packet */
#define W8001_PKTLEN_TOUCH2FG 13
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION 100
+#define W8001_TOUCH_RESOLUTION 10
+
struct w8001_coord {
u8 rdy;
u8 tsw;
@@ -64,11 +69,11 @@ struct w8001_coord {
/* touch query reply packet */
struct w8001_touch_query {
+ u16 x;
+ u16 y;
u8 panel_res;
u8 capacity_res;
u8 sensor_id;
- u16 x;
- u16 y;
};
/*
@@ -87,9 +92,14 @@ struct w8001 {
char phys[32];
int type;
unsigned int pktlen;
+ u16 max_touch_x;
+ u16 max_touch_y;
+ u16 max_pen_x;
+ u16 max_pen_y;
+ char name[64];
};
-static void parse_data(u8 *data, struct w8001_coord *coord)
+static void parse_pen_data(u8 *data, struct w8001_coord *coord)
{
memset(coord, 0, sizeof(*coord));
@@ -113,11 +123,30 @@ static void parse_data(u8 *data, struct w8001_coord *coord)
coord->tilt_y = data[8] & 0x7F;
}
-static void parse_touch(struct w8001 *w8001)
+static void parse_single_touch(u8 *data, struct w8001_coord *coord)
+{
+ coord->x = (data[1] << 7) | data[2];
+ coord->y = (data[3] << 7) | data[4];
+ coord->tsw = data[0] & 0x01;
+}
+
+static void scale_touch_coordinates(struct w8001 *w8001,
+ unsigned int *x, unsigned int *y)
+{
+ if (w8001->max_pen_x && w8001->max_touch_x)
+ *x = *x * w8001->max_pen_x / w8001->max_touch_x;
+
+ if (w8001->max_pen_y && w8001->max_touch_y)
+ *y = *y * w8001->max_pen_y / w8001->max_touch_y;
+}
+
+static void parse_multi_touch(struct w8001 *w8001)
{
struct input_dev *dev = w8001->dev;
unsigned char *data = w8001->data;
+ unsigned int x, y;
int i;
+ int count = 0;
for (i = 0; i < 2; i++) {
bool touch = data[0] & (1 << i);
@@ -125,15 +154,29 @@ static void parse_touch(struct w8001 *w8001)
input_mt_slot(dev, i);
input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
if (touch) {
- int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]);
- int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]);
+ x = (data[6 * i + 1] << 7) | data[6 * i + 2];
+ y = (data[6 * i + 3] << 7) | data[6 * i + 4];
/* data[5,6] and [11,12] is finger capacity */
+ /* scale to pen maximum */
+ scale_touch_coordinates(w8001, &x, &y);
+
input_report_abs(dev, ABS_MT_POSITION_X, x);
input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ count++;
}
}
+ /* emulate single touch events when stylus is out of proximity.
+ * This is to make single touch backward support consistent
+ * across all Wacom single touch devices.
+ */
+ if (w8001->type != BTN_TOOL_PEN &&
+ w8001->type != BTN_TOOL_RUBBER) {
+ w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED;
+ input_mt_report_pointer_emulation(dev, true);
+ }
+
input_sync(dev);
}
@@ -152,6 +195,15 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
query->y = data[5] << 9;
query->y |= data[6] << 2;
query->y |= (data[2] >> 3) & 0x3;
+
+ /* Early days' single-finger touch models need the following defaults */
+ if (!query->x && !query->y) {
+ query->x = 1024;
+ query->y = 1024;
+ if (query->panel_res)
+ query->x = query->y = (1 << query->panel_res);
+ query->panel_res = W8001_TOUCH_RESOLUTION;
+ }
}
static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
@@ -161,16 +213,15 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
/*
* We have 1 bit for proximity (rdy) and 3 bits for tip, side,
* side2/eraser. If rdy && f2 are set, this can be either pen + side2,
- * or eraser. assume
+ * or eraser. Assume:
* - if dev is already in proximity and f2 is toggled → pen + side2
* - if dev comes into proximity with f2 set → eraser
* If f2 disappears after assuming eraser, fake proximity out for
* eraser and in for pen.
*/
- if (!w8001->type) {
- w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- } else if (w8001->type == BTN_TOOL_RUBBER) {
+ switch (w8001->type) {
+ case BTN_TOOL_RUBBER:
if (!coord->f2) {
input_report_abs(dev, ABS_PRESSURE, 0);
input_report_key(dev, BTN_TOUCH, 0);
@@ -180,8 +231,21 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
input_sync(dev);
w8001->type = BTN_TOOL_PEN;
}
- } else {
+ break;
+
+ case BTN_TOOL_FINGER:
+ input_report_key(dev, BTN_TOUCH, 0);
+ input_report_key(dev, BTN_TOOL_FINGER, 0);
+ input_sync(dev);
+ /* fall through */
+
+ case KEY_RESERVED:
+ w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ break;
+
+ default:
input_report_key(dev, BTN_STYLUS2, coord->f2);
+ break;
}
input_report_abs(dev, ABS_X, coord->x);
@@ -193,7 +257,26 @@ static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
input_sync(dev);
if (!coord->rdy)
- w8001->type = 0;
+ w8001->type = KEY_RESERVED;
+}
+
+static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord)
+{
+ struct input_dev *dev = w8001->dev;
+ unsigned int x = coord->x;
+ unsigned int y = coord->y;
+
+ /* scale to pen maximum */
+ scale_touch_coordinates(w8001, &x, &y);
+
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ input_report_key(dev, BTN_TOUCH, coord->tsw);
+ input_report_key(dev, BTN_TOOL_FINGER, coord->tsw);
+
+ input_sync(dev);
+
+ w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED;
}
static irqreturn_t w8001_interrupt(struct serio *serio,
@@ -214,9 +297,18 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
case W8001_PKTLEN_TOUCH93 - 1:
case W8001_PKTLEN_TOUCH9A - 1:
- /* ignore one-finger touch packet. */
- if (w8001->pktlen == w8001->idx)
+ tmp = w8001->data[0] & W8001_TOUCH_BYTE;
+ if (tmp != W8001_TOUCH_BYTE)
+ break;
+
+ if (w8001->pktlen == w8001->idx) {
w8001->idx = 0;
+ if (w8001->type != BTN_TOOL_PEN &&
+ w8001->type != BTN_TOOL_RUBBER) {
+ parse_single_touch(w8001->data, &coord);
+ report_single_touch(w8001, &coord);
+ }
+ }
break;
/* Pen coordinates packet */
@@ -225,18 +317,18 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
if (unlikely(tmp == W8001_TAB_BYTE))
break;
- tmp = (w8001->data[0] & W8001_TOUCH_BYTE);
+ tmp = w8001->data[0] & W8001_TOUCH_BYTE;
if (tmp == W8001_TOUCH_BYTE)
break;
w8001->idx = 0;
- parse_data(w8001->data, &coord);
+ parse_pen_data(w8001->data, &coord);
report_pen_events(w8001, &coord);
break;
/* control packet */
case W8001_PKTLEN_TPCCTL - 1:
- tmp = (w8001->data[0] & W8001_TOUCH_MASK);
+ tmp = w8001->data[0] & W8001_TOUCH_MASK;
if (tmp == W8001_TOUCH_BYTE)
break;
@@ -249,7 +341,7 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
/* 2 finger touch packet */
case W8001_PKTLEN_TOUCH2FG - 1:
w8001->idx = 0;
- parse_touch(w8001);
+ parse_multi_touch(w8001);
break;
}
@@ -279,6 +371,7 @@ static int w8001_setup(struct w8001 *w8001)
{
struct input_dev *dev = w8001->dev;
struct w8001_coord coord;
+ struct w8001_touch_query touch;
int error;
error = w8001_command(w8001, W8001_CMD_STOP, false);
@@ -287,22 +380,33 @@ static int w8001_setup(struct w8001 *w8001)
msleep(250); /* wait 250ms before querying the device */
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
+
/* penabled? */
error = w8001_command(w8001, W8001_CMD_QUERY, true);
if (!error) {
+ __set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_PEN, dev->keybit);
__set_bit(BTN_TOOL_RUBBER, dev->keybit);
__set_bit(BTN_STYLUS, dev->keybit);
__set_bit(BTN_STYLUS2, dev->keybit);
- parse_data(w8001->response, &coord);
+
+ parse_pen_data(w8001->response, &coord);
+ w8001->max_pen_x = coord.x;
+ w8001->max_pen_y = coord.y;
input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+ input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+ input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
if (coord.tilt_x && coord.tilt_y) {
input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
}
+ w8001->id = 0x90;
+ strlcat(w8001->name, " Penabled", sizeof(w8001->name));
}
/* Touch enabled? */
@@ -313,24 +417,41 @@ static int w8001_setup(struct w8001 *w8001)
* second byte is empty, which indicates touch is not supported.
*/
if (!error && w8001->response[1]) {
- struct w8001_touch_query touch;
+ __set_bit(BTN_TOUCH, dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, dev->keybit);
parse_touchquery(w8001->response, &touch);
+ w8001->max_touch_x = touch.x;
+ w8001->max_touch_y = touch.y;
+
+ if (w8001->max_pen_x && w8001->max_pen_y) {
+ /* if pen is supported scale to pen maximum */
+ touch.x = w8001->max_pen_x;
+ touch.y = w8001->max_pen_y;
+ touch.panel_res = W8001_PEN_RESOLUTION;
+ }
input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
- __set_bit(BTN_TOOL_FINGER, dev->keybit);
+ input_abs_set_res(dev, ABS_X, touch.panel_res);
+ input_abs_set_res(dev, ABS_Y, touch.panel_res);
switch (touch.sensor_id) {
case 0:
case 2:
w8001->pktlen = W8001_PKTLEN_TOUCH93;
+ w8001->id = 0x93;
+ strlcat(w8001->name, " 1FG", sizeof(w8001->name));
break;
+
case 1:
case 3:
case 4:
w8001->pktlen = W8001_PKTLEN_TOUCH9A;
+ strlcat(w8001->name, " 1FG", sizeof(w8001->name));
+ w8001->id = 0x9a;
break;
+
case 5:
w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
@@ -341,10 +462,18 @@ static int w8001_setup(struct w8001 *w8001)
0, touch.y, 0, 0);
input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
0, MT_TOOL_MAX, 0, 0);
+
+ strlcat(w8001->name, " 2FG", sizeof(w8001->name));
+ if (w8001->max_pen_x && w8001->max_pen_y)
+ w8001->id = 0xE3;
+ else
+ w8001->id = 0xE2;
break;
}
}
+ strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));
+
return w8001_command(w8001, W8001_CMD_START, false);
}
@@ -384,22 +513,10 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
}
w8001->serio = serio;
- w8001->id = serio->id.id;
w8001->dev = input_dev;
init_completion(&w8001->cmd_done);
snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);
- input_dev->name = "Wacom W8001 Penabled Serial TouchScreen";
- input_dev->phys = w8001->phys;
- input_dev->id.bustype = BUS_RS232;
- input_dev->id.vendor = SERIO_W8001;
- input_dev->id.product = w8001->id;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &serio->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
serio_set_drvdata(serio, w8001);
err = serio_open(serio, drv);
if (err)
@@ -409,6 +526,14 @@ static int w8001_connect(struct serio *serio, struct serio_driver *drv)
if (err)
goto fail3;
+ input_dev->name = w8001->name;
+ input_dev->phys = w8001->phys;
+ input_dev->id.product = w8001->id;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = 0x056a;
+ input_dev->id.version = 0x0100;
+ input_dev->dev.parent = &serio->dev;
+
err = input_register_device(w8001->dev);
if (err)
goto fail3;
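
The scale_touch_coordinates() helper introduced above maps raw touch units onto
the pen's coordinate range so that both tools report in a single space. A rough
worked example with made-up maxima (pen axis 0..26000, touch axis 0..4096):

static unsigned int example_scale_touch_x(void)
{
        unsigned int x = 2048;                  /* raw touch sample, mid panel */
        unsigned int max_pen_x = 26000, max_touch_x = 4096;

        x = x * max_pen_x / max_touch_x;        /* 2048 * 26000 / 4096 */
        return x;                               /* = 13000 in pen units */
}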
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 178942a2ee61..8a3c5cfc4fea 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2318,7 +2318,7 @@ static int gigaset_probe(struct usb_interface *interface,
__func__, le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
GIGASET_MODULENAME);
if (!cs)
@@ -2576,7 +2576,7 @@ static int __init bas_gigaset_init(void)
{
int result;
- /* allocate memory for our driver state and intialize it */
+ /* allocate memory for our driver state and initialize it */
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&gigops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index d151dcbf770d..0ef09d0eb96b 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -513,7 +513,7 @@ gigaset_tty_open(struct tty_struct *tty)
return -ENODEV;
}
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
if (!cs)
goto error;
@@ -771,7 +771,7 @@ static int __init ser_gigaset_init(void)
return rc;
}
- /* allocate memory for our driver state and intialize it */
+ /* allocate memory for our driver state and initialize it */
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4a66338f4e7d..5e3300d8a2a5 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -695,7 +695,7 @@ static int gigaset_probe(struct usb_interface *interface,
dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
if (!cs)
return -ENODEV;
@@ -894,7 +894,7 @@ static int __init usb_gigaset_init(void)
{
int result;
- /* allocate memory for our driver state and intialize it */
+ /* allocate memory for our driver state and initialize it */
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798442fa..7bd5baa547be 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
stream interface.
If synchronous service was requested, then function
does return amount of data written to stream.
- 'final' does indicate that pice of data to be written is
+ 'final' does indicate that piece of data to be written is
final part of frame (necessary only by structured datatransfer)
return 0 if zero lengh packet was written
return -1 if stream is full
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 74a6ccf9065c..8121e046b739 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -29,7 +29,7 @@ struct isac_hw {
u32 type;
u32 off; /* offset to isac regs */
char *name;
- spinlock_t *hwlock; /* lock HW acccess */
+ spinlock_t *hwlock; /* lock HW access */
read_reg_func *read_reg;
write_reg_func *write_reg;
fifo_func *read_fifo;
@@ -70,7 +70,7 @@ struct ipac_hw {
struct hscx_hw hscx[2];
char *name;
void *hw;
- spinlock_t *hwlock; /* lock HW acccess */
+ spinlock_t *hwlock; /* lock HW access */
struct module *owner;
u32 type;
read_reg_func *read_reg;
diff --git a/drivers/isdn/hardware/mISDN/isar.h b/drivers/isdn/hardware/mISDN/isar.h
index 4a134acd44d0..9962bdf699c7 100644
--- a/drivers/isdn/hardware/mISDN/isar.h
+++ b/drivers/isdn/hardware/mISDN/isar.h
@@ -44,7 +44,7 @@ struct isar_ch {
struct isar_hw {
struct isar_ch ch[2];
void *hw;
- spinlock_t *hwlock; /* lock HW acccess */
+ spinlock_t *hwlock; /* lock HW access */
char *name;
struct module *owner;
read_reg_func *read_reg;
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791978d8..cfff0c41d298 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb, *oskb;
+ struct sk_buff *skb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
- int i;
+ int i, hdr_space_needed;
int unsigned p1;
u_long flags;
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
if (!skb)
return;
+ hdr_space_needed = l2headersize(l2, 0);
+ if (hdr_space_needed > skb_headroom(skb)) {
+ struct sk_buff *orig_skb = skb;
+
+ skb = skb_realloc_headroom(skb, hdr_space_needed);
+ if (!skb) {
+ dev_kfree_skb(orig_skb);
+ return;
+ }
+ }
spin_lock_irqsave(&l2->lock, flags);
if(test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- p1 = skb->data - skb->head;
- if (p1 >= i)
- memcpy(skb_push(skb, i), header, i);
- else {
- printk(KERN_WARNING
- "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
- oskb = skb;
- skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
- memcpy(skb_put(skb, i), header, i);
- skb_copy_from_linear_data(oskb,
- skb_put(skb, oskb->len), oskb->len);
- dev_kfree_skb(oskb);
- }
+ memcpy(skb_push(skb, i), header, i);
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
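
The isdnl2 change above replaces the hand-rolled copy with the usual headroom
idiom: check skb_headroom() before skb_push(), and fall back to
skb_realloc_headroom() when the layer-2 header does not fit. A minimal sketch
under that assumption (helper name and calling convention are hypothetical):

#include <linux/skbuff.h>

static struct sk_buff *ensure_headroom(struct sk_buff *skb, unsigned int hdr_len)
{
        struct sk_buff *nskb;

        if (hdr_len <= skb_headroom(skb))
                return skb;                     /* room to skb_push() directly */

        nskb = skb_realloc_headroom(skb, hdr_len);  /* allocates a copy */
        dev_kfree_skb(skb);                     /* the original is no longer used */
        return nskb;                            /* NULL if the allocation failed */
}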
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df4089385..18b801ad97a4 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@ extern hysdn_card *card_root; /* pointer to first card */
/*************************/
/* im/exported functions */
/*************************/
-extern char *hysdn_getrev(const char *);
/* hysdn_procconf.c */
extern int hysdn_procconf_init(void); /* init proc config filesys */
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
/* hysdn_net.c */
extern unsigned int hynet_enable;
-extern char *hysdn_net_revision;
extern int hysdn_net_create(hysdn_card *); /* create a new net device */
extern int hysdn_net_release(hysdn_card *); /* delete the device */
extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2f08c6..0ab42ace1692 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
MODULE_AUTHOR("Werner Cornelius");
MODULE_LICENSE("GPL");
-static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
static int cardmax; /* number of found cards */
hysdn_card *card_root = NULL; /* pointer to first card */
static hysdn_card *card_last = NULL; /* pointer to first card */
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
/* Additionally newer versions may be activated without rebooting. */
/****************************************************************************/
-/******************************************************/
-/* extract revision number from string for log output */
-/******************************************************/
-char *
-hysdn_getrev(const char *revision)
-{
- char *rev;
- char *p;
-
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "???";
- return rev;
-}
-
-
/****************************************************************************/
/* init_module is called once when the module is loaded to do all necessary */
/* things like autodetect... */
@@ -175,13 +155,9 @@ static int hysdn_have_procfs;
static int __init
hysdn_init(void)
{
- char tmp[50];
int rc;
- strcpy(tmp, hysdn_init_revision);
- printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
- strcpy(tmp, hysdn_net_revision);
- printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
+ printk(KERN_NOTICE "HYSDN: module loaded\n");
rc = pci_register_driver(&hysdn_pci_driver);
if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d89d719..11f2cce26005 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
unsigned int hynet_enable = 0xffffffff;
module_param(hynet_enable, uint, 0);
-/* store the actual version for log reporting */
-char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
-
#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
/****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39c3356..5fe83bd42061 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
#include "hysdn_defs.h"
static DEFINE_MUTEX(hysdn_conf_mutex);
-static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
#define INFO_OUT_LEN 80 /* length of info line including lf */
@@ -404,7 +403,7 @@ hysdn_procconf_init(void)
card = card->next; /* next entry */
}
- printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
+ printk(KERN_NOTICE "HYSDN: procfs initialised\n");
return (0);
} /* hysdn_procconf_init */
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index f2b5bab5e6a1..1f355bb85e54 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
static int __init icn_init(void)
{
char *p;
- char rev[20];
+ char rev[21];
memset(&dev, 0, sizeof(icn_dev));
dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@ static int __init icn_init(void)
if ((p = strchr(revision, ':'))) {
strncpy(rev, p + 1, 20);
+ rev[20] = '\0';
p = strchr(rev, '$');
if (p)
*p = 0;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 76d9e673b4e1..309bacf1fadc 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -112,7 +112,7 @@
* Disable rx-data:
* If cmx is realized in hardware, rx data will be disabled if requested by
* the upper layer. If dtmf decoding is done by software and enabled, rx data
- * will not be diabled but blocked to the upper layer.
+ * will not be disabled but blocked to the upper layer.
*
* HFC conference engine:
* If it is possible to realize all features using hardware, hardware will be
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 33facd0c45d1..80a3ae3c00b9 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -98,7 +98,6 @@
#define LP5521_EXT_CLK_USED 0x08
struct lp5521_engine {
- const struct attribute_group *attributes;
int id;
u8 mode;
u8 prog_page;
@@ -225,25 +224,22 @@ static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
curr);
}
-static void lp5521_init_engine(struct lp5521_chip *chip,
- const struct attribute_group *attr_group)
+static void lp5521_init_engine(struct lp5521_chip *chip)
{
int i;
for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
chip->engines[i].id = i + 1;
chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
chip->engines[i].prog_page = i;
- chip->engines[i].attributes = &attr_group[i];
}
}
-static int lp5521_configure(struct i2c_client *client,
- const struct attribute_group *attr_group)
+static int lp5521_configure(struct i2c_client *client)
{
struct lp5521_chip *chip = i2c_get_clientdata(client);
int ret;
- lp5521_init_engine(chip, attr_group);
+ lp5521_init_engine(chip);
/* Set all PWMs to direct control mode */
ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
@@ -329,9 +325,6 @@ static int lp5521_detect(struct i2c_client *client)
/* Set engine mode and create appropriate sysfs attributes, if required. */
static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
{
- struct lp5521_chip *chip = engine_to_lp5521(engine);
- struct i2c_client *client = chip->client;
- struct device *dev = &client->dev;
int ret = 0;
/* if in that mode already do nothing, except for run */
@@ -343,18 +336,10 @@ static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
} else if (mode == LP5521_CMD_LOAD) {
lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
-
- ret = sysfs_create_group(&dev->kobj, engine->attributes);
- if (ret)
- return ret;
} else if (mode == LP5521_CMD_DISABLED) {
lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
}
- /* remove load attribute from sysfs if not in load mode */
- if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
- sysfs_remove_group(&dev->kobj, engine->attributes);
-
engine->mode = mode;
return ret;
@@ -373,6 +358,8 @@ static int lp5521_do_store_load(struct lp5521_engine *engine,
while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+ if (ret != 2)
+ goto fail;
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
goto fail;
@@ -387,7 +374,10 @@ static int lp5521_do_store_load(struct lp5521_engine *engine,
goto fail;
mutex_lock(&chip->lock);
- ret = lp5521_load_program(engine, pattern);
+ if (engine->mode == LP5521_CMD_LOAD)
+ ret = lp5521_load_program(engine, pattern);
+ else
+ ret = -EINVAL;
mutex_unlock(&chip->lock);
if (ret) {
@@ -574,20 +564,8 @@ static struct attribute *lp5521_attributes[] = {
&dev_attr_engine2_mode.attr,
&dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- NULL
-};
-
-static struct attribute *lp5521_engine1_attributes[] = {
&dev_attr_engine1_load.attr,
- NULL
-};
-
-static struct attribute *lp5521_engine2_attributes[] = {
&dev_attr_engine2_load.attr,
- NULL
-};
-
-static struct attribute *lp5521_engine3_attributes[] = {
&dev_attr_engine3_load.attr,
NULL
};
@@ -596,12 +574,6 @@ static const struct attribute_group lp5521_group = {
.attrs = lp5521_attributes,
};
-static const struct attribute_group lp5521_engine_group[] = {
- {.attrs = lp5521_engine1_attributes },
- {.attrs = lp5521_engine2_attributes },
- {.attrs = lp5521_engine3_attributes },
-};
-
static int lp5521_register_sysfs(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -616,12 +588,6 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
sysfs_remove_group(&dev->kobj, &lp5521_group);
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
- if (chip->engines[i].mode == LP5521_CMD_LOAD)
- sysfs_remove_group(&dev->kobj,
- chip->engines[i].attributes);
- }
-
for (i = 0; i < chip->num_leds; i++)
sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
&lp5521_led_attribute_group);
@@ -651,7 +617,8 @@ static int __init lp5521_init_led(struct lp5521_led *led,
return -EINVAL;
}
- snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+ snprintf(name, sizeof(name), "%s:channel%d",
+ pdata->label ?: client->name, chan);
led->cdev.brightness_set = lp5521_set_brightness;
led->cdev.name = name;
res = led_classdev_register(dev, &led->cdev);
@@ -723,7 +690,7 @@ static int lp5521_probe(struct i2c_client *client,
dev_info(&client->dev, "%s programmable led chip found\n", id->name);
- ret = lp5521_configure(client, lp5521_engine_group);
+ ret = lp5521_configure(client);
if (ret < 0) {
dev_err(&client->dev, "error configuring chip\n");
goto fail2;
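
The lp5521 rework above folds the per-engine attributes into one static
attribute group that is registered once. A minimal, generic sketch of that
sysfs pattern (names are hypothetical, not the driver's own):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "ok\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL
};

static const struct attribute_group example_group = {
        .attrs = example_attrs,
};

/* created once with sysfs_create_group(&dev->kobj, &example_group)
 * and torn down with sysfs_remove_group(&dev->kobj, &example_group) */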
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 0cc4ead2fd8b..d0c4068ecddd 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -105,7 +105,6 @@
#define SHIFT_MASK(id) (((id) - 1) * 2)
struct lp5523_engine {
- const struct attribute_group *attributes;
int id;
u8 mode;
u8 prog_page;
@@ -403,14 +402,23 @@ static ssize_t store_engine_leds(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct lp5523_chip *chip = i2c_get_clientdata(client);
u16 mux = 0;
+ ssize_t ret;
if (lp5523_mux_parse(buf, &mux, len))
return -EINVAL;
+ mutex_lock(&chip->lock);
+ ret = -EINVAL;
+ if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
+ goto leave;
+
if (lp5523_load_mux(&chip->engines[nr - 1], mux))
- return -EINVAL;
+ goto leave;
- return len;
+ ret = len;
+leave:
+ mutex_unlock(&chip->lock);
+ return ret;
}
#define store_leds(nr) \
@@ -556,7 +564,11 @@ static int lp5523_do_store_load(struct lp5523_engine *engine,
mutex_lock(&chip->lock);
- ret = lp5523_load_program(engine, pattern);
+ if (engine->mode == LP5523_CMD_LOAD)
+ ret = lp5523_load_program(engine, pattern);
+ else
+ ret = -EINVAL;
+
mutex_unlock(&chip->lock);
if (ret) {
@@ -737,37 +749,18 @@ static struct attribute *lp5523_attributes[] = {
&dev_attr_engine2_mode.attr,
&dev_attr_engine3_mode.attr,
&dev_attr_selftest.attr,
- NULL
-};
-
-static struct attribute *lp5523_engine1_attributes[] = {
&dev_attr_engine1_load.attr,
&dev_attr_engine1_leds.attr,
- NULL
-};
-
-static struct attribute *lp5523_engine2_attributes[] = {
&dev_attr_engine2_load.attr,
&dev_attr_engine2_leds.attr,
- NULL
-};
-
-static struct attribute *lp5523_engine3_attributes[] = {
&dev_attr_engine3_load.attr,
&dev_attr_engine3_leds.attr,
- NULL
};
static const struct attribute_group lp5523_group = {
.attrs = lp5523_attributes,
};
-static const struct attribute_group lp5523_engine_group[] = {
- {.attrs = lp5523_engine1_attributes },
- {.attrs = lp5523_engine2_attributes },
- {.attrs = lp5523_engine3_attributes },
-};
-
static int lp5523_register_sysfs(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -788,10 +781,6 @@ static void lp5523_unregister_sysfs(struct i2c_client *client)
sysfs_remove_group(&dev->kobj, &lp5523_group);
- for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
- if (chip->engines[i].mode == LP5523_CMD_LOAD)
- sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
-
for (i = 0; i < chip->num_leds; i++)
sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
&lp5523_led_attribute_group);
@@ -802,10 +791,6 @@ static void lp5523_unregister_sysfs(struct i2c_client *client)
/*--------------------------------------------------------------*/
static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
{
- /* engine to chip */
- struct lp5523_chip *chip = engine_to_lp5523(engine);
- struct i2c_client *client = chip->client;
- struct device *dev = &client->dev;
int ret = 0;
/* if in that mode already do nothing, except for run */
@@ -817,18 +802,10 @@ static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
} else if (mode == LP5523_CMD_LOAD) {
lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
-
- ret = sysfs_create_group(&dev->kobj, engine->attributes);
- if (ret)
- return ret;
} else if (mode == LP5523_CMD_DISABLED) {
lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
}
- /* remove load attribute from sysfs if not in load mode */
- if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
- sysfs_remove_group(&dev->kobj, engine->attributes);
-
engine->mode = mode;
return ret;
@@ -845,7 +822,6 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
engine->prog_page = id - 1;
engine->mux_page = id + 2;
- engine->attributes = &lp5523_engine_group[id - 1];
return 0;
}
@@ -870,7 +846,8 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
return -EINVAL;
}
- snprintf(name, 32, "lp5523:channel%d", chan);
+ snprintf(name, sizeof(name), "%s:channel%d",
+ pdata->label ?: "lp5523", chan);
led->cdev.name = name;
led->cdev.brightness_set = lp5523_set_brightness;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 43d08756d823..afac338d5025 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -200,6 +200,32 @@ static void pca9532_led_work(struct work_struct *work)
pca9532_setled(led);
}
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+{
+ int i = n_devs;
+
+ if (!data)
+ return;
+
+ while (--i >= 0) {
+ switch (data->leds[i].type) {
+ case PCA9532_TYPE_NONE:
+ break;
+ case PCA9532_TYPE_LED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+ case PCA9532_TYPE_N2100_BEEP:
+ if (data->idev != NULL) {
+ input_unregister_device(data->idev);
+ cancel_work_sync(&data->work);
+ data->idev = NULL;
+ }
+ break;
+ }
+ }
+}
+
static int pca9532_configure(struct i2c_client *client,
struct pca9532_data *data, struct pca9532_platform_data *pdata)
{
@@ -274,25 +300,7 @@ static int pca9532_configure(struct i2c_client *client,
return 0;
exit:
- if (i > 0)
- for (i = i - 1; i >= 0; i--)
- switch (data->leds[i].type) {
- case PCA9532_TYPE_NONE:
- break;
- case PCA9532_TYPE_LED:
- led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
- break;
- case PCA9532_TYPE_N2100_BEEP:
- if (data->idev != NULL) {
- input_unregister_device(data->idev);
- input_free_device(data->idev);
- cancel_work_sync(&data->work);
- data->idev = NULL;
- }
- break;
- }
-
+ pca9532_destroy_devices(data, i);
return err;
}
@@ -329,25 +337,7 @@ static int pca9532_probe(struct i2c_client *client,
static int pca9532_remove(struct i2c_client *client)
{
struct pca9532_data *data = i2c_get_clientdata(client);
- int i;
- for (i = 0; i < 16; i++)
- switch (data->leds[i].type) {
- case PCA9532_TYPE_NONE:
- break;
- case PCA9532_TYPE_LED:
- led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
- break;
- case PCA9532_TYPE_N2100_BEEP:
- if (data->idev != NULL) {
- input_unregister_device(data->idev);
- input_free_device(data->idev);
- cancel_work_sync(&data->work);
- data->idev = NULL;
- }
- break;
- }
-
+ pca9532_destroy_devices(data, 16);
kfree(data);
return 0;
}
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index da3fa8dcdf5b..666daf77872e 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -69,6 +69,7 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->pwm = pwm_request(cur_led->pwm_id,
cur_led->name);
if (IS_ERR(led_dat->pwm)) {
+ ret = PTR_ERR(led_dat->pwm);
dev_err(&pdev->dev, "unable to request PWM %d\n",
cur_led->pwm_id);
goto err;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index f948e57bd9b8..2b513a2ad7de 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -26,6 +26,7 @@ struct bl_trig_notifier {
int brightness;
int old_status;
struct notifier_block notifier;
+ unsigned invert;
};
static int fb_notifier_callback(struct notifier_block *p,
@@ -36,23 +37,64 @@ static int fb_notifier_callback(struct notifier_block *p,
struct led_classdev *led = n->led;
struct fb_event *fb_event = data;
int *blank = fb_event->data;
+ int new_status = *blank ? BLANK : UNBLANK;
switch (event) {
case FB_EVENT_BLANK :
- if (*blank && n->old_status == UNBLANK) {
+ if (new_status == n->old_status)
+ break;
+
+ if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
led_set_brightness(led, LED_OFF);
- n->old_status = BLANK;
- } else if (!*blank && n->old_status == BLANK) {
+ } else {
led_set_brightness(led, n->brightness);
- n->old_status = UNBLANK;
}
+
+ n->old_status = new_status;
+
break;
}
return 0;
}
+static ssize_t bl_trig_invert_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led = dev_get_drvdata(dev);
+ struct bl_trig_notifier *n = led->trigger_data;
+
+ return sprintf(buf, "%u\n", n->invert);
+}
+
+static ssize_t bl_trig_invert_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t num)
+{
+ struct led_classdev *led = dev_get_drvdata(dev);
+ struct bl_trig_notifier *n = led->trigger_data;
+ unsigned long invert;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &invert);
+ if (ret < 0)
+ return ret;
+
+ if (invert > 1)
+ return -EINVAL;
+
+ n->invert = invert;
+
+ /* After inverting, we need to update the LED. */
+ if ((n->old_status == BLANK) ^ n->invert)
+ led_set_brightness(led, LED_OFF);
+ else
+ led_set_brightness(led, n->brightness);
+
+ return num;
+}
+static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
+
static void bl_trig_activate(struct led_classdev *led)
{
int ret;
@@ -66,6 +108,10 @@ static void bl_trig_activate(struct led_classdev *led)
return;
}
+ ret = device_create_file(led->dev, &dev_attr_inverted);
+ if (ret)
+ goto err_invert;
+
n->led = led;
n->brightness = led->brightness;
n->old_status = UNBLANK;
@@ -74,6 +120,12 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
+
+ return;
+
+err_invert:
+ led->trigger_data = NULL;
+ kfree(n);
}
static void bl_trig_deactivate(struct led_classdev *led)
@@ -82,6 +134,7 @@ static void bl_trig_deactivate(struct led_classdev *led)
(struct bl_trig_notifier *) led->trigger_data;
if (n) {
+ device_remove_file(led->dev, &dev_attr_inverted);
fb_unregister_client(&n->notifier);
kfree(n);
}
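
The "inverted" attribute added above flips the trigger's reaction to blanking
events. Its net effect can be summarised by a tiny predicate (a sketch, not
driver code): normally the LED turns off when the framebuffer blanks; with the
flag set, the sense is reversed.

#include <linux/types.h>

static bool led_should_be_off(bool screen_blanked, bool invert)
{
        return screen_blanked ^ invert;
}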
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 991d93be0f44..ecc4bf3f37a9 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -99,7 +99,7 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
- return sprintf(buf, "%s\n", gpio_data->inverted ? "yes" : "no");
+ return sprintf(buf, "%u\n", gpio_data->inverted);
}
static ssize_t gpio_trig_inverted_store(struct device *dev,
@@ -107,16 +107,17 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
{
struct led_classdev *led = dev_get_drvdata(dev);
struct gpio_trig_data *gpio_data = led->trigger_data;
- unsigned inverted;
+ unsigned long inverted;
int ret;
- ret = sscanf(buf, "%u", &inverted);
- if (ret < 1) {
- dev_err(dev, "invalid value\n");
+ ret = strict_strtoul(buf, 10, &inverted);
+ if (ret < 0)
+ return ret;
+
+ if (inverted > 1)
return -EINVAL;
- }
- gpio_data->inverted = !!inverted;
+ gpio_data->inverted = inverted;
/* After inverting, we need to update the LED. */
schedule_work(&gpio_data->work);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 04b22128a474..d21578ee95de 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1137,7 +1137,7 @@ void free_guest_pagetable(struct lguest *lg)
*/
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
- pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+ pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
pte_t regs_pte;
#ifdef CONFIG_X86_PAE
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index b4eb675a807e..9f1659c3d1f3 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
- if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
- __get_cpu_var(lg_last_cpu) = cpu;
+ if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __this_cpu_write(lg_last_cpu, cpu);
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
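
The lguest hunks above switch from __get_cpu_var() to the __this_cpu_read() /
__this_cpu_write() accessors. A minimal sketch of the pattern with a
hypothetical per-CPU variable (the caller is assumed to run with preemption
disabled, as on the switcher path):

#include <linux/percpu.h>

struct lg_cpu;                                  /* opaque here, as in the driver */

static DEFINE_PER_CPU(struct lg_cpu *, example_last_cpu);

static void example_remember_cpu(struct lg_cpu *cpu)
{
        if (__this_cpu_read(example_last_cpu) != cpu)
                __this_cpu_write(example_last_cpu, cpu);
}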
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc09d76..4daf9e5a7736 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
/* Set the DMA ops to the ones from the PCI device, this could be
* fishy if we didn't know that on PowerMac it's always direct ops
* or iommu ops that will work fine
+ *
+ * To get all the fields, copy all archdata
*/
- dev->ofdev.dev.archdata.dma_ops =
- chip->lbus.pdev->dev.archdata.dma_ops;
- dev->ofdev.dev.archdata.dma_data =
- chip->lbus.pdev->dev.archdata.dma_data;
+ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
#endif /* CONFIG_PCI */
#ifdef DEBUG
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 44549272333c..f3a29f264db9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -443,7 +443,7 @@ static int fan_read_reg(int reg, unsigned char *buf, int nb)
tries = 0;
for (;;) {
nr = i2c_master_recv(fcu, buf, nb);
- if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100)
+ if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
break;
msleep(10);
++tries;
@@ -464,7 +464,7 @@ static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
tries = 0;
for (;;) {
nw = i2c_master_send(fcu, buf, nb);
- if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100)
+ if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
break;
msleep(10);
++tries;
@@ -2213,6 +2213,9 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
{
state = state_detached;
+ of_dev = dev;
+
+ dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
/* Lookup the fans in the device tree */
fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@ static const struct of_device_id fcu_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, fcu_match);
static struct of_platform_driver fcu_of_platform_driver =
{
@@ -2252,8 +2256,6 @@ static struct of_platform_driver fcu_of_platform_driver =
*/
static int __init therm_pm72_init(void)
{
- struct device_node *np;
-
rackmac = of_machine_is_compatible("RackMac3,1");
if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@ static int __init therm_pm72_init(void)
!rackmac)
return -ENODEV;
- printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
- np = of_find_node_by_type(NULL, "fcu");
- if (np == NULL) {
- /* Some machines have strangely broken device-tree */
- np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
- if (np == NULL) {
- printk(KERN_ERR "Can't find FCU in device-tree !\n");
- return -ENODEV;
- }
- }
- of_dev = of_platform_device_create(np, "temperature", NULL);
- if (of_dev == NULL) {
- printk(KERN_ERR "Can't register FCU platform device !\n");
- return -ENODEV;
- }
-
- of_register_platform_driver(&fcu_of_platform_driver);
-
- return 0;
+ return of_register_platform_driver(&fcu_of_platform_driver);
}
static void __exit therm_pm72_exit(void)
{
of_unregister_platform_driver(&fcu_of_platform_driver);
-
- if (of_dev)
- of_device_unregister(of_dev);
}
module_init(therm_pm72_init);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 1cec02f6c431..ade1e656bfb2 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -15,7 +15,7 @@
#define MAX_PMU_LEVEL 0xFF
-static struct backlight_ops pmu_backlight_data;
+static const struct backlight_ops pmu_backlight_data;
static DEFINE_SPINLOCK(pmu_backlight_lock);
static int sleeping, uses_pmu_bl;
static u8 bl_curve[FB_BACKLIGHT_LEVELS];
@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops pmu_backlight_data = {
+static const struct backlight_ops pmu_backlight_data = {
.get_brightness = pmu_backlight_get_brightness,
.update_status = pmu_backlight_update_status,
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index cd29c8248386..8b021eb0d48c 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2257,7 +2257,7 @@ static int pmu_sleep_valid(suspend_state_t state)
&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
}
-static struct platform_suspend_ops pmu_pm_ops = {
+static const struct platform_suspend_ops pmu_pm_ops = {
.enter = powerbook_sleep,
.valid = pmu_sleep_valid,
};
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bf1a95e31559..98d9ec85e0eb 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,6 +240,30 @@ config DM_MIRROR
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_RAID
+ tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ select MD_RAID456
+ select BLK_DEV_MD
+ ---help---
+ A dm target that supports RAID4, RAID5 and RAID6 mappings
+
+ A RAID-5 set of N drives with a capacity of C MB per drive provides
+ the capacity of C * (N - 1) MB, and protects against a failure
+ of a single drive. For a given sector (row) number, (N - 1) drives
+ contain data sectors, and one drive contains the parity protection.
+ For a RAID-4 set, the parity blocks are present on a single drive,
+ while a RAID-5 set distributes the parity across the drives in one
+ of the available parity distribution methods.
+
+ A RAID-6 set of N drives with a capacity of C MB per drive
+ provides the capacity of C * (N - 2) MB, and protects
+ against a failure of any two drives. For a given sector
+ (row) number, (N - 2) drives contain data sectors, and two
+ drives contains two independent redundancy syndromes. Like
+ RAID-5, RAID-6 distributes the syndromes across the drives
+ in one of the available parity distribution methods.
+
config DM_LOG_USERSPACE
tristate "Mirror userspace logging (EXPERIMENTAL)"
depends on DM_MIRROR && EXPERIMENTAL && NET
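
The capacity formulas in the DM_RAID help text above work out as follows for a
hypothetical array of N = 4 drives with C = 1000 MB each (a stand-alone
userspace calculation, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int n = 4, c = 1000;             /* drives and MB per drive */

        printf("raid4/5: %u MB\n", c * (n - 1));  /* 3000 MB, one parity block per row */
        printf("raid6:   %u MB\n", c * (n - 2));  /* 2000 MB, two syndromes per row */
        return 0;
}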
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5e3aac41919d..d0138606c2e8 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
+obj-$(CONFIG_DM_RAID) += dm-raid.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a1ffe3527aa..9a35320fb59f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,11 +210,11 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
|| test_bit(Faulty, &rdev->flags))
continue;
- target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
+ target = offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
- page, READ)) {
+ page, READ, true)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
* quietly no-op */
@@ -264,14 +264,18 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
mdk_rdev_t *rdev = NULL;
+ struct block_device *bdev;
mddev_t *mddev = bitmap->mddev;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset;
+
+ bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
- bdev_logical_block_size(rdev->bdev));
+ bdev_logical_block_size(bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
@@ -1542,7 +1546,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
wait_event(bitmap->mddev->recovery_wait,
atomic_read(&bitmap->mddev->recovery_active) == 0);
- bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
+ bitmap->mddev->curr_resync_completed = sector;
set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
s = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d5b0e4c0e702..4e054bd91664 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,10 +18,14 @@
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
+#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
#include <linux/device-mapper.h>
@@ -63,6 +67,7 @@ struct dm_crypt_request {
struct convert_context *ctx;
struct scatterlist sg_in;
struct scatterlist sg_out;
+ sector_t iv_sector;
};
struct crypt_config;
@@ -73,11 +78,13 @@ struct crypt_iv_operations {
void (*dtr)(struct crypt_config *cc);
int (*init)(struct crypt_config *cc);
int (*wipe)(struct crypt_config *cc);
- int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+ int (*generator)(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq);
+ int (*post)(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq);
};
struct iv_essiv_private {
- struct crypto_cipher *tfm;
struct crypto_hash *hash_tfm;
u8 *salt;
};
@@ -86,11 +93,32 @@ struct iv_benbi_private {
int shift;
};
+#define LMK_SEED_SIZE 64 /* hash + 0 */
+struct iv_lmk_private {
+ struct crypto_shash *hash_tfm;
+ u8 *seed;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+/*
+ * Duplicated per-CPU state for cipher.
+ */
+struct crypt_cpu {
+ struct ablkcipher_request *req;
+ /* ESSIV: struct crypto_cipher *essiv_tfm */
+ void *iv_private;
+ struct crypto_ablkcipher *tfms[0];
+};
+
+/*
+ * The fields in here must be read only after initialization,
+ * changing state should be in crypt_cpu.
+ */
struct crypt_config {
struct dm_dev *dev;
sector_t start;
@@ -108,17 +136,25 @@ struct crypt_config {
struct workqueue_struct *crypt_queue;
char *cipher;
- char *cipher_mode;
+ char *cipher_string;
struct crypt_iv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
+ struct iv_lmk_private lmk;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
/*
+ * Duplicated per cpu state. Access through
+ * per_cpu_ptr() only.
+ */
+ struct crypt_cpu __percpu *cpu;
+ unsigned tfms_count;
+
+ /*
* Layout of each crypto request:
*
* struct ablkcipher_request
@@ -132,11 +168,10 @@ struct crypt_config {
* correctly aligned.
*/
unsigned int dmreq_start;
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *tfm;
unsigned long flags;
unsigned int key_size;
+ unsigned int key_parts;
u8 key[0];
};
@@ -148,6 +183,20 @@ static struct kmem_cache *_crypt_io_pool;
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+
+static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+{
+ return this_cpu_ptr(cc->cpu);
+}
+
+/*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+{
+ return __this_cpu_ptr(cc->cpu)->tfms[0];
+}
/*
* Different IV generation algorithms:
@@ -168,23 +217,38 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
* null: the initial vector is always zero. Provides compatibility with
* obsolete loop_fish2 devices. Do not use for new devices.
*
+ * lmk: Compatible implementation of the block chaining mode used
+ * by the Loop-AES block device encryption system
+ * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
+ * It operates on full 512 byte sectors and uses CBC
+ * with an IV derived from the sector number, the data and
+ * optionally extra IV seed.
+ * This means that after decryption the first block
+ * of sector must be tweaked according to decrypted data.
+ * Loop-AES can use three encryption schemes:
+ * version 1: is plain aes-cbc mode
+ * version 2: uses 64 multikey scheme with lmk IV generator
+ * version 3: the same as version 2 with additional IV seed
+ * (it uses 65 keys, last key is used as IV seed)
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+ *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
return 0;
}
static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
- sector_t sector)
+ struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(sector);
+ *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
return 0;
}
@@ -195,7 +259,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
struct hash_desc desc;
struct scatterlist sg;
- int err;
+ struct crypto_cipher *essiv_tfm;
+ int err, cpu;
sg_init_one(&sg, cc->key, cc->key_size);
desc.tfm = essiv->hash_tfm;
@@ -205,8 +270,16 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
if (err)
return err;
- return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+ for_each_possible_cpu(cpu) {
+ essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+
+ err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
crypto_hash_digestsize(essiv->hash_tfm));
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/* Wipe salt and reset key derived from volume key */
@@ -214,24 +287,76 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+ struct crypto_cipher *essiv_tfm;
+ int cpu, r, err = 0;
memset(essiv->salt, 0, salt_size);
- return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+ for_each_possible_cpu(cpu) {
+ essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+ r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+ if (r)
+ err = r;
+ }
+
+ return err;
+}
+
+/* Set up per cpu cipher state */
+static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
+ struct dm_target *ti,
+ u8 *salt, unsigned saltsize)
+{
+ struct crypto_cipher *essiv_tfm;
+ int err;
+
+ /* Setup the essiv_tfm with the given salt */
+ essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(essiv_tfm)) {
+ ti->error = "Error allocating crypto tfm for ESSIV";
+ return essiv_tfm;
+ }
+
+ if (crypto_cipher_blocksize(essiv_tfm) !=
+ crypto_ablkcipher_ivsize(any_tfm(cc))) {
+ ti->error = "Block size of ESSIV cipher does "
+ "not match IV size of block cipher";
+ crypto_free_cipher(essiv_tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+ if (err) {
+ ti->error = "Failed to set key for ESSIV cipher";
+ crypto_free_cipher(essiv_tfm);
+ return ERR_PTR(err);
+ }
+
+ return essiv_tfm;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
+ int cpu;
+ struct crypt_cpu *cpu_cc;
+ struct crypto_cipher *essiv_tfm;
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- crypto_free_cipher(essiv->tfm);
- essiv->tfm = NULL;
-
crypto_free_hash(essiv->hash_tfm);
essiv->hash_tfm = NULL;
kzfree(essiv->salt);
essiv->salt = NULL;
+
+ for_each_possible_cpu(cpu) {
+ cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+ essiv_tfm = cpu_cc->iv_private;
+
+ if (essiv_tfm)
+ crypto_free_cipher(essiv_tfm);
+
+ cpu_cc->iv_private = NULL;
+ }
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +365,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
struct crypto_cipher *essiv_tfm = NULL;
struct crypto_hash *hash_tfm = NULL;
u8 *salt = NULL;
- int err;
+ int err, cpu;
if (!opts) {
ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,48 +387,44 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
goto bad;
}
- /* Allocate essiv_tfm */
- essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(essiv_tfm)) {
- ti->error = "Error allocating crypto tfm for ESSIV";
- err = PTR_ERR(essiv_tfm);
- goto bad;
- }
- if (crypto_cipher_blocksize(essiv_tfm) !=
- crypto_ablkcipher_ivsize(cc->tfm)) {
- ti->error = "Block size of ESSIV cipher does "
- "not match IV size of block cipher";
- err = -EINVAL;
- goto bad;
- }
-
cc->iv_gen_private.essiv.salt = salt;
- cc->iv_gen_private.essiv.tfm = essiv_tfm;
cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
+ for_each_possible_cpu(cpu) {
+ essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+ crypto_hash_digestsize(hash_tfm));
+ if (IS_ERR(essiv_tfm)) {
+ crypt_iv_essiv_dtr(cc);
+ return PTR_ERR(essiv_tfm);
+ }
+ per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+ }
+
return 0;
bad:
- if (essiv_tfm && !IS_ERR(essiv_tfm))
- crypto_free_cipher(essiv_tfm);
if (hash_tfm && !IS_ERR(hash_tfm))
crypto_free_hash(hash_tfm);
kfree(salt);
return err;
}
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
{
+ struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+ *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
+
return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
+ unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
@@ -328,25 +449,177 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
{
__be64 val;
memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+ val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
return 0;
}
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
return 0;
}
+static void crypt_iv_lmk_dtr(struct crypt_config *cc)
+{
+ struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+ if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
+ crypto_free_shash(lmk->hash_tfm);
+ lmk->hash_tfm = NULL;
+
+ kzfree(lmk->seed);
+ lmk->seed = NULL;
+}
+
+static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+ lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+ if (IS_ERR(lmk->hash_tfm)) {
+ ti->error = "Error initializing LMK hash";
+ return PTR_ERR(lmk->hash_tfm);
+ }
+
+ /* No seed in LMK version 2 */
+ if (cc->key_parts == cc->tfms_count) {
+ lmk->seed = NULL;
+ return 0;
+ }
+
+ lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
+ if (!lmk->seed) {
+ crypt_iv_lmk_dtr(cc);
+ ti->error = "Error kmallocing seed storage in LMK";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int crypt_iv_lmk_init(struct crypt_config *cc)
+{
+ struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+ int subkey_size = cc->key_size / cc->key_parts;
+
+ /* The LMK seed is stored at the position of the (LMK_KEYS + 1)th key */
+ if (lmk->seed)
+ memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
+ crypto_shash_digestsize(lmk->hash_tfm));
+
+ return 0;
+}
+
+static int crypt_iv_lmk_wipe(struct crypt_config *cc)
+{
+ struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+ if (lmk->seed)
+ memset(lmk->seed, 0, LMK_SEED_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq,
+ u8 *data)
+{
+ struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+ struct {
+ struct shash_desc desc;
+ char ctx[crypto_shash_descsize(lmk->hash_tfm)];
+ } sdesc;
+ struct md5_state md5state;
+ u32 buf[4];
+ int i, r;
+
+ sdesc.desc.tfm = lmk->hash_tfm;
+ sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ r = crypto_shash_init(&sdesc.desc);
+ if (r)
+ return r;
+
+ if (lmk->seed) {
+ r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+ if (r)
+ return r;
+ }
+
+ /* The sector is always 512 B and the cipher block size 16 B; hash the data of blocks 1-31 */
+ r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+ if (r)
+ return r;
+
+ /* Sector is cropped to 56 bits here */
+ buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
+ buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
+ buf[2] = cpu_to_le32(4024);
+ buf[3] = 0;
+ r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+ if (r)
+ return r;
+
+ /* No MD5 padding here */
+ r = crypto_shash_export(&sdesc.desc, &md5state);
+ if (r)
+ return r;
+
+ for (i = 0; i < MD5_HASH_WORDS; i++)
+ __cpu_to_le32s(&md5state.hash[i]);
+ memcpy(iv, &md5state.hash, cc->iv_size);
+
+ return 0;
+}
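For illustration only (not part of the patch): how crypt_iv_lmk_one() above packs the sector number before hashing, shown as a stand-alone sketch. On a little-endian host cpu_to_le32() is a no-op, so it is omitted here; lmk_pack_sector() is a made-up name.

/*
 * Illustrative only: the low 32 bits of the sector go into buf[0];
 * buf[1] keeps only the next 24 bits (so the sector is effectively
 * cropped to 56 bits) and sets the 0x80000000 marker bit; buf[2]
 * carries the constant 4024 required by the compatible on-disk format.
 */
#include <stdint.h>
#include <stdio.h>

static void lmk_pack_sector(uint32_t buf[4], uint64_t sector)
{
	buf[0] = (uint32_t)(sector & 0xFFFFFFFF);
	buf[1] = (uint32_t)(((sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = 4024;
	buf[3] = 0;
}

int main(void)
{
	uint32_t buf[4];

	lmk_pack_sector(buf, 0x00ABCDEF12345678ULL);
	printf("%08x %08x %08x %08x\n", (unsigned)buf[0], (unsigned)buf[1],
	       (unsigned)buf[2], (unsigned)buf[3]);
	/* 12345678 80abcdef 00000fb8 00000000 */
	return 0;
}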
+
+static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 *src;
+ int r = 0;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+ src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+ r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
+ kunmap_atomic(src, KM_USER0);
+ } else
+ memset(iv, 0, cc->iv_size);
+
+ return r;
+}
+
+static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 *dst;
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+ return 0;
+
+ dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+ r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+
+ /* Tweak the first block of plaintext sector */
+ if (!r)
+ crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+
+ kunmap_atomic(dst, KM_USER0);
+ return r;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -373,6 +646,15 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
+static struct crypt_iv_operations crypt_iv_lmk_ops = {
+ .ctr = crypt_iv_lmk_ctr,
+ .dtr = crypt_iv_lmk_dtr,
+ .init = crypt_iv_lmk_init,
+ .wipe = crypt_iv_lmk_wipe,
+ .generator = crypt_iv_lmk_gen,
+ .post = crypt_iv_lmk_post
+};
+
static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in,
@@ -400,6 +682,13 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}
+static u8 *iv_of_dmreq(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq)
+{
+ return (u8 *)ALIGN((unsigned long)(dmreq + 1),
+ crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+}
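For illustration only (not part of the patch): the rounding performed by the ALIGN() expression in iv_of_dmreq() above, reduced to plain pointer arithmetic. align_up() is a hypothetical stand-in for the kernel macro.

/*
 * Illustrative only: the IV is placed right after the dm_crypt_request,
 * rounded up to the cipher's alignment (alignmask + 1, a power of two).
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t align_up(uintptr_t x, uintptr_t a)	/* a must be a power of two */
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	printf("%lu\n", (unsigned long)align_up(100, 16));	/* 112 */
	return 0;
}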
+
static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
@@ -411,9 +700,9 @@ static int crypt_convert_block(struct crypt_config *cc,
int r = 0;
dmreq = dmreq_of_req(cc, req);
- iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
- crypto_ablkcipher_alignmask(cc->tfm) + 1);
+ iv = iv_of_dmreq(cc, dmreq);
+ dmreq->iv_sector = ctx->sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -436,7 +725,7 @@ static int crypt_convert_block(struct crypt_config *cc,
}
if (cc->iv_gen_ops) {
- r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+ r = cc->iv_gen_ops->generator(cc, iv, dmreq);
if (r < 0)
return r;
}
@@ -449,21 +738,28 @@ static int crypt_convert_block(struct crypt_config *cc,
else
r = crypto_ablkcipher_decrypt(req);
+ if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ r = cc->iv_gen_ops->post(cc, iv, dmreq);
+
return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error);
+
static void crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
- if (!cc->req)
- cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
- ablkcipher_request_set_tfm(cc->req, cc->tfm);
- ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- kcryptd_async_done,
- dmreq_of_req(cc, cc->req));
+ struct crypt_cpu *this_cc = this_crypt_config(cc);
+ unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+
+ if (!this_cc->req)
+ this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+ ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+ ablkcipher_request_set_callback(this_cc->req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
/*
@@ -472,6 +768,7 @@ static void crypt_alloc_req(struct crypt_config *cc,
static int crypt_convert(struct crypt_config *cc,
struct convert_context *ctx)
{
+ struct crypt_cpu *this_cc = this_crypt_config(cc);
int r;
atomic_set(&ctx->pending, 1);
@@ -483,7 +780,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_inc(&ctx->pending);
- r = crypt_convert_block(cc, ctx, cc->req);
+ r = crypt_convert_block(cc, ctx, this_cc->req);
switch (r) {
/* async */
@@ -492,7 +789,7 @@ static int crypt_convert(struct crypt_config *cc,
INIT_COMPLETION(ctx->restart);
/* fall through*/
case -EINPROGRESS:
- cc->req = NULL;
+ this_cc->req = NULL;
ctx->sector++;
continue;
@@ -651,6 +948,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
* They must be separated as otherwise the final stages could be
* starved by new requests which can block in the first stages due
* to memory allocation.
+ *
+ * The work is done per-CPU, globally for all dm-crypt instances.
+ * The instances should not depend on each other and must not block.
*/
static void crypt_endio(struct bio *clone, int error)
{
@@ -691,26 +991,30 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor;
}
-static void kcryptd_io_read(struct dm_crypt_io *io)
+static void kcryptd_unplug(struct crypt_config *cc)
+{
+ blk_unplug(bdev_get_queue(cc->dev->bdev));
+}
+
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
struct bio *clone;
- crypt_inc_pending(io);
-
/*
* The block layer might modify the bvec array, so always
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
- clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
- if (unlikely(!clone)) {
- io->error = -ENOMEM;
- crypt_dec_pending(io);
- return;
+ clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+ if (!clone) {
+ kcryptd_unplug(cc);
+ return 1;
}
+ crypt_inc_pending(io);
+
clone_init(io, clone);
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
@@ -720,6 +1024,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
sizeof(struct bio_vec) * clone->bi_vcnt);
generic_make_request(clone);
+ return 0;
}
static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -732,9 +1037,12 @@ static void kcryptd_io(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
- if (bio_data_dir(io->base_bio) == READ)
- kcryptd_io_read(io);
- else
+ if (bio_data_dir(io->base_bio) == READ) {
+ crypt_inc_pending(io);
+ if (kcryptd_io_read(io, GFP_NOIO))
+ io->error = -ENOMEM;
+ crypt_dec_pending(io);
+ } else
kcryptd_io_write(io);
}
@@ -901,6 +1209,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
return;
}
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
if (!atomic_dec_and_test(&ctx->pending))
@@ -971,34 +1282,84 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
}
}
-static int crypt_set_key(struct crypt_config *cc, char *key)
+static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
- unsigned key_size = strlen(key) >> 1;
+ struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+ unsigned i;
- if (cc->key_size && cc->key_size != key_size)
+ for (i = 0; i < cc->tfms_count; i++)
+ if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
+ crypto_free_ablkcipher(cpu_cc->tfms[i]);
+ cpu_cc->tfms[i] = NULL;
+ }
+}
+
+static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+{
+ struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+ unsigned i;
+ int err;
+
+ for (i = 0; i < cc->tfms_count; i++) {
+ cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+ if (IS_ERR(cpu_cc->tfms[i])) {
+ err = PTR_ERR(cpu_cc->tfms[i]);
+ crypt_free_tfms(cc, cpu);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int crypt_setkey_allcpus(struct crypt_config *cc)
+{
+ unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+ int cpu, err = 0, i, r;
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < cc->tfms_count; i++) {
+ r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
+ cc->key + (i * subkey_size), subkey_size);
+ if (r)
+ err = r;
+ }
+ }
+
+ return err;
+}
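For illustration only (not part of the patch): a user-space sketch of the key slicing done by crypt_setkey_allcpus() above, assuming a 64-byte key split across four tfms (as a hypothetical "aes:4" mapping would request).

/*
 * Illustrative only: with tfms_count a power of two,
 * subkey_size = key_size >> ilog2(count), and tfm i receives key bytes
 * [i * subkey_size, (i + 1) * subkey_size).
 */
#include <stdio.h>

int main(void)
{
	unsigned key_size = 64, tfms_count = 4;
	unsigned log = 0, subkey_size, i;

	while ((1u << (log + 1)) <= tfms_count)	/* ilog2(tfms_count) */
		log++;
	subkey_size = key_size >> log;

	for (i = 0; i < tfms_count; i++)
		printf("tfm %u: key bytes %u..%u\n",
		       i, i * subkey_size, (i + 1) * subkey_size - 1);
	return 0;
}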
+
+static int crypt_set_key(struct crypt_config *cc, char *key)
+{
+ /* The key size may not be changed. */
+ if (cc->key_size != (strlen(key) >> 1))
return -EINVAL;
- cc->key_size = key_size; /* initial settings */
+ /* Hyphen (which gives a key_size of zero) means there is no key. */
+ if (!cc->key_size && strcmp(key, "-"))
+ return -EINVAL;
- if ((!key_size && strcmp(key, "-")) ||
- (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+ if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
return -EINVAL;
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+ return crypt_setkey_allcpus(cc);
}
static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
- return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+
+ return crypt_setkey_allcpus(cc);
}
static void crypt_dtr(struct dm_target *ti)
{
struct crypt_config *cc = ti->private;
+ struct crypt_cpu *cpu_cc;
+ int cpu;
ti->private = NULL;
@@ -1010,6 +1371,14 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->crypt_queue)
destroy_workqueue(cc->crypt_queue);
+ if (cc->cpu)
+ for_each_possible_cpu(cpu) {
+ cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+ if (cpu_cc->req)
+ mempool_free(cpu_cc->req, cc->req_pool);
+ crypt_free_tfms(cc, cpu);
+ }
+
if (cc->bs)
bioset_free(cc->bs);
@@ -1023,14 +1392,14 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
- if (cc->tfm && !IS_ERR(cc->tfm))
- crypto_free_ablkcipher(cc->tfm);
-
if (cc->dev)
dm_put_device(ti, cc->dev);
+ if (cc->cpu)
+ free_percpu(cc->cpu);
+
kzfree(cc->cipher);
- kzfree(cc->cipher_mode);
+ kzfree(cc->cipher_string);
/* Must zero key material before freeing */
kzfree(cc);
@@ -1040,9 +1409,9 @@ static int crypt_ctr_cipher(struct dm_target *ti,
char *cipher_in, char *key)
{
struct crypt_config *cc = ti->private;
- char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+ char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
char *cipher_api = NULL;
- int ret = -EINVAL;
+ int cpu, ret = -EINVAL;
/* Convert to crypto api definition? */
if (strchr(cipher_in, '(')) {
@@ -1050,23 +1419,31 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -EINVAL;
}
+ cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
+ if (!cc->cipher_string)
+ goto bad_mem;
+
/*
* Legacy dm-crypt cipher specification
- * cipher-mode-iv:ivopts
+ * cipher[:keycount]-mode-iv:ivopts
*/
tmp = cipher_in;
- cipher = strsep(&tmp, "-");
+ keycount = strsep(&tmp, "-");
+ cipher = strsep(&keycount, ":");
+
+ if (!keycount)
+ cc->tfms_count = 1;
+ else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+ !is_power_of_2(cc->tfms_count)) {
+ ti->error = "Bad cipher key count specification";
+ return -EINVAL;
+ }
+ cc->key_parts = cc->tfms_count;
cc->cipher = kstrdup(cipher, GFP_KERNEL);
if (!cc->cipher)
goto bad_mem;
- if (tmp) {
- cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
- if (!cc->cipher_mode)
- goto bad_mem;
- }
-
chainmode = strsep(&tmp, "-");
ivopts = strsep(&tmp, "-");
ivmode = strsep(&ivopts, ":");
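For illustration only (not part of the patch): a stand-alone sketch of the strsep() sequence above applied to the extended "cipher[:keycount]-mode-iv[:ivopts]" syntax. The example string "aes:2-cbc-essiv:sha256" is hypothetical.

/*
 * Illustrative only: splits the legacy dm-crypt cipher specification
 * the same way the constructor code above does.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char spec[] = "aes:2-cbc-essiv:sha256";
	char *tmp = spec, *keycount, *cipher, *chainmode, *ivopts, *ivmode;
	unsigned tfms_count = 1;

	keycount = strsep(&tmp, "-");		/* "aes:2"              */
	cipher = strsep(&keycount, ":");	/* "aes", keycount "2"  */
	if (keycount)
		tfms_count = (unsigned)strtoul(keycount, NULL, 10);

	chainmode = strsep(&tmp, "-");		/* "cbc"                */
	ivopts = strsep(&tmp, "-");		/* "essiv:sha256"       */
	ivmode = strsep(&ivopts, ":");		/* "essiv" / "sha256"   */

	printf("cipher=%s keycount=%u mode=%s iv=%s ivopts=%s\n",
	       cipher, tfms_count, chainmode, ivmode, ivopts ? ivopts : "-");
	return 0;
}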
@@ -1074,10 +1451,19 @@ static int crypt_ctr_cipher(struct dm_target *ti,
if (tmp)
DMWARN("Ignoring unexpected additional cipher options");
- /* Compatibility mode for old dm-crypt mappings */
+ cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
+ cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+ __alignof__(struct crypt_cpu));
+ if (!cc->cpu) {
+ ti->error = "Cannot allocate per cpu state";
+ goto bad_mem;
+ }
+
+ /*
+ * For compatibility with the original dm-crypt mapping format, if
+ * only the cipher name is supplied, use cbc-plain.
+ */
if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
- kfree(cc->cipher_mode);
- cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
chainmode = "cbc";
ivmode = "plain";
}
@@ -1099,11 +1485,12 @@ static int crypt_ctr_cipher(struct dm_target *ti,
}
/* Allocate cipher */
- cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
- if (IS_ERR(cc->tfm)) {
- ret = PTR_ERR(cc->tfm);
- ti->error = "Error allocating crypto tfm";
- goto bad;
+ for_each_possible_cpu(cpu) {
+ ret = crypt_alloc_tfms(cc, cpu, cipher_api);
+ if (ret < 0) {
+ ti->error = "Error allocating crypto tfm";
+ goto bad;
+ }
}
/* Initialize and set key */
@@ -1114,7 +1501,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
}
/* Initialize IV */
- cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+ cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size,
@@ -1137,7 +1524,15 @@ static int crypt_ctr_cipher(struct dm_target *ti,
cc->iv_gen_ops = &crypt_iv_benbi_ops;
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
- else {
+ else if (strcmp(ivmode, "lmk") == 0) {
+ cc->iv_gen_ops = &crypt_iv_lmk_ops;
+ /* Versions 2 and 3 are recognised according
+ * to the length of the provided multi-key string.
+ * If present (version 3), the last key is used as the IV seed.
+ */
+ if (cc->key_size % cc->key_parts)
+ cc->key_parts++;
+ } else {
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
@@ -1194,6 +1589,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot allocate encryption context";
return -ENOMEM;
}
+ cc->key_size = key_size;
ti->private = cc;
ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1208,9 +1604,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->dmreq_start = sizeof(struct ablkcipher_request);
- cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
+ cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
- cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
+ cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
~(crypto_tfm_ctx_alignment() - 1);
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1615,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot allocate crypt request mempool";
goto bad;
}
- cc->req = NULL;
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) {
@@ -1252,13 +1647,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->start = tmpll;
ret = -ENOMEM;
- cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+ cc->io_queue = alloc_workqueue("kcryptd_io",
+ WQ_NON_REENTRANT|
+ WQ_MEM_RECLAIM,
+ 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
- cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+ cc->crypt_queue = alloc_workqueue("kcryptd",
+ WQ_NON_REENTRANT|
+ WQ_CPU_INTENSIVE|
+ WQ_MEM_RECLAIM,
+ 1);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -1286,9 +1688,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
- if (bio_data_dir(io->base_bio) == READ)
- kcryptd_queue_io(io);
- else
+ if (bio_data_dir(io->base_bio) == READ) {
+ if (kcryptd_io_read(io, GFP_NOWAIT))
+ kcryptd_queue_io(io);
+ } else
kcryptd_queue_crypt(io);
return DM_MAPIO_SUBMITTED;
@@ -1306,10 +1709,7 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- if (cc->cipher_mode)
- DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
- else
- DMEMIT("%s ", cc->cipher);
+ DMEMIT("%s ", cc->cipher_string);
if (cc->key_size > 0) {
if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1421,7 +1821,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 7, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index baa11912cc94..f18375dcedd9 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -352,7 +352,7 @@ static int __init dm_delay_init(void)
{
int r = -ENOMEM;
- kdelayd_wq = create_workqueue("kdelayd");
+ kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!kdelayd_wq) {
DMERR("Couldn't start kdelayd");
goto bad_queue;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4b54618b4159..6d12775a1061 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -295,19 +295,55 @@ retry:
DMWARN("remove_all left %d open device(s)", dev_skipped);
}
+/*
+ * Set the uuid of a hash_cell whose uuid is not already set.
+ */
+static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
+{
+ mutex_lock(&dm_hash_cells_mutex);
+ hc->uuid = new_uuid;
+ mutex_unlock(&dm_hash_cells_mutex);
+
+ list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+}
+
+/*
+ * Changes the name of a hash_cell and returns the old name for
+ * the caller to free.
+ */
+static char *__change_cell_name(struct hash_cell *hc, char *new_name)
+{
+ char *old_name;
+
+ /*
+ * Rename and move the name cell.
+ */
+ list_del(&hc->name_list);
+ old_name = hc->name;
+
+ mutex_lock(&dm_hash_cells_mutex);
+ hc->name = new_name;
+ mutex_unlock(&dm_hash_cells_mutex);
+
+ list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+
+ return old_name;
+}
+
static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
const char *new)
{
- char *new_name, *old_name;
+ char *new_data, *old_name = NULL;
struct hash_cell *hc;
struct dm_table *table;
struct mapped_device *md;
+ unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
/*
* duplicate new.
*/
- new_name = kstrdup(new, GFP_KERNEL);
- if (!new_name)
+ new_data = kstrdup(new, GFP_KERNEL);
+ if (!new_data)
return ERR_PTR(-ENOMEM);
down_write(&_hash_lock);
@@ -315,13 +351,19 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
/*
* Is new free ?
*/
- hc = __get_name_cell(new);
+ if (change_uuid)
+ hc = __get_uuid_cell(new);
+ else
+ hc = __get_name_cell(new);
+
if (hc) {
- DMWARN("asked to rename to an already-existing name %s -> %s",
+ DMWARN("Unable to change %s on mapped device %s to one that "
+ "already exists: %s",
+ change_uuid ? "uuid" : "name",
param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
- kfree(new_name);
+ kfree(new_data);
return ERR_PTR(-EBUSY);
}
@@ -330,22 +372,30 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
*/
hc = __get_name_cell(param->name);
if (!hc) {
- DMWARN("asked to rename a non-existent device %s -> %s",
- param->name, new);
+ DMWARN("Unable to rename non-existent device, %s to %s%s",
+ param->name, change_uuid ? "uuid " : "", new);
up_write(&_hash_lock);
- kfree(new_name);
+ kfree(new_data);
return ERR_PTR(-ENXIO);
}
/*
- * rename and move the name cell.
+ * Does this device already have a uuid?
*/
- list_del(&hc->name_list);
- old_name = hc->name;
- mutex_lock(&dm_hash_cells_mutex);
- hc->name = new_name;
- mutex_unlock(&dm_hash_cells_mutex);
- list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+ if (change_uuid && hc->uuid) {
+ DMWARN("Unable to change uuid of mapped device %s to %s "
+ "because uuid is already set to %s",
+ param->name, new, hc->uuid);
+ dm_put(hc->md);
+ up_write(&_hash_lock);
+ kfree(new_data);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (change_uuid)
+ __set_cell_uuid(hc, new_data);
+ else
+ old_name = __change_cell_name(hc, new_data);
/*
* Wake up any dm event waiters.
@@ -729,7 +779,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
hc = __find_device_hash_cell(param);
if (!hc) {
- DMWARN("device doesn't appear to be in the dev hash table.");
+ DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock);
return -ENXIO;
}
@@ -741,7 +791,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
*/
r = dm_lock_for_deletion(md);
if (r) {
- DMWARN("unable to remove open device %s", hc->name);
+ DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
up_write(&_hash_lock);
dm_put(md);
return r;
@@ -774,21 +824,24 @@ static int invalid_str(char *str, void *end)
static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
int r;
- char *new_name = (char *) param + param->data_start;
+ char *new_data = (char *) param + param->data_start;
struct mapped_device *md;
+ unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
- if (new_name < param->data ||
- invalid_str(new_name, (void *) param + param_size) ||
- strlen(new_name) > DM_NAME_LEN - 1) {
- DMWARN("Invalid new logical volume name supplied.");
+ if (new_data < param->data ||
+ invalid_str(new_data, (void *) param + param_size) ||
+ strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
+ DMWARN("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
}
- r = check_name(new_name);
- if (r)
- return r;
+ if (!change_uuid) {
+ r = check_name(new_data);
+ if (r)
+ return r;
+ }
- md = dm_hash_rename(param, new_name);
+ md = dm_hash_rename(param, new_data);
if (IS_ERR(md))
return PTR_ERR(md);
@@ -885,7 +938,7 @@ static int do_resume(struct dm_ioctl *param)
hc = __find_device_hash_cell(param);
if (!hc) {
- DMWARN("device doesn't appear to be in the dev hash table.");
+ DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock);
return -ENXIO;
}
@@ -1212,7 +1265,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
hc = __find_device_hash_cell(param);
if (!hc) {
- DMWARN("device doesn't appear to be in the dev hash table.");
+ DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock);
return -ENXIO;
}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d8587bac5682..924f5f0084c2 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@ struct dm_kcopyd_client {
unsigned int nr_pages;
unsigned int nr_free_pages;
+ /*
+ * Block devices to unplug.
+ * Non-NULL pointer means that a block device has some pending requests
+ * and needs to be unplugged.
+ */
+ struct block_device *unplug[2];
+
struct dm_io_client *io_client;
wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@ static int run_complete_job(struct kcopyd_job *job)
return 0;
}
+/*
+ * Unplug the block device at the specified index.
+ */
+static void unplug(struct dm_kcopyd_client *kc, int rw)
+{
+ if (kc->unplug[rw] != NULL) {
+ blk_unplug(bdev_get_queue(kc->unplug[rw]));
+ kc->unplug[rw] = NULL;
+ }
+}
+
+/*
+ * Prepare block device unplug. If there's another device
+ * to be unplugged at the same array index, we unplug that
+ * device first.
+ */
+static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
+ struct block_device *bdev)
+{
+ if (likely(kc->unplug[rw] == bdev))
+ return;
+ unplug(kc, rw);
+ kc->unplug[rw] = bdev;
+}
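For illustration only (not part of the patch): the coalescing behaviour of prepare_unplug()/unplug() above, modelled with plain integers standing in for block devices.

/*
 * Illustrative only: repeated requests for the same device are merged;
 * switching to a different device unplugs the previous one first.
 */
#include <stdio.h>

static int pending = 0;			/* 0 == nothing to unplug */

static void unplug(void)
{
	if (pending) {
		printf("unplug device %d\n", pending);
		pending = 0;
	}
}

static void prepare_unplug(int bdev)
{
	if (pending == bdev)
		return;			/* already queued for this device */
	unplug();
	pending = bdev;
}

int main(void)
{
	prepare_unplug(1);
	prepare_unplug(1);	/* coalesced, no output      */
	prepare_unplug(2);	/* prints "unplug device 1"  */
	unplug();		/* prints "unplug device 2"  */
	return 0;
}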
+
static void complete_io(unsigned long error, void *context)
{
struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
+ .bi_rw = job->rw,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = job->offset,
@@ -354,10 +386,16 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
- if (job->rw == READ)
+ if (job->rw == READ) {
r = dm_io(&io_req, 1, &job->source, NULL);
- else
+ prepare_unplug(job->kc, READ, job->source.bdev);
+ } else {
+ if (job->num_dests > 1)
+ io_req.bi_rw |= REQ_UNPLUG;
r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+ if (!(io_req.bi_rw & REQ_UNPLUG))
+ prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
+ }
return r;
}
@@ -435,10 +473,18 @@ static void do_work(struct work_struct *work)
* Pages jobs when successful will jump onto the io jobs
* list. io jobs call wake when they complete and it all
* starts again.
+ *
+ * Note that io_jobs add block devices to the unplug array;
+ * this array is cleared by "unplug" calls. It is therefore
+ * forbidden to run complete_jobs after io_jobs and before
+ * unplug, because the block device could be destroyed in a
+ * job completion callback.
*/
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
process_jobs(&kc->io_jobs, kc, run_io_job);
+ unplug(kc, READ);
+ unplug(kc, WRITE);
}
/*
@@ -619,12 +665,15 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
+ memset(kc->unplug, 0, sizeof(kc->unplug));
+
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
- kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+ kc->kcopyd_wq = alloc_workqueue("kcopyd",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!kc->kcopyd_wq)
goto bad_workqueue;
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1ed0094f064b..aa2e0c374ab3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -12,12 +12,22 @@
#include "dm-log-userspace-transfer.h"
+#define DM_LOG_USERSPACE_VSN "1.1.0"
+
struct flush_entry {
int type;
region_t region;
struct list_head list;
};
+/*
+ * This limit on the number of mark and clear request is, to a degree,
+ * arbitrary. However, there is some basis for the choice in the limits
+ * imposed on the size of data payload by dm-log-userspace-transfer.c:
+ * dm_consult_userspace().
+ */
+#define MAX_FLUSH_GROUP_COUNT 32
+
struct log_c {
struct dm_target *ti;
uint32_t region_size;
@@ -37,8 +47,15 @@ struct log_c {
*/
uint64_t in_sync_hint;
+ /*
+ * Mark and clear requests are held until a flush is issued
+ * so that we can group, and thereby limit, the amount of
+ * network traffic between kernel and userspace. The 'flush_lock'
+ * is used to protect these lists.
+ */
spinlock_t flush_lock;
- struct list_head flush_list; /* only for clear and mark requests */
+ struct list_head mark_list;
+ struct list_head clear_list;
};
static mempool_t *flush_entry_pool;
@@ -169,7 +186,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
strncpy(lc->uuid, argv[0], DM_UUID_LEN);
spin_lock_init(&lc->flush_lock);
- INIT_LIST_HEAD(&lc->flush_list);
+ INIT_LIST_HEAD(&lc->mark_list);
+ INIT_LIST_HEAD(&lc->clear_list);
str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
if (str_size < 0) {
@@ -181,8 +199,11 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
ctr_str, str_size, NULL, NULL);
- if (r == -ESRCH) {
- DMERR("Userspace log server not found");
+ if (r < 0) {
+ if (r == -ESRCH)
+ DMERR("Userspace log server not found");
+ else
+ DMERR("Userspace log server failed to create log");
goto out;
}
@@ -214,10 +235,9 @@ out:
static void userspace_dtr(struct dm_dirty_log *log)
{
- int r;
struct log_c *lc = log->context;
- r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
+ (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
NULL, 0,
NULL, NULL);
@@ -338,6 +358,71 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
return (r) ? 0 : (int)in_sync;
}
+static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
+{
+ int r = 0;
+ struct flush_entry *fe;
+
+ list_for_each_entry(fe, flush_list, list) {
+ r = userspace_do_request(lc, lc->uuid, fe->type,
+ (char *)&fe->region,
+ sizeof(fe->region),
+ NULL, NULL);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
+{
+ int r = 0;
+ int count;
+ uint32_t type = 0;
+ struct flush_entry *fe, *tmp_fe;
+ LIST_HEAD(tmp_list);
+ uint64_t group[MAX_FLUSH_GROUP_COUNT];
+
+ /*
+ * Process the requests in groups
+ */
+ while (!list_empty(flush_list)) {
+ count = 0;
+
+ list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
+ group[count] = fe->region;
+ count++;
+
+ list_del(&fe->list);
+ list_add(&fe->list, &tmp_list);
+
+ type = fe->type;
+ if (count >= MAX_FLUSH_GROUP_COUNT)
+ break;
+ }
+
+ r = userspace_do_request(lc, lc->uuid, type,
+ (char *)(group),
+ count * sizeof(uint64_t),
+ NULL, NULL);
+ if (r) {
+ /* Group send failed. Attempt one-by-one. */
+ list_splice_init(&tmp_list, flush_list);
+ r = flush_one_by_one(lc, flush_list);
+ break;
+ }
+ }
+
+ /*
+ * Must collect the flush_entry structures that were successfully
+ * processed as a group so that they will be freed by the caller.
+ */
+ list_splice_init(&tmp_list, flush_list);
+
+ return r;
+}
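For illustration only (not part of the patch): the batching idea behind flush_by_group() above, reduced to a counter. Requests go out in groups of at most MAX_FLUSH_GROUP_COUNT; on a group failure the code falls back to one-by-one submission.

/*
 * Illustrative only: 70 queued regions are sent as groups of 32, 32 and 6.
 */
#include <stdio.h>

#define MAX_FLUSH_GROUP_COUNT 32

int main(void)
{
	unsigned total = 70, sent = 0;

	while (sent < total) {
		unsigned count = total - sent;

		if (count > MAX_FLUSH_GROUP_COUNT)
			count = MAX_FLUSH_GROUP_COUNT;
		printf("send group of %u regions\n", count);
		sent += count;
	}
	return 0;
}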
+
/*
* userspace_flush
*
@@ -360,31 +445,25 @@ static int userspace_flush(struct dm_dirty_log *log)
int r = 0;
unsigned long flags;
struct log_c *lc = log->context;
- LIST_HEAD(flush_list);
+ LIST_HEAD(mark_list);
+ LIST_HEAD(clear_list);
struct flush_entry *fe, *tmp_fe;
spin_lock_irqsave(&lc->flush_lock, flags);
- list_splice_init(&lc->flush_list, &flush_list);
+ list_splice_init(&lc->mark_list, &mark_list);
+ list_splice_init(&lc->clear_list, &clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
- if (list_empty(&flush_list))
+ if (list_empty(&mark_list) && list_empty(&clear_list))
return 0;
- /*
- * FIXME: Count up requests, group request types,
- * allocate memory to stick all requests in and
- * send to server in one go. Failing the allocation,
- * do it one by one.
- */
+ r = flush_by_group(lc, &mark_list);
+ if (r)
+ goto fail;
- list_for_each_entry(fe, &flush_list, list) {
- r = userspace_do_request(lc, lc->uuid, fe->type,
- (char *)&fe->region,
- sizeof(fe->region),
- NULL, NULL);
- if (r)
- goto fail;
- }
+ r = flush_by_group(lc, &clear_list);
+ if (r)
+ goto fail;
r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
NULL, 0, NULL, NULL);
@@ -395,7 +474,11 @@ fail:
* Calling code will receive an error and will know that
* the log facility has failed.
*/
- list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+ list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
+ list_del(&fe->list);
+ mempool_free(fe, flush_entry_pool);
+ }
+ list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
list_del(&fe->list);
mempool_free(fe, flush_entry_pool);
}
@@ -425,7 +508,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
spin_lock_irqsave(&lc->flush_lock, flags);
fe->type = DM_ULOG_MARK_REGION;
fe->region = region;
- list_add(&fe->list, &lc->flush_list);
+ list_add(&fe->list, &lc->mark_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
return;
@@ -462,7 +545,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
spin_lock_irqsave(&lc->flush_lock, flags);
fe->type = DM_ULOG_CLEAR_REGION;
fe->region = region;
- list_add(&fe->list, &lc->flush_list);
+ list_add(&fe->list, &lc->clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
return;
@@ -684,7 +767,7 @@ static int __init userspace_dirty_log_init(void)
return r;
}
- DMINFO("version 1.0.0 loaded");
+ DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
return 0;
}
@@ -694,7 +777,7 @@ static void __exit userspace_dirty_log_exit(void)
dm_ulog_tfr_exit();
mempool_destroy(flush_entry_pool);
- DMINFO("version 1.0.0 unloaded");
+ DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
return;
}
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 075cbcf8a9f5..049eaf12aaab 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -198,6 +198,7 @@ resend:
memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+ tfr->version = DM_ULOG_REQUEST_VERSION;
tfr->luid = luid;
tfr->seq = dm_ulog_seq++;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 33420e68d153..6951536ea29c 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -455,7 +455,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
kfree(lc);
- return -ENOMEM;
+ return r;
}
lc->disk_header = vmalloc(buf_size);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda90ad4..b82d28819e2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,8 @@
#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
+#define DM_PG_INIT_DELAY_MSECS 2000
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
/* Path properties */
struct pgpath {
@@ -33,8 +35,7 @@ struct pgpath {
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
- struct work_struct deactivate_path;
- struct work_struct activate_path;
+ struct delayed_work activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -65,11 +66,15 @@ struct multipath {
const char *hw_handler_name;
char *hw_handler_params;
+
unsigned nr_priority_groups;
struct list_head priority_groups;
+
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+
unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
- wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ unsigned pg_init_delay_retry; /* Delay pg_init retry? */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
@@ -82,6 +87,7 @@ struct multipath {
unsigned saved_queue_if_no_path;/* Saved state during suspension */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
+ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
struct work_struct process_queued_ios;
struct list_head queued_ios;
@@ -116,7 +122,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -129,8 +134,7 @@ static struct pgpath *alloc_pgpath(void)
if (pgpath) {
pgpath->is_active = 1;
- INIT_WORK(&pgpath->deactivate_path, deactivate_path);
- INIT_WORK(&pgpath->activate_path, activate_path);
+ INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
@@ -141,14 +145,6 @@ static void free_pgpath(struct pgpath *pgpath)
kfree(pgpath);
}
-static void deactivate_path(struct work_struct *work)
-{
- struct pgpath *pgpath =
- container_of(work, struct pgpath, deactivate_path);
-
- blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
@@ -199,6 +195,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
@@ -238,14 +235,19 @@ static void free_multipath(struct multipath *m)
static void __pg_init_all_paths(struct multipath *m)
{
struct pgpath *pgpath;
+ unsigned long pg_init_delay = 0;
m->pg_init_count++;
m->pg_init_required = 0;
+ if (m->pg_init_delay_retry)
+ pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
+ m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
/* Skip failed paths */
if (!pgpath->is_active)
continue;
- if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
+ pg_init_delay))
m->pg_init_in_progress++;
}
}
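For illustration only (not part of the patch): the delay selection performed by __pg_init_all_paths() above, before the value is converted to jiffies with msecs_to_jiffies(). pg_init_delay() is a made-up helper name.

/*
 * Illustrative only: when a delayed retry was requested, either the
 * table-supplied pg_init_delay_msecs or the 2000 ms default is used;
 * otherwise the activation work is queued immediately.
 */
#include <stdio.h>

#define DM_PG_INIT_DELAY_MSECS   2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

static unsigned pg_init_delay(int delay_retry, unsigned configured_msecs)
{
	if (!delay_retry)
		return 0;
	return configured_msecs != DM_PG_INIT_DELAY_DEFAULT ?
	       configured_msecs : DM_PG_INIT_DELAY_MSECS;
}

int main(void)
{
	printf("%u %u %u\n",
	       pg_init_delay(0, DM_PG_INIT_DELAY_DEFAULT),	/* 0    */
	       pg_init_delay(1, DM_PG_INIT_DELAY_DEFAULT),	/* 2000 */
	       pg_init_delay(1, 500));				/* 500  */
	return 0;
}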
@@ -793,8 +795,9 @@ static int parse_features(struct arg_set *as, struct multipath *m)
const char *param_name;
static struct param _params[] = {
- {0, 3, "invalid number of feature args"},
+ {0, 5, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
+ {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
r = read_param(_params, shift(as), &argc, &ti->error);
@@ -821,6 +824,14 @@ static int parse_features(struct arg_set *as, struct multipath *m)
continue;
}
+ if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+ (argc >= 1)) {
+ r = read_param(_params + 2, shift(as),
+ &m->pg_init_delay_msecs, &ti->error);
+ argc--;
+ continue;
+ }
+
ti->error = "Unrecognised multipath feature request";
r = -EINVAL;
} while (argc && !r);
@@ -931,7 +942,7 @@ static void flush_multipath_work(struct multipath *m)
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
- flush_scheduled_work();
+ flush_work_sync(&m->trigger_event);
}
static void multipath_dtr(struct dm_target *ti)
@@ -995,7 +1006,6 @@ static int fail_path(struct pgpath *pgpath)
pgpath->path.dev->name, m->nr_valid_paths);
schedule_work(&m->trigger_event);
- queue_work(kmultipathd, &pgpath->deactivate_path);
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -1034,7 +1044,7 @@ static int reinstate_path(struct pgpath *pgpath)
m->current_pgpath = NULL;
queue_work(kmultipathd, &m->process_queued_ios);
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
- if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
m->pg_init_in_progress++;
}
@@ -1169,6 +1179,7 @@ static void pg_init_done(void *data, int errors)
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
+ unsigned delay_retry = 0;
/* device or driver problems */
switch (errors) {
@@ -1193,8 +1204,9 @@ static void pg_init_done(void *data, int errors)
*/
bypass_pg(m, pg, 1);
break;
- /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
case SCSI_DH_RETRY:
+ /* Wait before retrying. */
+ delay_retry = 1;
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
@@ -1227,6 +1239,7 @@ static void pg_init_done(void *data, int errors)
if (!m->pg_init_required)
m->queue_io = 0;
+ m->pg_init_delay_retry = delay_retry;
queue_work(kmultipathd, &m->process_queued_ios);
/*
@@ -1241,7 +1254,7 @@ out:
static void activate_path(struct work_struct *work)
{
struct pgpath *pgpath =
- container_of(work, struct pgpath, activate_path);
+ container_of(work, struct pgpath, activate_path.work);
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
pg_init_done, pgpath);
@@ -1382,11 +1395,14 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
else {
DMEMIT("%u ", m->queue_if_no_path +
- (m->pg_init_retries > 0) * 2);
+ (m->pg_init_retries > 0) * 2 +
+ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
if (m->queue_if_no_path)
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
+ if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
+ DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
}
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1655,7 +1671,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
@@ -1687,7 +1703,7 @@ static int __init dm_multipath_init(void)
return -EINVAL;
}
- kmultipathd = create_workqueue("kmpathd");
+ kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
dm_unregister_target(&multipath_target);
@@ -1701,7 +1717,8 @@ static int __init dm_multipath_init(void)
* old workqueue would also create a bottleneck in the
* path of the storage hardware device activation.
*/
- kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+ kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
+ WQ_MEM_RECLAIM);
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
destroy_workqueue(kmultipathd);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
new file mode 100644
index 000000000000..b9e1e15ef11c
--- /dev/null
+++ b/drivers/md/dm-raid.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (C) 2010-2011 Neil Brown
+ * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/slab.h>
+
+#include "md.h"
+#include "raid5.h"
+#include "dm.h"
+#include "bitmap.h"
+
+#define DM_MSG_PREFIX "raid"
+
+/*
+ * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
+ * make it so the flag doesn't set anything.
+ */
+#ifndef MD_SYNC_STATE_FORCED
+#define MD_SYNC_STATE_FORCED 0
+#endif
+
+struct raid_dev {
+ /*
+ * Two DM devices, one to hold metadata and one to hold the
+ * actual data/parity. The reason for this is to not confuse
+ * ti->len and give more flexibility in altering size and
+ * characteristics.
+ *
+ * While it is possible for this device to be associated
+ * with a different physical device than the data_dev, it
+ * is intended for it to be the same.
+ * |--------- Physical Device ---------|
+ * |- meta_dev -|------ data_dev ------|
+ */
+ struct dm_dev *meta_dev;
+ struct dm_dev *data_dev;
+ struct mdk_rdev_s rdev;
+};
+
+/*
+ * Flags for rs->print_flags field.
+ */
+#define DMPF_DAEMON_SLEEP 0x1
+#define DMPF_MAX_WRITE_BEHIND 0x2
+#define DMPF_SYNC 0x4
+#define DMPF_NOSYNC 0x8
+#define DMPF_STRIPE_CACHE 0x10
+#define DMPF_MIN_RECOVERY_RATE 0x20
+#define DMPF_MAX_RECOVERY_RATE 0x40
+
+struct raid_set {
+ struct dm_target *ti;
+
+ uint64_t print_flags;
+
+ struct mddev_s md;
+ struct raid_type *raid_type;
+ struct dm_target_callbacks callbacks;
+
+ struct raid_dev dev[0];
+};
+
+/* Supported raid types and properties. */
+static struct raid_type {
+ const char *name; /* RAID algorithm. */
+ const char *descr; /* Descriptor text for logging. */
+ const unsigned parity_devs; /* # of parity devices. */
+ const unsigned minimal_devs; /* minimal # of devices in set. */
+ const unsigned level; /* RAID level. */
+ const unsigned algorithm; /* RAID algorithm. */
+} raid_types[] = {
+ {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
+ {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
+ {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
+ {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+ {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+ {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
+ {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
+ {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
+};
+
+static struct raid_type *get_raid_type(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(raid_types); i++)
+ if (!strcmp(raid_types[i].name, name))
+ return &raid_types[i];
+
+ return NULL;
+}
+
+static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
+{
+ unsigned i;
+ struct raid_set *rs;
+ sector_t sectors_per_dev;
+
+ if (raid_devs <= raid_type->parity_devs) {
+ ti->error = "Insufficient number of devices";
+ return ERR_PTR(-EINVAL);
+ }
+
+ sectors_per_dev = ti->len;
+ if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+ ti->error = "Target length not divisible by number of data devices";
+ return ERR_PTR(-EINVAL);
+ }
+
+ rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
+ if (!rs) {
+ ti->error = "Cannot allocate raid context";
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mddev_init(&rs->md);
+
+ rs->ti = ti;
+ rs->raid_type = raid_type;
+ rs->md.raid_disks = raid_devs;
+ rs->md.level = raid_type->level;
+ rs->md.new_level = rs->md.level;
+ rs->md.dev_sectors = sectors_per_dev;
+ rs->md.layout = raid_type->algorithm;
+ rs->md.new_layout = rs->md.layout;
+ rs->md.delta_disks = 0;
+ rs->md.recovery_cp = 0;
+
+ for (i = 0; i < raid_devs; i++)
+ md_rdev_init(&rs->dev[i].rdev);
+
+ /*
+ * Remaining items to be initialized by further RAID params:
+ * rs->md.persistent
+ * rs->md.external
+ * rs->md.chunk_sectors
+ * rs->md.new_chunk_sectors
+ */
+
+ return rs;
+}
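For illustration only (not part of the patch): the divisibility check done by context_alloc() above with sector_div(), shown with ordinary integer arithmetic and hypothetical example numbers.

/*
 * Illustrative only: the target length must split evenly across the
 * data devices (raid_devs - parity_devs), otherwise the table load is
 * rejected with "Target length not divisible by number of data devices".
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ti_len = 6291456;		 /* 3 GiB in 512-byte sectors */
	unsigned raid_devs = 5, parity_devs = 1; /* e.g. raid5 with 5 devices */
	unsigned data_devs = raid_devs - parity_devs;

	if (ti_len % data_devs) {
		fprintf(stderr, "length not divisible by data devices\n");
		return 1;
	}
	printf("sectors_per_dev = %llu\n",
	       (unsigned long long)(ti_len / data_devs));	/* 1572864 */
	return 0;
}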
+
+static void context_free(struct raid_set *rs)
+{
+ int i;
+
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev)
+ dm_put_device(rs->ti, rs->dev[i].data_dev);
+
+ kfree(rs);
+}
+
+/*
+ * For every device two words are given:
+ * <meta_dev>: meta device name or '-' if missing
+ * <data_dev>: data device name or '-' if missing
+ *
+ * This code parses those words.
+ */
+static int dev_parms(struct raid_set *rs, char **argv)
+{
+ int i;
+ int rebuild = 0;
+ int metadata_available = 0;
+ int ret = 0;
+
+ for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
+ rs->dev[i].rdev.raid_disk = i;
+
+ rs->dev[i].meta_dev = NULL;
+ rs->dev[i].data_dev = NULL;
+
+ /*
+ * There are no offsets, since there is a separate device
+ * for data and metadata.
+ */
+ rs->dev[i].rdev.data_offset = 0;
+ rs->dev[i].rdev.mddev = &rs->md;
+
+ if (strcmp(argv[0], "-")) {
+ rs->ti->error = "Metadata devices not supported";
+ return -EINVAL;
+ }
+
+ if (!strcmp(argv[1], "-")) {
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
+ (!rs->dev[i].rdev.recovery_offset)) {
+ rs->ti->error = "Drive designated for rebuild not specified";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ ret = dm_get_device(rs->ti, argv[1],
+ dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].data_dev);
+ if (ret) {
+ rs->ti->error = "RAID device lookup failure";
+ return ret;
+ }
+
+ rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
+ list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ rebuild++;
+ }
+
+ if (metadata_available) {
+ rs->md.external = 0;
+ rs->md.persistent = 1;
+ rs->md.major_version = 2;
+ } else if (rebuild && !rs->md.recovery_cp) {
+ /*
+ * Without metadata, we will not be able to tell if the array
+ * is in-sync or not - we must assume it is not. Therefore,
+ * it is impossible to rebuild a drive.
+ *
+ * Even if there is metadata, the on-disk information may
+ * indicate that the array is not in-sync and it will then
+ * fail at that time.
+ *
+ * User could specify 'nosync' option if desperate.
+ */
+ DMERR("Unable to rebuild drive while array is not in-sync");
+ rs->ti->error = "RAID device lookup failure";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Possible arguments are...
+ * RAID456:
+ * <chunk_size> [optional_args]
+ *
+ * Optional args:
+ * [[no]sync] Force or prevent recovery of the entire array
+ * [rebuild <idx>] Rebuild the drive indicated by the index
+ * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
+ * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ * [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
+ * [stripe_cache <sectors>] Stripe cache size for higher RAIDs
+ */
+static int parse_raid_params(struct raid_set *rs, char **argv,
+ unsigned num_raid_params)
+{
+ unsigned i, rebuild_cnt = 0;
+ unsigned long value;
+ char *key;
+
+ /*
+ * First, parse the in-order required arguments
+ */
+ if ((strict_strtoul(argv[0], 10, &value) < 0) ||
+ !is_power_of_2(value) || (value < 8)) {
+ rs->ti->error = "Bad chunk size";
+ return -EINVAL;
+ }
+
+ rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
+ argv++;
+ num_raid_params--;
+
+ /*
+ * Second, parse the unordered optional arguments
+ */
+ for (i = 0; i < rs->md.raid_disks; i++)
+ set_bit(In_sync, &rs->dev[i].rdev.flags);
+
+ for (i = 0; i < num_raid_params; i++) {
+ if (!strcmp(argv[i], "nosync")) {
+ rs->md.recovery_cp = MaxSector;
+ rs->print_flags |= DMPF_NOSYNC;
+ rs->md.flags |= MD_SYNC_STATE_FORCED;
+ continue;
+ }
+ if (!strcmp(argv[i], "sync")) {
+ rs->md.recovery_cp = 0;
+ rs->print_flags |= DMPF_SYNC;
+ rs->md.flags |= MD_SYNC_STATE_FORCED;
+ continue;
+ }
+
+ /* The rest of the optional arguments come in key/value pairs */
+ if ((i + 1) >= num_raid_params) {
+ rs->ti->error = "Wrong number of raid parameters given";
+ return -EINVAL;
+ }
+
+ key = argv[i++];
+ if (strict_strtoul(argv[i], 10, &value) < 0) {
+ rs->ti->error = "Bad numerical argument given in raid params";
+ return -EINVAL;
+ }
+
+ if (!strcmp(key, "rebuild")) {
+ if (++rebuild_cnt > rs->raid_type->parity_devs) {
+ rs->ti->error = "Too many rebuild drives given";
+ return -EINVAL;
+ }
+ if (value > rs->md.raid_disks) {
+ rs->ti->error = "Invalid rebuild index given";
+ return -EINVAL;
+ }
+ clear_bit(In_sync, &rs->dev[value].rdev.flags);
+ rs->dev[value].rdev.recovery_offset = 0;
+ } else if (!strcmp(key, "max_write_behind")) {
+ rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+
+ /*
+ * In device-mapper, we specify things in sectors, but
+ * MD records this value in kB
+ */
+ value /= 2;
+ if (value > COUNTER_MAX) {
+ rs->ti->error = "Max write-behind limit out of range";
+ return -EINVAL;
+ }
+ rs->md.bitmap_info.max_write_behind = value;
+ } else if (!strcmp(key, "daemon_sleep")) {
+ rs->print_flags |= DMPF_DAEMON_SLEEP;
+ if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
+ rs->ti->error = "daemon sleep period out of range";
+ return -EINVAL;
+ }
+ rs->md.bitmap_info.daemon_sleep = value;
+ } else if (!strcmp(key, "stripe_cache")) {
+ rs->print_flags |= DMPF_STRIPE_CACHE;
+
+ /*
+ * In device-mapper, we specify things in sectors, but
+ * MD records this value in kB
+ */
+ value /= 2;
+
+ if (rs->raid_type->level < 5) {
+ rs->ti->error = "Inappropriate argument: stripe_cache";
+ return -EINVAL;
+ }
+ if (raid5_set_cache_size(&rs->md, (int)value)) {
+ rs->ti->error = "Bad stripe_cache size";
+ return -EINVAL;
+ }
+ } else if (!strcmp(key, "min_recovery_rate")) {
+ rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+ if (value > INT_MAX) {
+ rs->ti->error = "min_recovery_rate out of range";
+ return -EINVAL;
+ }
+ rs->md.sync_speed_min = (int)value;
+ } else if (!strcmp(key, "max_recovery_rate")) {
+ rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+ if (value > INT_MAX) {
+ rs->ti->error = "max_recovery_rate out of range";
+ return -EINVAL;
+ }
+ rs->md.sync_speed_max = (int)value;
+ } else {
+ DMERR("Unable to parse RAID parameter: %s", key);
+ rs->ti->error = "Unable to parse RAID parameters";
+ return -EINVAL;
+ }
+ }
+
+ /* Assume there are no metadata devices until the drives are parsed */
+ rs->md.persistent = 0;
+ rs->md.external = 1;
+
+ return 0;
+}
+
+static void do_table_event(struct work_struct *ws)
+{
+ struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
+
+ dm_table_event(rs->ti->table);
+}
+
+static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
+{
+ struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+ return md_raid5_congested(&rs->md, bits);
+}
+
+static void raid_unplug(struct dm_target_callbacks *cb)
+{
+ struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+ md_raid5_unplug_device(rs->md.private);
+}
+
+/*
+ * Construct a RAID4/5/6 mapping:
+ * Args:
+ * <raid_type> <#raid_params> <raid_params> \
+ * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
+ *
+ * ** metadata devices are not supported yet, use '-' instead **
+ *
+ * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
+ * details on possible <raid_params>.
+ */
+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int ret;
+ struct raid_type *rt;
+ unsigned long num_raid_params, num_raid_devs;
+ struct raid_set *rs = NULL;
+
+ /* Must have at least <raid_type> <#raid_params> */
+ if (argc < 2) {
+ ti->error = "Too few arguments";
+ return -EINVAL;
+ }
+
+ /* raid type */
+ rt = get_raid_type(argv[0]);
+ if (!rt) {
+ ti->error = "Unrecognised raid_type";
+ return -EINVAL;
+ }
+ argc--;
+ argv++;
+
+ /* number of RAID parameters */
+ if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
+ ti->error = "Cannot understand number of RAID parameters";
+ return -EINVAL;
+ }
+ argc--;
+ argv++;
+
+ /* Skip over RAID params for now and find out # of devices */
+ if (num_raid_params + 1 > argc) {
+ ti->error = "Arguments do not agree with counts given";
+ return -EINVAL;
+ }
+
+ if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
+ (num_raid_devs >= INT_MAX)) {
+ ti->error = "Cannot understand number of raid devices";
+ return -EINVAL;
+ }
+
+ rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
+ if (IS_ERR(rs))
+ return PTR_ERR(rs);
+
+ ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
+ if (ret)
+ goto bad;
+
+ ret = -EINVAL;
+
+ argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+ argv += num_raid_params + 1;
+
+ if (argc != (num_raid_devs * 2)) {
+ ti->error = "Supplied RAID devices does not match the count given";
+ goto bad;
+ }
+
+ ret = dev_parms(rs, argv);
+ if (ret)
+ goto bad;
+
+ INIT_WORK(&rs->md.event_work, do_table_event);
+ ti->split_io = rs->md.chunk_sectors;
+ ti->private = rs;
+
+ mutex_lock(&rs->md.reconfig_mutex);
+ ret = md_run(&rs->md);
+ rs->md.in_sync = 0; /* Assume already marked dirty */
+ mutex_unlock(&rs->md.reconfig_mutex);
+
+ if (ret) {
+ ti->error = "Fail to run raid array";
+ goto bad;
+ }
+
+ rs->callbacks.congested_fn = raid_is_congested;
+ rs->callbacks.unplug_fn = raid_unplug;
+ dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+
+ return 0;
+
+bad:
+ context_free(rs);
+
+ return ret;
+}
+
+static void raid_dtr(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ list_del_init(&rs->callbacks.list);
+ md_stop(&rs->md);
+ context_free(rs);
+}
+
+static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+{
+ struct raid_set *rs = ti->private;
+ mddev_t *mddev = &rs->md;
+
+ mddev->pers->make_request(mddev, bio);
+
+ return DM_MAPIO_SUBMITTED;
+}
+
+static int raid_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ struct raid_set *rs = ti->private;
+ unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
+ unsigned sz = 0;
+ int i;
+ sector_t sync;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
+
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (test_bit(Faulty, &rs->dev[i].rdev.flags))
+ DMEMIT("D");
+ else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
+ DMEMIT("A");
+ else
+ DMEMIT("a");
+ }
+
+ if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
+ sync = rs->md.curr_resync_completed;
+ else
+ sync = rs->md.recovery_cp;
+
+ if (sync > rs->md.resync_max_sectors)
+ sync = rs->md.resync_max_sectors;
+
+ DMEMIT(" %llu/%llu",
+ (unsigned long long) sync,
+ (unsigned long long) rs->md.resync_max_sectors);
+
+ break;
+ case STATUSTYPE_TABLE:
+ /* The string you would use to construct this array */
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev &&
+ !test_bit(In_sync, &rs->dev[i].rdev.flags))
+ raid_param_cnt++; /* for rebuilds */
+
+ raid_param_cnt += (hweight64(rs->print_flags) * 2);
+ if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+ raid_param_cnt--;
+
+ DMEMIT("%s %u %u", rs->raid_type->name,
+ raid_param_cnt, rs->md.chunk_sectors);
+
+ if ((rs->print_flags & DMPF_SYNC) &&
+ (rs->md.recovery_cp == MaxSector))
+ DMEMIT(" sync");
+ if (rs->print_flags & DMPF_NOSYNC)
+ DMEMIT(" nosync");
+
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev &&
+ !test_bit(In_sync, &rs->dev[i].rdev.flags))
+ DMEMIT(" rebuild %u", i);
+
+ if (rs->print_flags & DMPF_DAEMON_SLEEP)
+ DMEMIT(" daemon_sleep %lu",
+ rs->md.bitmap_info.daemon_sleep);
+
+ if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+ DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
+
+ if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+ DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+
+ if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+ DMEMIT(" max_write_behind %lu",
+ rs->md.bitmap_info.max_write_behind);
+
+ if (rs->print_flags & DMPF_STRIPE_CACHE) {
+ raid5_conf_t *conf = rs->md.private;
+
+ /* convert from kiB to sectors */
+ DMEMIT(" stripe_cache %d",
+ conf ? conf->max_nr_stripes * 2 : 0);
+ }
+
+ DMEMIT(" %d", rs->md.raid_disks);
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ DMEMIT(" -"); /* metadata device */
+
+ if (rs->dev[i].data_dev)
+ DMEMIT(" %s", rs->dev[i].data_dev->name);
+ else
+ DMEMIT(" -");
+ }
+ }
+
+ return 0;
+}
+
+static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+ struct raid_set *rs = ti->private;
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; !ret && i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev)
+ ret = fn(ti,
+ rs->dev[i].data_dev,
+ 0, /* No offset on data devs */
+ rs->md.dev_sectors,
+ data);
+
+ return ret;
+}
+
+static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct raid_set *rs = ti->private;
+ unsigned chunk_size = rs->md.chunk_sectors << 9;
+ raid5_conf_t *conf = rs->md.private;
+
+ blk_limits_io_min(limits, chunk_size);
+ blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
+}
+
+static void raid_presuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ md_stop_writes(&rs->md);
+}
+
+static void raid_postsuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ mddev_suspend(&rs->md);
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ mddev_resume(&rs->md);
+}
+
+static struct target_type raid_target = {
+ .name = "raid",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = raid_ctr,
+ .dtr = raid_dtr,
+ .map = raid_map,
+ .status = raid_status,
+ .iterate_devices = raid_iterate_devices,
+ .io_hints = raid_io_hints,
+ .presuspend = raid_presuspend,
+ .postsuspend = raid_postsuspend,
+ .resume = raid_resume,
+};
+
+static int __init dm_raid_init(void)
+{
+ return dm_register_target(&raid_target);
+}
+
+static void __exit dm_raid_exit(void)
+{
+ dm_unregister_target(&raid_target);
+}
+
+module_init(dm_raid_init);
+module_exit(dm_raid_exit);
+
+MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid4");
+MODULE_ALIAS("dm-raid5");
+MODULE_ALIAS("dm-raid6");
+MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
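Illustration (not part of this patch): a possible constructor line for the new target, following the argument layout documented above raid_ctr() and the parameter names emitted by raid_status(). The raid_type name, device paths and sector counts below are made up; parse_raid_params() (earlier in this file) defines the authoritative parameter list.

	0 2097152 raid raid4 1 64 3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1

For the same hypothetical set, fully in sync, the STATUSTYPE_INFO output built above would have the shape:

	raid4 3 AAA 1048576/1048576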
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 19a59b041c27..dee326775c60 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
struct dm_io_request io_req = {
.bi_rw = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
- .mem.ptr.bvec = NULL,
+ .mem.ptr.addr = NULL,
.client = ms->io_client,
};
@@ -637,6 +637,12 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
.client = ms->io_client,
};
+ if (bio->bi_rw & REQ_DISCARD) {
+ io_req.bi_rw |= REQ_DISCARD;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = NULL;
+ }
+
for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
map_region(dest++, m, bio);
@@ -670,7 +676,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
- if (bio->bi_rw & REQ_FLUSH) {
+ if ((bio->bi_rw & REQ_FLUSH) ||
+ (bio->bi_rw & REQ_DISCARD)) {
bio_list_add(&sync, bio);
continue;
}
@@ -1076,8 +1083,10 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = ms;
ti->split_io = dm_rh_get_region_size(ms->rh);
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
- ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
+ ms->kmirrord_wq = alloc_workqueue("kmirrord",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
r = -ENOMEM;
@@ -1130,7 +1139,7 @@ static void mirror_dtr(struct dm_target *ti)
del_timer_sync(&ms->timer);
flush_workqueue(ms->kmirrord_wq);
- flush_scheduled_work();
+ flush_work_sync(&ms->trigger_event);
dm_kcopyd_client_destroy(ms->kcopyd_client);
destroy_workqueue(ms->kmirrord_wq);
free_context(ms, ti, ms->nr_mirrors);
@@ -1406,7 +1415,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 12, 0},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2129cdb115dc..95891dfcbca0 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
- flush_workqueue(ps->metadata_wq);
+ flush_work(&req.work);
return req.result;
}
@@ -818,7 +818,7 @@ static int persistent_ctr(struct dm_exception_store *store,
atomic_set(&ps->pending_count, 0);
ps->callbacks = NULL;
- ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+ ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
if (!ps->metadata_wq) {
kfree(ps);
DMERR("couldn't start header metadata update thread");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 53cf79d8bcbc..fdde53cd12b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
-#include <linux/workqueue.h>
#include "dm-exception-store.h"
@@ -80,9 +79,6 @@ struct dm_snapshot {
/* Origin writes don't trigger exceptions until this is set */
int active;
- /* Whether or not owning mapped_device is suspended */
- int suspended;
-
atomic_t pending_exceptions_count;
mempool_t *pending_pool;
@@ -106,10 +102,6 @@ struct dm_snapshot {
struct dm_kcopyd_client *kcopyd_client;
- /* Queue of snapshot writes for ksnapd to flush */
- struct bio_list queued_bios;
- struct work_struct queued_bios_work;
-
/* Wait for events based on state_bits */
unsigned long state_bits;
@@ -160,9 +152,6 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
}
EXPORT_SYMBOL(dm_snap_cow);
-static struct workqueue_struct *ksnapd;
-static void flush_queued_bios(struct work_struct *work);
-
static sector_t chunk_to_sector(struct dm_exception_store *store,
chunk_t chunk)
{
@@ -1110,7 +1099,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->ti = ti;
s->valid = 1;
s->active = 0;
- s->suspended = 0;
atomic_set(&s->pending_exceptions_count, 0);
init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
@@ -1153,9 +1141,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
- bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
ti->private = s;
ti->num_flush_requests = num_flush_requests;
@@ -1279,8 +1264,6 @@ static void snapshot_dtr(struct dm_target *ti)
struct dm_snapshot *s = ti->private;
struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
- flush_workqueue(ksnapd);
-
down_read(&_origins_lock);
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
@@ -1342,20 +1325,6 @@ static void flush_bios(struct bio *bio)
}
}
-static void flush_queued_bios(struct work_struct *work)
-{
- struct dm_snapshot *s =
- container_of(work, struct dm_snapshot, queued_bios_work);
- struct bio *queued_bios;
- unsigned long flags;
-
- spin_lock_irqsave(&s->pe_lock, flags);
- queued_bios = bio_list_get(&s->queued_bios);
- spin_unlock_irqrestore(&s->pe_lock, flags);
-
- flush_bios(queued_bios);
-}
-
static int do_origin(struct dm_dev *origin, struct bio *bio);
/*
@@ -1760,15 +1729,6 @@ static void snapshot_merge_presuspend(struct dm_target *ti)
stop_merge(s);
}
-static void snapshot_postsuspend(struct dm_target *ti)
-{
- struct dm_snapshot *s = ti->private;
-
- down_write(&s->lock);
- s->suspended = 1;
- up_write(&s->lock);
-}
-
static int snapshot_preresume(struct dm_target *ti)
{
int r = 0;
@@ -1783,7 +1743,7 @@ static int snapshot_preresume(struct dm_target *ti)
DMERR("Unable to resume snapshot source until "
"handover completes.");
r = -EINVAL;
- } else if (!snap_src->suspended) {
+ } else if (!dm_suspended(snap_src->ti)) {
DMERR("Unable to perform snapshot handover until "
"source is suspended.");
r = -EINVAL;
@@ -1816,7 +1776,6 @@ static void snapshot_resume(struct dm_target *ti)
down_write(&s->lock);
s->active = 1;
- s->suspended = 0;
up_write(&s->lock);
}
@@ -2194,7 +2153,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 7, 0},
+ .version = {1, 7, 1},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2207,13 +2166,12 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
.end_io = snapshot_end_io,
- .postsuspend = snapshot_postsuspend,
.preresume = snapshot_preresume,
.resume = snapshot_resume,
.status = snapshot_status,
@@ -2222,14 +2180,13 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_merge_map,
.end_io = snapshot_end_io,
.presuspend = snapshot_merge_presuspend,
- .postsuspend = snapshot_postsuspend,
.preresume = snapshot_preresume,
.resume = snapshot_merge_resume,
.status = snapshot_status,
@@ -2291,17 +2248,8 @@ static int __init dm_snapshot_init(void)
goto bad_tracked_chunk_cache;
}
- ksnapd = create_singlethread_workqueue("ksnapd");
- if (!ksnapd) {
- DMERR("Failed to create ksnapd workqueue.");
- r = -ENOMEM;
- goto bad_pending_pool;
- }
-
return 0;
-bad_pending_pool:
- kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
kmem_cache_destroy(pending_cache);
bad_pending_cache:
@@ -2322,8 +2270,6 @@ bad_register_snapshot_target:
static void __exit dm_snapshot_exit(void)
{
- destroy_workqueue(ksnapd);
-
dm_unregister_target(&snapshot_target);
dm_unregister_target(&origin_target);
dm_unregister_target(&merge_target);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index f0371b4c4fbf..dddfa14f2982 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -39,23 +39,20 @@ struct stripe_c {
struct dm_target *ti;
/* Work struct used for triggering events*/
- struct work_struct kstriped_ws;
+ struct work_struct trigger_event;
struct stripe stripe[0];
};
-static struct workqueue_struct *kstriped;
-
/*
* An event is triggered whenever a drive
* drops out of a stripe volume.
*/
static void trigger_event(struct work_struct *work)
{
- struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);
-
+ struct stripe_c *sc = container_of(work, struct stripe_c,
+ trigger_event);
dm_table_event(sc->ti->table);
-
}
static inline struct stripe_c *alloc_context(unsigned int stripes)
@@ -160,7 +157,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
- INIT_WORK(&sc->kstriped_ws, trigger_event);
+ INIT_WORK(&sc->trigger_event, trigger_event);
/* Set pointer to dm target; used in trigger_event */
sc->ti = ti;
@@ -211,7 +208,7 @@ static void stripe_dtr(struct dm_target *ti)
for (i = 0; i < sc->stripes; i++)
dm_put_device(ti, sc->stripe[i].dev);
- flush_workqueue(kstriped);
+ flush_work_sync(&sc->trigger_event);
kfree(sc);
}
@@ -367,7 +364,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
atomic_inc(&(sc->stripe[i].error_count));
if (atomic_read(&(sc->stripe[i].error_count)) <
DM_IO_ERROR_THRESHOLD)
- queue_work(kstriped, &sc->kstriped_ws);
+ schedule_work(&sc->trigger_event);
}
return error;
@@ -401,7 +398,7 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 3, 0},
+ .version = {1, 3, 1},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
@@ -422,20 +419,10 @@ int __init dm_stripe_init(void)
return r;
}
- kstriped = create_singlethread_workqueue("kstriped");
- if (!kstriped) {
- DMERR("failed to create workqueue kstriped");
- dm_unregister_target(&stripe_target);
- return -ENOMEM;
- }
-
return r;
}
void dm_stripe_exit(void)
{
dm_unregister_target(&stripe_target);
- destroy_workqueue(kstriped);
-
- return;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4d705cea0f8c..38e4eb1bb965 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -71,6 +71,8 @@ struct dm_table {
void *event_context;
struct dm_md_mempools *mempools;
+
+ struct list_head target_callbacks;
};
/*
@@ -204,6 +206,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
+ INIT_LIST_HEAD(&t->target_callbacks);
atomic_set(&t->holders, 0);
t->discards_supported = 1;
@@ -325,15 +328,18 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev,
BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->dm_dev.mode);
+ bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
- if (r)
- blkdev_put(bdev, d->dm_dev.mode);
- else
- d->dm_dev.bdev = bdev;
- return r;
+
+ r = bd_link_disk_holder(bdev, dm_disk(md));
+ if (r) {
+ blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
+ return r;
+ }
+
+ d->dm_dev.bdev = bdev;
+ return 0;
}
/*
@@ -344,8 +350,8 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
if (!d->dm_dev.bdev)
return;
- bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
- blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+ bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
d->dm_dev.bdev = NULL;
}
@@ -1223,10 +1229,17 @@ int dm_table_resume_targets(struct dm_table *t)
return 0;
}
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
+{
+ list_add(&cb->list, &t->target_callbacks);
+}
+EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
+
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
+ struct dm_target_callbacks *cb;
int r = 0;
list_for_each_entry(dd, devices, list) {
@@ -1241,6 +1254,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
bdevname(dd->dm_dev.bdev, b));
}
+ list_for_each_entry(cb, &t->target_callbacks, list)
+ if (cb->congested_fn)
+ r |= cb->congested_fn(cb, bdi_bits);
+
return r;
}
@@ -1262,6 +1279,7 @@ void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
+ struct dm_target_callbacks *cb;
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
@@ -1274,6 +1292,10 @@ void dm_table_unplug_all(struct dm_table *t)
dm_device_name(t->md),
bdevname(dd->dm_dev.bdev, b));
}
+
+ list_for_each_entry(cb, &t->target_callbacks, list)
+ if (cb->unplug_fn)
+ cb->unplug_fn(cb);
}
struct mapped_device *dm_table_get_md(struct dm_table *t)
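Illustration (not part of this patch): how a target other than dm-raid might use the new dm_table_add_target_callbacks() hook. The field names (list, congested_fn, unplug_fn) are the ones dereferenced in dm_table_any_congested() and dm_table_unplug_all() above; the header providing struct dm_target_callbacks is assumed to be <linux/device-mapper.h>, and everything named example_* is hypothetical.

#include <linux/device-mapper.h>
#include <linux/slab.h>

struct example_ctx {
	struct dm_target_callbacks callbacks;	/* must outlive the table entry */
	/* ... target-private state ... */
};

static int example_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	/* report congestion of whatever the target stacks on; 0 = not congested */
	return 0;
}

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->callbacks.congested_fn = example_is_congested;
	ctx->callbacks.unplug_fn = NULL;	/* optional; core checks before calling */
	dm_table_add_target_callbacks(ti->table, &ctx->callbacks);

	ti->private = ctx;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ctx = ti->private;

	/* unhook before freeing, as raid_dtr() does above */
	list_del_init(&ctx->callbacks.list);
	kfree(ctx);
}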
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7cb1352f7e7a..eaa3af0e0632 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -32,7 +32,6 @@
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
-static DEFINE_MUTEX(dm_mutex);
static const char *_name = DM_NAME;
static unsigned int major = 0;
@@ -328,7 +327,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
- mutex_lock(&dm_mutex);
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
@@ -346,7 +344,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
out:
spin_unlock(&_minor_lock);
- mutex_unlock(&dm_mutex);
return md ? 0 : -ENXIO;
}
@@ -355,10 +352,12 @@ static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
- mutex_lock(&dm_mutex);
+ spin_lock(&_minor_lock);
+
atomic_dec(&md->open_count);
dm_put(md);
- mutex_unlock(&dm_mutex);
+
+ spin_unlock(&_minor_lock);
return 0;
}
@@ -630,7 +629,7 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- trace_block_bio_complete(md->queue, bio);
+ trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
@@ -990,8 +989,8 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
- trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector);
+ trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
+ tio->io->bio->bi_bdev->bd_dev, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
@@ -1638,13 +1637,15 @@ static void dm_request_fn(struct request_queue *q)
if (map_request(ti, clone, md))
goto requeued;
- spin_lock_irq(q->queue_lock);
+ BUG_ON(!irqs_disabled());
+ spin_lock(q->queue_lock);
}
goto out;
requeued:
- spin_lock_irq(q->queue_lock);
+ BUG_ON(!irqs_disabled());
+ spin_lock(q->queue_lock);
plug_and_out:
if (!elv_queue_empty(q))
@@ -1884,7 +1885,8 @@ static struct mapped_device *alloc_dev(int minor)
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
- md->wq = create_singlethread_workqueue("kdmflush");
+ md->wq = alloc_workqueue("kdmflush",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
@@ -1992,13 +1994,14 @@ static void event_callback(void *context)
wake_up(&md->eventq);
}
+/*
+ * Protected by md->suspend_lock obtained by dm_swap_table().
+ */
static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- mutex_lock(&md->bdev->bd_inode->i_mutex);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
/*
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 175c424f201f..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,11 +287,14 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
mddev_t *mddev = q->queuedata;
int rv;
int cpu;
+ unsigned int sectors;
- if (mddev == NULL || mddev->pers == NULL) {
+ if (mddev == NULL || mddev->pers == NULL
+ || !mddev->ready) {
bio_io_error(bio);
return 0;
}
+ smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
@@ -309,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
atomic_inc(&mddev->active_io);
rcu_read_unlock();
+ /*
+ * save the sectors now since our bio can
+ * go away inside make_request
+ */
+ sectors = bio_sectors(bio);
rv = mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -546,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -703,9 +713,9 @@ static struct mdk_personality *find_pers(int level, char *clevel)
}
/* return the offset of the super block in 512byte sectors */
-static inline sector_t calc_dev_sboffset(struct block_device *bdev)
+static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
{
- sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
+ sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
@@ -763,7 +773,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
- bio->bi_bdev = rdev->bdev;
+ bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -793,7 +803,7 @@ static void bi_complete(struct bio *bio, int error)
}
int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
- struct page *page, int rw)
+ struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
@@ -801,8 +811,12 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
rw |= REQ_SYNC | REQ_UNPLUG;
- bio->bi_bdev = rdev->bdev;
- bio->bi_sector = sector;
+ bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
+ rdev->meta_bdev : rdev->bdev;
+ if (metadata_op)
+ bio->bi_sector = sector + rdev->sb_start;
+ else
+ bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
init_completion(&event);
bio->bi_private = &event;
@@ -827,7 +841,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
return 0;
- if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -989,7 +1003,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
*
* It also happens to be a multiple of 4Kb.
*/
- rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
@@ -1330,7 +1344,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
- rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1879,7 +1893,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
- bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+ bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled = 0;
@@ -1906,7 +1920,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
MD_BUG();
return;
}
- bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
+ bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
@@ -1934,21 +1948,13 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
struct block_device *bdev;
char b[BDEVNAME_SIZE];
- bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+ bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+ shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
- err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
- if (err) {
- printk(KERN_ERR "md: could not bd_claim %s.\n",
- bdevname(bdev, b));
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
- return err;
- }
- if (!shared)
- set_bit(AllReserved, &rdev->flags);
rdev->bdev = bdev;
return err;
}
@@ -1959,8 +1965,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
- bd_release(bdev);
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
@@ -2466,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev->raid_disk != -1)
return -EBUSY;
+ if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+ return -EBUSY;
+
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
@@ -2473,6 +2481,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev2->raid_disk == slot)
return -EEXIST;
+ if (slot >= rdev->mddev->raid_disks &&
+ slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
+ return -ENOSPC;
+
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
@@ -2490,7 +2502,8 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
- if (slot >= rdev->mddev->raid_disks)
+ if (slot >= rdev->mddev->raid_disks &&
+ slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
@@ -2606,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
mddev_lock(mddev);
list_for_each_entry(rdev2, &mddev->disks, same_set)
- if (test_bit(AllReserved, &rdev2->flags) ||
- (rdev->bdev == rdev2->bdev &&
- rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->sectors,
- rdev2->data_offset,
- rdev2->sectors))) {
+ if (rdev->bdev == rdev2->bdev &&
+ rdev != rdev2 &&
+ overlaps(rdev->data_offset, rdev->sectors,
+ rdev2->data_offset,
+ rdev2->sectors)) {
overlap = 1;
break;
}
@@ -3115,7 +3127,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
char nm[20];
if (rdev->raid_disk < 0)
continue;
- if (rdev->new_raid_disk > mddev->raid_disks)
+ if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
@@ -3744,6 +3756,8 @@ action_show(mddev_t *mddev, char *page)
return sprintf(page, "%s\n", type);
}
+static void reap_sync_thread(mddev_t *mddev);
+
static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
@@ -3758,9 +3772,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- mddev->recovery = 0;
+ reap_sync_thread(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -3912,7 +3924,7 @@ static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
- unsigned long max_sectors, resync;
+ unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
@@ -3923,7 +3935,7 @@ sync_completed_show(mddev_t *mddev, char *page)
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
- return sprintf(page, "%lu / %lu\n", resync, max_sectors);
+ return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
@@ -4010,19 +4022,24 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
+ unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
- if (new >= mddev->suspend_hi ||
- (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
- mddev->suspend_lo = new;
+
+ mddev->suspend_lo = new;
+ if (new >= old)
+ /* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
- return len;
- } else
- return -EINVAL;
+ else {
+ /* Expanding suspended region - need to wait */
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ }
+ return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
@@ -4039,20 +4056,24 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
+ unsigned long long old = mddev->suspend_hi;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
- if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
- (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
- mddev->suspend_hi = new;
+
+ mddev->suspend_hi = new;
+ if (new <= old)
+ /* Shrinking suspended region */
+ mddev->pers->quiesce(mddev, 2);
+ else {
+ /* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
- return len;
- } else
- return -EINVAL;
+ }
+ return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
@@ -4120,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- if (mddev->pers)
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
-
+ }
return len;
}
@@ -4430,7 +4451,9 @@ int md_run(mddev_t *mddev)
* We don't want the data to overlap the metadata,
* Internal Bitmap issues have been handled elsewhere.
*/
- if (rdev->data_offset < rdev->sb_start) {
+ if (rdev->meta_bdev) {
+ /* Nothing to check */;
+ } else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
@@ -4564,7 +4587,8 @@ int md_run(mddev_t *mddev)
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
-
+ smp_wmb();
+ mddev->ready = 1;
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0) {
char nm[20];
@@ -4603,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4691,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
+ mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4701,13 +4727,12 @@ static void md_clean(mddev_t *mddev)
mddev->plug = NULL;
}
-void md_stop_writes(mddev_t *mddev)
+static void __md_stop_writes(mddev_t *mddev)
{
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
+ reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
@@ -4721,10 +4746,18 @@ void md_stop_writes(mddev_t *mddev)
md_update_sb(mddev, 1);
}
}
+
+void md_stop_writes(mddev_t *mddev)
+{
+ mddev_lock(mddev);
+ __md_stop_writes(mddev);
+ mddev_unlock(mddev);
+}
EXPORT_SYMBOL_GPL(md_stop_writes);
void md_stop(mddev_t *mddev)
{
+ mddev->ready = 0;
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
@@ -4744,7 +4777,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
goto out;
}
if (mddev->pers) {
- md_stop_writes(mddev);
+ __md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
@@ -4781,7 +4814,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
if (mddev->ro)
set_disk_ro(disk, 0);
- md_stop_writes(mddev);
+ __md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
@@ -4799,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -5159,9 +5193,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
- info->raid_disk < mddev->raid_disks)
+ info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
- else
+ set_bit(In_sync, &rdev->flags);
+ } else
rdev->raid_disk = -1;
} else
super_types[mddev->major_version].
@@ -5238,7 +5273,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
- rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
@@ -5305,7 +5340,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
}
if (mddev->persistent)
- rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+ rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
@@ -5518,7 +5553,6 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
-
*/
if (mddev->sync_thread)
return -EBUSY;
@@ -5555,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
mddev->delta_disks = raid_disks - mddev->raid_disks;
rv = mddev->pers->check_reshape(mddev);
+ if (rv < 0)
+ mddev->delta_disks = 0;
return rv;
}
@@ -5981,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_size_change(mddev->gendisk, bdev);
+ check_disk_change(bdev);
out:
return err;
}
@@ -5996,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
+
+static int md_media_changed(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ mddev->changed = 0;
+ return 0;
+}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6006,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
+ .media_changed = md_media_changed,
+ .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -6041,7 +6094,8 @@ static int md_thread(void * arg)
|| kthread_should_stop(),
thread->timeout);
- if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
+ clear_bit(THREAD_WAKEUP, &thread->flags);
+ if (!kthread_should_stop())
thread->run(thread->mddev);
}
@@ -6807,7 +6861,7 @@ void md_do_sync(mddev_t *mddev)
desc, mdname(mddev));
mddev->curr_resync = j;
}
- mddev->curr_resync_completed = mddev->curr_resync;
+ mddev->curr_resync_completed = j;
while (j < max_sectors) {
sector_t sectors;
@@ -6825,8 +6879,7 @@ void md_do_sync(mddev_t *mddev)
md_unplug(mddev);
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
- mddev->curr_resync_completed =
- mddev->curr_resync;
+ mddev->curr_resync_completed = j;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -6962,9 +7015,6 @@ void md_do_sync(mddev_t *mddev)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
- mddev->curr_resync_completed = 0;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -7005,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+ if (mddev->degraded && !mddev->recovery_disabled) {
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
@@ -7031,6 +7081,45 @@ static int remove_and_add_spares(mddev_t *mddev)
}
return spares;
}
+
+static void reap_sync_thread(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+
+ /* resync has finished, collect result */
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* success...*/
+ /* activate any spares */
+ if (mddev->pers->spare_active(mddev))
+ sysfs_notify(&mddev->kobj, NULL,
+ "degraded");
+ }
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ mddev->pers->finish_reshape)
+ mddev->pers->finish_reshape(mddev);
+ md_update_sb(mddev, 1);
+
+ /* if array is no longer degraded, then any saved_raid_disk
+ * information must be scrapped
+ */
+ if (!mddev->degraded)
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ rdev->saved_raid_disk = -1;
+
+ clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ /* flag recovery needed just to double check */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
+ md_new_event(mddev);
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -7055,9 +7144,6 @@ static int remove_and_add_spares(mddev_t *mddev)
*/
void md_check_recovery(mddev_t *mddev)
{
- mdk_rdev_t *rdev;
-
-
if (mddev->bitmap)
bitmap_daemon_work(mddev);
@@ -7092,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev)
/* Only thing we do on a ro array is remove
* failed devices.
*/
- remove_and_add_spares(mddev);
+ mdk_rdev_t *rdev;
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Blocked, &rdev->flags) &&
+ test_bit(Faulty, &rdev->flags) &&
+ atomic_read(&rdev->nr_pending)==0) {
+ if (mddev->pers->hot_remove_disk(
+ mddev, rdev->raid_disk)==0) {
+ char nm[20];
+ sprintf(nm,"rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ rdev->raid_disk = -1;
+ }
+ }
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
@@ -7125,34 +7224,7 @@ void md_check_recovery(mddev_t *mddev)
goto unlock;
}
if (mddev->sync_thread) {
- /* resync has finished, collect result */
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
- /* success...*/
- /* activate any spares */
- if (mddev->pers->spare_active(mddev))
- sysfs_notify(&mddev->kobj, NULL,
- "degraded");
- }
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- mddev->pers->finish_reshape)
- mddev->pers->finish_reshape(mddev);
- md_update_sb(mddev, 1);
-
- /* if array is no-longer degraded, then any saved_raid_disk
- * information must be scrapped
- */
- if (!mddev->degraded)
- list_for_each_entry(rdev, &mddev->disks, same_set)
- rdev->saved_raid_disk = -1;
-
- mddev->recovery = 0;
- /* flag recovery needed just to double check */
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
@@ -7210,7 +7282,11 @@ void md_check_recovery(mddev_t *mddev)
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
- mddev->recovery = 0;
+ clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d05bab55df4e..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -60,6 +60,12 @@ struct mdk_rdev_s
mddev_t *mddev; /* RAID array if running */
int last_events; /* IO event timestamp */
+ /*
+ * If meta_bdev is non-NULL, it means that a separate device is
+ * being used to store the metadata (superblock/bitmap) which
+ * would otherwise be contained on the same device as the data (bdev).
+ */
+ struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
struct page *sb_page;
@@ -87,8 +93,6 @@ struct mdk_rdev_s
#define Faulty 1 /* device is known to have a fault */
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
-#define AllReserved 6 /* If whole device is reserved for
- * one array */
#define AutoDetected 7 /* added by auto-detect */
#define Blocked 8 /* An error occured on an externally
* managed array, don't allow writes
@@ -148,7 +152,8 @@ struct mddev_s
* are happening, so run/
* takeover/stop are not safe
*/
-
+ int ready; /* See when safe to pass
+ * IO requests down */
struct gendisk *gendisk;
struct kobject kobj;
@@ -269,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
+ int changed; /* True if we might need to
+ * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
@@ -497,8 +504,8 @@ extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
- struct page *page, int rw);
+extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
+ struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
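Illustration (not part of this patch): the new sync_page_io() calling convention. As the md.c hunk above shows, the offset is now added inside sync_page_io(), so callers pass sectors relative to the superblock (metadata_op == true, routed to meta_bdev when present) or to the data area (metadata_op == false) instead of adding sb_start/data_offset themselves. The example_* helpers are hypothetical.

/* read the superblock: old callers passed rdev->sb_start explicitly */
static int example_read_sb(mdk_rdev_t *rdev, struct page *page, int size)
{
	/* was: sync_page_io(rdev, rdev->sb_start, size, page, READ); */
	return sync_page_io(rdev, 0, size, page, READ, true);
}

/* read a data sector: old callers passed sect + rdev->data_offset */
static int example_read_data(mdk_rdev_t *rdev, sector_t sect, int size,
			     struct page *page)
{
	/* was: sync_page_io(rdev, sect + rdev->data_offset, size, page, READ); */
	return sync_page_io(rdev, sect, size, page, READ, false);
}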
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c355e55..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
rdev1->new_raid_disk = j;
}
+ if (mddev->level == 1) {
+ /* taking over a raid1 array -
+ * we have only one active disk
+ */
+ j = 0;
+ rdev1->new_raid_disk = j;
+ }
+
if (j < 0 || j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
"aborting!\n", mdname(mddev), j);
@@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
return priv_conf;
}
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+ raid0_conf_t *priv_conf;
+
+ /* Check layout:
+ * - (N - 1) mirror drives must be already faulty
+ */
+ if ((mddev->raid_disks - 1) != mddev->degraded) {
+ printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+ mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
+
static void *raid0_takeover(mddev_t *mddev)
{
/* raid0 can take over:
* raid4 - if all data disks are active.
* raid5 - providing it is Raid4 layout and one disk is faulty
* raid10 - assuming we have all necessary active disks
+ * raid1 - with (N - 1) mirror drives faulty
*/
if (mddev->level == 4)
return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
if (mddev->level == 10)
return raid0_takeover_raid10(mddev);
+ if (mddev->level == 1)
+ return raid0_takeover_raid1(mddev);
+
+ printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+ mddev->level);
+
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 845cf95b612c..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Only take the spinlock to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -1027,8 +1030,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
- KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+ printk(KERN_ALERT
+ "md/raid1:%s: Disk failure on %s, disabling device.\n"
+ "md/raid1:%s: Operation continuing on %d devices.\n",
mdname(mddev), bdevname(rdev->bdev, b),
mdname(mddev), conf->raid_disks - mddev->degraded);
}
@@ -1364,10 +1368,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
*/
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev,
- sect + rdev->data_offset,
+ sect,
s<<9,
bio->bi_io_vec[idx].bv_page,
- READ)) {
+ READ, false)) {
success = 1;
break;
}
@@ -1390,10 +1394,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
rdev = conf->mirrors[d].rdev;
atomic_add(s, &rdev->corrected_errors);
if (sync_page_io(rdev,
- sect + rdev->data_offset,
+ sect,
s<<9,
bio->bi_io_vec[idx].bv_page,
- WRITE) == 0)
+ WRITE, false) == 0)
md_error(mddev, rdev);
}
d = start;
@@ -1405,10 +1409,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
continue;
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev,
- sect + rdev->data_offset,
+ sect,
s<<9,
bio->bi_io_vec[idx].bv_page,
- READ) == 0)
+ READ, false) == 0)
md_error(mddev, rdev);
}
} else {
@@ -1488,10 +1492,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
- sync_page_io(rdev,
- sect + rdev->data_offset,
- s<<9,
- conf->tmppage, READ))
+ sync_page_io(rdev, sect, s<<9,
+ conf->tmppage, READ, false))
success = 1;
else {
d++;
@@ -1514,9 +1516,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev,
- sect + rdev->data_offset,
- s<<9, conf->tmppage, WRITE)
+ if (sync_page_io(rdev, sect, s<<9,
+ conf->tmppage, WRITE, false)
== 0)
/* Well, this device is dead */
md_error(mddev, rdev);
@@ -1531,9 +1532,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev,
- sect + rdev->data_offset,
- s<<9, conf->tmppage, READ)
+ if (sync_page_io(rdev, sect, s<<9,
+ conf->tmppage, READ, false)
== 0)
/* Well, this device is dead */
md_error(mddev, rdev);
@@ -2024,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0641674827f0..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Spinlock only taken to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -1051,8 +1054,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
- KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+ printk(KERN_ALERT
+ "md/raid10:%s: Disk failure on %s, disabling device.\n"
+ "md/raid10:%s: Operation continuing on %d devices.\n",
mdname(mddev), bdevname(rdev->bdev, b),
mdname(mddev), conf->raid_disks - mddev->degraded);
}
@@ -1559,9 +1563,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_unlock();
success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
- sect + rdev->data_offset,
+ sect,
s<<9,
- conf->tmppage, READ);
+ conf->tmppage, READ, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
@@ -1598,8 +1602,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
atomic_add(s, &rdev->corrected_errors);
if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
- sect + rdev->data_offset,
- s<<9, conf->tmppage, WRITE)
+ sect,
+ s<<9, conf->tmppage, WRITE, false)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
@@ -1635,9 +1639,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_unlock();
if (sync_page_io(rdev,
r10_bio->devs[sl].addr +
- sect + rdev->data_offset,
+ sect,
s<<9, conf->tmppage,
- READ) == 0) {
+ READ, false) == 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
"md/raid10:%s: unable to read back "
@@ -2303,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
- mddev->queue->queue_lock = &conf->device_lock;
-
mddev->thread = conf->thread;
conf->thread = NULL;
@@ -2462,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
conf = setup_conf(mddev);
- if (!IS_ERR(conf))
+ if (!IS_ERR(conf)) {
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0)
rdev->new_raid_disk = rdev->raid_disk * 2;
-
+ conf->barrier = 1;
+ }
+
return conf;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc574f303f8b..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1721,7 +1721,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
"md/raid:%s: Disk failure on %s, disabling device.\n"
- KERN_ALERT
"md/raid:%s: Operation continuing on %d devices.\n",
mdname(mddev),
bdevname(rdev->bdev, b),
@@ -4237,7 +4236,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
mddev->reshape_position = conf->reshape_progress;
- mddev->curr_resync_completed = mddev->curr_resync;
+ mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
@@ -4338,7 +4337,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress;
- mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
+ mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
@@ -5205,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
@@ -5339,7 +5337,7 @@ static int raid5_spare_active(mddev_t *mddev)
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
- sysfs_notify_dirent(tmp->rdev->sysfs_state);
+ sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
}
}
spin_lock_irqsave(&conf->device_lock, flags);
@@ -5518,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev)
raid5_conf_t *conf = mddev->private;
mdk_rdev_t *rdev;
int spares = 0;
- int added_devices = 0;
unsigned long flags;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5528,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev)
return -ENOSPC;
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk < 0 &&
- !test_bit(Faulty, &rdev->flags))
+ if (!test_bit(In_sync, &rdev->flags)
+ && !test_bit(Faulty, &rdev->flags))
spares++;
if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5572,29 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev)
* to correctly record the "partially reconstructed" state of
* such devices during the reshape and confusion could result.
*/
- if (mddev->delta_disks >= 0)
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk < 0 &&
- !test_bit(Faulty, &rdev->flags)) {
- if (raid5_add_disk(mddev, rdev) == 0) {
- char nm[20];
- if (rdev->raid_disk >= conf->previous_raid_disks) {
- set_bit(In_sync, &rdev->flags);
- added_devices++;
- } else
- rdev->recovery_offset = 0;
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
- /* Failure here is OK */;
- } else
- break;
- }
+ if (mddev->delta_disks >= 0) {
+ int added_devices = 0;
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk < 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ if (raid5_add_disk(mddev, rdev) == 0) {
+ char nm[20];
+ if (rdev->raid_disk
+ >= conf->previous_raid_disks) {
+ set_bit(In_sync, &rdev->flags);
+ added_devices++;
+ } else
+ rdev->recovery_offset = 0;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ if (sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm))
+ /* Failure here is OK */;
+ }
+ } else if (rdev->raid_disk >= conf->previous_raid_disks
+ && !test_bit(Faulty, &rdev->flags)) {
+ /* This is a spare that was manually added */
+ set_bit(In_sync, &rdev->flags);
+ added_devices++;
+ }
- /* When a reshape changes the number of devices, ->degraded
- * is measured against the larger of the pre and post number of
- * devices.*/
- if (mddev->delta_disks > 0) {
+ /* When a reshape changes the number of devices,
+ * ->degraded is measured against the larger of the
+ * pre and post number of devices.
+ */
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- added_devices;
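
Illustrative note on the hunk above (a sketch, not part of the patch): spares are now counted as any device that is neither In_sync nor Faulty, and after a grow ->degraded is recomputed against the larger, post-reshape device count, subtracting the freshly added devices that are already In_sync. A minimal sketch of that accounting, with illustrative names:

/* Sketch only: degraded is measured against the larger device count after
 * a grow, so newly added devices that are already In_sync are subtracted
 * back out.  Mirrors the arithmetic in the hunk above.
 */
static int sketch_recompute_degraded(int raid_disks, int previous_raid_disks,
				     int added_in_sync)
{
	int degraded = 0;

	if (raid_disks > previous_raid_disks)
		degraded += (raid_disks - previous_raid_disks) - added_in_sync;

	return degraded;
}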
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 982f000a57ff..9f47e383c57a 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -452,7 +452,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
dev->ext = ext;
- mutex_init(&dev->lock);
+ mutex_init(&dev->v4l2_lock);
spin_lock_init(&dev->int_slock);
spin_lock_init(&dev->slock);
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index e3fedc60fe77..1bd3dd762c6b 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -15,18 +15,15 @@ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
}
/* is it free? */
- mutex_lock(&dev->lock);
if (vv->resources & bit) {
DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
/* no, someone else uses it */
- mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
vv->resources |= bit;
DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
- mutex_unlock(&dev->lock);
return 1;
}
@@ -37,11 +34,9 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
BUG_ON((fh->resources & bits) != bits);
- mutex_lock(&dev->lock);
fh->resources &= ~bits;
vv->resources &= ~bits;
DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
- mutex_unlock(&dev->lock);
}
@@ -396,7 +391,7 @@ static const struct v4l2_file_operations video_fops =
.write = fops_write,
.poll = fops_poll,
.mmap = fops_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static void vv_callback(struct saa7146_dev *dev, unsigned long status)
@@ -505,6 +500,7 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
vfd->fops = &video_fops;
vfd->ioctl_ops = &dev->ext_vv_data->ops;
vfd->release = video_device_release;
+ vfd->lock = &dev->v4l2_lock;
vfd->tvnorms = 0;
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
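
These hunks drop the driver's hand-rolled locking around the ioctl paths: the file operations switch from .ioctl to .unlocked_ioctl, and one mutex is handed to the V4L2 core through vfd->lock so that video_ioctl2() serializes the ioctls itself. A minimal sketch of that registration pattern (names are illustrative, not the saa7146 ones):

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static DEFINE_MUTEX(sketch_lock);		/* one lock per device instance */

static const struct v4l2_file_operations sketch_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,		/* core takes vfd->lock around each ioctl */
};

static int sketch_register(struct video_device *vfd)
{
	vfd->fops = &sketch_fops;
	vfd->lock = &sketch_lock;		/* handlers no longer lock by hand */
	return video_register_device(vfd, VFL_TYPE_GRABBER, -1);
}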
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 2d4533ab22b7..afe85801d6ca 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
sizeof(struct saa7146_buf),
- file, NULL);
+ file, &dev->v4l2_lock);
init_timer(&fh->vbi_read_timeout);
fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 0ac5c619aecf..9aafa4e969a8 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -553,8 +553,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
}
}
- mutex_lock(&dev->lock);
-
/* ok, accept it */
vv->ov_fb = *fb;
vv->ov_fmt = fmt;
@@ -563,8 +561,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
}
-
- mutex_unlock(&dev->lock);
return 0;
}
@@ -649,8 +645,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
return -EINVAL;
}
- mutex_lock(&dev->lock);
-
switch (ctrl->type) {
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
@@ -693,7 +687,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
/* fixme: we can support changing VFLIP and HFLIP here... */
if (IS_CAPTURE_ACTIVE(fh) != 0) {
DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
- mutex_unlock(&dev->lock);
return -EBUSY;
}
vv->hflip = c->value;
@@ -701,16 +694,13 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_VFLIP:
if (IS_CAPTURE_ACTIVE(fh) != 0) {
DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
- mutex_unlock(&dev->lock);
return -EBUSY;
}
vv->vflip = c->value;
break;
default:
- mutex_unlock(&dev->lock);
return -EINVAL;
}
- mutex_unlock(&dev->lock);
if (IS_OVERLAY_ACTIVE(fh) != 0) {
saa7146_stop_preview(fh);
@@ -902,22 +892,18 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
err = vidioc_try_fmt_vid_overlay(file, fh, f);
if (0 != err)
return err;
- mutex_lock(&dev->lock);
fh->ov.win = f->fmt.win;
fh->ov.nclips = f->fmt.win.clipcount;
if (fh->ov.nclips > 16)
fh->ov.nclips = 16;
if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
sizeof(struct v4l2_clip) * fh->ov.nclips)) {
- mutex_unlock(&dev->lock);
return -EFAULT;
}
/* fh->ov.fh is used to indicate that we have valid overlay information, too */
fh->ov.fh = fh;
- mutex_unlock(&dev->lock);
-
/* check if our current overlay is active */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
saa7146_stop_preview(fh);
@@ -976,8 +962,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
}
}
- mutex_lock(&dev->lock);
-
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
if (*id & dev->ext_vv_data->stds[i].id)
break;
@@ -988,8 +972,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
found = 1;
}
- mutex_unlock(&dev->lock);
-
if (vv->ov_suspend != NULL) {
saa7146_start_preview(vv->ov_suspend);
vv->ov_suspend = NULL;
@@ -1354,7 +1336,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7146_buf),
- file, NULL);
+ file, &dev->v4l2_lock);
return 0;
}
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 78b089526e02..6fc79f15dcbc 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@ config MEDIA_TUNER
config MEDIA_TUNER_CUSTOMISE
bool "Customize analog and hybrid tuner modules to build"
depends on MEDIA_TUNER
- default y if EMBEDDED
+ default y if EXPERT
help
This allows the user to deselect tuner drivers unnecessary
for their hardware from the build. Use this option with care
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c9062ceddc71..bc6a67768af1 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -95,8 +95,7 @@ static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close)
msleep(20);
} else {
msg = disable;
- tuner_i2c_xfer_send(&priv->i2c_props, msg, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &msg[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
buf[2] = msg[1];
buf[2] &= ~0x04;
@@ -233,19 +232,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
}
+
tda8290_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
for (i = 0; i < 3; i++) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1, &pll_stat, 1);
if (pll_stat & 0x80) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_adc_sat, 1,
+ &adc_sat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1,
+ &agc_stat, 1);
tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
break;
} else {
@@ -259,20 +261,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
agc_stat, adc_sat, pll_stat & 0x80);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
msleep(100);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1, &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1, &pll_stat, 1);
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
agc_stat, pll_stat & 0x80);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
msleep(100);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_agc_stat, 1,
+ &agc_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1,
+ &pll_stat, 1);
if((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
@@ -284,10 +288,12 @@ static void tda8290_set_params(struct dvb_frontend *fe,
/* l/ l' deadlock? */
if(priv->tda8290_easy_mode & 0x60) {
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_adc_sat, 1,
+ &adc_sat, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &addr_pll_stat, 1,
+ &pll_stat, 1);
if ((adc_sat > 20) || !(pll_stat & 0x80)) {
tuner_dbg("trying to resolve SECAM L deadlock\n");
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
@@ -307,8 +313,7 @@ static void tda8295_power(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01;
@@ -323,8 +328,7 @@ static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x01, 0x00 };
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
@@ -353,8 +357,7 @@ static void tda8295_agc1_out(struct dvb_frontend *fe, int enable)
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
- tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] &= ~0x40;
@@ -370,10 +373,10 @@ static void tda8295_agc2_out(struct dvb_frontend *fe, int enable)
unsigned char set_gpio_cf[] = { 0x44, 0x00 };
unsigned char set_gpio_val[] = { 0x46, 0x00 };
- tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_cf[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_cf[1], 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_val[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_val[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &set_gpio_val[0], 1, &set_gpio_val[1], 1);
set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
@@ -392,8 +395,7 @@ static int tda8295_has_signal(struct dvb_frontend *fe)
unsigned char hvpll_stat = 0x26;
unsigned char ret;
- tuner_i2c_xfer_send(&priv->i2c_props, &hvpll_stat, 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &ret, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
return (ret & 0x01) ? 65535 : 0;
}
@@ -413,8 +415,8 @@ static void tda8295_set_params(struct dvb_frontend *fe,
tda8295_power(fe, 1);
tda8295_agc1_out(fe, 1);
- tuner_i2c_xfer_send(&priv->i2c_props, &blanking_mode[0], 1);
- tuner_i2c_xfer_recv(&priv->i2c_props, &blanking_mode[1], 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ &blanking_mode[0], 1, &blanking_mode[1], 1);
tda8295_set_video_std(fe);
@@ -447,8 +449,8 @@ static int tda8290_has_signal(struct dvb_frontend *fe)
unsigned char i2c_get_afc[1] = { 0x1B };
unsigned char afc = 0;
- tuner_i2c_xfer_send(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc));
- tuner_i2c_xfer_recv(&priv->i2c_props, &afc, 1);
+ tuner_i2c_xfer_send_recv(&priv->i2c_props,
+ i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
return (afc & 0x80)? 65535:0;
}
@@ -654,20 +656,26 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
static int tda8290_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8290_ID 0x89
- unsigned char tda8290_id[] = { 0x1f, 0x00 };
+ u8 reg = 0x1f, id;
+ struct i2c_msg msg_read[] = {
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ };
/* detect tda8290 */
- tuner_i2c_xfer_send(i2c_props, &tda8290_id[0], 1);
- tuner_i2c_xfer_recv(i2c_props, &tda8290_id[1], 1);
+ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ __func__, reg);
+ return -ENODEV;
+ }
- if (tda8290_id[1] == TDA8290_ID) {
+ if (id == TDA8290_ID) {
if (debug)
printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
__func__, i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
-
return -ENODEV;
}
@@ -675,16 +683,23 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8295_ID 0x8a
#define TDA8295C2_ID 0x8b
- unsigned char tda8295_id[] = { 0x2f, 0x00 };
+ u8 reg = 0x2f, id;
+ struct i2c_msg msg_read[] = {
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ };
- /* detect tda8295 */
- tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
- tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
+ /* detect tda8290 */
+ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ __func__, reg);
+ return -ENODEV;
+ }
- if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
+ if ((id & 0xfe) == TDA8295_ID) {
if (debug)
printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
- __func__, (tda8295_id[1] == TDA8295_ID) ?
+ __func__, (id == TDA8295_ID) ?
"tda8295c1" : "tda8295c2",
i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
@@ -740,9 +755,11 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
sizeof(struct analog_demod_ops));
}
- if ((!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) &&
- (tda829x_find_tuner(fe) < 0))
- goto fail;
+ if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
+ tda8295_power(fe, 1);
+ if (tda829x_find_tuner(fe) < 0)
+ goto fail;
+ }
switch (priv->ver) {
case TDA8290:
@@ -786,6 +803,8 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
return fe;
fail:
+ memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
+
tda829x_release(fe);
return NULL;
}
@@ -809,8 +828,8 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
int i;
/* rule out tda9887, which would return the same byte repeatedly */
- tuner_i2c_xfer_send(&i2c_props, soft_reset, 1);
- tuner_i2c_xfer_recv(&i2c_props, buf, PROBE_BUFFER_SIZE);
+ tuner_i2c_xfer_send_recv(&i2c_props,
+ soft_reset, 1, buf, PROBE_BUFFER_SIZE);
for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
if (buf[i] != buf[0])
break;
@@ -827,13 +846,12 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
/* fall back to old probing method */
tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
- tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
- tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+ tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
if (data == 0) {
tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
- tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
- tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+ tuner_i2c_xfer_send_recv(&i2c_props,
+ &addr_dto_lsb, 1, &data, 1);
if (data == 0x7b) {
return 0;
}
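
The repeated send-then-receive pairs above collapse into tuner_i2c_xfer_send_recv(), and the probe functions switch to a raw two-message i2c_transfer() (a one-byte write of the register address followed by a one-byte read) so a failed bus transaction can be detected and reported. A minimal sketch of that write-then-read pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/types.h>

/* Read one register: write its address, then read its value back. */
static int sketch_read_reg(struct i2c_adapter *adap, u8 chip, u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = chip, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = chip, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* i2c_transfer() returns the number of messages actually processed. */
	if (i2c_transfer(adap, msg, 2) != 2)
		return -ENODEV;
	return 0;
}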
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 8ca48f76dfa9..98ffb40728e3 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -514,8 +514,8 @@ struct dib0700_rc_response {
union {
u16 system16;
struct {
- u8 system;
u8 not_system;
+ u8 system;
};
};
u8 data;
@@ -575,7 +575,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
deb_data("NEC extended protocol\n");
/* NEC extended code - 24 bits */
- keycode = poll_reply->system16 << 8 | poll_reply->data;
+ keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data;
} else {
deb_data("NEC normal protocol\n");
/* normal NEC code - 16 bits */
@@ -587,7 +587,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
deb_data("RC5 protocol\n");
/* RC5 Protocol */
toggle = poll_reply->report_id;
- keycode = poll_reply->system16 << 8 | poll_reply->data;
+ keycode = poll_reply->system << 8 | poll_reply->data;
break;
}
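
The struct reorder and the be16_to_cpu() conversion above both deal with the byte order of the address field reported by the firmware: for extended NEC the full 16-bit address arrives in wire (big-endian) order and must be swapped before it is shifted into the 24-bit keycode, while plain NEC uses only the single system byte. A minimal sketch of the keycode composition (field names follow the hunk; illustrative, not the driver code):

#include <asm/byteorder.h>
#include <linux/types.h>

static u32 sketch_nec_keycode(u8 system, u8 not_system, __be16 system16, u8 data)
{
	/* Failed complement check: extended NEC, the whole 16-bit address
	 * is significant and the keycode is 24 bits wide.
	 */
	if ((system ^ not_system) != 0xff)
		return be16_to_cpu(system16) << 8 | data;

	/* Plain NEC: 8-bit address plus 8-bit command, 16-bit keycode. */
	return system << 8 | data;
}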
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index fcf3828472b8..f82d4a93feb3 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -172,7 +172,8 @@ void fdtv_unregister_rc(struct firedtv *fdtv)
void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
{
- u16 *keycode = fdtv->remote_ctrl_dev->keycode;
+ struct input_dev *idev = fdtv->remote_ctrl_dev;
+ u16 *keycode = idev->keycode;
if (code >= 0x0300 && code <= 0x031f)
code = keycode[code - 0x0300];
@@ -188,6 +189,8 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
return;
}
- input_report_key(fdtv->remote_ctrl_dev, code, 1);
- input_report_key(fdtv->remote_ctrl_dev, code, 0);
+ input_report_key(idev, code, 1);
+ input_sync(idev);
+ input_report_key(idev, code, 0);
+ input_sync(idev);
}
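
The added input_sync() calls above matter because press and release are reported back to back: without a sync between them both events land in the same input frame, and userspace may only see the final (released) state, losing the keypress. A minimal sketch of the discrete press/release pattern:

#include <linux/input.h>

static void sketch_report_keypress(struct input_dev *idev, unsigned int code)
{
	input_report_key(idev, code, 1);	/* press */
	input_sync(idev);			/* flush as its own event frame */
	input_report_key(idev, code, 0);	/* release */
	input_sync(idev);
}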
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index ef3e43a03199..b8519ba511e5 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
config DVB_FE_CUSTOMISE
bool "Customise the frontend modules to build"
depends on DVB_CORE
- default y if EMBEDDED
+ default y if EXPERT
help
This allows the user to select/deselect frontend drivers for their
hardware from the build.
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index ce222055526d..ba25fa0b0fc2 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -334,11 +334,11 @@ static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
if_sample_freq = 3300000; /* 3.3 MHz */
break;
case BANDWIDTH_7_MHZ:
- if_sample_freq = 3800000; /* 3.8 MHz */
+ if_sample_freq = 3500000; /* 3.5 MHz */
break;
case BANDWIDTH_8_MHZ:
default:
- if_sample_freq = 4300000; /* 4.3 MHz */
+ if_sample_freq = 4000000; /* 4.0 MHz */
break;
}
} else if (state->config.tuner == AF9013_TUNER_TDA18218) {
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 6360c681ded9..6c2e929bd79f 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -311,7 +311,7 @@ struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
return fe;
error:
- ix2505v_release(fe);
+ kfree(state);
return NULL;
}
EXPORT_SYMBOL(ix2505v_attach);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index d3ad3e75a35a..cc4acd2f920d 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -43,6 +43,8 @@ struct mb86a20s_state {
const struct mb86a20s_config *config;
struct dvb_frontend frontend;
+
+ bool need_init;
};
struct regdata {
@@ -318,7 +320,7 @@ static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, &msg, 1);
if (rc != 1) {
- printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
+ printk("%s: writereg error (rc == %i, reg == 0x%02x,"
" data == 0x%02x)\n", __func__, rc, reg, data);
return rc;
}
@@ -353,7 +355,7 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
rc = i2c_transfer(state->i2c, msg, 2);
if (rc != 2) {
- rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc);
+ rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
return rc;
}
@@ -382,23 +384,31 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
/* Initialize the frontend */
rc = mb86a20s_writeregdata(state, mb86a20s_init);
if (rc < 0)
- return rc;
+ goto err;
if (!state->config->is_serial) {
regD5 &= ~1;
rc = mb86a20s_writereg(state, 0x50, 0xd5);
if (rc < 0)
- return rc;
+ goto err;
rc = mb86a20s_writereg(state, 0x51, regD5);
if (rc < 0)
- return rc;
+ goto err;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- return 0;
+err:
+ if (rc < 0) {
+ state->need_init = true;
+ printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
+ } else {
+ state->need_init = false;
+ dprintk("Initialization succeded.\n");
+ }
+ return rc;
}
static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
@@ -485,8 +495,22 @@ static int mb86a20s_set_frontend(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
+ dprintk("Calling tuner set parameters\n");
fe->ops.tuner_ops.set_params(fe, p);
+ /*
+ * Make it more reliable: if, for some reason, the initial
+ * device initialization doesn't happen, initialize it when
+ * the SBTVD parameters are adjusted.
+ *
+ * Unfortunately, due to a hard-to-track bug in tda829x/tda18271,
+ * the AGC callback logic is not called at DVB attach time,
+ * so mb86a20s is not initialized with the Kworld SBTVD.
+ * This hack is needed to make the Kworld SBTVD work.
+ */
+ if (state->need_init)
+ mb86a20s_initfe(fe);
+
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
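
The need_init flag above is a simple deferred-initialization fallback: if the first init attempt fails (here because the tuner's AGC callback may not run at attach time), the failure is remembered and the init is retried on the next tune. A minimal sketch of that pattern, with illustrative names:

#include <linux/types.h>

struct sketch_state {
	bool need_init;
};

static int sketch_init(struct sketch_state *st)
{
	int rc = 0;			/* rc = hw_init(st); in a real driver */

	st->need_init = rc < 0;		/* remember a failure so it can be retried */
	return rc;
}

static int sketch_set_frontend(struct sketch_state *st)
{
	if (st->need_init)
		sketch_init(st);	/* retry the init that failed earlier */

	/* ... program the new channel ... */
	return 0;
}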
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 122c72806916..9fc1dd0ba4c3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
ca_slot_info_t *info=(ca_slot_info_t *)parg;
- if (info->num > 1)
+ if (info->num < 0 || info->num > 1)
return -EINVAL;
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 3c5a4739ed70..ecdffa6aac66 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,20 +151,6 @@ config RADIO_GEMTEK_PROBE
following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
0x28c.
-config RADIO_GEMTEK_PCI
- tristate "GemTek PCI Radio Card support"
- depends on VIDEO_V4L2 && PCI
- ---help---
- Choose Y here if you have this PCI FM radio card.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video for Linux API. Information on
- this API and pointers to "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-gemtek-pci.
-
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
depends on VIDEO_V4L2 && PCI
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index d2970748a69f..717656d2f749 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
-obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 6cc5d130fbc8..4ce10dbeadd8 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,6 +31,7 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
+#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
deleted file mode 100644
index 28fa85ba2087..000000000000
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- ***************************************************************************
- *
- * radio-gemtek-pci.c - Gemtek PCI Radio driver
- * (C) 2001 Vladimir Shebordaev <vshebordaev@mail.ru>
- *
- ***************************************************************************
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- ***************************************************************************
- *
- * Gemtek Corp still silently refuses to release any specifications
- * of their multimedia devices, so the protocol still has to be
- * reverse engineered.
- *
- * The v4l code was inspired by Jonas Munsin's Gemtek serial line
- * radio device driver.
- *
- * Please, let me know if this piece of code was useful :)
- *
- * TODO: multiple device support and portability were not tested
- *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
- *
- ***************************************************************************
- */
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/videodev2.h>
-#include <linux/errno.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-MODULE_AUTHOR("Vladimir Shebordaev <vshebordaev@mail.ru>");
-MODULE_DESCRIPTION("The video4linux driver for the Gemtek PCI Radio Card");
-MODULE_LICENSE("GPL");
-
-static int nr_radio = -1;
-static int mx = 1;
-
-module_param(mx, bool, 0);
-MODULE_PARM_DESC(mx, "single digit: 1 - turn off the turner upon module exit (default), 0 - do not");
-module_param(nr_radio, int, 0);
-MODULE_PARM_DESC(nr_radio, "video4linux device number to use");
-
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
-#ifndef PCI_VENDOR_ID_GEMTEK
-#define PCI_VENDOR_ID_GEMTEK 0x5046
-#endif
-
-#ifndef PCI_DEVICE_ID_GEMTEK_PR103
-#define PCI_DEVICE_ID_GEMTEK_PR103 0x1001
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_LOW
-#define GEMTEK_PCI_RANGE_LOW (87*16000)
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_HIGH
-#define GEMTEK_PCI_RANGE_HIGH (108*16000)
-#endif
-
-struct gemtek_pci {
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex lock;
- struct pci_dev *pdev;
-
- u32 iobase;
- u32 length;
-
- u32 current_frequency;
- u8 mute;
-};
-
-static inline struct gemtek_pci *to_gemtek_pci(struct v4l2_device *v4l2_dev)
-{
- return container_of(v4l2_dev, struct gemtek_pci, v4l2_dev);
-}
-
-static inline u8 gemtek_pci_out(u16 value, u32 port)
-{
- outw(value, port);
-
- return (u8)value;
-}
-
-#define _b0(v) (*((u8 *)&v))
-
-static void __gemtek_pci_cmd(u16 value, u32 port, u8 *last_byte, int keep)
-{
- u8 byte = *last_byte;
-
- if (!value) {
- if (!keep)
- value = (u16)port;
- byte &= 0xfd;
- } else
- byte |= 2;
-
- _b0(value) = byte;
- outw(value, port);
- byte |= 1;
- _b0(value) = byte;
- outw(value, port);
- byte &= 0xfe;
- _b0(value) = byte;
- outw(value, port);
-
- *last_byte = byte;
-}
-
-static inline void gemtek_pci_nil(u32 port, u8 *last_byte)
-{
- __gemtek_pci_cmd(0x00, port, last_byte, false);
-}
-
-static inline void gemtek_pci_cmd(u16 cmd, u32 port, u8 *last_byte)
-{
- __gemtek_pci_cmd(cmd, port, last_byte, true);
-}
-
-static void gemtek_pci_setfrequency(struct gemtek_pci *card, unsigned long frequency)
-{
- int i;
- u32 value = frequency / 200 + 856;
- u16 mask = 0x8000;
- u8 last_byte;
- u32 port = card->iobase;
-
- mutex_lock(&card->lock);
- card->current_frequency = frequency;
- last_byte = gemtek_pci_out(0x06, port);
-
- i = 0;
- do {
- gemtek_pci_nil(port, &last_byte);
- i++;
- } while (i < 9);
-
- i = 0;
- do {
- gemtek_pci_cmd(value & mask, port, &last_byte);
- mask >>= 1;
- i++;
- } while (i < 16);
-
- outw(0x10, port);
- mutex_unlock(&card->lock);
-}
-
-
-static void gemtek_pci_mute(struct gemtek_pci *card)
-{
- mutex_lock(&card->lock);
- outb(0x1f, card->iobase);
- card->mute = true;
- mutex_unlock(&card->lock);
-}
-
-static void gemtek_pci_unmute(struct gemtek_pci *card)
-{
- if (card->mute) {
- gemtek_pci_setfrequency(card, card->current_frequency);
- card->mute = false;
- }
-}
-
-static int gemtek_pci_getsignal(struct gemtek_pci *card)
-{
- int sig;
-
- mutex_lock(&card->lock);
- sig = (inb(card->iobase) & 0x08) ? 0 : 1;
- mutex_unlock(&card->lock);
- return sig;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *v)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
- strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
- snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(card->pdev));
- v->version = RADIO_VERSION;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (v->index > 0)
- return -EINVAL;
-
- strlcpy(v->name, "FM", sizeof(v->name));
- v->type = V4L2_TUNER_RADIO;
- v->rangelow = GEMTEK_PCI_RANGE_LOW;
- v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
- v->rxsubchans = V4L2_TUNER_SUB_MONO;
- v->capability = V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff * gemtek_pci_getsignal(card);
- return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- return v->index ? -EINVAL : 0;
-}
-
-static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
- return -EINVAL;
- if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
- f->frequency > GEMTEK_PCI_RANGE_HIGH)
- return -EINVAL;
- gemtek_pci_setfrequency(card, f->frequency);
- card->mute = false;
- return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- if (f->tuner != 0)
- return -EINVAL;
- f->type = V4L2_TUNER_RADIO;
- f->frequency = card->current_frequency;
- return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535, 65535);
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = card->mute;
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (card->mute)
- ctrl->value = 0;
- else
- ctrl->value = 65535;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct gemtek_pci *card = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value)
- gemtek_pci_mute(card);
- else
- gemtek_pci_unmute(card);
- return 0;
- case V4L2_CID_AUDIO_VOLUME:
- if (ctrl->value)
- gemtek_pci_unmute(card);
- else
- gemtek_pci_mute(card);
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
-}
-
-enum {
- GEMTEK_PR103
-};
-
-static char *card_names[] __devinitdata = {
- "GEMTEK_PR103"
-};
-
-static struct pci_device_id gemtek_pci_id[] =
-{
- { PCI_VENDOR_ID_GEMTEK, PCI_DEVICE_ID_GEMTEK_PR103,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, GEMTEK_PR103 },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, gemtek_pci_id);
-
-static const struct v4l2_file_operations gemtek_pci_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
-};
-
-static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
- struct gemtek_pci *card;
- struct v4l2_device *v4l2_dev;
- int res;
-
- card = kzalloc(sizeof(struct gemtek_pci), GFP_KERNEL);
- if (card == NULL) {
- dev_err(&pdev->dev, "out of memory\n");
- return -ENOMEM;
- }
-
- v4l2_dev = &card->v4l2_dev;
- mutex_init(&card->lock);
- card->pdev = pdev;
-
- strlcpy(v4l2_dev->name, "gemtek_pci", sizeof(v4l2_dev->name));
-
- res = v4l2_device_register(&pdev->dev, v4l2_dev);
- if (res < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- kfree(card);
- return res;
- }
-
- if (pci_enable_device(pdev))
- goto err_pci;
-
- card->iobase = pci_resource_start(pdev, 0);
- card->length = pci_resource_len(pdev, 0);
-
- if (request_region(card->iobase, card->length, card_names[pci_id->driver_data]) == NULL) {
- v4l2_err(v4l2_dev, "i/o port already in use\n");
- goto err_pci;
- }
-
- strlcpy(card->vdev.name, v4l2_dev->name, sizeof(card->vdev.name));
- card->vdev.v4l2_dev = v4l2_dev;
- card->vdev.fops = &gemtek_pci_fops;
- card->vdev.ioctl_ops = &gemtek_pci_ioctl_ops;
- card->vdev.release = video_device_release_empty;
- video_set_drvdata(&card->vdev, card);
-
- gemtek_pci_mute(card);
-
- if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0)
- goto err_video;
-
- v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
- pdev->revision, card->iobase, card->iobase + card->length - 1);
-
- return 0;
-
-err_video:
- release_region(card->iobase, card->length);
-
-err_pci:
- v4l2_device_unregister(v4l2_dev);
- kfree(card);
- return -ENODEV;
-}
-
-static void __devexit gemtek_pci_remove(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct gemtek_pci *card = to_gemtek_pci(v4l2_dev);
-
- video_unregister_device(&card->vdev);
- v4l2_device_unregister(v4l2_dev);
-
- release_region(card->iobase, card->length);
-
- if (mx)
- gemtek_pci_mute(card);
-
- kfree(card);
-}
-
-static struct pci_driver gemtek_pci_driver = {
- .name = "gemtek_pci",
- .id_table = gemtek_pci_id,
- .probe = gemtek_pci_probe,
- .remove = __devexit_p(gemtek_pci_remove),
-};
-
-static int __init gemtek_pci_init(void)
-{
- return pci_register_driver(&gemtek_pci_driver);
-}
-
-static void __exit gemtek_pci_exit(void)
-{
- pci_unregister_driver(&gemtek_pci_driver);
-}
-
-module_init(gemtek_pci_init);
-module_exit(gemtek_pci_exit);
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 6459a220b0dd..5c2a9058c09f 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -77,8 +77,8 @@ MODULE_PARM_DESC(debug, "activates debug info");
/* TEA5757 pin mappings */
static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
-#define FREQ_LO (50 * 16000)
-#define FREQ_HI (150 * 16000)
+#define FREQ_LO (87 * 16000)
+#define FREQ_HI (108 * 16000)
#define FREQ_IF 171200 /* 10.7*16000 */
#define FREQ_STEP 200 /* 12.5*16 */
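
The corrected limits restrict tuning to the FM broadcast band; the *16000 factor implies the V4L2_TUNER_CAP_LOW unit of 1/16000 MHz (62.5 Hz), stated here as an assumption:

/* Worked units (assuming V4L2_TUNER_CAP_LOW, i.e. 62.5 Hz = 1/16000 MHz):
 *   FREQ_LO:  87 MHz  ->  87 * 16000 = 1392000
 *   FREQ_HI: 108 MHz  -> 108 * 16000 = 1728000
 * versus the old, implausibly wide 50-150 MHz range.
 */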
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index dd6bd364efa0..7ecc8e657663 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@ static const struct v4l2_file_operations wl1273_fops = {
.read = wl1273_fm_fops_read,
.write = wl1273_fm_fops_write,
.poll = wl1273_fm_fops_poll,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.open = wl1273_fm_fops_open,
.release = wl1273_fm_fops_release,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index ac76dfe5b3fa..60c176fe328e 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -357,7 +357,8 @@ int si470x_start(struct si470x_device *radio)
goto done;
/* sysconfig 1 */
- radio->registers[SYSCONFIG1] = SYSCONFIG1_DE;
+ radio->registers[SYSCONFIG1] =
+ (de << 11) & SYSCONFIG1_DE; /* DE*/
retval = si470x_set_register(radio, SYSCONFIG1);
if (retval < 0)
goto done;
@@ -687,12 +688,8 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
/* driver constants */
strcpy(tuner->name, "FM");
tuner->type = V4L2_TUNER_RADIO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-#else
- tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
-#endif
/* range limits */
switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
@@ -718,12 +715,10 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
else
tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
/* If there is a reliable method of detecting an RDS channel,
then this code should check for that before setting this
RDS subchannel. */
tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-#endif
/* mono/stereo selector */
if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
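
The hard-coded de-emphasis bit above becomes driven by the driver's de parameter: the value is shifted into position and masked so only that bit survives. A minimal sketch of the arithmetic, assuming SYSCONFIG1_DE is the bit-11 mask (0x0800), which is an assumption about the si470x register layout:

#include <linux/types.h>

#define SKETCH_SYSCONFIG1_DE	0x0800	/* assumed bit-11 de-emphasis mask */

static u16 sketch_de_bits(unsigned int de)
{
	/* de = 1 -> (1 << 11) & 0x0800 = 0x0800 (bit set)
	 * de = 0 -> (0 << 11) & 0x0800 = 0x0000 (bit clear)
	 */
	return (de << 11) & SKETCH_SYSCONFIG1_DE;
}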
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 80b3c319f698..1ac49139158d 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -446,27 +446,27 @@ static void ene_rx_setup(struct ene_device *dev)
select_timeout:
if (dev->rx_fan_input_inuse) {
- dev->rdev->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
+ dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
/* Fan input doesn't support timeouts, it just ends the
input with a maximum sample */
dev->rdev->min_timeout = dev->rdev->max_timeout =
- MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+ US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
ENE_FW_SAMPLE_PERIOD_FAN);
} else {
- dev->rdev->rx_resolution = MS_TO_NS(sample_period);
+ dev->rdev->rx_resolution = US_TO_NS(sample_period);
/* Theoretically timeout is unlimited, but we cap it
* because it was seen that on one device, it
* would stop sending spaces after around 250 msec.
* Besides, this is close to 2^32 anyway and timeout is u32.
*/
- dev->rdev->min_timeout = MS_TO_NS(127 * sample_period);
- dev->rdev->max_timeout = MS_TO_NS(200000);
+ dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
+ dev->rdev->max_timeout = US_TO_NS(200000);
}
if (dev->hw_learning_and_tx_capable)
- dev->rdev->tx_resolution = MS_TO_NS(sample_period);
+ dev->rdev->tx_resolution = US_TO_NS(sample_period);
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
@@ -801,7 +801,7 @@ static irqreturn_t ene_isr(int irq, void *data)
dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
- ev.duration = MS_TO_NS(hw_sample);
+ ev.duration = US_TO_NS(hw_sample);
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
@@ -821,7 +821,7 @@ static void ene_setup_default_settings(struct ene_device *dev)
dev->learning_mode_enabled = learning_mode_force;
/* Set reasonable default timeout */
- dev->rdev->timeout = MS_TO_NS(150000);
+ dev->rdev->timeout = US_TO_NS(150000);
}
/* Upload all hardware settings at once. Used at load and resume time */
@@ -1004,6 +1004,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* validate resources */
error = -ENODEV;
+ /* init these to -1, as 0 is valid for both */
+ dev->hw_io = -1;
+ dev->irq = -1;
+
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto error;
@@ -1072,6 +1076,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
rdev->input_name = "ENE eHome Infrared Remote Transceiver";
}
+ dev->rdev = rdev;
+
ene_rx_setup_hw_buffer(dev);
ene_setup_default_settings(dev);
ene_setup_hw_settings(dev);
@@ -1083,7 +1089,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (error < 0)
goto error;
- dev->rdev = rdev;
ene_notice("driver has been successfully loaded");
return 0;
error:
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index c179baf34cb4..337a41d4450b 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -201,8 +201,6 @@
#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__)
-#define MS_TO_NS(msec) ((msec) * 1000)
-
struct ene_device {
struct pnp_dev *pnp_dev;
struct rc_dev *rdev;
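
The removed driver-local MS_TO_NS() multiplied by only 1000, so it really converted microseconds to nanoseconds under a misleading name; the series switches to the rc-core helpers so the units read correctly (hardware samples are in microseconds, rc-core durations in nanoseconds). A sketch of the conversions involved, with the macro bodies stated as assumptions about what the rc-core helpers expand to:

/* Assumed expansions, for illustration only: */
#define SKETCH_US_TO_NS(usec)	((usec) * 1000)		/* us -> ns */
#define SKETCH_MS_TO_NS(msec)	((msec) * 1000 * 1000)	/* ms -> ns */

/* e.g. the 150000 us default timeout above is 150 ms, i.e. 150000000 ns:
 *   SKETCH_US_TO_NS(150000) == 150000000
 */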
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6811512b4e83..e7dc6b46fdfa 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -988,7 +988,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
- bool pad_mouse;
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
@@ -1000,29 +999,20 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
case RC_TYPE_RC6:
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
- pad_mouse = false;
break;
case RC_TYPE_UNKNOWN:
case RC_TYPE_OTHER:
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
- if (pad_stabilize && !nomouse)
- pad_mouse = true;
- else {
+ if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
- pad_mouse = false;
- }
/* ir_proto_packet[0] = 0x00; // already the default */
rc_type = RC_TYPE_OTHER;
break;
default:
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
- if (pad_stabilize && !nomouse)
- pad_mouse = true;
- else {
+ if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
- pad_mouse = false;
- }
/* ir_proto_packet[0] = 0x00; // already the default */
rc_type = RC_TYPE_OTHER;
break;
@@ -1035,7 +1025,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
goto out;
ictx->rc_type = rc_type;
- ictx->pad_mouse = pad_mouse;
+ ictx->pad_mouse = false;
out:
return retval;
@@ -1517,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
} else {
- ictx->pad_mouse = 0;
+ ictx->pad_mouse = false;
dev_dbg(dev, "mouse mode disabled, passing key value\n");
}
}
@@ -1756,7 +1746,6 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
ictx->display_type = detected_display_type;
- ictx->rdev->allowed_protos = allowed_protos;
ictx->rc_type = allowed_protos;
}
@@ -1839,10 +1828,6 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
- if (ictx->rc_type == RC_TYPE_RC6)
- rdev->map_name = RC_MAP_IMON_MCE;
- else
- rdev->map_name = RC_MAP_IMON_PAD;
/* Enable front-panel buttons and/or knobs */
memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
@@ -1851,11 +1836,18 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
if (ret)
dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
- if (ictx->product == 0xffdc)
+ if (ictx->product == 0xffdc) {
imon_get_ffdc_type(ictx);
+ rdev->allowed_protos = ictx->rc_type;
+ }
imon_set_display_type(ictx);
+ if (ictx->rc_type == RC_TYPE_RC6)
+ rdev->map_name = RC_MAP_IMON_MCE;
+ else
+ rdev->map_name = RC_MAP_IMON_PAD;
+
ret = rc_register_device(rdev);
if (ret < 0) {
dev_err(ictx->dev, "remote input dev register failed\n");
@@ -2108,18 +2100,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto find_endpoint_failed;
}
- ictx->idev = imon_init_idev(ictx);
- if (!ictx->idev) {
- dev_err(dev, "%s: input device setup failed\n", __func__);
- goto idev_setup_failed;
- }
-
- ictx->rdev = imon_init_rdev(ictx);
- if (!ictx->rdev) {
- dev_err(dev, "%s: rc device setup failed\n", __func__);
- goto rdev_setup_failed;
- }
-
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -2133,13 +2113,25 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto urb_submit_failed;
}
+ ictx->idev = imon_init_idev(ictx);
+ if (!ictx->idev) {
+ dev_err(dev, "%s: input device setup failed\n", __func__);
+ goto idev_setup_failed;
+ }
+
+ ictx->rdev = imon_init_rdev(ictx);
+ if (!ictx->rdev) {
+ dev_err(dev, "%s: rc device setup failed\n", __func__);
+ goto rdev_setup_failed;
+ }
+
return ictx;
-urb_submit_failed:
- rc_unregister_device(ictx->rdev);
rdev_setup_failed:
input_unregister_device(ictx->idev);
idev_setup_failed:
+ usb_kill_urb(ictx->rx_urb_intf0);
+urb_submit_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
usb_free_urb(tx_urb);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d9dea1..1c5cc65ea1e1 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
*
* Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
*
@@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
/* Carrier reports */
if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
+ IR_dprintk(2, "carrier report (freq: %d)\n", sample);
/* Packet end */
} else if (ev.timeout) {
@@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
return 0;
sample = LIRC_TIMEOUT(ev.duration / 1000);
+ IR_dprintk(2, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
@@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
LIRC_SPACE(ev.duration / 1000);
+ IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+ TO_US(ev.duration), TO_STR(ev.pulse));
}
lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 185baddcbf14..73230ff93b8a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
/* used internally by the sysfs interface */
u64
-ir_raw_get_allowed_protocols()
+ir_raw_get_allowed_protocols(void)
{
u64 protocols;
mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index c59851b203da..7a5f5300caf9 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -19,35 +19,35 @@
static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Pixelview SBTVD remote */
- { 0x8613, KEY_MUTE },
- { 0x8612, KEY_POWER },
- { 0x8601, KEY_1 },
- { 0x8602, KEY_2 },
- { 0x8603, KEY_3 },
- { 0x8604, KEY_4 },
- { 0x8605, KEY_5 },
- { 0x8606, KEY_6 },
- { 0x8607, KEY_7 },
- { 0x8608, KEY_8 },
- { 0x8609, KEY_9 },
- { 0x8600, KEY_0 },
- { 0x860d, KEY_CHANNELUP },
- { 0x8619, KEY_CHANNELDOWN },
- { 0x8610, KEY_VOLUMEUP },
- { 0x860c, KEY_VOLUMEDOWN },
+ { 0x866b13, KEY_MUTE },
+ { 0x866b12, KEY_POWER },
+ { 0x866b01, KEY_1 },
+ { 0x866b02, KEY_2 },
+ { 0x866b03, KEY_3 },
+ { 0x866b04, KEY_4 },
+ { 0x866b05, KEY_5 },
+ { 0x866b06, KEY_6 },
+ { 0x866b07, KEY_7 },
+ { 0x866b08, KEY_8 },
+ { 0x866b09, KEY_9 },
+ { 0x866b00, KEY_0 },
+ { 0x866b0d, KEY_CHANNELUP },
+ { 0x866b19, KEY_CHANNELDOWN },
+ { 0x866b10, KEY_VOLUMEUP },
+ { 0x866b0c, KEY_VOLUMEDOWN },
- { 0x860a, KEY_CAMERA },
- { 0x860b, KEY_ZOOM },
- { 0x861b, KEY_BACKSPACE },
- { 0x8615, KEY_ENTER },
+ { 0x866b0a, KEY_CAMERA },
+ { 0x866b0b, KEY_ZOOM },
+ { 0x866b1b, KEY_BACKSPACE },
+ { 0x866b15, KEY_ENTER },
- { 0x861d, KEY_UP },
- { 0x861e, KEY_DOWN },
- { 0x860e, KEY_LEFT },
- { 0x860f, KEY_RIGHT },
+ { 0x866b1d, KEY_UP },
+ { 0x866b1e, KEY_DOWN },
+ { 0x866b0e, KEY_LEFT },
+ { 0x866b0f, KEY_RIGHT },
- { 0x8618, KEY_RECORD },
- { 0x861a, KEY_STOP },
+ { 0x866b18, KEY_RECORD },
+ { 0x866b1a, KEY_STOP },
/* Key codes for the EvolutePC TVWay+ remote */
{ 0x7a00, KEY_MENU },
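
The Pixelview entries grow from 16-bit scancodes (address 0x86 plus command) to 24-bit extended-NEC scancodes, with the second address byte 0x6b in the middle and the command byte unchanged at the bottom. A minimal sketch of how such a scancode is assembled (illustrative helper, not driver code):

#include <linux/types.h>

static u32 sketch_nec_ext_scancode(u8 addr_hi, u8 addr_lo, u8 cmd)
{
	return ((u32)addr_hi << 16) | ((u32)addr_lo << 8) | cmd;
}

/* sketch_nec_ext_scancode(0x86, 0x6b, 0x13) == 0x866b13, the KEY_MUTE entry above */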
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337875d1..2f5dc0622b94 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
*
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f0426, KEY_EPG }, /* Guide */
{ 0x800f0427, KEY_ZOOM }, /* Aspect */
+ { 0x800f0432, KEY_MODE }, /* Visualization */
+ { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */
+ { 0x800f0434, KEY_EJECTCD },
{ 0x800f043a, KEY_BRIGHTNESSUP },
{ 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0fef6efad537..6df0a4980645 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -48,7 +48,6 @@
#define USB_BUFLEN 32 /* USB reception buffer length */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
-#define MS_TO_NS(msec) ((msec) * 1000)
/* MCE constants */
#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
@@ -817,7 +816,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
- ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+ ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
break;
/* 1-byte return value commands */
@@ -856,9 +855,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
break;
case PARSE_IRDATA:
ir->rem--;
+ init_ir_raw_event(&rawir);
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
- * MS_TO_NS(MCE_TIME_UNIT);
+ * US_TO_NS(MCE_TIME_UNIT);
dev_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
@@ -884,6 +884,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
i, ir->rem + 1, false);
if (ir->rem)
ir->parser_state = PARSE_IRDATA;
+ else
+ ir_raw_event_reset(ir->rc);
break;
}
@@ -1061,7 +1063,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->timeout = MS_TO_NS(1000);
+ rc->timeout = US_TO_NS(1000);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8ef80b..273d9d674792 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
return 0;
}
- carrier = (count * 1000000) / duration;
+ carrier = MS_TO_NS(count) / duration;
if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
- rawir.duration = (sample & BUF_LEN_MASK)
- * SAMPLE_PERIOD * 1000;
+ rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+ * SAMPLE_PERIOD);
if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 72be8a02118c..512a2f4ada0e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -458,21 +458,27 @@ static int ir_getkeycode(struct input_dev *idev,
index = ir_lookup_by_scancode(rc_map, scancode);
}
- if (index >= rc_map->len) {
- if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
- IR_dprintk(1, "unknown key for scancode 0x%04x\n",
- scancode);
+ if (index < rc_map->len) {
+ entry = &rc_map->scan[index];
+
+ ke->index = index;
+ ke->keycode = entry->keycode;
+ ke->len = sizeof(entry->scancode);
+ memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
+
+ } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
+ /*
+ * We do not really know the valid range of scancodes
+ * so let's respond with KEY_RESERVED to anything we
+ * do not have mapping for [yet].
+ */
+ ke->index = index;
+ ke->keycode = KEY_RESERVED;
+ } else {
retval = -EINVAL;
goto out;
}
- entry = &rc_map->scan[index];
-
- ke->index = index;
- ke->keycode = entry->keycode;
- ke->len = sizeof(entry->scancode);
- memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
-
retval = 0;
out:
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c2abfb..e435d94c0776 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
sz->signal_start.tv_usec -
sz->signal_last.tv_usec);
rawir.duration -= sz->sum;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
}
sz_push(sz, rawir);
@@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
sz_push(sz, rawir);
}
@@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
sz_push(sz, rawir);
}
@@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb)
if (sz->timeout_enabled)
sz_push(sz, rawir);
ir_raw_event_handle(sz->rdev);
+ ir_raw_event_reset(sz->rdev);
} else {
sz_push_full_space(sz, sz->buf_in[i]);
}
@@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb)
}
}
+ ir_raw_event_handle(sz->rdev);
usb_submit_urb(urb, GFP_ATOMIC);
return;
@@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->timeout_enabled = true;
- sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+ sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
IR_MAX_DURATION) | 0x03000000);
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
- sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+ sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+ sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
#endif
do_gettimeofday(&sz->signal_start);
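Streamzap samples are SZ_RESOLUTION microseconds wide; the hunks above convert them to nanoseconds with US_TO_NS() only after the running sum (still kept in microseconds) has been updated, then clamp to the rc-core duration field. A sketch of the per-sample conversion, assuming value is the raw sample byte:

    /* Sketch: raw sample -> rounded, clamped nanosecond duration */
    u32 us = value * SZ_RESOLUTION + SZ_RESOLUTION / 2;  /* round to mid-sample */
    u32 ns = US_TO_NS(us) & IR_MAX_DURATION;             /* fit the rc-core field */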
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index eb875af05e79..aa021600e9df 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -78,7 +78,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_HELPER_CHIPS_AUTO
bool "Autoselect pertinent encoders/decoders and other helper chips"
- default y if !EMBEDDED
+ default y if !EXPERT
---help---
Most video cards may require additional modules to encode or
decode audio/video standards. This option will autoselect
@@ -141,15 +141,6 @@ config VIDEO_TDA9840
To compile this driver as a module, choose M here: the
module will be called tda9840.
-config VIDEO_TDA9875
- tristate "Philips TDA9875 audio processor"
- depends on VIDEO_V4L2 && I2C
- ---help---
- Support for tda9875 audio decoder chip found on some bt8xx boards.
-
- To compile this driver as a module, choose M here: the
- module will be called tda9875.
-
config VIDEO_TEA6415C
tristate "Philips TEA6415C audio processor"
depends on I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 81e38cb0b846..a509d317e258 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
-obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f318b51448b3..d2327dbb473f 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -303,11 +303,22 @@ static int adv7175_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0);
}
+static int adv7175_s_power(struct v4l2_subdev *sd, int on)
+{
+ if (on)
+ adv7175_write(sd, 0x01, 0x00);
+ else
+ adv7175_write(sd, 0x01, 0x78);
+
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops adv7175_core_ops = {
.g_chip_ident = adv7175_g_chip_ident,
.init = adv7175_init,
+ .s_power = adv7175_s_power,
};
static const struct v4l2_subdev_video_ops adv7175_video_ops = {
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 49efcf660ba6..7f58756d72c8 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -1373,7 +1373,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x1800,
.audio_mode_gpio= fv2000s_audio,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1511,7 +1510,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x09,
.needs_tvaudio = 1,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1550,7 +1548,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask2 = 0x07ff,
.muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.muxsel_hook = rv605_muxsel,
@@ -1686,7 +1683,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x0_848] = {
@@ -1699,7 +1695,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1714,7 +1709,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x1] = {
@@ -1727,7 +1721,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY1x1_SVID] = {
@@ -1740,7 +1733,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY2xx] = {
@@ -1753,7 +1745,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1768,7 +1759,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY2x0] = {
@@ -1781,7 +1771,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY500] = {
@@ -1794,7 +1783,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
[BTTV_BOARD_OSPREY540] = {
@@ -1805,7 +1793,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
@@ -1820,7 +1807,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1, /* must avoid, conflicts with the bt860 */
},
[BTTV_BOARD_IDS_EAGLE] = {
@@ -1835,7 +1821,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2),
.muxsel_hook = eagle_muxsel,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.pll = PLL_28,
},
[BTTV_BOARD_PINNACLESAT] = {
@@ -1846,7 +1831,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.muxsel = MUXSEL(3, 1),
.pll = PLL_28,
@@ -1897,7 +1881,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.muxsel = MUXSEL(2, 0, 1),
.pll = PLL_28,
@@ -1970,7 +1953,6 @@ struct tvcard bttv_tvcards[] = {
/* Tuner, CVid, SVid, CVid over SVid connector */
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomask = 0,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -2017,7 +1999,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0),
.muxsel_hook = xguard_muxsel,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
},
@@ -2029,7 +2010,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS,
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2134,7 +2114,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS, /* card has no svhs */
.needs_tvaudio = 0,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.gpiomask = 0x00,
.muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
@@ -2156,7 +2135,6 @@ struct tvcard bttv_tvcards[] = {
[BTTV_BOARD_TWINHAN_DST] = {
.name = "Twinhan DST + clones",
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2171,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
/* Vid In, SVid In, Vid over SVid in connector */
.muxsel = MUXSEL(3, 1, 1, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2226,7 +2203,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = NO_SVHS,
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
@@ -2278,7 +2254,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
/*878A input is always MUX0, see above.*/
.muxsel = MUXSEL(2, 2, 2, 2),
@@ -2302,7 +2277,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
},
[BTTV_BOARD_AVDVBT_771] = {
/* Wolfram Joost <wojo@frokaschwei.de> */
@@ -2313,7 +2287,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.muxsel = MUXSEL(3, 3),
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.has_dvb = 1,
@@ -2329,7 +2302,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 1,
.muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2393,7 +2365,6 @@ struct tvcard bttv_tvcards[] = {
/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
.name = "DViCO FusionHDTV DVB-T Lite",
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.no_video = 1,
@@ -2440,7 +2411,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
.pll = PLL_28,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2478,7 +2448,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = kodicom4400r_muxsel,
},
[BTTV_BOARD_KODICOM_4400R_SL] = {
@@ -2500,7 +2469,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = kodicom4400r_muxsel,
},
/* ---- card 0x86---------------------------------- */
@@ -2530,7 +2498,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x00400005, 0, 0x00000001, 0 },
.gpiomute = 0x00c00007,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.has_dvb = 1,
},
@@ -2630,7 +2597,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
/* ---- card 0x8d ---------------------------------- */
@@ -2658,7 +2624,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 100000, 100002, 100002, 100000 },
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.tuner_type = TUNER_TNF_5335MF,
@@ -2674,7 +2639,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x0f, /* old: 7 */
.muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2732,7 +2696,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x00400005, 0, 0x00000001, 0 },
.gpiomute = 0x00c00007,
.no_msp34xx = 1,
- .no_tda9875 = 1,
.no_tda7432 = 1,
},
/* ---- card 0x95---------------------------------- */
@@ -2874,7 +2837,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = gv800s_muxsel,
},
[BTTV_BOARD_GEOVISION_GV800S_SL] = {
@@ -2899,7 +2861,6 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda7432 = 1,
- .no_tda9875 = 1,
.muxsel_hook = gv800s_muxsel,
},
[BTTV_BOARD_PV183] = {
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index fd62bf15d779..c6333595c6b9 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -234,7 +234,6 @@ struct tvcard {
/* i2c audio flags */
unsigned int no_msp34xx:1;
- unsigned int no_tda9875:1;
unsigned int no_tda7432:1;
unsigned int needs_tvaudio:1;
unsigned int msp34xx_alt:1;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 789087cd6a9c..55ffd60ffa7f 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2001,6 +2001,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
.min_width = 320,
.min_height = 240,
};
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42,
+ .platform_data = &sensor_cfg,
+ };
/*
* Start putting together one of our big camera structures.
@@ -2062,9 +2067,9 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (dmi_check_system(olpc_xo1_dmi))
sensor_cfg.clock_speed = 45;
- cam->sensor_addr = 0x42;
- cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
- "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
+ &ov7670_info, NULL);
if (cam->sensor == NULL) {
ret = -ENODEV;
goto out_smbus;
@@ -2184,9 +2189,7 @@ static int cafe_pci_resume(struct pci_dev *pdev)
struct cafe_camera *cam = to_cam(v4l2_dev);
int ret = 0;
- ret = pci_restore_state(pdev);
- if (ret)
- return ret;
+ pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret) {
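cafe_ccic now describes the ov7670 sensor with a struct i2c_board_info and registers it through v4l2_i2c_new_subdev_board(), so the sensor configuration travels as ordinary I2C platform_data instead of the extra cfg argument of v4l2_i2c_new_subdev_cfg(). The pattern, reduced to its essentials (names as in the hunk above):

    struct i2c_board_info ov7670_info = {
            .type          = "ov7670",
            .addr          = 0x42,
            .platform_data = &sensor_cfg,   /* sensor config filled in earlier in probe */
    };

    cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
                                            &ov7670_info, NULL);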
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 916c13d5cf7d..6d6d1843791c 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -378,7 +378,7 @@ struct cpia2_fh {
struct camera_data {
/* locks */
- struct mutex busy_lock; /* guard against SMP multithreading */
+ struct mutex v4l2_lock; /* serialize file operations */
struct v4l2_prio_state prio;
/* camera status */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 9606bc01b803..aaffca8e13fd 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -2247,7 +2247,7 @@ struct camera_data *cpia2_init_camera_struct(void)
cam->present = 1;
- mutex_init(&cam->busy_lock);
+ mutex_init(&cam->v4l2_lock);
init_waitqueue_head(&cam->wq_stream);
return cam;
@@ -2365,9 +2365,9 @@ long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock)
{
struct framebuf *frame;
- if (!count) {
+
+ if (!count)
return 0;
- }
if (!buf) {
ERR("%s: buffer NULL\n",__func__);
@@ -2379,17 +2379,12 @@ long cpia2_read(struct camera_data *cam,
return -EINVAL;
}
- /* make this _really_ smp and multithread-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
if (!cam->present) {
LOG("%s: camera removed\n",__func__);
- mutex_unlock(&cam->busy_lock);
return 0; /* EOF */
}
- if(!cam->streaming) {
+ if (!cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
@@ -2398,42 +2393,31 @@ long cpia2_read(struct camera_data *cam,
/* Copy cam->curbuff in case it changes while we're processing */
frame = cam->curbuff;
if (noblock && frame->status != FRAME_READY) {
- mutex_unlock(&cam->busy_lock);
return -EAGAIN;
}
- if(frame->status != FRAME_READY) {
- mutex_unlock(&cam->busy_lock);
+ if (frame->status != FRAME_READY) {
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->present ||
(frame = cam->curbuff)->status == FRAME_READY);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- /* make this _really_ smp and multithread-safe */
- if (mutex_lock_interruptible(&cam->busy_lock)) {
- return -ERESTARTSYS;
- }
- if(!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return 0;
- }
}
/* copy data to user space */
- if (frame->length > count) {
- mutex_unlock(&cam->busy_lock);
+ if (frame->length > count)
return -EFAULT;
- }
- if (copy_to_user(buf, frame->data, frame->length)) {
- mutex_unlock(&cam->busy_lock);
+ if (copy_to_user(buf, frame->data, frame->length))
return -EFAULT;
- }
count = frame->length;
frame->status = FRAME_EMPTY;
- mutex_unlock(&cam->busy_lock);
return count;
}
@@ -2447,17 +2431,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
{
unsigned int status=0;
- if(!cam) {
+ if (!cam) {
ERR("%s: Internal error, camera_data not found!\n",__func__);
return POLLERR;
}
- mutex_lock(&cam->busy_lock);
-
- if(!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return POLLHUP;
- }
if(!cam->streaming) {
/* Start streaming */
@@ -2465,16 +2445,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
cam->params.camera_state.stream_mode);
}
- mutex_unlock(&cam->busy_lock);
poll_wait(filp, &cam->wq_stream, wait);
- mutex_lock(&cam->busy_lock);
if(!cam->present)
status = POLLHUP;
else if(cam->curbuff->status == FRAME_READY)
status = POLLIN | POLLRDNORM;
- mutex_unlock(&cam->busy_lock);
return status;
}
@@ -2496,29 +2473,19 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
DBG("mmap offset:%ld size:%ld\n", start_offset, size);
- /* make this _really_ smp-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if (!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return -ENODEV;
- }
if (size > cam->frame_size*cam->num_frames ||
(start_offset % cam->frame_size) != 0 ||
- (start_offset+size > cam->frame_size*cam->num_frames)) {
- mutex_unlock(&cam->busy_lock);
+ (start_offset+size > cam->frame_size*cam->num_frames))
return -EINVAL;
- }
pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
while (size > 0) {
page = kvirt_to_pa(pos);
- if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) {
- mutex_unlock(&cam->busy_lock);
+ if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
- }
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
@@ -2528,7 +2495,5 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
}
cam->mmapped = true;
- mutex_unlock(&cam->busy_lock);
return 0;
}
-
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 7edf80b0d01a..9bad39842936 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -238,59 +238,40 @@ static struct v4l2_queryctrl controls[] = {
static int cpia2_open(struct file *file)
{
struct camera_data *cam = video_drvdata(file);
- int retval = 0;
+ struct cpia2_fh *fh;
if (!cam) {
ERR("Internal error, camera_data not found!\n");
return -ENODEV;
}
- if(mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if(!cam->present) {
- retval = -ENODEV;
- goto err_return;
- }
+ if (!cam->present)
+ return -ENODEV;
- if (cam->open_count > 0) {
- goto skip_init;
- }
+ if (cam->open_count == 0) {
+ if (cpia2_allocate_buffers(cam))
+ return -ENOMEM;
- if (cpia2_allocate_buffers(cam)) {
- retval = -ENOMEM;
- goto err_return;
- }
+ /* reset the camera */
+ if (cpia2_reset_camera(cam) < 0)
+ return -EIO;
- /* reset the camera */
- if (cpia2_reset_camera(cam) < 0) {
- retval = -EIO;
- goto err_return;
+ cam->APP_len = 0;
+ cam->COM_len = 0;
}
- cam->APP_len = 0;
- cam->COM_len = 0;
-
-skip_init:
- {
- struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL);
- if(!fh) {
- retval = -ENOMEM;
- goto err_return;
- }
- file->private_data = fh;
- fh->prio = V4L2_PRIORITY_UNSET;
- v4l2_prio_open(&cam->prio, &fh->prio);
- fh->mmapped = 0;
- }
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+ file->private_data = fh;
+ fh->prio = V4L2_PRIORITY_UNSET;
+ v4l2_prio_open(&cam->prio, &fh->prio);
+ fh->mmapped = 0;
++cam->open_count;
cpia2_dbg_dump_registers(cam);
-
-err_return:
- mutex_unlock(&cam->busy_lock);
- return retval;
+ return 0;
}
/******************************************************************************
@@ -304,15 +285,11 @@ static int cpia2_close(struct file *file)
struct camera_data *cam = video_get_drvdata(dev);
struct cpia2_fh *fh = file->private_data;
- mutex_lock(&cam->busy_lock);
-
if (cam->present &&
- (cam->open_count == 1
- || fh->prio == V4L2_PRIORITY_RECORD
- )) {
+ (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
cpia2_usb_stream_stop(cam);
- if(cam->open_count == 1) {
+ if (cam->open_count == 1) {
/* save camera state for later open */
cpia2_save_camera_state(cam);
@@ -321,26 +298,21 @@ static int cpia2_close(struct file *file)
}
}
- {
- if(fh->mmapped)
- cam->mmapped = 0;
- v4l2_prio_close(&cam->prio, fh->prio);
- file->private_data = NULL;
- kfree(fh);
- }
+ if (fh->mmapped)
+ cam->mmapped = 0;
+ v4l2_prio_close(&cam->prio, fh->prio);
+ file->private_data = NULL;
+ kfree(fh);
if (--cam->open_count == 0) {
cpia2_free_buffers(cam);
if (!cam->present) {
video_unregister_device(dev);
- mutex_unlock(&cam->busy_lock);
kfree(cam);
return 0;
}
}
- mutex_unlock(&cam->busy_lock);
-
return 0;
}
@@ -405,11 +377,11 @@ static int sync(struct camera_data *cam, int frame_nr)
return 0;
}
- mutex_unlock(&cam->busy_lock);
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->streaming ||
frame->status == FRAME_READY);
- mutex_lock(&cam->busy_lock);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
if(!cam->present)
@@ -1293,11 +1265,11 @@ static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file)
if(frame < 0) {
/* Wait for a frame to become available */
struct framebuf *cb=cam->curbuff;
- mutex_unlock(&cam->busy_lock);
+ mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
!cam->present ||
(cb=cam->curbuff)->status == FRAME_READY);
- mutex_lock(&cam->busy_lock);
+ mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
if(!cam->present)
@@ -1337,14 +1309,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (!cam)
return -ENOTTY;
- /* make this _really_ smp-safe */
- if (mutex_lock_interruptible(&cam->busy_lock))
- return -ERESTARTSYS;
-
- if (!cam->present) {
- mutex_unlock(&cam->busy_lock);
+ if (!cam->present)
return -ENODEV;
- }
/* Priority check */
switch (cmd) {
@@ -1352,10 +1318,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct cpia2_fh *fh = file->private_data;
retval = v4l2_prio_check(&cam->prio, fh->prio);
- if(retval) {
- mutex_unlock(&cam->busy_lock);
+ if (retval)
return retval;
- }
break;
}
default:
@@ -1529,7 +1493,6 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- mutex_unlock(&cam->busy_lock);
return retval;
}
@@ -1596,7 +1559,7 @@ static const struct v4l2_file_operations cpia2_fops = {
.release = cpia2_close,
.read = cpia2_v4l_read,
.poll = cpia2_v4l_poll,
- .ioctl = cpia2_ioctl,
+ .unlocked_ioctl = cpia2_ioctl,
.mmap = cpia2_mmap,
};
@@ -1620,6 +1583,7 @@ int cpia2_register_camera(struct camera_data *cam)
memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
video_set_drvdata(cam->vdev, cam);
+ cam->vdev->lock = &cam->v4l2_lock;
reset_camera_struct_v4l(cam);
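The cpia2 changes above retire the hand-rolled busy_lock in favour of a single v4l2_lock handed to the V4L2 core (cam->vdev->lock together with the switch to .unlocked_ioctl), so the core now serializes the file operations for the driver. The one pattern the driver still implements by hand is dropping the lock around long sleeps, as the read and DQBUF paths above do:

    /* Sketch: blocking wait under the core-held v4l2_lock */
    mutex_unlock(&cam->v4l2_lock);
    wait_event_interruptible(cam->wq_stream,
                             !cam->present ||
                             cam->curbuff->status == FRAME_READY);
    mutex_lock(&cam->v4l2_lock);
    if (signal_pending(current))
            return -ERESTARTSYS;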
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 133ec2bac180..944af8adbe0c 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -664,7 +664,7 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
{
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
- cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+ cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
if (cx->in_work_queue == NULL) {
CX18_ERR("Unable to create incoming mailbox handler thread\n");
return -ENOMEM;
@@ -672,18 +672,6 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
return 0;
}
-static int __devinit cx18_create_out_workq(struct cx18 *cx)
-{
- snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
- cx->v4l2_dev.name);
- cx->out_work_queue = create_workqueue(cx->out_workq_name);
- if (cx->out_work_queue == NULL) {
- CX18_ERR("Unable to create outgoing mailbox handler threads\n");
- return -ENOMEM;
- }
- return 0;
-}
-
static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
{
int i;
@@ -710,15 +698,9 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
mutex_init(&cx->epu2apu_mb_lock);
mutex_init(&cx->epu2cpu_mb_lock);
- ret = cx18_create_out_workq(cx);
- if (ret)
- return ret;
-
ret = cx18_create_in_workq(cx);
- if (ret) {
- destroy_workqueue(cx->out_work_queue);
+ if (ret)
return ret;
- }
cx18_init_in_work_orders(cx);
@@ -1107,7 +1089,6 @@ free_mem:
release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueues:
destroy_workqueue(cx->in_work_queue);
- destroy_workqueue(cx->out_work_queue);
err:
if (retval == 0)
retval = -ENODEV;
@@ -1259,7 +1240,6 @@ static void cx18_remove(struct pci_dev *pci_dev)
cx18_halt_firmware(cx);
destroy_workqueue(cx->in_work_queue);
- destroy_workqueue(cx->out_work_queue);
cx18_streams_cleanup(cx, 1);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index f6f3e50d4bdf..306caac6d3fc 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -617,9 +617,6 @@ struct cx18 {
struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
- struct workqueue_struct *out_work_queue;
- char out_workq_name[12]; /* "cx18-NN-out" */
-
/* i2c */
struct i2c_adapter i2c_adap[2];
struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 51765eb12d39..713b0e61536d 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -42,8 +42,7 @@ static inline bool cx18_stream_enabled(struct cx18_stream *s)
/* Related to submission of mdls to firmware */
static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
{
- struct cx18 *cx = s->cx;
- queue_work(cx->out_work_queue, &s->out_work_order);
+ schedule_work(&s->out_work_order);
}
static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
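cx18 keeps a dedicated queue for incoming mailbox work but creates it with alloc_ordered_workqueue(), which preserves the one-at-a-time ordering the old create_singlethread_workqueue() gave it, while the outgoing queue is dropped entirely in favour of the shared system workqueue. The two idioms, taken from the hunks above:

    /* driver-owned, strictly ordered queue for the incoming mailbox */
    cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);

    /* outgoing mailbox work now rides the shared system workqueue */
    schedule_work(&s->out_work_order);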
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
index 2c00980acfcb..7e40035028d2 100644
--- a/drivers/media/video/cx18/cx23418.h
+++ b/drivers/media/video/cx18/cx23418.h
@@ -177,7 +177,7 @@
IN[0] - Task handle.
IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
3 = 2D H/V separable, 4 = 2D symmetric non-separable
- IN[2] - chroma type: 0 - diable, 1 = 1D horizontal
+ IN[2] - chroma type: 0 - disable, 1 = 1D horizontal
ReturnCode - One of the ERR_CAPTURE_... */
#define CX18_CPU_SET_SPATIAL_FILTER_TYPE (CPU_CMD_MASK_CAPTURE | 0x000C)
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index fe59a1c3f064..363aa6004221 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -28,7 +28,6 @@
#include <media/videobuf-vmalloc.h>
#include "xc5000.h"
-#include "dvb_dummy_fe.h"
#include "s5h1432.h"
#include "tda18271.h"
#include "s5h1411.h"
@@ -619,7 +618,7 @@ static int dvb_init(struct cx231xx *dev)
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -665,7 +664,7 @@ static int dvb_init(struct cx231xx *dev)
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1411 front end\n");
result = -EINVAL;
goto out_free;
}
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f16461844c5c..6fc09dd41b9d 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1682,20 +1682,6 @@ static int cx25840_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
-{
- struct cx25840_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (platform_data) {
- struct cx25840_platform_data *pdata = platform_data;
-
- state->pvr150_workaround = pdata->pvr150_workaround;
- set_input(client, state->vid_input, state->aud_input);
- }
- return 0;
-}
-
static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
bool *handled)
{
@@ -1787,7 +1773,6 @@ static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.log_status = cx25840_log_status,
- .s_config = cx25840_s_config,
.g_chip_ident = cx25840_g_chip_ident,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
@@ -1974,7 +1959,6 @@ static int cx25840_probe(struct i2c_client *client,
state->vid_input = CX25840_COMPOSITE7;
state->aud_input = CX25840_AUDIO8;
state->audclk_freq = 48000;
- state->pvr150_workaround = 0;
state->audmode = V4L2_TUNER_MODE_LANG1;
state->vbi_line_offset = 8;
state->id = id;
@@ -2034,6 +2018,12 @@ static int cx25840_probe(struct i2c_client *client,
v4l2_ctrl_cluster(2, &state->volume);
v4l2_ctrl_handler_setup(&state->hdl);
+ if (client->dev.platform_data) {
+ struct cx25840_platform_data *pdata = client->dev.platform_data;
+
+ state->pvr150_workaround = pdata->pvr150_workaround;
+ }
+
cx25840_ir_probe(sd);
return 0;
}
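With the .s_config core op removed, cx25840 picks up the PVR-150 audio workaround directly from client->dev.platform_data at probe time, so a bridge driver is expected to pass struct cx25840_platform_data through its I2C board info rather than calling s_config afterwards. A hedged sketch of that bridge-side plumbing (the address and board-info wiring here are illustrative, not taken from this patch):

    static struct cx25840_platform_data cx25840_pdata = {
            .pvr150_workaround = 1,
    };

    static struct i2c_board_info cx25840_info = {
            .type          = "cx25840",
            .addr          = 0x44,              /* hypothetical 7-bit address */
            .platform_data = &cx25840_pdata,
    };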
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 627926f6bde8..7eb79af28aa3 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -261,7 +261,7 @@ static u16 ns_to_pulse_width_count(u32 ns, u16 divider)
u32 rem;
/*
- * The 2 lsb's of the pulse width timer count are not accessable, hence
+ * The 2 lsb's of the pulse width timer count are not accessible, hence
* the (1 << 2)
*/
n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 1f532e31cd49..9f3bfc1eb240 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -41,6 +41,183 @@ spinlock_t vpif_lock;
void __iomem *vpif_base;
+/**
+ * ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params ch_params[] = {
+ /* HDTV formats */
+ {
+ .name = "480p59_94",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 138-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 43,
+ .l5 = 523,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_480P59_94,
+ },
+ {
+ .name = "576p50",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 144-8,
+ .sav2eav = 720,
+ .l1 = 1,
+ .l3 = 45,
+ .l5 = 621,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_576P50,
+ },
+ {
+ .name = "720p50",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 700-8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_720P50,
+ },
+ {
+ .name = "720p60",
+ .width = 1280,
+ .height = 720,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 370 - 8,
+ .sav2eav = 1280,
+ .l1 = 1,
+ .l3 = 26,
+ .l5 = 746,
+ .vsize = 750,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_720P60,
+ },
+ {
+ .name = "1080I50",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 720 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080I50,
+ },
+ {
+ .name = "1080I60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 0,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 21,
+ .l5 = 561,
+ .l7 = 563,
+ .l9 = 584,
+ .l11 = 1124,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080I60,
+ },
+ {
+ .name = "1080p60",
+ .width = 1920,
+ .height = 1080,
+ .frm_fmt = 1,
+ .ycmux_mode = 0,
+ .eav2sav = 280 - 8,
+ .sav2eav = 1920,
+ .l1 = 1,
+ .l3 = 42,
+ .l5 = 1122,
+ .vsize = 1125,
+ .capture_format = 0,
+ .vbi_supported = 0,
+ .hd_sd = 1,
+ .dv_preset = V4L2_DV_1080P60,
+ },
+
+ /* SDTV formats */
+ {
+ .name = "NTSC_M",
+ .width = 720,
+ .height = 480,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 268,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 263,
+ .l7 = 266,
+ .l9 = 286,
+ .l11 = 525,
+ .vsize = 525,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_525_60,
+ },
+ {
+ .name = "PAL_BDGHIK",
+ .width = 720,
+ .height = 576,
+ .frm_fmt = 0,
+ .ycmux_mode = 1,
+ .eav2sav = 280,
+ .sav2eav = 1440,
+ .l1 = 1,
+ .l3 = 23,
+ .l5 = 311,
+ .l7 = 313,
+ .l9 = 336,
+ .l11 = 624,
+ .vsize = 625,
+ .capture_format = 0,
+ .vbi_supported = 1,
+ .hd_sd = 0,
+ .stdid = V4L2_STD_625_50,
+ },
+};
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
+
static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
{
if (val)
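The ch_params table is now defined once here, exported together with vpif_ch_params_count, and shared by the capture and display drivers; each entry carries hd_sd so a lookup matches SD entries by stdid bitmask and HD entries by exact dv_preset. The loop both drivers open-code, condensed into a hypothetical helper:

    static const struct vpif_channel_config_params *
    vpif_find_params(v4l2_std_id std, u32 dv_preset)
    {
            int i;

            for (i = 0; i < vpif_ch_params_count; i++) {
                    const struct vpif_channel_config_params *cfg = &ch_params[i];

                    if (cfg->hd_sd ? (cfg->dv_preset == dv_preset)
                                   : (cfg->stdid & std))
                            return cfg;     /* first matching mode wins */
            }
            return NULL;                    /* unknown standard/preset */
    }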
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 188841b476e0..10550bd93b06 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -33,7 +33,7 @@ extern spinlock_t vpif_lock;
#define regr(reg) readl((reg) + vpif_base)
#define regw(value, reg) writel(value, (reg + vpif_base))
-/* Register Addresss Offsets */
+/* Register Address Offsets */
#define VPIF_PID (0x0000)
#define VPIF_CH0_CTRL (0x0004)
#define VPIF_CH1_CTRL (0x0008)
@@ -577,12 +577,10 @@ struct vpif_channel_config_params {
char name[VPIF_MAX_NAME]; /* Name of the mode */
u16 width; /* Indicates width of the image */
u16 height; /* Indicates height of the image */
- u8 fps;
- u8 frm_fmt; /* Indicates whether this is interlaced
- * or progressive format */
- u8 ycmux_mode; /* Indicates whether this mode requires
- * single or two channels */
- u16 eav2sav; /* length of sav 2 eav */
+ u8 frm_fmt; /* Interlaced (0) or progressive (1) */
+ u8 ycmux_mode; /* This mode requires one (0) or two (1)
+ channels */
+ u16 eav2sav; /* length of eav 2 sav */
u16 sav2eav; /* length of sav 2 eav */
u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */
u16 vsize; /* Vertical size of the image */
@@ -590,10 +588,14 @@ struct vpif_channel_config_params {
* is in BT or in CCD/CMOS */
u8 vbi_supported; /* Indicates whether this mode
* supports capturing vbi or not */
- u8 hd_sd;
- v4l2_std_id stdid;
+ u8 hd_sd; /* HDTV (1) or SDTV (0) format */
+ v4l2_std_id stdid; /* SDTV format */
+ u32 dv_preset; /* HDTV format */
};
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params ch_params[];
+
struct vpif_video_params;
struct vpif_params;
struct vpif_vbi_params;
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 193abab6b355..d93ad74a34c5 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include "vpif_capture.h"
#include "vpif.h"
@@ -81,20 +82,6 @@ static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
/**
- * ch_params: video standard configuration parameters for vpif
- */
-static const struct vpif_channel_config_params ch_params[] = {
- {
- "NTSC_M", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
- 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
- },
- {
- "PAL_BDGHIK", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
- 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
- },
-};
-
-/**
* vpif_uservirt_to_phys : translate user/virtual address to phy address
* @virtp: user/virtual address
*
@@ -342,7 +329,7 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
* @dev_id: dev_id ptr
*
* It changes status of the captured buffer, takes next buffer from the queue
- * and sets its address in VPIF registers
+ * and sets its address in VPIF registers
*/
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
@@ -435,24 +422,31 @@ static int vpif_update_std_info(struct channel_obj *ch)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpifparams = &ch->vpifparams;
const struct vpif_channel_config_params *config;
- struct vpif_channel_config_params *std_info;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
struct video_obj *vid_ch = &ch->video;
int index;
vpif_dbg(2, debug, "vpif_update_std_info\n");
- std_info = &vpifparams->std_info;
-
- for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
+ for (index = 0; index < vpif_ch_params_count; index++) {
config = &ch_params[index];
- if (config->stdid & vid_ch->stdid) {
- memcpy(std_info, config, sizeof(*config));
- break;
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (config->dv_preset == vid_ch->dv_preset) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
}
}
/* standard not found */
- if (index == ARRAY_SIZE(ch_params))
+ if (index == vpif_ch_params_count)
return -EINVAL;
common->fmt.fmt.pix.width = std_info->width;
@@ -462,6 +456,7 @@ static int vpif_update_std_info(struct channel_obj *ch)
common->fmt.fmt.pix.bytesperline = std_info->width;
vpifparams->video_params.hpitch = std_info->width;
vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
return 0;
}
@@ -757,7 +752,7 @@ static int vpif_open(struct file *filep)
struct video_obj *vid_ch;
struct channel_obj *ch;
struct vpif_fh *fh;
- int i, ret = 0;
+ int i;
vpif_dbg(2, debug, "vpif_open\n");
@@ -766,9 +761,6 @@ static int vpif_open(struct file *filep)
vid_ch = &ch->video;
common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (NULL == ch->curr_subdev_info) {
/**
* search through the sub device to see a registered
@@ -785,8 +777,7 @@ static int vpif_open(struct file *filep)
}
if (i == config->subdev_count) {
vpif_err("No sub device registered\n");
- ret = -ENOENT;
- goto exit;
+ return -ENOENT;
}
}
@@ -794,8 +785,7 @@ static int vpif_open(struct file *filep)
fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (NULL == fh) {
vpif_err("unable to allocate memory for file handle object\n");
- ret = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
/* store pointer to fh in private_data member of filep */
@@ -815,9 +805,7 @@ static int vpif_open(struct file *filep)
/* Initialize priority of this instance to default priority */
fh->prio = V4L2_PRIORITY_UNSET;
v4l2_prio_open(&ch->prio, &fh->prio);
-exit:
- mutex_unlock(&common->lock);
- return ret;
+ return 0;
}
/**
@@ -837,9 +825,6 @@ static int vpif_release(struct file *filep)
common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* if this instance is doing IO */
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
@@ -863,9 +848,6 @@ static int vpif_release(struct file *filep)
/* Decrement channel usrs counter */
ch->usrs--;
- /* unlock mutex on channel object */
- mutex_unlock(&common->lock);
-
/* Close the priority */
v4l2_prio_close(&ch->prio, fh->prio);
@@ -890,7 +872,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct channel_obj *ch = fh->channel;
struct common_obj *common;
u8 index = 0;
- int ret = 0;
vpif_dbg(2, debug, "vpif_reqbufs\n");
@@ -913,13 +894,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
common = &ch->common[index];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
- if (0 != common->io_usrs) {
- ret = -EBUSY;
- goto reqbuf_exit;
- }
+ if (0 != common->io_usrs)
+ return -EBUSY;
/* Initialize videobuf queue as per the buffer type */
videobuf_queue_dma_contig_init(&common->buffer_queue,
@@ -928,7 +904,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
reqbuf->type,
common->fmt.fmt.pix.field,
sizeof(struct videobuf_buffer), fh,
- NULL);
+ &common->lock);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -939,11 +915,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
INIT_LIST_HEAD(&common->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
- mutex_unlock(&common->lock);
- return ret;
+ return videobuf_reqbufs(&common->buffer_queue, reqbuf);
}
/**
@@ -1157,11 +1129,6 @@ static int vpif_streamon(struct file *file, void *priv,
return ret;
}
- if (mutex_lock_interruptible(&common->lock)) {
- ret = -ERESTARTSYS;
- goto streamoff_exit;
- }
-
/* If buffer queue is empty, return error */
if (list_empty(&common->dma_queue)) {
vpif_dbg(1, debug, "buffer queue is empty\n");
@@ -1240,13 +1207,10 @@ static int vpif_streamon(struct file *file, void *priv,
enable_channel1(1);
}
channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
- mutex_unlock(&common->lock);
return ret;
exit:
- mutex_unlock(&common->lock);
-streamoff_exit:
- ret = videobuf_streamoff(&common->buffer_queue);
+ videobuf_streamoff(&common->buffer_queue);
return ret;
}
@@ -1284,9 +1248,6 @@ static int vpif_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* disable channel */
if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
enable_channel0(0);
@@ -1304,8 +1265,6 @@ static int vpif_streamoff(struct file *file, void *priv,
if (ret && (ret != -ENOIOCTLCMD))
vpif_dbg(1, debug, "stream off failed in subdev\n");
- mutex_unlock(&common->lock);
-
return videobuf_streamoff(&common->buffer_queue);
}
@@ -1381,21 +1340,16 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
struct vpif_fh *fh = priv;
struct channel_obj *ch = fh->channel;
- struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
int ret = 0;
vpif_dbg(2, debug, "vpif_querystd\n");
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* Call querystd function of decoder device */
ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
querystd, std_id);
if (ret < 0)
vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
- mutex_unlock(&common->lock);
return ret;
}
@@ -1451,16 +1405,14 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
fh->initialized = 1;
/* Call encoder subdevice function to set the standard */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
ch->video.stdid = *std_id;
+ ch->video.dv_preset = V4L2_DV_INVALID;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
/* Get the information about the standard */
if (vpif_update_std_info(ch)) {
- ret = -EINVAL;
vpif_err("Error getting the standard info\n");
- goto s_std_exit;
+ return -EINVAL;
}
/* Configure the default format information */
@@ -1471,9 +1423,6 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
s_std, *std_id);
if (ret < 0)
vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
-
-s_std_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1567,9 +1516,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* first setup input path from sub device to vpif */
if (config->setup_input_path) {
ret = config->setup_input_path(ch->channel_id,
@@ -1578,7 +1524,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
vpif_dbg(1, debug, "couldn't setup input path for the"
" sub device %s, for input index %d\n",
subdev_info->name, index);
- goto exit;
+ return ret;
}
}
@@ -1589,7 +1535,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
input, output, 0);
if (ret < 0) {
vpif_dbg(1, debug, "Failed to set input\n");
- goto exit;
+ return ret;
}
}
vid_ch->input_idx = index;
@@ -1600,9 +1546,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
/* update tvnorms from the sub device input info */
ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
-
-exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1671,11 +1614,7 @@ static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
/* Fill in the information about format */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
*fmt = common->fmt;
- mutex_unlock(&common->lock);
return 0;
}
@@ -1694,7 +1633,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_pix_format *pixfmt;
int ret = 0;
- vpif_dbg(2, debug, "VIDIOC_S_FMT\n");
+ vpif_dbg(2, debug, "%s\n", __func__);
/* If streaming is started, return error */
if (common->started) {
@@ -1723,12 +1662,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
/* store the format in the channel object */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
common->fmt = *fmt;
- mutex_unlock(&common->lock);
-
return 0;
}
@@ -1807,6 +1741,306 @@ static int vpif_cropcap(struct file *file, void *priv,
return 0;
}
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_query_dv_preset() - QUERY_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_query_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, query_dv_preset, preset);
+}
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ int ret = 0;
+
+ if (common->started) {
+ vpif_dbg(1, debug, "streaming in progress\n");
+ return -EBUSY;
+ }
+
+ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+ (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+ if (!fh->initialized) {
+ vpif_dbg(1, debug, "Channel Busy\n");
+ return -EBUSY;
+ }
+ }
+
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
+ if (ret)
+ return ret;
+
+ fh->initialized = 1;
+
+ /* Call encoder subdevice function to set the standard */
+ if (mutex_lock_interruptible(&common->lock))
+ return -ERESTARTSYS;
+
+ ch->video.dv_preset = preset->preset;
+ ch->video.stdid = V4L2_STD_UNKNOWN;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+ /* Get the information about the standard */
+ if (vpif_update_std_info(ch)) {
+ vpif_dbg(1, debug, "Error getting the standard info\n");
+ ret = -EINVAL;
+ } else {
+ /* Configure the default format information */
+ vpif_config_format(ch);
+
+ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, s_dv_preset, preset);
+ }
+
+ mutex_unlock(&common->lock);
+
+ return ret;
+}
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ preset->preset = ch->video.dv_preset;
+
+ return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+ int ret;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+ video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD) {
+ vpif_dbg(2, debug, "Custom DV timings not supported by "
+ "subdevice\n");
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ *bt = timings->bt;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+ bt->hsync - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->vsize = bt->height * 2 +
+ bt->vfrontporch + bt->vsync + bt->vbackporch +
+ bt->il_vfrontporch + bt->il_vsync +
+ bt->il_vbackporch;
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->vsize = bt->height + bt->vfrontporch +
+ bt->vsync + bt->vbackporch;
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ std_info->dv_preset = V4L2_DV_INVALID;
+
+ vid_ch->stdid = 0;
+ vid_ch->dv_preset = V4L2_DV_INVALID;
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+ timings->bt = *bt;
+
+ return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if read operations fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+ vpif_dbg(2, debug, "match_type is invalid.\n");
+ return -EINVAL;
+ }
+
+ return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+ g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if read operations fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg){
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+ g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if write operations fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg){
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+ s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
/* vpif capture ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_querycap = vpif_querycap,
@@ -1829,6 +2063,18 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_streamon = vpif_streamon,
.vidioc_streamoff = vpif_streamoff,
.vidioc_cropcap = vpif_cropcap,
+ .vidioc_enum_dv_presets = vpif_enum_dv_presets,
+ .vidioc_s_dv_preset = vpif_s_dv_preset,
+ .vidioc_g_dv_preset = vpif_g_dv_preset,
+ .vidioc_query_dv_preset = vpif_query_dv_preset,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+ .vidioc_g_chip_ident = vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpif_dbg_g_register,
+ .vidioc_s_register = vpif_dbg_s_register,
+#endif
+ .vidioc_log_status = vpif_log_status,
};
/* vpif file operations */
@@ -1836,7 +2082,7 @@ static struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = vpif_open,
.release = vpif_release,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.mmap = vpif_mmap,
.poll = vpif_poll
};
@@ -1979,6 +2225,7 @@ static __init int vpif_probe(struct platform_device *pdev)
common = &(ch->common[VPIF_VIDEO_INDEX]);
spin_lock_init(&common->irqlock);
mutex_init(&common->lock);
+ ch->video_dev->lock = &common->lock;
/* Initialize prio member of channel object */
v4l2_prio_init(&ch->prio);
err = video_register_device(ch->video_dev,
@@ -2026,9 +2273,9 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF Capture driver"
- " initialized\n");
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "DM646x VPIF capture driver initialized\n");
return 0;
probe_subdev_out:
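With the new handlers wired into vpif_ioctl_ops, an application selects one of the HD entries added to ch_params through the DV preset ioctls rather than S_STD. Minimal user-space usage sketch (error handling trimmed, device path hypothetical):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int fd = open("/dev/video0", O_RDWR);            /* hypothetical node */
    struct v4l2_dv_preset preset = { .preset = V4L2_DV_720P60 };

    if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
            perror("VIDIOC_S_DV_PRESET");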
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 4e12ec8cac6f..7a4196dfdce1 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -59,6 +59,8 @@ struct video_obj {
enum v4l2_field buf_field;
/* Currently selected or default standard */
v4l2_std_id stdid;
+ u32 dv_preset;
+ struct v4l2_bt_timings bt_timings;
/* This is to track the last input that is passed to application */
u32 input_idx;
};
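The dv_preset and bt_timings fields added here feed vpif_s_dv_timings() above, where progressive modes use vsize = height + vfrontporch + vsync + vbackporch, l3 = vsync + vbackporch + 1, l5 = vsize - (vfrontporch - 1) and eav2sav = hbackporch + hfrontporch + hsync - 8. As a cross-check against the new 1080p60 table entry, assuming the usual CEA-861 1080p60 blanking (4/5/36 lines of vertical front porch/sync/back porch, 88/44/148 pixels of horizontal front porch/sync/back porch):

    vsize   = 1080 + 4 + 5 + 36   = 1125
    l3      = 5 + 36 + 1          = 42
    l5      = 1125 - (4 - 1)      = 1122
    eav2sav = 88 + 44 + 148 - 8   = 272    (written as 280 - 8 in the table)

which matches the values hard-coded in ch_params.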
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 412c65d54fe1..cdf659abdc2a 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -38,6 +38,7 @@
#include <media/adv7343.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include <mach/dm646x.h>
@@ -84,17 +85,6 @@ static struct vpif_config_params config_params = {
static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
-static const struct vpif_channel_config_params ch_params[] = {
- {
- "NTSC", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
- 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
- },
- {
- "PAL", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
- 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
- },
-};
-
/*
* vpif_uservirt_to_phys: This function is used to convert user
* space virtual address to physical address.
@@ -373,30 +363,54 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int vpif_get_std_info(struct channel_obj *ch)
+static int vpif_update_std_info(struct channel_obj *ch)
{
- struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct video_obj *vid_ch = &ch->video;
struct vpif_params *vpifparams = &ch->vpifparams;
struct vpif_channel_config_params *std_info = &vpifparams->std_info;
const struct vpif_channel_config_params *config;
- int index;
-
- std_info->stdid = vid_ch->stdid;
- if (!std_info->stdid)
- return -1;
+ int i;
- for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
- config = &ch_params[index];
- if (config->stdid & std_info->stdid) {
- memcpy(std_info, config, sizeof(*config));
- break;
+ for (i = 0; i < vpif_ch_params_count; i++) {
+ config = &ch_params[i];
+ if (config->hd_sd == 0) {
+ vpif_dbg(2, debug, "SD format\n");
+ if (config->stdid & vid_ch->stdid) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
+ } else {
+ vpif_dbg(2, debug, "HD format\n");
+ if (config->dv_preset == vid_ch->dv_preset) {
+ memcpy(std_info, config, sizeof(*config));
+ break;
+ }
}
}
- if (index == ARRAY_SIZE(ch_params))
- return -1;
+ if (i == vpif_ch_params_count) {
+ vpif_dbg(1, debug, "Format not found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+ if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
+ return -EINVAL;
+
+ if (vid_ch->stdid || vid_ch->dv_preset) {
+ if (vpif_update_std_info(ch))
+ return -EINVAL;
+ }
common->fmt.fmt.pix.width = std_info->width;
common->fmt.fmt.pix.height = std_info->height;
@@ -404,8 +418,8 @@ static int vpif_get_std_info(struct channel_obj *ch)
common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
/* Set height and width paramateres */
- ch->common[VPIF_VIDEO_INDEX].height = std_info->height;
- ch->common[VPIF_VIDEO_INDEX].width = std_info->width;
+ common->height = std_info->height;
+ common->width = std_info->width;
return 0;
}
@@ -516,10 +530,8 @@ static int vpif_check_format(struct channel_obj *ch,
else
sizeimage = config_params.channel_bufsize[ch->channel_id];
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
hpitch = pixfmt->bytesperline;
vpitch = sizeimage / (hpitch * 2);
@@ -568,7 +580,10 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct vpif_fh *fh = filep->private_data;
- struct common_obj *common = &fh->channel->common[VPIF_VIDEO_INDEX];
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+ vpif_dbg(2, debug, "vpif_mmap\n");
return videobuf_mmap_mapper(&common->buffer_queue, vma);
}
@@ -637,9 +652,6 @@ static int vpif_release(struct file *filep)
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* if this instance is doing IO */
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
@@ -662,8 +674,6 @@ static int vpif_release(struct file *filep)
config_params.numbuffers[ch->channel_id];
}
- mutex_unlock(&common->lock);
-
/* Decrement channel usrs counter */
atomic_dec(&ch->usrs);
/* If this file handle has initialize encoder device, reset it */
@@ -680,7 +690,12 @@ static int vpif_release(struct file *filep)
}
/* functions implementing ioctls */
-
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
static int vpif_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -722,17 +737,9 @@ static int vpif_g_fmt_vid_out(struct file *file, void *priv,
if (common->fmt.type != fmt->type)
return -EINVAL;
- /* Fill in the information about format */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
-
*fmt = common->fmt;
- mutex_unlock(&common->lock);
return 0;
}
@@ -773,12 +780,7 @@ static int vpif_s_fmt_vid_out(struct file *file, void *priv,
/* store the pix format in the channel object */
common->fmt.fmt.pix = *pixfmt;
/* store the format in the channel object */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
common->fmt = *fmt;
- mutex_unlock(&common->lock);
-
return 0;
}
@@ -808,7 +810,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct common_obj *common;
enum v4l2_field field;
u8 index = 0;
- int ret = 0;
/* This file handle has not initialized the channel,
It is not allowed to do settings */
@@ -826,18 +827,12 @@ static int vpif_reqbufs(struct file *file, void *priv,
index = VPIF_VIDEO_INDEX;
common = &ch->common[index];
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
- if (common->fmt.type != reqbuf->type) {
- ret = -EINVAL;
- goto reqbuf_exit;
- }
+ if (common->fmt.type != reqbuf->type)
+ return -EINVAL;
- if (0 != common->io_usrs) {
- ret = -EBUSY;
- goto reqbuf_exit;
- }
+ if (0 != common->io_usrs)
+ return -EBUSY;
if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
@@ -854,7 +849,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
&common->irqlock,
reqbuf->type, field,
sizeof(struct videobuf_buffer), fh,
- NULL);
+ &common->lock);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -865,11 +860,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
INIT_LIST_HEAD(&common->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
- mutex_unlock(&common->lock);
- return ret;
+ return videobuf_reqbufs(&common->buffer_queue, reqbuf);
}
static int vpif_querybuf(struct file *file, void *priv,
@@ -990,22 +981,19 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
}
/* Call encoder subdevice function to set the standard */
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
ch->video.stdid = *std_id;
+ ch->video.dv_preset = V4L2_DV_INVALID;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
/* Get the information about the standard */
- if (vpif_get_std_info(ch)) {
- vpif_err("Error getting the standard info\n");
+ if (vpif_update_resolution(ch))
return -EINVAL;
- }
if ((ch->vpifparams.std_info.width *
ch->vpifparams.std_info.height * 2) >
config_params.channel_bufsize[ch->channel_id]) {
vpif_err("invalid std for this size\n");
- ret = -EINVAL;
- goto s_std_exit;
+ return -EINVAL;
}
common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
@@ -1016,16 +1004,13 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
s_std_output, *std_id);
if (ret < 0) {
vpif_err("Failed to set output standard\n");
- goto s_std_exit;
+ return ret;
}
ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
s_std, *std_id);
if (ret < 0)
vpif_err("Failed to set standard for sub devices\n");
-
-s_std_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1090,21 +1075,17 @@ static int vpif_streamon(struct file *file, void *priv,
if (ret < 0)
return ret;
- /* Call videobuf_streamon to start streaming in videobuf */
+ /* Call videobuf_streamon to start streaming in videobuf */
ret = videobuf_streamon(&common->buffer_queue);
if (ret < 0) {
vpif_err("videobuf_streamon\n");
return ret;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
/* If buffer queue is empty, return error */
if (list_empty(&common->dma_queue)) {
vpif_err("buffer queue is empty\n");
- ret = -EIO;
- goto streamon_exit;
+ return -EIO;
}
/* Get the next frame from the buffer queue */
@@ -1130,8 +1111,7 @@ static int vpif_streamon(struct file *file, void *priv,
|| (!ch->vpifparams.std_info.frm_fmt
&& (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
vpif_err("conflict in field format and std format\n");
- ret = -EINVAL;
- goto streamon_exit;
+ return -EINVAL;
}
/* clock settings */
@@ -1140,13 +1120,13 @@ static int vpif_streamon(struct file *file, void *priv,
ch->vpifparams.std_info.hd_sd);
if (ret < 0) {
vpif_err("can't set clock\n");
- goto streamon_exit;
+ return ret;
}
/* set the parameters and addresses */
ret = vpif_set_video_params(vpif, ch->channel_id + 2);
if (ret < 0)
- goto streamon_exit;
+ return ret;
common->started = ret;
vpif_config_addr(ch, ret);
@@ -1171,9 +1151,6 @@ static int vpif_streamon(struct file *file, void *priv,
}
channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
}
-
-streamon_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1199,9 +1176,6 @@ static int vpif_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/* disable channel */
if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
@@ -1216,8 +1190,6 @@ static int vpif_streamoff(struct file *file, void *priv,
}
common->started = 0;
- mutex_unlock(&common->lock);
-
return videobuf_streamoff(&common->buffer_queue);
}
@@ -1264,13 +1236,9 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
int ret = 0;
- if (mutex_lock_interruptible(&common->lock))
- return -ERESTARTSYS;
-
if (common->started) {
vpif_err("Streaming in progress\n");
- ret = -EBUSY;
- goto s_output_exit;
+ return -EBUSY;
}
ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
@@ -1280,9 +1248,6 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
vpif_err("Failed to set output standard\n");
vid_ch->output_id = i;
-
-s_output_exit:
- mutex_unlock(&common->lock);
return ret;
}
@@ -1315,6 +1280,287 @@ static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
return v4l2_prio_change(&ch->prio, &fh->prio, p);
}
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: preset enumeration to be filled by the subdevice
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: digital video preset to be set
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct video_obj *vid_ch = &ch->video;
+ int ret = 0;
+
+ if (common->started) {
+ vpif_dbg(1, debug, "streaming in progress\n");
+ return -EBUSY;
+ }
+
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
+ if (ret != 0)
+ return ret;
+
+ fh->initialized = 1;
+
+ /* Call encoder subdevice function to set the preset */
+ if (mutex_lock_interruptible(&common->lock))
+ return -ERESTARTSYS;
+
+ ch->video.dv_preset = preset->preset;
+ ch->video.stdid = V4L2_STD_UNKNOWN;
+ memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+ /* Get the information about the preset */
+ if (vpif_update_resolution(ch)) {
+ ret = -EINVAL;
+ } else {
+ /* Configure the default format information */
+ vpif_config_format(ch);
+
+ ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, s_dv_preset, preset);
+ }
+
+ mutex_unlock(&common->lock);
+
+ return ret;
+}
+
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: returns the current digital video preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+
+ preset->preset = ch->video.dv_preset;
+
+ return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct vpif_params *vpifparams = &ch->vpifparams;
+ struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+ int ret;
+
+ if (timings->type != V4L2_DV_BT_656_1120) {
+ vpif_dbg(2, debug, "Timing type not defined\n");
+ return -EINVAL;
+ }
+
+ /* Configure subdevice timings, if any */
+ ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+ video, s_dv_timings, timings);
+ if (ret == -ENOIOCTLCMD) {
+ vpif_dbg(2, debug, "Custom DV timings not supported by "
+ "subdevice\n");
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ vpif_dbg(2, debug, "Error setting custom DV timings\n");
+ return ret;
+ }
+
+ if (!(timings->bt.width && timings->bt.height &&
+ (timings->bt.hbackporch ||
+ timings->bt.hfrontporch ||
+ timings->bt.hsync) &&
+ timings->bt.vfrontporch &&
+ (timings->bt.vbackporch ||
+ timings->bt.vsync))) {
+ vpif_dbg(2, debug, "Timings for width, height, "
+ "horizontal back porch, horizontal sync, "
+ "horizontal front porch, vertical back porch, "
+ "vertical sync and vertical back porch "
+ "must be defined\n");
+ return -EINVAL;
+ }
+
+ *bt = timings->bt;
+
+ /* Configure video port timings */
+
+ std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+ bt->hsync - 8;
+ std_info->sav2eav = bt->width;
+
+ std_info->l1 = 1;
+ std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+ if (bt->interlaced) {
+ if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+ std_info->vsize = bt->height * 2 +
+ bt->vfrontporch + bt->vsync + bt->vbackporch +
+ bt->il_vfrontporch + bt->il_vsync +
+ bt->il_vbackporch;
+ std_info->l5 = std_info->vsize/2 -
+ (bt->vfrontporch - 1);
+ std_info->l7 = std_info->vsize/2 + 1;
+ std_info->l9 = std_info->l7 + bt->il_vsync +
+ bt->il_vbackporch + 1;
+ std_info->l11 = std_info->vsize -
+ (bt->il_vfrontporch - 1);
+ } else {
+ vpif_dbg(2, debug, "Required timing values for "
+ "interlaced BT format missing\n");
+ return -EINVAL;
+ }
+ } else {
+ std_info->vsize = bt->height + bt->vfrontporch +
+ bt->vsync + bt->vbackporch;
+ std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+ }
+ strncpy(std_info->name, "Custom timings BT656/1120",
+ VPIF_MAX_NAME);
+ std_info->width = bt->width;
+ std_info->height = bt->height;
+ std_info->frm_fmt = bt->interlaced ? 0 : 1;
+ std_info->ycmux_mode = 0;
+ std_info->capture_format = 0;
+ std_info->vbi_supported = 0;
+ std_info->hd_sd = 1;
+ std_info->stdid = 0;
+ std_info->dv_preset = V4L2_DV_INVALID;
+
+ vid_ch->stdid = 0;
+ vid_ch->dv_preset = V4L2_DV_INVALID;
+
+ return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+ struct v4l2_dv_timings *timings)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+ struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+ timings->bt = *bt;
+
+ return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+ vpif_dbg(2, debug, "match_type is invalid.\n");
+ return -EINVAL;
+ }
+
+ return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+ g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+ g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct vpif_fh *fh = priv;
+ struct channel_obj *ch = fh->channel;
+ struct video_obj *vid_ch = &ch->video;
+
+ return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+ s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @filep: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+ /* status for sub devices */
+ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+ return 0;
+}
+
/* vpif display ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_querycap = vpif_querycap,
@@ -1336,13 +1582,24 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
.vidioc_s_output = vpif_s_output,
.vidioc_g_output = vpif_g_output,
.vidioc_cropcap = vpif_cropcap,
+ .vidioc_enum_dv_presets = vpif_enum_dv_presets,
+ .vidioc_s_dv_preset = vpif_s_dv_preset,
+ .vidioc_g_dv_preset = vpif_g_dv_preset,
+ .vidioc_s_dv_timings = vpif_s_dv_timings,
+ .vidioc_g_dv_timings = vpif_g_dv_timings,
+ .vidioc_g_chip_ident = vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpif_dbg_g_register,
+ .vidioc_s_register = vpif_dbg_s_register,
+#endif
+ .vidioc_log_status = vpif_log_status,
};
static const struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = vpif_open,
.release = vpif_release,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.mmap = vpif_mmap,
.poll = vpif_poll
};
@@ -1526,6 +1783,7 @@ static __init int vpif_probe(struct platform_device *pdev)
v4l2_prio_init(&ch->prio);
ch->common[VPIF_VIDEO_INDEX].fmt.type =
V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ch->video_dev->lock = &common->lock;
/* register video device */
vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1565,6 +1823,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.sd[i]->grp_id = 1 << i;
}
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "DM646x VPIF display driver initialized\n");
return 0;
probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index a2a7cd166bbf..b53aaa883075 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -67,6 +67,8 @@ struct video_obj {
* most recent displayed frame only */
v4l2_std_id stdid; /* Currently selected or default
* standard */
+ u32 dv_preset;
+ struct v4l2_bt_timings bt_timings;
u32 output_id; /* Current output id */
};
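For reference, a minimal user-space sketch of how the DV preset support added above can be exercised on the VPIF display node. This is not part of the patch; the device node path and the chosen preset (V4L2_DV_720P60) are assumptions that depend on the board and on what the connected encoder supports.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_preset preset;
	int fd;

	/* /dev/video2 is an assumption; use the VPIF display node on your board */
	fd = open("/dev/video2", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;	/* assumed; must be supported by the encoder */

	/* Lands in vpif_s_dv_preset() through the new .vidioc_s_dv_preset hook */
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		perror("VIDIOC_S_DV_PRESET");

	close(fd);
	return 0;
}

When none of the fixed presets match, VIDIOC_S_DV_TIMINGS can be used the same way with a filled struct v4l2_dv_timings, which the new vpif_s_dv_timings() handler translates into the channel's std_info fields.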
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 7918680917d0..3e5cf27ec2b2 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -85,7 +85,7 @@ enum vpss_platform_type {
/*
* vpss operations. Depends on platform. Not all functions are available
* on all platforms. The api, first check if a functio is available before
- * invoking it. In the probe, the function ptrs are intialized based on
+ * invoking it. In the probe, the function ptrs are initialized based on
* vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc.
*/
struct vpss_hw_ops {
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 099d5df8c572..87f77a34eeab 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -33,6 +33,7 @@
#include <media/saa7115.h>
#include <media/tvp5150.h>
#include <media/tvaudio.h>
+#include <media/mt9v011.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
@@ -1917,11 +1918,6 @@ static unsigned short tvp5150_addrs[] = {
I2C_CLIENT_END
};
-static unsigned short mt9v011_addrs[] = {
- 0xba >> 1,
- I2C_CLIENT_END
-};
-
static unsigned short msp3400_addrs[] = {
0x80 >> 1,
0x88 >> 1,
@@ -2437,6 +2433,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+ break;
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
@@ -2623,11 +2620,17 @@ void em28xx_card_setup(struct em28xx *dev)
"tvp5150", 0, tvp5150_addrs);
if (dev->em28xx_sensor == EM28XX_MT9V011) {
+ struct mt9v011_platform_data pdata;
+ struct i2c_board_info mt9v011_info = {
+ .type = "mt9v011",
+ .addr = 0xba >> 1,
+ .platform_data = &pdata,
+ };
struct v4l2_subdev *sd;
- sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
- v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
+ pdata.xtal = dev->sensor_xtal;
+ sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+ &mt9v011_info, NULL);
}
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index cc77d144df3c..bf66189cb26d 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -59,31 +59,7 @@
/*****************************************************************************/
static const struct usb_device_id et61x251_id_table[] = {
- { USB_DEVICE(0x102c, 0x6151), },
{ USB_DEVICE(0x102c, 0x6251), },
- { USB_DEVICE(0x102c, 0x6253), },
- { USB_DEVICE(0x102c, 0x6254), },
- { USB_DEVICE(0x102c, 0x6255), },
- { USB_DEVICE(0x102c, 0x6256), },
- { USB_DEVICE(0x102c, 0x6257), },
- { USB_DEVICE(0x102c, 0x6258), },
- { USB_DEVICE(0x102c, 0x6259), },
- { USB_DEVICE(0x102c, 0x625a), },
- { USB_DEVICE(0x102c, 0x625b), },
- { USB_DEVICE(0x102c, 0x625c), },
- { USB_DEVICE(0x102c, 0x625d), },
- { USB_DEVICE(0x102c, 0x625e), },
- { USB_DEVICE(0x102c, 0x625f), },
- { USB_DEVICE(0x102c, 0x6260), },
- { USB_DEVICE(0x102c, 0x6261), },
- { USB_DEVICE(0x102c, 0x6262), },
- { USB_DEVICE(0x102c, 0x6263), },
- { USB_DEVICE(0x102c, 0x6264), },
- { USB_DEVICE(0x102c, 0x6265), },
- { USB_DEVICE(0x102c, 0x6266), },
- { USB_DEVICE(0x102c, 0x6267), },
- { USB_DEVICE(0x102c, 0x6268), },
- { USB_DEVICE(0x102c, 0x6269), },
{ }
};
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 629043933501..a09c4709d613 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -276,7 +276,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04a5, 0x3035)},
{}
};
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 1eacb6c7926d..8b398493f96b 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1040,14 +1040,14 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0572, 0x0041)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index c1ae05f4661f..4bf2cab98d64 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2088,7 +2088,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0553, 0x0002)},
{USB_DEVICE(0x0813, 0x0001)},
{}
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index a594b36d6199..4b2c483fce6f 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index d78226455d1f..987b4b69d7ab 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -229,7 +229,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04cb, 0x0104)},
{USB_DEVICE(0x04cb, 0x0109)},
{USB_DEVICE(0x04cb, 0x010b)},
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index b05bec7321b5..99083038cec3 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -488,7 +488,7 @@ static void sd_callback(struct gspca_dev *gspca_dev)
/*=================== USB driver structure initialisation ==================*/
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05e3, 0x0503)},
{USB_DEVICE(0x05e3, 0xf191)},
{}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 442970073e8a..f21f2a258ae0 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 11, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 12, 0)
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -508,8 +508,8 @@ static int gspca_is_compressed(__u32 format)
return 0;
}
-static int frame_alloc(struct gspca_dev *gspca_dev,
- unsigned int count)
+static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory, unsigned int count)
{
struct gspca_frame *frame;
unsigned int frsz;
@@ -519,7 +519,6 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
frsz = gspca_dev->cam.cam_mode[i].sizeimage;
PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
frsz = PAGE_ALIGN(frsz);
- gspca_dev->frsz = frsz;
if (count >= GSPCA_MAX_FRAMES)
count = GSPCA_MAX_FRAMES - 1;
gspca_dev->frbuf = vmalloc_32(frsz * count);
@@ -527,6 +526,9 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
err("frame alloc failed");
return -ENOMEM;
}
+ gspca_dev->capt_file = file;
+ gspca_dev->memory = memory;
+ gspca_dev->frsz = frsz;
gspca_dev->nframes = count;
for (i = 0; i < count; i++) {
frame = &gspca_dev->frame[i];
@@ -535,7 +537,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
frame->v4l2_buf.flags = 0;
frame->v4l2_buf.field = V4L2_FIELD_NONE;
frame->v4l2_buf.length = frsz;
- frame->v4l2_buf.memory = gspca_dev->memory;
+ frame->v4l2_buf.memory = memory;
frame->v4l2_buf.sequence = 0;
frame->data = gspca_dev->frbuf + i * frsz;
frame->v4l2_buf.m.offset = i * frsz;
@@ -558,6 +560,9 @@ static void frame_free(struct gspca_dev *gspca_dev)
gspca_dev->frame[i].data = NULL;
}
gspca_dev->nframes = 0;
+ gspca_dev->frsz = 0;
+ gspca_dev->capt_file = NULL;
+ gspca_dev->memory = GSPCA_MEMORY_NO;
}
static void destroy_urbs(struct gspca_dev *gspca_dev)
@@ -1210,29 +1215,15 @@ static void gspca_release(struct video_device *vfd)
static int dev_open(struct file *file)
{
struct gspca_dev *gspca_dev;
- int ret;
PDEBUG(D_STREAM, "[%s] open", current->comm);
gspca_dev = (struct gspca_dev *) video_devdata(file);
- if (mutex_lock_interruptible(&gspca_dev->queue_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
-
- if (gspca_dev->users > 4) { /* (arbitrary value) */
- ret = -EBUSY;
- goto out;
- }
+ if (!gspca_dev->present)
+ return -ENODEV;
/* protect the subdriver against rmmod */
- if (!try_module_get(gspca_dev->module)) {
- ret = -ENODEV;
- goto out;
- }
-
- gspca_dev->users++;
+ if (!try_module_get(gspca_dev->module))
+ return -ENODEV;
file->private_data = gspca_dev;
#ifdef GSPCA_DEBUG
@@ -1244,14 +1235,7 @@ static int dev_open(struct file *file)
gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
| V4L2_DEBUG_IOCTL_ARG);
#endif
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->queue_lock);
- if (ret != 0)
- PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
- else
- PDEBUG(D_STREAM, "open done");
- return ret;
+ return 0;
}
static int dev_close(struct file *file)
@@ -1261,7 +1245,6 @@ static int dev_close(struct file *file)
PDEBUG(D_STREAM, "[%s] close", current->comm);
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
- gspca_dev->users--;
/* if the file did the capture, free the streaming resources */
if (gspca_dev->capt_file == file) {
@@ -1272,8 +1255,6 @@ static int dev_close(struct file *file)
mutex_unlock(&gspca_dev->usb_lock);
}
frame_free(gspca_dev);
- gspca_dev->capt_file = NULL;
- gspca_dev->memory = GSPCA_MEMORY_NO;
}
file->private_data = NULL;
module_put(gspca_dev->module);
@@ -1516,6 +1497,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -ERESTARTSYS;
if (gspca_dev->memory != GSPCA_MEMORY_NO
+ && gspca_dev->memory != GSPCA_MEMORY_READ
&& gspca_dev->memory != rb->memory) {
ret = -EBUSY;
goto out;
@@ -1544,19 +1526,18 @@ static int vidioc_reqbufs(struct file *file, void *priv,
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
+ /* Don't restart the stream when switching from read to mmap mode */
+ if (gspca_dev->memory == GSPCA_MEMORY_READ)
+ streaming = 0;
/* free the previous allocated buffers, if any */
- if (gspca_dev->nframes != 0) {
+ if (gspca_dev->nframes != 0)
frame_free(gspca_dev);
- gspca_dev->capt_file = NULL;
- }
if (rb->count == 0) /* unrequest */
goto out;
- gspca_dev->memory = rb->memory;
- ret = frame_alloc(gspca_dev, rb->count);
+ ret = frame_alloc(gspca_dev, file, rb->memory, rb->count);
if (ret == 0) {
rb->count = gspca_dev->nframes;
- gspca_dev->capt_file = file;
if (streaming)
ret = gspca_init_transfer(gspca_dev);
}
@@ -1630,11 +1611,15 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (!gspca_dev->streaming)
- return 0;
+
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->streaming) {
+ ret = 0;
+ goto out;
+ }
+
/* check the capture file */
if (gspca_dev->capt_file != file) {
ret = -EBUSY;
@@ -1649,6 +1634,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
+ /* In case another thread is waiting in dqbuf */
+ wake_up_interruptible(&gspca_dev->wq);
/* empty the transfer queues */
atomic_set(&gspca_dev->fr_q, 0);
@@ -1827,33 +1814,77 @@ out:
return ret;
}
+static int frame_ready_nolock(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory)
+{
+ if (!gspca_dev->present)
+ return -ENODEV;
+ if (gspca_dev->capt_file != file || gspca_dev->memory != memory ||
+ !gspca_dev->streaming)
+ return -EINVAL;
+
+ /* check if a frame is ready */
+ return gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i);
+}
+
+static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
+ enum v4l2_memory memory)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
+ ret = frame_ready_nolock(gspca_dev, file, memory);
+ mutex_unlock(&gspca_dev->queue_lock);
+ return ret;
+}
+
/*
- * wait for a video frame
+ * dequeue a video buffer
*
- * If a frame is ready, its index is returned.
+ * If O_NONBLOCK is not set, block until a buffer is available.
*/
-static int frame_wait(struct gspca_dev *gspca_dev,
- int nonblock_ing)
+static int vidioc_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *v4l2_buf)
{
- int i, ret;
+ struct gspca_dev *gspca_dev = priv;
+ struct gspca_frame *frame;
+ int i, j, ret;
- /* check if a frame is ready */
- i = gspca_dev->fr_o;
- if (i == atomic_read(&gspca_dev->fr_i)) {
- if (nonblock_ing)
+ PDEBUG(D_FRAM, "dqbuf");
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
+
+ for (;;) {
+ ret = frame_ready_nolock(gspca_dev, file, v4l2_buf->memory);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ break;
+
+ mutex_unlock(&gspca_dev->queue_lock);
+
+ if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
/* wait till a frame is ready */
ret = wait_event_interruptible_timeout(gspca_dev->wq,
- i != atomic_read(&gspca_dev->fr_i) ||
- !gspca_dev->streaming || !gspca_dev->present,
+ frame_ready(gspca_dev, file, v4l2_buf->memory),
msecs_to_jiffies(3000));
if (ret < 0)
return ret;
- if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present)
+ if (ret == 0)
return -EIO;
+
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+ return -ERESTARTSYS;
}
+ i = gspca_dev->fr_o;
+ j = gspca_dev->fr_queue[i];
+ frame = &gspca_dev->frame[j];
+
gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
if (gspca_dev->sd_desc->dq_callback) {
@@ -1863,46 +1894,12 @@ static int frame_wait(struct gspca_dev *gspca_dev,
gspca_dev->sd_desc->dq_callback(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
- return gspca_dev->fr_queue[i];
-}
-
-/*
- * dequeue a video buffer
- *
- * If nonblock_ing is false, block until a buffer is available.
- */
-static int vidioc_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *v4l2_buf)
-{
- struct gspca_dev *gspca_dev = priv;
- struct gspca_frame *frame;
- int i, ret;
-
- PDEBUG(D_FRAM, "dqbuf");
- if (v4l2_buf->memory != gspca_dev->memory)
- return -EINVAL;
-
- if (!gspca_dev->present)
- return -ENODEV;
-
- /* if not streaming, be sure the application will not loop forever */
- if (!(file->f_flags & O_NONBLOCK)
- && !gspca_dev->streaming && gspca_dev->users == 1)
- return -EINVAL;
- /* only the capturing file may dequeue */
- if (gspca_dev->capt_file != file)
- return -EINVAL;
-
- /* only one dequeue / read at a time */
- if (mutex_lock_interruptible(&gspca_dev->read_lock))
- return -ERESTARTSYS;
+ frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
+ memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
+ PDEBUG(D_FRAM, "dqbuf %d", j);
+ ret = 0;
- ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK);
- if (ret < 0)
- goto out;
- i = ret; /* frame index */
- frame = &gspca_dev->frame[i];
if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
frame->data,
@@ -1910,15 +1907,10 @@ static int vidioc_dqbuf(struct file *file, void *priv,
PDEBUG(D_ERR|D_STREAM,
"dqbuf cp to user failed");
ret = -EFAULT;
- goto out;
}
}
- frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
- memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
- PDEBUG(D_FRAM, "dqbuf %d", i);
- ret = 0;
out:
- mutex_unlock(&gspca_dev->read_lock);
+ mutex_unlock(&gspca_dev->queue_lock);
return ret;
}
@@ -2033,9 +2025,7 @@ static unsigned int dev_poll(struct file *file, poll_table *wait)
poll_wait(file, &gspca_dev->wq, wait);
/* if reqbufs is not done, the user would use read() */
- if (gspca_dev->nframes == 0) {
- if (gspca_dev->memory != GSPCA_MEMORY_NO)
- return POLLERR; /* not the 1st time */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) {
ret = read_alloc(gspca_dev, file);
if (ret != 0)
return POLLERR;
@@ -2067,18 +2057,10 @@ static ssize_t dev_read(struct file *file, char __user *data,
PDEBUG(D_FRAM, "read (%zd)", count);
if (!gspca_dev->present)
return -ENODEV;
- switch (gspca_dev->memory) {
- case GSPCA_MEMORY_NO: /* first time */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
ret = read_alloc(gspca_dev, file);
if (ret != 0)
return ret;
- break;
- case GSPCA_MEMORY_READ:
- if (gspca_dev->capt_file == file)
- break;
- /* fall thru */
- default:
- return -EINVAL;
}
/* get a frame */
@@ -2266,7 +2248,6 @@ int gspca_dev_probe2(struct usb_interface *intf,
goto out;
mutex_init(&gspca_dev->usb_lock);
- mutex_init(&gspca_dev->read_lock);
mutex_init(&gspca_dev->queue_lock);
init_waitqueue_head(&gspca_dev->wq);
@@ -2341,12 +2322,11 @@ void gspca_disconnect(struct usb_interface *intf)
PDEBUG(D_PROBE, "%s disconnect",
video_device_node_name(&gspca_dev->vdev));
mutex_lock(&gspca_dev->usb_lock);
+
gspca_dev->present = 0;
+ wake_up_interruptible(&gspca_dev->wq);
- if (gspca_dev->streaming) {
- destroy_urbs(gspca_dev);
- wake_up_interruptible(&gspca_dev->wq);
- }
+ destroy_urbs(gspca_dev);
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
gspca_input_destroy_urb(gspca_dev);
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 97b77a26a2eb..41755226d389 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -205,14 +205,12 @@ struct gspca_dev {
wait_queue_head_t wq; /* wait queue */
struct mutex usb_lock; /* usb exchange protection */
- struct mutex read_lock; /* read protection */
struct mutex queue_lock; /* ISOC queue protection */
int usb_err; /* USB error - protected by usb_lock */
u16 pkt_size; /* ISOC packet size */
#ifdef CONFIG_PM
char frozen; /* suspend - resume */
#endif
- char users; /* number of opens */
char present; /* device connected */
char nbufread; /* number of buffers for read() */
char memory; /* memory type (V4L2_MEMORY_xxx) */
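A side note on the reworked gspca dequeue path above: a blocking VIDIOC_DQBUF now waits up to roughly three seconds for a frame (the wait_event_interruptible_timeout() with msecs_to_jiffies(3000)), while a file opened with O_NONBLOCK gets -EAGAIN immediately. A minimal user-space sketch of the non-blocking case follows; it is not part of the patch, the node path is an assumption, and the usual REQBUFS/QBUF/STREAMON setup is elided.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_buffer buf;
	int fd = open("/dev/video0", O_RDWR | O_NONBLOCK);	/* assumed gspca node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ... VIDIOC_REQBUFS, VIDIOC_QBUF and VIDIOC_STREAMON go here ... */

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;

	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0 && errno == EAGAIN)
		printf("no frame ready yet; retry or poll()\n");

	close(fd);
	return 0;
}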
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index a35e87bb0388..06b777f5379e 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -314,7 +314,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0979, 0x0280)},
{}
};
diff --git a/drivers/media/video/gspca/jpeg.h b/drivers/media/video/gspca/jpeg.h
index de63c36806c0..ab54910418b4 100644
--- a/drivers/media/video/gspca/jpeg.h
+++ b/drivers/media/video/gspca/jpeg.h
@@ -141,9 +141,9 @@ static void jpeg_define(u8 *jpeg_hdr,
memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
#ifndef CONEX_CAM
jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
- jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height & 0xff;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
- jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width & 0xff;
+ jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY;
#endif
}
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index d2ce65dcbfdc..5964691c0e95 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -607,7 +607,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
{}
};
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index c872b93a3351..a7722b1aef9b 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -28,7 +28,7 @@ int force_sensor;
static int dump_bridge;
int dump_sensor;
-static const __devinitdata struct usb_device_id m5602_table[] = {
+static const struct usb_device_id m5602_table[] = {
{USB_DEVICE(0x0402, 0x5602)},
{}
};
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index a81536e78698..cb4d0bf0d784 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -490,7 +490,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x050f)},
{}
};
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 7607a288b51c..3884c9d300c5 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1229,7 +1229,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x08ca, 0x0110)}, /* Trust Spyc@m 100 */
{USB_DEVICE(0x08ca, 0x0111)}, /* Aiptek Pencam VGA+ */
{USB_DEVICE(0x093a, 0x010f)}, /* All other known MR97310A VGA cams */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index e1c3b9328ace..8ab2c452c25e 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -488,7 +488,6 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
#define R511_SNAP_PXDIV 0x1c
#define R511_SNAP_LNDIV 0x1d
#define R511_SNAP_UV_EN 0x1e
-#define R511_SNAP_UV_EN 0x1e
#define R511_SNAP_OPTS 0x1f
#define R511_DRAM_FLOW_CTL 0x20
@@ -1847,8 +1846,7 @@ static const struct ov_i2c_regvals norm_7670[] = {
{ 0x6c, 0x0a },
{ 0x6d, 0x55 },
{ 0x6e, 0x11 },
- { 0x6f, 0x9f },
- /* "9e for advance AWB" */
+ { 0x6f, 0x9f }, /* "9e for advance AWB" */
{ 0x6a, 0x40 },
{ OV7670_R01_BLUE, 0x40 },
{ OV7670_R02_RED, 0x60 },
@@ -3054,7 +3052,7 @@ static void ov519_configure(struct sd *sd)
{
static const struct ov_regvals init_519[] = {
{ 0x5a, 0x6d }, /* EnableSystem */
- { 0x53, 0x9b },
+ { 0x53, 0x9b }, /* don't enable the microcontroller */
{ OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
{ 0x5d, 0x03 },
{ 0x49, 0x01 },
@@ -4747,7 +4745,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0edf93973b1c..04da22802736 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -479,15 +479,20 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
struct usb_device *udev = gspca_dev->dev;
int ret;
- PDEBUG(D_USBO, "reg=0x%04x, val=0%02x", reg, val);
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val);
gspca_dev->usb_buf[0] = val;
ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
err("write failed %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -495,14 +500,18 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
struct usb_device *udev = gspca_dev->dev;
int ret;
+ if (gspca_dev->usb_err < 0)
+ return 0;
ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
0x01,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
- PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
- if (ret < 0)
+ PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
+ if (ret < 0) {
err("read failed %d", ret);
+ gspca_dev->usb_err = ret;
+ }
return gspca_dev->usb_buf[0];
}
@@ -558,13 +567,15 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
- PDEBUG(D_USBO, "reg: 0x%02x, val: 0x%02x", reg, val);
+ PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val);
ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
- if (!sccb_check_status(gspca_dev))
+ if (!sccb_check_status(gspca_dev)) {
err("sccb_reg_write failed");
+ gspca_dev->usb_err = -EIO;
+ }
}
static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -885,7 +896,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ov534_set_led(gspca_dev, 0);
set_frame_rate(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
@@ -920,7 +931,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1289,7 +1300,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x1415, 0x2000)},
{}
};
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index c5244b4b4777..aaf5428c57f5 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1429,7 +1429,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06f8, 0x3003)},
{}
};
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 96f9986305b4..81739a2f205e 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -530,7 +530,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4028)},
{USB_DEVICE(0x093a, 0x2460)},
{USB_DEVICE(0x093a, 0x2461)},
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2700975abce5..5615d7bd8304 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1184,7 +1184,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06f8, 0x3009)},
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
@@ -1201,7 +1201,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 6820f5d58b19..f8801b50e64f 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -837,7 +837,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2600)},
{USB_DEVICE(0x093a, 0x2601)},
{USB_DEVICE(0x093a, 0x2603)},
@@ -849,7 +849,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 40a06680502d..4271f86dfe01 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -703,7 +703,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
/* The Genius Smart is untested. I can't find an owner ! */
/* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index cb08d00d0a31..fcf29897b713 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2470,7 +2470,7 @@ static const struct sd_desc sd_desc = {
| (SENSOR_ ## sensor << 8) \
| (i2c_addr)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 73504a3f87b7..c6cd68d66b53 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -23,8 +23,15 @@
/* Some documentation on known sonixb registers:
Reg Use
+sn9c101 / sn9c102:
0x10 high nibble red gain low nibble blue gain
0x11 low nibble green gain
+sn9c103:
+0x05 red gain 0-127
+0x06 blue gain 0-127
+0x07 green gain 0-127
+all:
+0x08-0x0f i2c / 3wire registers
0x12 hstart
0x13 vstart
0x15 hsize (hsize = register-value * 16)
@@ -88,12 +95,9 @@ struct sd {
typedef const __u8 sensor_init_t[8];
struct sensor_data {
- const __u8 *bridge_init[2];
- int bridge_init_size[2];
+ const __u8 *bridge_init;
sensor_init_t *sensor_init;
int sensor_init_size;
- sensor_init_t *sensor_bridge_init[2];
- int sensor_bridge_init_size[2];
int flags;
unsigned ctrl_dis;
__u8 sensor_addr;
@@ -114,7 +118,6 @@ struct sensor_data {
#define NO_FREQ (1 << FREQ_IDX)
#define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
-#define COMP2 0x8f
#define COMP 0xc7 /* 0x87 //0x07 */
#define COMP1 0xc9 /* 0x89 //0x09 */
@@ -123,15 +126,11 @@ struct sensor_data {
#define SYS_CLK 0x04
-#define SENS(bridge_1, bridge_3, sensor, sensor_1, \
- sensor_3, _flags, _ctrl_dis, _sensor_addr) \
+#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
{ \
- .bridge_init = { bridge_1, bridge_3 }, \
- .bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
+ .bridge_init = bridge, \
.sensor_init = sensor, \
.sensor_init_size = sizeof(sensor), \
- .sensor_bridge_init = { sensor_1, sensor_3,}, \
- .sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
.flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
}
@@ -311,7 +310,6 @@ static const __u8 initHv7131d[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
0x28, 0x1e, 0x60, 0x8e, 0x42,
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
};
static const __u8 hv7131d_sensor_init[][8] = {
{0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
@@ -326,7 +324,6 @@ static const __u8 initHv7131r[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
0x28, 0x1e, 0x60, 0x8a, 0x20,
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
};
static const __u8 hv7131r_sensor_init[][8] = {
{0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
@@ -339,7 +336,7 @@ static const __u8 initOv6650[] = {
0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
- 0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
+ 0x10,
};
static const __u8 ov6650_sensor_init[][8] = {
/* Bright, contrast, etc are set through SCBB interface.
@@ -378,24 +375,13 @@ static const __u8 initOv7630[] = {
0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */
0x28, 0x1e, /* H & V sizes r15 .. r16 */
- 0x68, COMP2, MCK_INIT1, /* r17 .. r19 */
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */
-};
-static const __u8 initOv7630_3[] = {
- 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */
- 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
- 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */
- 0x28, 0x1e, /* H & V sizes r15 .. r16 */
0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */
- 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00, /* r1a .. r20 */
- 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, /* r21 .. r28 */
- 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff /* r29 .. r30 */
};
static const __u8 ov7630_sensor_init[][8] = {
{0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
/* {0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10}, jfm */
- {0xd0, 0x21, 0x12, 0x1c, 0x00, 0x80, 0x34, 0x10}, /* jfm */
+ {0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10}, /* jfm */
{0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
{0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
{0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
@@ -413,16 +399,11 @@ static const __u8 ov7630_sensor_init[][8] = {
{0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
};
-static const __u8 ov7630_sensor_init_3[][8] = {
- {0xa0, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
-};
-
static const __u8 initPas106[] = {
0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
0x16, 0x12, 0x24, COMP1, MCK_INIT1,
- 0x18, 0x10, 0x02, 0x02, 0x09, 0x07
};
/* compression 0x86 mckinit1 0x2b */
@@ -496,7 +477,6 @@ static const __u8 initPas202[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
0x28, 0x1e, 0x20, 0x89, 0x20,
- 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
};
/* "Known" PAS202BCB registers:
@@ -537,7 +517,6 @@ static const __u8 initTas5110c[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x45, 0x09, 0x0a,
0x16, 0x12, 0x60, 0x86, 0x2b,
- 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
};
/* Same as above, except a different hstart */
static const __u8 initTas5110d[] = {
@@ -545,12 +524,19 @@ static const __u8 initTas5110d[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x41, 0x09, 0x0a,
0x16, 0x12, 0x60, 0x86, 0x2b,
- 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
};
-static const __u8 tas5110_sensor_init[][8] = {
+/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
+static const __u8 tas5110c_sensor_init[][8] = {
{0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
{0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
- {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17},
+};
+/* Known TAS5110D registers
+ * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
+ * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
+ * Note: writing reg03 only seems to work when it is written together with reg02
+ */
+static const __u8 tas5110d_sensor_init[][8] = {
+ {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
};
static const __u8 initTas5130[] = {
@@ -558,7 +544,6 @@ static const __u8 initTas5130[] = {
0x00, 0x00,
0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a,
0x28, 0x1e, 0x60, COMP, MCK_INIT,
- 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
};
static const __u8 tas5130_sensor_init[][8] = {
/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
@@ -569,21 +554,18 @@ static const __u8 tas5130_sensor_init[][8] = {
};
static struct sensor_data sensor_data[] = {
-SENS(initHv7131d, NULL, hv7131d_sensor_init, NULL, NULL, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initHv7131r, NULL, hv7131r_sensor_init, NULL, NULL, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
-SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60),
-SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3,
- F_GAIN, 0, 0x21),
-SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ,
- 0),
-SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN,
- NO_FREQ, 0),
-SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL,
- F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL,
- F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
- 0),
+SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
+SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
+SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
+SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
+SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
+SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+ NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+ NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5130, tas5130_sensor_init, F_GAIN,
+ NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
};
/* get one byte in gspca_dev->usb_buf */
@@ -655,7 +637,6 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- __u8 value;
switch (sd->sensor) {
case SENSOR_OV6650:
@@ -697,17 +678,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
goto err;
break;
}
- case SENSOR_TAS5130CXX: {
- __u8 i2c[] =
- {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
-
- value = 0xff - sd->brightness;
- i2c[4] = value;
- PDEBUG(D_CONF, "brightness %d : %d", value, i2c[4]);
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
- break;
- }
}
return;
err:
@@ -733,7 +703,7 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
break;
}
case SENSOR_TAS5110C:
- case SENSOR_TAS5110D: {
+ case SENSOR_TAS5130CXX: {
__u8 i2c[] =
{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
@@ -742,6 +712,23 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
goto err;
break;
}
+ case SENSOR_TAS5110D: {
+ __u8 i2c[] = {
+ 0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
+ gain = 255 - gain;
+ /* The bits in the register are the wrong way around!! */
+ i2c[3] |= (gain & 0x80) >> 7;
+ i2c[3] |= (gain & 0x40) >> 5;
+ i2c[3] |= (gain & 0x20) >> 3;
+ i2c[3] |= (gain & 0x10) >> 1;
+ i2c[3] |= (gain & 0x08) << 1;
+ i2c[3] |= (gain & 0x04) << 3;
+ i2c[3] |= (gain & 0x02) << 5;
+ i2c[3] |= (gain & 0x01) << 7;
+ if (i2c_w(gspca_dev, i2c) < 0)
+ goto err;
+ break;
+ }
case SENSOR_OV6650:
gain >>= 1;
@@ -796,7 +783,7 @@ static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 gain;
- __u8 buf[2] = { 0, 0 };
+ __u8 buf[3] = { 0, 0, 0 };
if (sensor_data[sd->sensor].flags & F_GAIN) {
/* Use the sensor gain to do the actual gain */
@@ -804,13 +791,18 @@ static void setgain(struct gspca_dev *gspca_dev)
return;
}
- gain = sd->gain >> 4;
-
- /* red and blue gain */
- buf[0] = gain << 4 | gain;
- /* green gain */
- buf[1] = gain;
- reg_w(gspca_dev, 0x10, buf, 2);
+ if (sd->bridge == BRIDGE_103) {
+ gain = sd->gain >> 1;
+ buf[0] = gain; /* Red */
+ buf[1] = gain; /* Green */
+ buf[2] = gain; /* Blue */
+ reg_w(gspca_dev, 0x05, buf, 3);
+ } else {
+ gain = sd->gain >> 4;
+ buf[0] = gain << 4 | gain; /* Red and blue */
+ buf[1] = gain; /* Green */
+ reg_w(gspca_dev, 0x10, buf, 2);
+ }
}
static void setexposure(struct gspca_dev *gspca_dev)
@@ -1049,7 +1041,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
desired_avg_lum = 5000;
} else {
deadzone = 1500;
- desired_avg_lum = 18000;
+ desired_avg_lum = 13000;
}
if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
@@ -1127,53 +1119,91 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam = &gspca_dev->cam;
- int mode, l;
- const __u8 *sn9c10x;
- __u8 reg12_19[8];
+ int i, mode;
+ __u8 regs[0x31];
mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
- sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge];
- l = sensor_data[sd->sensor].bridge_init_size[sd->bridge];
- memcpy(reg12_19, &sn9c10x[0x12 - 1], 8);
- reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4);
- /* Special cases where reg 17 and or 19 value depends on mode */
+ /* Copy registers 0x01 - 0x19 from the template */
+ memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
+ /* Set the mode */
+ regs[0x18] |= mode << 4;
+
+ /* Set bridge gain to 1.0 */
+ if (sd->bridge == BRIDGE_103) {
+ regs[0x05] = 0x20; /* Red */
+ regs[0x06] = 0x20; /* Green */
+ regs[0x07] = 0x20; /* Blue */
+ } else {
+ regs[0x10] = 0x00; /* Red and blue */
+ regs[0x11] = 0x00; /* Green */
+ }
+
+ /* Setup pixel numbers and auto exposure window */
+ if (sensor_data[sd->sensor].flags & F_SIF) {
+ regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
+ regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
+ regs[0x1c] = 0x02; /* AE H-start 64 */
+ regs[0x1d] = 0x02; /* AE V-start 64 */
+ regs[0x1e] = 0x09; /* AE H-end 288 */
+ regs[0x1f] = 0x07; /* AE V-end 224 */
+ } else {
+ regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
+ regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
+ regs[0x1c] = 0x05; /* AE H-start 160 */
+ regs[0x1d] = 0x03; /* AE V-start 96 */
+ regs[0x1e] = 0x0f; /* AE H-end 480 */
+ regs[0x1f] = 0x0c; /* AE V-end 384 */
+ }
+
+ /* Setup the gamma table (only used with the sn9c103 bridge) */
+ for (i = 0; i < 16; i++)
+ regs[0x20 + i] = i * 16;
+ regs[0x20 + i] = 255;
+
+ /* Special cases where some regs depend on mode or bridge */
switch (sd->sensor) {
case SENSOR_TAS5130CXX:
- /* probably not mode specific at all most likely the upper
+ /* FIXME / TESTME
+	   probably not mode specific at all; most likely the upper
	   nibble of 0x19 is exposure (clock divider), just as with
	   the tas5110. We need someone to test this. */
- reg12_19[7] = mode ? 0x23 : 0x43;
+ regs[0x19] = mode ? 0x23 : 0x43;
break;
+ case SENSOR_OV7630:
+ /* FIXME / TESTME for some reason with the 101/102 bridge the
+	   clock is set to 12 MHz (reg1 == 0x04), rather than 24.
+ Also the hstart needs to go from 1 to 2 when using a 103,
+ which is likely related. This does not seem right. */
+ if (sd->bridge == BRIDGE_103) {
+			regs[0x01] = 0x44; /* Select 24 MHz clock */
+ regs[0x12] = 0x02; /* Set hstart to 2 */
+ }
}
/* Disable compression when the raw bayer format has been selected */
if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
- reg12_19[6] &= ~0x80;
+ regs[0x18] &= ~0x80;
/* Vga mode emulation on SIF sensor? */
if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
- reg12_19[0] += 16; /* 0x12: hstart adjust */
- reg12_19[1] += 24; /* 0x13: vstart adjust */
- reg12_19[3] = 320 / 16; /* 0x15: hsize */
- reg12_19[4] = 240 / 16; /* 0x16: vsize */
+ regs[0x12] += 16; /* hstart adjust */
+ regs[0x13] += 24; /* vstart adjust */
+ regs[0x15] = 320 / 16; /* hsize */
+ regs[0x16] = 240 / 16; /* vsize */
}
/* reg 0x01 bit 2 video transfert on */
- reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1);
+ reg_w(gspca_dev, 0x01, &regs[0x01], 1);
/* reg 0x17 SensorClk enable inv Clk 0x60 */
- reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1);
+ reg_w(gspca_dev, 0x17, &regs[0x17], 1);
/* Set the registers from the template */
- reg_w(gspca_dev, 0x01, sn9c10x, l);
+ reg_w(gspca_dev, 0x01, &regs[0x01],
+ (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);
/* Init the sensor */
i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
sensor_data[sd->sensor].sensor_init_size);
- if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
- i2c_w_vector(gspca_dev,
- sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
- sensor_data[sd->sensor].sensor_bridge_init_size[
- sd->bridge]);
- /* Mode specific sensor setup */
+ /* Mode / bridge specific sensor setup */
switch (sd->sensor) {
case SENSOR_PAS202: {
const __u8 i2cpclockdiv[] =
@@ -1181,27 +1211,37 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
if (mode)
i2c_w(gspca_dev, i2cpclockdiv);
+ break;
}
+ case SENSOR_OV7630:
+		/* FIXME / TESTME We should be able to handle this identically
+		   for the 101/102 and the 103 cases */
+ if (sd->bridge == BRIDGE_103) {
+ const __u8 i2c[] = { 0xa0, 0x21, 0x13,
+ 0x80, 0x00, 0x00, 0x00, 0x10 };
+ i2c_w(gspca_dev, i2c);
+ }
+ break;
}
/* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
- reg_w(gspca_dev, 0x15, &reg12_19[3], 2);
+ reg_w(gspca_dev, 0x15, &regs[0x15], 2);
/* compression register */
- reg_w(gspca_dev, 0x18, &reg12_19[6], 1);
+ reg_w(gspca_dev, 0x18, &regs[0x18], 1);
/* H_start */
- reg_w(gspca_dev, 0x12, &reg12_19[0], 1);
+ reg_w(gspca_dev, 0x12, &regs[0x12], 1);
/* V_START */
- reg_w(gspca_dev, 0x13, &reg12_19[1], 1);
+ reg_w(gspca_dev, 0x13, &regs[0x13], 1);
/* reset 0x17 SensorClk enable inv Clk 0x60 */
/*fixme: ov7630 [17]=68 8f (+20 if 102)*/
- reg_w(gspca_dev, 0x17, &reg12_19[5], 1);
+ reg_w(gspca_dev, 0x17, &regs[0x17], 1);
/*MCKSIZE ->3 */ /*fixme: not ov7630*/
- reg_w(gspca_dev, 0x19, &reg12_19[7], 1);
+ reg_w(gspca_dev, 0x19, &regs[0x19], 1);
/* AE_STRX AE_STRY AE_ENDX AE_ENDY */
- reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4);
+ reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
/* Enable video transfert */
- reg_w(gspca_dev, 0x01, &sn9c10x[0], 1);
+ reg_w(gspca_dev, 0x01, &regs[0x01], 1);
/* Compression */
- reg_w(gspca_dev, 0x18, &reg12_19[6], 2);
+ reg_w(gspca_dev, 0x18, &regs[0x18], 2);
msleep(20);
sd->reg11 = -1;
@@ -1525,15 +1565,15 @@ static const struct sd_desc sd_desc = {
.driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
{USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
{USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
{USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
+#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
@@ -1544,18 +1584,22 @@ static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
{USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
{USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
- /* {USB_DEVICE(0x0c45, 0x602b), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+ /* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+ /* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
+ {USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
+ {USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
+ /* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
{USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+ {USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
+ {USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
{USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
-#endif
{USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
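The new SENSOR_TAS5110D case in setsensorgain() above programs the gain with its bit order reversed: bit 7 of the computed value lands in bit 0 of the register and so on, which is exactly what the unrolled shift/OR chain implements. A minimal sketch of the same operation as a generic helper (the function name is hypothetical, not part of the driver; i2c[3] = reverse8(255 - gain) would match the unrolled code):

/* Reverse the bit order of an 8-bit value, as the TAS5110D gain
 * register expects (0 == max gain, 255 == min gain, bits swapped). */
static inline __u8 reverse8(__u8 v)
{
	__u8 r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (v & (1 << i))
			r |= 0x80 >> i;
	return r;
}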
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 2d0bb17a30a2..d6f39ce1b7e1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -25,12 +25,12 @@
#include "gspca.h"
#include "jpeg.h"
-#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-
MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
+static int starcam;
+
/* controls */
enum e_ctrl {
BRIGHTNESS,
@@ -43,7 +43,7 @@ enum e_ctrl {
HFLIP,
VFLIP,
SHARPNESS,
- INFRARED,
+ ILLUM,
FREQ,
NCTRLS /* number of controls */
};
@@ -100,7 +100,8 @@ enum sensors {
};
/* device flags */
-#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_PDN_INV 0x01 /* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_ILLUM 0x02 /* presence of illuminator */
/* sn9c1xx definitions */
/* register 0x01 */
@@ -124,7 +125,7 @@ static void setgamma(struct gspca_dev *gspca_dev);
static void setautogain(struct gspca_dev *gspca_dev);
static void sethvflip(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
-static void setinfrared(struct gspca_dev *gspca_dev);
+static void setillum(struct gspca_dev *gspca_dev);
static void setfreq(struct gspca_dev *gspca_dev);
static const struct ctrl sd_ctrls[NCTRLS] = {
@@ -251,18 +252,17 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
},
.set_control = setsharpness
},
-/* mt9v111 only */
-[INFRARED] = {
+[ILLUM] = {
{
- .id = V4L2_CID_INFRARED,
+ .id = V4L2_CID_ILLUMINATORS_1,
.type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Infrared",
+ .name = "Illuminator / infrared",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
- .set_control = setinfrared
+ .set_control = setillum
},
/* ov7630/ov7648/ov7660 only */
[FREQ] = {
@@ -282,32 +282,26 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
/* table of the disabled controls */
static const __u32 ctrl_dis[] = {
[SENSOR_ADCM1700] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_GC0307] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_GC0307] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_HV7131R] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_HV7131R] = (1 << HFLIP) |
(1 << FREQ),
-[SENSOR_MI0360] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MI0360] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_MI0360B] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MI0360B] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_MO4000] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_MO4000] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
@@ -315,40 +309,32 @@ static const __u32 ctrl_dis[] = {
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_OM6802] = (1 << INFRARED) |
- (1 << HFLIP) |
+[SENSOR_OM6802] = (1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
-[SENSOR_OV7630] = (1 << INFRARED) |
- (1 << HFLIP),
+[SENSOR_OV7630] = (1 << HFLIP),
-[SENSOR_OV7648] = (1 << INFRARED) |
- (1 << HFLIP),
+[SENSOR_OV7648] = (1 << HFLIP),
[SENSOR_OV7660] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP),
[SENSOR_PO1030] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
[SENSOR_PO2030N] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << FREQ),
[SENSOR_SOI768] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
[SENSOR_SP80708] = (1 << AUTOGAIN) |
- (1 << INFRARED) |
(1 << HFLIP) |
(1 << VFLIP) |
(1 << FREQ),
@@ -1822,44 +1808,46 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
switch (sd->bridge) {
case BRIDGE_SN9C102P:
+ case BRIDGE_SN9C105:
if (regF1 != 0x11)
return -ENODEV;
+ break;
+ default:
+/* case BRIDGE_SN9C110: */
+/* case BRIDGE_SN9C120: */
+ if (regF1 != 0x12)
+ return -ENODEV;
+ }
+
+ switch (sd->sensor) {
+ case SENSOR_MI0360:
+ mi0360_probe(gspca_dev);
+ break;
+ case SENSOR_OV7630:
+ ov7630_probe(gspca_dev);
+ break;
+ case SENSOR_OV7648:
+ ov7648_probe(gspca_dev);
+ break;
+ case SENSOR_PO2030N:
+ po2030n_probe(gspca_dev);
+ break;
+ }
+
+ switch (sd->bridge) {
+ case BRIDGE_SN9C102P:
reg_w1(gspca_dev, 0x02, regGpio[1]);
break;
case BRIDGE_SN9C105:
- if (regF1 != 0x11)
- return -ENODEV;
- if (sd->sensor == SENSOR_MI0360)
- mi0360_probe(gspca_dev);
reg_w(gspca_dev, 0x01, regGpio, 2);
break;
+ case BRIDGE_SN9C110:
+ reg_w1(gspca_dev, 0x02, 0x62);
+ break;
case BRIDGE_SN9C120:
- if (regF1 != 0x12)
- return -ENODEV;
- switch (sd->sensor) {
- case SENSOR_MI0360:
- mi0360_probe(gspca_dev);
- break;
- case SENSOR_OV7630:
- ov7630_probe(gspca_dev);
- break;
- case SENSOR_OV7648:
- ov7648_probe(gspca_dev);
- break;
- case SENSOR_PO2030N:
- po2030n_probe(gspca_dev);
- break;
- }
regGpio[1] = 0x70; /* no audio */
reg_w(gspca_dev, 0x01, regGpio, 2);
break;
- default:
-/* case BRIDGE_SN9C110: */
-/* case BRIDGE_SN9C325: */
- if (regF1 != 0x12)
- return -ENODEV;
- reg_w1(gspca_dev, 0x02, 0x62);
- break;
}
if (sd->sensor == SENSOR_OM6802)
@@ -1874,6 +1862,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->i2c_addr = sn9c1xx[9];
gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
+ if (!(sd->flags & F_ILLUM))
+ gspca_dev->ctrl_dis |= (1 << ILLUM);
return gspca_dev->usb_err;
}
@@ -2197,16 +2187,28 @@ static void setsharpness(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
}
-static void setinfrared(struct gspca_dev *gspca_dev)
+static void setillum(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << INFRARED))
+ if (gspca_dev->ctrl_dis & (1 << ILLUM))
return;
-/*fixme: different sequence for StarCam Clip and StarCam 370i */
-/* Clip */
- i2c_w1(gspca_dev, 0x02, /* gpio */
- sd->ctrls[INFRARED].val ? 0x66 : 0x64);
+ switch (sd->sensor) {
+ case SENSOR_ADCM1700:
+ reg_w1(gspca_dev, 0x02, /* gpio */
+ sd->ctrls[ILLUM].val ? 0x64 : 0x60);
+ break;
+ case SENSOR_MT9V111:
+ if (starcam)
+ reg_w1(gspca_dev, 0x02,
+ sd->ctrls[ILLUM].val ?
+ 0x55 : 0x54); /* 370i */
+ else
+ reg_w1(gspca_dev, 0x02,
+ sd->ctrls[ILLUM].val ?
+ 0x66 : 0x64); /* Clip */
+ break;
+ }
}
static void setfreq(struct gspca_dev *gspca_dev)
@@ -2344,7 +2346,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* sensor clock already enabled in sd_init */
/* reg_w1(gspca_dev, 0xf1, 0x00); */
reg01 = sn9c1xx[1];
- if (sd->flags & PDN_INV)
+ if (sd->flags & F_PDN_INV)
reg01 ^= S_PDN_INV; /* power down inverted */
reg_w1(gspca_dev, 0x01, reg01);
@@ -2907,13 +2909,11 @@ static const struct sd_desc sd_desc = {
.driver_info = (BRIDGE_ ## bridge << 16) \
| (SENSOR_ ## sensor << 8) \
| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
-#endif
- {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
- {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
+ {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
+ {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
@@ -2925,7 +2925,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */
{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
- {USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+ {USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)},
/* or MT9V111 */
/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
@@ -2936,10 +2936,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */
/* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */
{USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)},
{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
-#endif
{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/
{USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/
/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
@@ -2962,16 +2960,15 @@ static const __devinitdata struct usb_device_id device_table[] = {
/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)},
-#endif
{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
{USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
/* or GC0305 / GC0307 */
{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
- {USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/
+ {USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)},
+/* {USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */ /*sn9c120b*/
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
@@ -3007,3 +3004,7 @@ static void __exit sd_mod_exit(void)
module_init(sd_mod_init);
module_exit(sd_mod_exit);
+
+module_param(starcam, int, 0644);
+MODULE_PARM_DESC(starcam,
+ "StarCam model. 0: Clip, 1: 370i");
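The BS()/BSF() macros above pack the bridge, the sensor and the per-device flags into driver_info as (bridge << 16) | (sensor << 8) | flags, so F_PDN_INV and F_ILLUM must fit in the low byte. A sketch of the decode that sd_config() presumably performs (the helper name is illustrative; the decode itself is not visible in this hunk):

static void decode_driver_info(struct sd *sd, const struct usb_device_id *id)
{
	sd->bridge = (id->driver_info >> 16) & 0xff;	/* BRIDGE_* */
	sd->sensor = (id->driver_info >> 8) & 0xff;	/* SENSOR_* */
	sd->flags  = id->driver_info & 0xff;		/* F_PDN_INV, F_ILLUM */
}

The new starcam parameter only matters for the MT9V111 illuminator sequence; loading with e.g. "modprobe gspca_sonixj starcam=1" (module name assumed) selects the 370i GPIO values instead of the Clip ones.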
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index e64338664410..76c006b2bc83 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -555,7 +555,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04fc, 0x1528)},
{}
};
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8e202b9039f1..45552c3ff8d9 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1051,7 +1051,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 642839a11e8d..f7ef282cc600 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2155,7 +2155,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
{USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
{USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index bc9dd9034ab4..e5bf865147d7 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -786,7 +786,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 7307638ac91d..348319371523 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1509,7 +1509,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 3a162c6d5466..e836e778dfb6 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1061,7 +1061,7 @@ static const struct sd_desc *sd_desc[2] = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
{USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
{USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 404067745775..2e9c06175192 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -396,7 +396,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x2770, 0x9120)},
{}
};
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 8ba199543856..457563b7a71b 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -298,7 +298,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x2770, 0x905c)},
{USB_DEVICE(0x2770, 0x9050)},
{USB_DEVICE(0x2770, 0x9051)},
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index a4a98811b9e3..8215d5dcd456 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1163,7 +1163,7 @@ static const struct sd_desc sd_desc = {
#define ST(sensor, type) \
.driver_info = (SENSOR_ ## sensor << 8) \
| (type)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)},
{USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)},
{USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)},
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 11a192b95ed4..87be52b5e1e3 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -495,7 +495,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05e1, 0x0893)},
{}
};
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index b199ad4666bd..e2ef41cf72d7 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -327,7 +327,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0553, 0x0202)},
{USB_DEVICE(0x041e, 0x4007)},
{}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 28ea4175b80e..7e0661429293 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -564,7 +564,7 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
/* QuickCam Express */
{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
/* LEGO cam / QuickCam Web */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index a9cbcd6011d9..543542af2720 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1162,7 +1162,7 @@ static const struct sd_desc sd_desc = {
#define BS(bridge, subtype) \
.driver_info = (BRIDGE_ ## bridge << 8) \
| (subtype)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)},
{USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)},
{USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)},
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 8f0c33116e0d..a3eccd815766 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1416,7 +1416,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x17a1, 0x0128)},
{}
};
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 38c22f0a4263..933ef2ca658c 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -388,7 +388,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x046d, 0x0920)},
{USB_DEVICE(0x046d, 0x0921)},
{USB_DEVICE(0x0545, 0x808b)},
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 9b2ae1b6cc75..6caed734a06a 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4192,7 +4192,7 @@ static const struct sd_desc sd_desc = {
#define BF(bridge, flags) \
.driver_info = (BRIDGE_ ## bridge << 8) \
| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)},
{USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)},
{USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)},
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 5b5039a02031..c089a0f6f1d0 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3270,7 +3270,7 @@ static const struct sd_desc sd_desc_isoc_nego = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{ USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
{ USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
{ USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 14b85d483163..47236a58bf33 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
break;
default:
/* case 0xdd: * delay */
- msleep(action->val / 64 + 10);
+ msleep(action->idx);
break;
}
action++;
@@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
[SENSOR_GC0305] = gc0305_matrix,
[SENSOR_HDCS2020b] = NULL,
[SENSOR_HV7131B] = NULL,
- [SENSOR_HV7131R] = NULL,
+ [SENSOR_HV7131R] = po2030_matrix,
[SENSOR_ICM105A] = po2030_matrix,
[SENSOR_MC501CB] = NULL,
[SENSOR_MT9V111_1] = gc0305_matrix,
@@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev)
case SENSOR_ADCM2700:
case SENSOR_GC0305:
case SENSOR_HV7131B:
+ case SENSOR_HV7131R:
case SENSOR_OV7620:
case SENSOR_PAS202B:
case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
- reg_w(gspca_dev, 0x0b, 0x0038);
+ if (sensor == SENSOR_PAS202B)
+ reg_w(gspca_dev, 0x0b, 0x0038);
break;
}
}
@@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
+ if (sd->sensor == SENSOR_HV7131R)
+ reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
break;
}
@@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_PAS202B:
case SENSOR_GC0305:
+ case SENSOR_HV7131R:
case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
/* fall thru */
@@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* ms-win + */
reg_w(gspca_dev, 0x40, 0x0117);
break;
+ case SENSOR_HV7131R:
+ i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
+ i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+ i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+ reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+ break;
case SENSOR_GC0305:
case SENSOR_TAS5130C:
reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
- if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */
+ /* check the JPEG end of frame */
+ if (len >= 3
+ && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/*fixme: what does the last byte mean?*/
gspca_frame_add(gspca_dev, LAST_PACKET,
- NULL, 0);
+ data, len - 1);
+ return;
+ }
+
+ /* check the JPEG start of a frame */
+ if (data[0] == 0xff && data[1] == 0xd8) {
/* put the JPEG header in the new frame */
gspca_frame_add(gspca_dev, FIRST_PACKET,
sd->jpeg_hdr, JPEG_HDR_SZ);
@@ -6909,7 +6930,7 @@ static const struct sd_desc sd_desc = {
#endif
};
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x041e)},
{USB_DEVICE(0x041e, 0x4017)},
{USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
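The reworked sd_pkt_scan() above now also closes a frame when a packet ends with the JPEG end-of-image marker, instead of only reacting to the start-of-image marker of the next frame. The two tests, restated as standalone predicates (helper names are illustrative only):

/* A frame ends when the packet tail carries ff d9 followed by one byte
 * of unknown meaning; a new frame starts when the packet begins with ff d8. */
static inline int pkt_is_jpeg_eof(const u8 *data, int len)
{
	return len >= 3 && data[len - 3] == 0xff && data[len - 2] == 0xd9;
}

static inline int pkt_is_jpeg_sof(const u8 *data, int len)
{
	return len >= 2 && data[0] == 0xff && data[1] == 0xd8;
}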
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index e0230fcb2e36..3baa9f613ca3 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -1,6 +1,4 @@
-hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o
-
-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
+hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index f7d1ee55185a..a27d93b503a5 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface,
struct hdpvr_device *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+ struct i2c_client *client;
size_t buffer_size;
int i;
int retval = -ENOMEM;
@@ -378,25 +379,35 @@ static int hdpvr_probe(struct usb_interface *interface,
goto error;
}
-#ifdef CONFIG_I2C
- /* until i2c is working properly */
- retval = 0; /* hdpvr_register_i2c_adapter(dev); */
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
- v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+ v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
goto error;
}
- /* until i2c is working properly */
- retval = 0; /* hdpvr_register_i2c_ir(dev); */
- if (retval < 0)
- v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
-#endif /* CONFIG_I2C */
+ client = hdpvr_register_ir_rx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ goto reg_fail;
+ }
+
+ client = hdpvr_register_ir_tx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ goto reg_fail;
+ }
+#endif
/* let the user know what node this device is now attached to */
v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
video_device_node_name(dev->video_dev));
return 0;
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
error:
if (dev) {
/* Destroy single thread */
@@ -426,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface)
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
video_unregister_device(dev->video_dev);
atomic_dec(&dev_nr);
}
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 24966aa02a70..e53fa55d56a1 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -13,6 +13,8 @@
*
*/
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -28,106 +30,86 @@
#define Z8F0811_IR_TX_I2C_ADDR 0x70
#define Z8F0811_IR_RX_I2C_ADDR 0x71
-static const u8 ir_i2c_addrs[] = {
- Z8F0811_IR_TX_I2C_ADDR,
- Z8F0811_IR_RX_I2C_ADDR,
-};
-
-static const char * const ir_devicenames[] = {
- "ir_tx_z8f0811_hdpvr",
- "ir_rx_z8f0811_hdpvr",
-};
-static int hdpvr_new_i2c_ir(struct hdpvr_device *dev, struct i2c_adapter *adap,
- const char *type, u8 addr)
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
{
- struct i2c_board_info info;
struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
- unsigned short addr_list[2] = { addr, I2C_CLIENT_END };
+ struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+ };
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, type, I2C_NAME_SIZE);
-
- /* Our default information for ir-kbd-i2c.c to use */
- switch (addr) {
- case Z8F0811_IR_RX_I2C_ADDR:
- init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
- init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
- init_data->name = "HD PVR";
- info.platform_data = init_data;
- break;
- }
+ init_data->name = "HD-PVR";
+ hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
- return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
- -1 : 0;
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
}
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
{
- int i;
- int ret = 0;
+ struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+ struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+ };
- for (i = 0; i < ARRAY_SIZE(ir_i2c_addrs); i++)
- ret += hdpvr_new_i2c_ir(dev, dev->i2c_adapter,
- ir_devicenames[i], ir_i2c_addrs[i]);
+ /* Our default information for ir-kbd-i2c.c to use */
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = "HD-PVR";
+ hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
- return ret;
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
}
-static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr,
- char *data, int len)
+static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
+ unsigned char addr, char *data, int len)
{
int ret;
- char *buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+
+ if (len > sizeof(dev->i2c_buf))
+ return -EINVAL;
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_READ, CTRL_READ_REQUEST,
- 0x100|addr, 0, buf, len, 1000);
+ (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
if (ret == len) {
- memcpy(data, buf, len);
+ memcpy(data, &dev->i2c_buf, len);
ret = 0;
} else if (ret >= 0)
ret = -EIO;
- kfree(buf);
-
return ret;
}
-static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr,
- char *data, int len)
+static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
+ unsigned char addr, char *data, int len)
{
int ret;
- char *buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- memcpy(buf, data, len);
+ if (len > sizeof(dev->i2c_buf))
+ return -EINVAL;
+
+ memcpy(&dev->i2c_buf, data, len);
ret = usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
- 0x100|addr, 0, buf, len, 1000);
+ (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
if (ret < 0)
- goto error;
+ return ret;
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
- 0, 0, buf, 2, 1000);
+ 0, 0, &dev->i2c_buf, 2, 1000);
- if (ret == 2)
+ if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
ret = 0;
else if (ret >= 0)
ret = -EIO;
-error:
- kfree(buf);
return ret;
}
@@ -146,10 +128,10 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
addr = msgs[i].addr << 1;
if (msgs[i].flags & I2C_M_RD)
- retval = hdpvr_i2c_read(dev, addr, msgs[i].buf,
+ retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf,
msgs[i].len);
else
- retval = hdpvr_i2c_write(dev, addr, msgs[i].buf,
+ retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf,
msgs[i].len);
}
@@ -168,30 +150,47 @@ static struct i2c_algorithm hdpvr_algo = {
.functionality = hdpvr_functionality,
};
+static struct i2c_adapter hdpvr_i2c_adapter_template = {
+	.name = "Hauppauge HD PVR I2C",
+ .owner = THIS_MODULE,
+ .algo = &hdpvr_algo,
+};
+
+static int hdpvr_activate_ir(struct hdpvr_device *dev)
+{
+ char buffer[8];
+
+ mutex_lock(&dev->i2c_mutex);
+
+ hdpvr_i2c_read(dev, 0, 0x54, buffer, 1);
+
+ buffer[0] = 0;
+ buffer[1] = 0x8;
+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+ buffer[1] = 0x18;
+ hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+ mutex_unlock(&dev->i2c_mutex);
+
+ return 0;
+}
+
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
{
- struct i2c_adapter *i2c_adap;
int retval = -ENOMEM;
- i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
- if (i2c_adap == NULL)
- goto error;
-
- strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
- sizeof(i2c_adap->name));
- i2c_adap->algo = &hdpvr_algo;
- i2c_adap->owner = THIS_MODULE;
- i2c_adap->dev.parent = &dev->udev->dev;
+ hdpvr_activate_ir(dev);
- i2c_set_adapdata(i2c_adap, dev);
+ memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
+ sizeof(struct i2c_adapter));
+ dev->i2c_adapter.dev.parent = &dev->udev->dev;
- retval = i2c_add_adapter(i2c_adap);
+ i2c_set_adapdata(&dev->i2c_adapter, dev);
- if (!retval)
- dev->i2c_adapter = i2c_adap;
- else
- kfree(i2c_adap);
+ retval = i2c_add_adapter(&dev->i2c_adapter);
-error:
return retval;
}
+
+#endif
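hdpvr_i2c_read() and hdpvr_i2c_write() above now take a bus number and route every transfer through the fixed dev->i2c_buf instead of a per-call kmalloc(), rejecting anything larger than that buffer. The bus travels in the high byte of the control request's wValue; a small sketch of that packing (helper name hypothetical, drawn from the code rather than vendor documentation):

/* wValue layout used by the vendor requests above: bus number in
 * bits 15..8, chip address in bits 7..0. */
static inline u16 hdpvr_i2c_wvalue(int bus, unsigned char addr)
{
	return (bus << 8) | addr;
}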
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d38fe1043e47..514aea76eaa5 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,12 +1220,9 @@ static void hdpvr_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
/* deregister I2C adapter */
-#ifdef CONFIG_I2C
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
mutex_lock(&dev->i2c_mutex);
- if (dev->i2c_adapter)
- i2c_del_adapter(dev->i2c_adapter);
- kfree(dev->i2c_adapter);
- dev->i2c_adapter = NULL;
+ i2c_del_adapter(&dev->i2c_adapter);
mutex_unlock(&dev->i2c_mutex);
#endif /* CONFIG_I2C */
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 37f1e4c7675d..072f23c570f3 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -25,6 +25,7 @@
KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
#define HDPVR_MAX 8
+#define HDPVR_I2C_MAX_SIZE 128
/* Define these values to match your devices */
#define HD_PVR_VENDOR_ID 0x2040
@@ -106,9 +107,11 @@ struct hdpvr_device {
struct work_struct worker;
/* I2C adapter */
- struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter i2c_adapter;
/* I2C lock */
struct mutex i2c_mutex;
+ /* I2C message buffer space */
+ char i2c_buf[HDPVR_I2C_MAX_SIZE];
/* For passing data to ir-kbd-i2c */
struct IR_i2c_init_data ir_i2c_init_data;
@@ -310,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev);
/* i2c adapter registration */
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
/*========================================================================*/
/* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index c87b6bc45555..a221ad68b330 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
+ int ret;
+ unsigned char buf[1] = { 0 };
+
+ /*
+ * This is the same apparent "are you ready?" poll command observed
+ * watching Windows driver traffic and implemented in lirc_zilog. With
+	 * this added, we get far saner remote behavior with Z8 chips on
+	 * USB-connected devices, even with the default polling interval of 100ms.
+ */
+ ret = i2c_master_send(ir->c, buf, 1);
+ if (ret != 1)
+ return (ret < 0) ? ret : -EINVAL;
+
return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
}
@@ -244,15 +257,17 @@ static void ir_key_poll(struct IR_i2c *ir)
static u32 ir_key, ir_raw;
int rc;
- dprintk(2,"ir_poll_key\n");
+ dprintk(3, "%s\n", __func__);
rc = ir->get_key(ir, &ir_key, &ir_raw);
if (rc < 0) {
dprintk(2,"error\n");
return;
}
- if (rc)
+ if (rc) {
+ dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
rc_keydown(ir->rc, ir_key, 0);
+ }
}
static void ir_work(struct work_struct *work)
@@ -321,6 +336,12 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc_type = RC_TYPE_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
+ case 0x71:
+ name = "Hauppauge/Zilog Z8";
+ ir->get_key = get_key_haup_xvr;
+ rc_type = RC_TYPE_RC5;
+ ir_codes = hauppauge ? RC_MAP_HAUPPAUGE_NEW : RC_MAP_RC5_TV;
+ break;
}
/* Let the caller override settings */
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index e103b8fc7452..9fb86a081c0f 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -300,10 +300,15 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
} else if (hw == IVTV_HW_CX25840) {
struct cx25840_platform_data pdata;
+ struct i2c_board_info cx25840_info = {
+ .type = "cx25840",
+ .addr = hw_addrs[idx],
+ .platform_data = &pdata,
+ };
pdata.pvr150_workaround = itv->pvr150_workaround;
- sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
- adap, type, 0, &pdata, hw_addrs[idx], NULL);
+ sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
+ &cx25840_info, NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
adap, type, hw_addrs[idx], NULL);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 209ff97261a9..4904d25f689f 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -12,17 +12,41 @@
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include "mt9v011.h"
+#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_LICENSE("GPL");
-
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
+#define R00_MT9V011_CHIP_VERSION 0x00
+#define R01_MT9V011_ROWSTART 0x01
+#define R02_MT9V011_COLSTART 0x02
+#define R03_MT9V011_HEIGHT 0x03
+#define R04_MT9V011_WIDTH 0x04
+#define R05_MT9V011_HBLANK 0x05
+#define R06_MT9V011_VBLANK 0x06
+#define R07_MT9V011_OUT_CTRL 0x07
+#define R09_MT9V011_SHUTTER_WIDTH 0x09
+#define R0A_MT9V011_CLK_SPEED 0x0a
+#define R0B_MT9V011_RESTART 0x0b
+#define R0C_MT9V011_SHUTTER_DELAY 0x0c
+#define R0D_MT9V011_RESET 0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
+#define R20_MT9V011_READ_MODE 0x20
+#define R2B_MT9V011_GREEN_1_GAIN 0x2b
+#define R2C_MT9V011_BLUE_GAIN 0x2c
+#define R2D_MT9V011_RED_GAIN 0x2d
+#define R2E_MT9V011_GREEN_2_GAIN 0x2e
+#define R35_MT9V011_GLOBAL_GAIN 0x35
+#define RF1_MT9V011_CHIP_ENABLE 0xf1
+
+#define MT9V011_VERSION 0x8232
+#define MT9V011_REV_B_VERSION 0x8243
+
/* supported controls */
static struct v4l2_queryctrl mt9v011_qctrl[] = {
{
@@ -469,23 +493,6 @@ static int mt9v011_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
return 0;
}
-static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
- struct mt9v011 *core = to_mt9v011(sd);
- unsigned *xtal = data;
-
- v4l2_dbg(1, debug, sd, "s_config called\n");
-
- if (xtal) {
- core->xtal = *xtal;
- v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
- *xtal / 1000000, (*xtal / 1000) % 1000);
- }
-
- return 0;
-}
-
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mt9v011_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
@@ -536,7 +543,6 @@ static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.g_ctrl = mt9v011_g_ctrl,
.s_ctrl = mt9v011_s_ctrl,
.reset = mt9v011_reset,
- .s_config = mt9v011_s_config,
.g_chip_ident = mt9v011_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v011_g_register,
@@ -596,6 +602,14 @@ static int mt9v011_probe(struct i2c_client *c,
core->height = 480;
core->xtal = 27000000; /* Hz */
+ if (c->dev.platform_data) {
+ struct mt9v011_platform_data *pdata = c->dev.platform_data;
+
+ core->xtal = pdata->xtal;
+ v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
+ core->xtal / 1000000, (core->xtal / 1000) % 1000);
+ }
+
v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
c->addr << 1, c->adapter->name, version);
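With mt9v011_s_config() removed, the crystal frequency now has to reach the driver through ordinary I2C platform data at probe time. A rough board-side sketch, assuming the mt9v011_platform_data layout implied above (the I2C address is shown for illustration only):

static struct mt9v011_platform_data mt9v011_pdata = {
	.xtal = 27000000,	/* Hz */
};

static struct i2c_board_info mt9v011_info = {
	I2C_BOARD_INFO("mt9v011", 0x5d),	/* address illustrative */
	.platform_data = &mt9v011_pdata,
};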
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
deleted file mode 100644
index 3350fd6083c3..000000000000
--- a/drivers/media/video/mt9v011.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
- *
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
- * This code is placed under the terms of the GNU General Public License v2
- */
-
-#ifndef MT9V011_H_
-#define MT9V011_H_
-
-#define R00_MT9V011_CHIP_VERSION 0x00
-#define R01_MT9V011_ROWSTART 0x01
-#define R02_MT9V011_COLSTART 0x02
-#define R03_MT9V011_HEIGHT 0x03
-#define R04_MT9V011_WIDTH 0x04
-#define R05_MT9V011_HBLANK 0x05
-#define R06_MT9V011_VBLANK 0x06
-#define R07_MT9V011_OUT_CTRL 0x07
-#define R09_MT9V011_SHUTTER_WIDTH 0x09
-#define R0A_MT9V011_CLK_SPEED 0x0a
-#define R0B_MT9V011_RESTART 0x0b
-#define R0C_MT9V011_SHUTTER_DELAY 0x0c
-#define R0D_MT9V011_RESET 0x0d
-#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
-#define R20_MT9V011_READ_MODE 0x20
-#define R2B_MT9V011_GREEN_1_GAIN 0x2b
-#define R2C_MT9V011_BLUE_GAIN 0x2c
-#define R2D_MT9V011_RED_GAIN 0x2d
-#define R2E_MT9V011_GREEN_2_GAIN 0x2e
-#define R35_MT9V011_GLOBAL_GAIN 0x35
-#define RF1_MT9V011_CHIP_ENABLE 0xf1
-
-#define MT9V011_VERSION 0x8232
-#define MT9V011_REV_B_VERSION 0x8243
-
-#endif
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 83de97ad971e..029a4babfd61 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1286,7 +1286,7 @@ static int omap_vout_release(struct file *file)
videobuf_mmap_free(q);
/* Even if apply changes fails we should continue
- freeing allocated memeory */
+ freeing allocated memory */
if (vout->streaming) {
u32 mask = 0;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index c881a64b41fd..d4e7c11553c3 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1449,47 +1449,6 @@ static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
}
-static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov7670_config *config = data;
- struct ov7670_info *info = to_state(sd);
- int ret;
-
- info->clock_speed = 30; /* default: a guess */
-
- /*
- * Must apply configuration before initializing device, because it
- * selects I/O method.
- */
- if (config) {
- info->min_width = config->min_width;
- info->min_height = config->min_height;
- info->use_smbus = config->use_smbus;
-
- if (config->clock_speed)
- info->clock_speed = config->clock_speed;
- }
-
- /* Make sure it's an ov7670 */
- ret = ov7670_detect(sd);
- if (ret) {
- v4l_dbg(1, debug, client,
- "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
- client->addr << 1, client->adapter->name);
- kfree(info);
- return ret;
- }
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = info->clock_speed / 30;
-
- return 0;
-}
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -1528,7 +1487,6 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.s_ctrl = ov7670_s_ctrl,
.queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
- .s_config = ov7670_s_config,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7670_g_register,
@@ -1558,6 +1516,7 @@ static int ov7670_probe(struct i2c_client *client,
{
struct v4l2_subdev *sd;
struct ov7670_info *info;
+ int ret;
info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
if (info == NULL)
@@ -1565,6 +1524,37 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
+ info->clock_speed = 30; /* default: a guess */
+ if (client->dev.platform_data) {
+ struct ov7670_config *config = client->dev.platform_data;
+
+ /*
+ * Must apply configuration before initializing device, because it
+ * selects I/O method.
+ */
+ info->min_width = config->min_width;
+ info->min_height = config->min_height;
+ info->use_smbus = config->use_smbus;
+
+ if (config->clock_speed)
+ info->clock_speed = config->clock_speed;
+ }
+
+ /* Make sure it's an ov7670 */
+ ret = ov7670_detect(sd);
+ if (ret) {
+ v4l_dbg(1, debug, client,
+ "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+ client->addr << 1, client->adapter->name);
+ kfree(info);
+ return ret;
+ }
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ info->fmt = &ov7670_formats[0];
+ info->sat = 128; /* Review this */
+ info->clkrc = info->clock_speed / 30;
return 0;
}
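ov7670_s_config() is dropped in the same way: the configuration is now read from client->dev.platform_data during probe. A board-side sketch using the fields handled above (values are illustrative; 0x21 is the usual 7-bit ov7670 address):

static struct ov7670_config ov7670_cfg = {
	.min_width   = 640,
	.min_height  = 480,
	.clock_speed = 30,	/* 0 keeps the driver's default */
	.use_smbus   = false,
};

static struct i2c_board_info ov7670_info = {
	I2C_BOARD_INFO("ov7670", 0x21),
	.platform_data = &ov7670_cfg,
};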
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index ac94a8bf883e..305e6aaa844a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -40,6 +40,7 @@
#include "pvrusb2-io.h"
#include <media/v4l2-device.h>
#include <media/cx2341x.h>
+#include <media/ir-kbd-i2c.h>
#include "pvrusb2-devattr.h"
/* Legal values for PVR2_CID_HSM */
@@ -202,6 +203,7 @@ struct pvr2_hdw {
/* IR related */
unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
+ struct IR_i2c_init_data ir_init_data; /* params passed to IR modules */
/* Frequency table */
unsigned int freqTable[FREQTABLE_SIZE];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 7cbe18c4ca95..451ecd485f97 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
*/
#include <linux/i2c.h>
+#include <media/ir-kbd-i2c.h>
#include "pvrusb2-i2c-core.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
@@ -48,13 +49,6 @@ module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
MODULE_PARM_DESC(disable_autoload_ir_video,
"1=do not try to autoload ir_video IR receiver");
-/* Mapping of IR schemes to known I2C addresses - if any */
-static const unsigned char ir_video_addresses[] = {
- [PVR2_IR_SCHEME_ZILOG] = 0x71,
- [PVR2_IR_SCHEME_29XXX] = 0x18,
- [PVR2_IR_SCHEME_24XXX] = 0x18,
-};
-
static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
u8 i2c_addr, /* I2C address we're talking to */
u8 *data, /* Data to write */
@@ -574,26 +568,55 @@ static void do_i2c_scan(struct pvr2_hdw *hdw)
static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
{
struct i2c_board_info info;
- unsigned char addr = 0;
+ struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
if (pvr2_disable_ir_video) {
pvr2_trace(PVR2_TRACE_INFO,
"Automatic binding of ir_video has been disabled.");
return;
}
- if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
- addr = ir_video_addresses[hdw->ir_scheme_active];
- }
- if (!addr) {
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ switch (hdw->ir_scheme_active) {
+ case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
+ case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = hdw->hdw_desc->description;
+		init_data->polling_interval = 100; /* ms (default from ir-kbd-i2c) */
+ /* IR Receiver */
+ info.addr = 0x18;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ break;
+ case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */
+ case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+ init_data->type = RC_TYPE_RC5;
+ init_data->name = hdw->hdw_desc->description;
+ /* IR Receiver */
+ info.addr = 0x71;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ /* IR Transmitter */
+ info.addr = 0x70;
+ info.platform_data = init_data;
+ strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
+ pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+ info.type, info.addr);
+ i2c_new_device(&hdw->i2c_adap, &info);
+ break;
+ default:
/* The device either doesn't support I2C-based IR or we
don't know (yet) how to operate IR on the device. */
- return;
+ break;
}
- pvr2_trace(PVR2_TRACE_INFO,
- "Binding ir_video to i2c address 0x%02x.", addr);
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- info.addr = addr;
- i2c_new_device(&hdw->i2c_adap, &info);
}
void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
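As an illustration only (the adapter, address and init_data names below are placeholders, not part of the patch), the binding pattern used by pvr2_i2c_register_ir() above comes down to filling a struct i2c_board_info and handing it to i2c_new_device():

/*
 * Sketch of the i2c_board_info + platform_data binding pattern used
 * above.  "example_adap", the 0x18 address and "init_data" are
 * illustrative values only.
 */
static struct i2c_client *example_bind_ir(struct i2c_adapter *example_adap,
					  struct IR_i2c_init_data *init_data)
{
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
	info.addr = 0x18;			/* example address */
	info.platform_data = init_data;		/* consumed by ir-kbd-i2c */
	return i2c_new_device(example_adap, &info);	/* NULL on failure */
}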
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d1f42f..0db90922ee93 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client,
chip_id = name[5];
/* Check whether this chip is part of the saa711x series */
- if (memcmp(name, "1f711", 5)) {
+ if (memcmp(name + 1, "f711", 4)) {
v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
client->addr << 1, name);
return -ENODEV;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e7aa588c6c5a..deb8fcf4aa49 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5179,18 +5179,8 @@ struct saa7134_board saa7134_boards[] = {
[SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = {
.name = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid",
.audio_clock = 0x00187de7,
-#if 0
- /*
- * FIXME: Analog mode doesn't work, if digital is enabled. The proper
- * fix is to use tda8290 driver, but Kworld seems to use an
- * unsupported version of tda8295.
- */
- .tuner_type = TUNER_NXP_TDA18271, /* TUNER_PHILIPS_TDA8290 */
- .tuner_addr = 0x60,
-#else
- .tuner_type = UNSET,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = ADDR_UNSET,
-#endif
.radio_type = UNSET,
.radio_addr = ADDR_UNSET,
.gpiomask = 0x8e054000,
@@ -6932,10 +6922,17 @@ static inline int saa7134_kworld_sbtvd_toggle_agc(struct saa7134_dev *dev,
/* toggle AGC switch through GPIO 27 */
switch (mode) {
case TDA18271_ANALOG:
- saa7134_set_gpio(dev, 27, 0);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
+ msleep(20);
break;
case TDA18271_DIGITAL:
- saa7134_set_gpio(dev, 27, 1);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
+ msleep(20);
+ saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
+ saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
+ msleep(30);
break;
default:
return -EINVAL;
@@ -6993,6 +6990,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
int saa7134_tuner_callback(void *priv, int component, int command, int arg)
{
struct saa7134_dev *dev = priv;
+
if (dev != NULL) {
switch (dev->tuner_type) {
case TUNER_PHILIPS_TDA8290:
@@ -7659,36 +7657,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
}
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
- {
- struct i2c_msg msg = { .addr = 0x4b, .flags = 0 };
- int i;
- static u8 buffer[][2] = {
- {0x30, 0x31},
- {0xff, 0x00},
- {0x41, 0x03},
- {0x41, 0x1a},
- {0xff, 0x02},
- {0x34, 0x00},
- {0x45, 0x97},
- {0x45, 0xc1},
- };
saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
- /*
- * FIXME: identify what device is at addr 0x4b and what means
- * this initialization
- */
- for (i = 0; i < ARRAY_SIZE(buffer); i++) {
- msg.buf = &buffer[i][0];
- msg.len = ARRAY_SIZE(buffer[0]);
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
- printk(KERN_WARNING
- "%s: Unable to enable tuner(%i).\n",
- dev->name, i);
- }
+ saa7134_set_gpio(dev, 27, 0);
break;
- }
} /* switch() */
/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 3315a48a848b..f65cad287b83 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -237,12 +237,39 @@ static struct tda18271_std_map mb86a20s_tda18271_std_map = {
static struct tda18271_config kworld_tda18271_config = {
.std_map = &mb86a20s_tda18271_std_map,
.gate = TDA18271_GATE_DIGITAL,
+ .config = 3, /* Use tuner callback for AGC */
+
};
static const struct mb86a20s_config kworld_mb86a20s_config = {
.demod_address = 0x10,
};
+static int kworld_sbtvd_gate_ctrl(struct dvb_frontend* fe, int enable)
+{
+ struct saa7134_dev *dev = fe->dvb->priv;
+
+ unsigned char initmsg[] = {0x45, 0x97};
+ unsigned char msg_enable[] = {0x45, 0xc1};
+ unsigned char msg_disable[] = {0x45, 0x81};
+ struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
+
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+ wprintk("could not access the I2C gate\n");
+ return -EIO;
+ }
+ if (enable)
+ msg.buf = msg_enable;
+ else
+ msg.buf = msg_disable;
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+ wprintk("could not access the I2C gate\n");
+ return -EIO;
+ }
+ msleep(20);
+ return 0;
+}
+
/* ==================================================================
* tda1004x based DVB-T cards, helper functions
*/
@@ -623,37 +650,6 @@ static struct tda827x_config tda827x_cfg_2_sw42 = {
/* ------------------------------------------------------------------ */
-static int __kworld_sbtvd_i2c_gate_ctrl(struct saa7134_dev *dev, int enable)
-{
- unsigned char initmsg[] = {0x45, 0x97};
- unsigned char msg_enable[] = {0x45, 0xc1};
- unsigned char msg_disable[] = {0x45, 0x81};
- struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
-
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
- wprintk("could not access the I2C gate\n");
- return -EIO;
- }
- if (enable)
- msg.buf = msg_enable;
- else
- msg.buf = msg_disable;
- if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
- wprintk("could not access the I2C gate\n");
- return -EIO;
- }
- msleep(20);
- return 0;
-}
-static int kworld_sbtvd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- struct saa7134_dev *dev = fe->dvb->priv;
-
- return __kworld_sbtvd_i2c_gate_ctrl(dev, enable);
-}
-
-/* ------------------------------------------------------------------ */
-
static struct tda1004x_config tda827x_lifeview_config = {
.demod_address = 0x08,
.invert = 1,
@@ -1660,27 +1656,23 @@ static int dvb_init(struct saa7134_dev *dev)
}
break;
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
- __kworld_sbtvd_i2c_gate_ctrl(dev, 0);
- saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
- saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
- msleep(20);
- saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
- saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
- msleep(20);
+ /* Switch to digital mode */
+ saa7134_tuner_callback(dev, 0,
+ TDA18271_CALLBACK_CMD_AGC_ENABLE, 1);
fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
&kworld_mb86a20s_config,
&dev->i2c_adap);
- __kworld_sbtvd_i2c_gate_ctrl(dev, 1);
if (fe0->dvb.frontend != NULL) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
dvb_attach(tda18271_attach, fe0->dvb.frontend,
0x60, &dev->i2c_adap,
&kworld_tda18271_config);
- /*
- * Only after success, it can initialize the gate, otherwise
- * an OOPS will hit, due to kfree(fe0->dvb.frontend)
- */
- fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_i2c_gate_ctrl;
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_gate_ctrl;
}
+
+ /* mb86a20s needs to use the I2C gateway */
break;
default:
wprintk("Huh? unknown DVB card?\n");
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index d6bf3f82cc34..58af67f2278b 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -655,8 +655,8 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
goto out;
}
- /* Check that the hardware is accessable. If the status bytes are
- * 0xFF then the device is not accessable, the the IRQ belongs
+ /* Check that the hardware is accessible. If the status bytes are
+ * 0xFF then the device is not accessible and the IRQ belongs
* to another driver.
* 4 x u32 interrupt registers.
*/
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 41064c7b5ef8..b3d2cc729657 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -47,8 +47,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
+#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
@@ -56,78 +56,68 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
+ { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, /* not in sonixb */
#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
{ SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
+ { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, /* not in sonixb */
/* SN9C103 */
- { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, non existent ? */
+ { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, /* not in sonixb */
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
/* { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
- { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, non existent ? */
{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
-#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, non existent ? */
/* { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
/* { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
-/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */
- { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5110, non existent */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, non existent ? */
{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, non existent ? */
#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
/* SN9C105 */
#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), },
- { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), },
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, PO1030 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, OM6801 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, HV7131GP */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, non existent ? */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, MO4000 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, ICM105C */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, OV7648 */
{ SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
/* SN9C120 */
{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
- { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
-#endif
- { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
- { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, po2030 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, om6801 */
+/* { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, S5K53BEB */
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
-#endif
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
#endif
diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h
index 494957b10bac..7f38549715b6 100644
--- a/drivers/media/video/sn9c102/sn9c102_sensor.h
+++ b/drivers/media/video/sn9c102/sn9c102_sensor.h
@@ -147,7 +147,7 @@ enum sn9c102_i2c_interface {
struct sn9c102_sensor {
char name[32], /* sensor name */
- maintainer[64]; /* name of the mantainer <email> */
+ maintainer[64]; /* name of the maintainer <email> */
enum sn9c102_bridge supported_bridge; /* supported SN9C1xx bridges */
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 864696b7a006..c901721a1db3 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -714,15 +714,6 @@ static int sr030pc30_base_config(struct v4l2_subdev *sd)
return ret;
}
-static int sr030pc30_s_config(struct v4l2_subdev *sd,
- int irq, void *platform_data)
-{
- struct sr030pc30_info *info = to_sr030pc30(sd);
-
- info->pdata = platform_data;
- return 0;
-}
-
static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
{
return 0;
@@ -763,7 +754,6 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
}
static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
- .s_config = sr030pc30_s_config,
.s_power = sr030pc30_s_power,
.queryctrl = sr030pc30_queryctrl,
.s_ctrl = sr030pc30_s_ctrl,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
deleted file mode 100644
index 35b6ff5db319..000000000000
--- a/drivers/media/video/tda9875.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * For the TDA9875 chip
- * (The TDA9875 is used on the Diamond DTV2000 french version
- * Other cards probably use these chips as well.)
- * This driver will not complain if used with any
- * other i2c device with the same address.
- *
- * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source and
- * Eric Sandeen
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
- * This code is placed under the terms of the GNU General Public License
- * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
- * Which was based on tda8425.c by Greg Alexander (c) 1998
- *
- * OPTIONS:
- * debug - set to 1 if you'd like to see debug messages
- *
- * Revision: 0.1 - original version
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/i2c-addr.h>
-
-static int debug; /* insmod parameter */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_LICENSE("GPL");
-
-
-/* This is a superset of the TDA9875 */
-struct tda9875 {
- struct v4l2_subdev sd;
- int rvol, lvol;
- int bass, treble;
-};
-
-static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct tda9875, sd);
-}
-
-#define dprintk if (debug) printk
-
-/* The TDA9875 is made by Philips Semiconductor
- * http://www.semiconductors.philips.com
- * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator
- *
- */
-
- /* subaddresses for TDA9875 */
-#define TDA9875_MUT 0x12 /*General mute (value --> 0b11001100*/
-#define TDA9875_CFG 0x01 /* Config register (value --> 0b00000000 */
-#define TDA9875_DACOS 0x13 /*DAC i/o select (ADC) 0b0000100*/
-#define TDA9875_LOSR 0x16 /*Line output select regirter 0b0100 0001*/
-
-#define TDA9875_CH1V 0x0c /*Channel 1 volume (mute)*/
-#define TDA9875_CH2V 0x0d /*Channel 2 volume (mute)*/
-#define TDA9875_SC1 0x14 /*SCART 1 in (mono)*/
-#define TDA9875_SC2 0x15 /*SCART 2 in (mono)*/
-
-#define TDA9875_ADCIS 0x17 /*ADC input select (mono) 0b0110 000*/
-#define TDA9875_AER 0x19 /*Audio effect (AVL+Pseudo) 0b0000 0110*/
-#define TDA9875_MCS 0x18 /*Main channel select (DAC) 0b0000100*/
-#define TDA9875_MVL 0x1a /* Main volume gauche */
-#define TDA9875_MVR 0x1b /* Main volume droite */
-#define TDA9875_MBA 0x1d /* Main Basse */
-#define TDA9875_MTR 0x1e /* Main treble */
-#define TDA9875_ACS 0x1f /* Auxilary channel select (FM) 0b0000000*/
-#define TDA9875_AVL 0x20 /* Auxilary volume gauche */
-#define TDA9875_AVR 0x21 /* Auxilary volume droite */
-#define TDA9875_ABA 0x22 /* Auxilary Basse */
-#define TDA9875_ATR 0x23 /* Auxilary treble */
-
-#define TDA9875_MSR 0x02 /* Monitor select register */
-#define TDA9875_C1MSB 0x03 /* Carrier 1 (FM) frequency register MSB */
-#define TDA9875_C1MIB 0x04 /* Carrier 1 (FM) frequency register (16-8]b */
-#define TDA9875_C1LSB 0x05 /* Carrier 1 (FM) frequency register LSB */
-#define TDA9875_C2MSB 0x06 /* Carrier 2 (nicam) frequency register MSB */
-#define TDA9875_C2MIB 0x07 /* Carrier 2 (nicam) frequency register (16-8]b */
-#define TDA9875_C2LSB 0x08 /* Carrier 2 (nicam) frequency register LSB */
-#define TDA9875_DCR 0x09 /* Demodulateur configuration regirter*/
-#define TDA9875_DEEM 0x0a /* FM de-emphasis regirter*/
-#define TDA9875_FMAT 0x0b /* FM Matrix regirter*/
-
-/* values */
-#define TDA9875_MUTE_ON 0xff /* general mute */
-#define TDA9875_MUTE_OFF 0xcc /* general no mute */
-
-
-
-/* Begin code */
-
-static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned char buffer[2];
-
- v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
- buffer[0] = subaddr;
- buffer[1] = val;
- if (2 != i2c_master_send(client, buffer, 2)) {
- v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
- subaddr, val);
- return -1;
- }
- return 0;
-}
-
-
-static int i2c_read_register(struct i2c_client *client, int addr, int reg)
-{
- unsigned char write[1];
- unsigned char read[1];
- struct i2c_msg msgs[2] = {
- { addr, 0, 1, write },
- { addr, I2C_M_RD, 1, read }
- };
-
- write[0] = reg;
-
- if (2 != i2c_transfer(client->adapter, msgs, 2)) {
- v4l_warn(client, "I/O error (read2)\n");
- return -1;
- }
- v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
- return read[0];
-}
-
-static void tda9875_set(struct v4l2_subdev *sd)
-{
- struct tda9875 *tda = to_state(sd);
- unsigned char a;
-
- v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
- tda->lvol, tda->rvol, tda->bass, tda->treble);
-
- a = tda->lvol & 0xff;
- tda9875_write(sd, TDA9875_MVL, a);
- a =tda->rvol & 0xff;
- tda9875_write(sd, TDA9875_MVR, a);
- a =tda->bass & 0xff;
- tda9875_write(sd, TDA9875_MBA, a);
- a =tda->treble & 0xff;
- tda9875_write(sd, TDA9875_MTR, a);
-}
-
-static void do_tda9875_init(struct v4l2_subdev *sd)
-{
- struct tda9875 *t = to_state(sd);
-
- v4l2_dbg(1, debug, sd, "In tda9875_init\n");
- tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
- tda9875_write(sd, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/
- tda9875_write(sd, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/
- tda9875_write(sd, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/
- tda9875_write(sd, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/
- tda9875_write(sd, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/
- tda9875_write(sd, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/
- tda9875_write(sd, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/
- tda9875_write(sd, TDA9875_DCR, 0x00); /*Demod config 0x00*/
- tda9875_write(sd, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/
- tda9875_write(sd, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/
- tda9875_write(sd, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/
- tda9875_write(sd, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/
-
- tda9875_write(sd, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/
- tda9875_write(sd, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */
- tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
- tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
- tda9875_write(sd, TDA9875_LOSR, 0x00); /* line out (in:mono)*/
- tda9875_write(sd, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */
- tda9875_write(sd, TDA9875_MCS, 0x44); /* Main ch select (DAC) */
- tda9875_write(sd, TDA9875_MVL, 0x03); /* Vol Main left 10dB */
- tda9875_write(sd, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/
- tda9875_write(sd, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/
- tda9875_write(sd, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/
- tda9875_write(sd, TDA9875_ACS, 0x44); /* Aux chan select (dac)*/
- tda9875_write(sd, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/
- tda9875_write(sd, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/
- tda9875_write(sd, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/
- tda9875_write(sd, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/
-
- tda9875_write(sd, TDA9875_MUT, 0xcc); /* General mute */
-
- t->lvol = t->rvol = 0; /* 0dB */
- t->bass = 0; /* 0dB */
- t->treble = 0; /* 0dB */
- tda9875_set(sd);
-}
-
-
-static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tda9875 *t = to_state(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- {
- int left = (t->lvol+84)*606;
- int right = (t->rvol+84)*606;
-
- ctrl->value=max(left,right);
- return 0;
- }
- case V4L2_CID_AUDIO_BALANCE:
- {
- int left = (t->lvol+84)*606;
- int right = (t->rvol+84)*606;
- int volume = max(left,right);
- int balance = (32768*min(left,right))/
- (volume ? volume : 1);
- ctrl->value=(left<right)?
- (65535-balance) : balance;
- return 0;
- }
- case V4L2_CID_AUDIO_BASS:
- ctrl->value = (t->bass+12)*2427; /* min -12 max +15 */
- return 0;
- case V4L2_CID_AUDIO_TREBLE:
- ctrl->value = (t->treble+12)*2730;/* min -12 max +12 */
- return 0;
- }
- return -EINVAL;
-}
-
-static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct tda9875 *t = to_state(sd);
- int chvol = 0, volume = 0, balance = 0, left, right;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- left = (t->lvol+84)*606;
- right = (t->rvol+84)*606;
-
- volume = max(left,right);
- balance = (32768*min(left,right))/
- (volume ? volume : 1);
- balance =(left<right)?
- (65535-balance) : balance;
-
- volume = ctrl->value;
-
- chvol=1;
- break;
- case V4L2_CID_AUDIO_BALANCE:
- left = (t->lvol+84)*606;
- right = (t->rvol+84)*606;
-
- volume=max(left,right);
-
- balance = ctrl->value;
-
- chvol=1;
- break;
- case V4L2_CID_AUDIO_BASS:
- t->bass = ((ctrl->value/2400)-12) & 0xff;
- if (t->bass > 15)
- t->bass = 15;
- if (t->bass < -12)
- t->bass = -12 & 0xff;
- break;
- case V4L2_CID_AUDIO_TREBLE:
- t->treble = ((ctrl->value/2700)-12) & 0xff;
- if (t->treble > 12)
- t->treble = 12;
- if (t->treble < -12)
- t->treble = -12 & 0xff;
- break;
- default:
- return -EINVAL;
- }
-
- if (chvol) {
- left = (min(65536 - balance,32768) *
- volume) / 32768;
- right = (min(balance,32768) *
- volume) / 32768;
- t->lvol = ((left/606)-84) & 0xff;
- if (t->lvol > 24)
- t->lvol = 24;
- if (t->lvol < -84)
- t->lvol = -84 & 0xff;
-
- t->rvol = ((right/606)-84) & 0xff;
- if (t->rvol > 24)
- t->rvol = 24;
- if (t->rvol < -84)
- t->rvol = -84 & 0xff;
- }
-
- tda9875_set(sd);
- return 0;
-}
-
-static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
- case V4L2_CID_AUDIO_BASS:
- case V4L2_CID_AUDIO_TREBLE:
- return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
- }
- return -EINVAL;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops tda9875_core_ops = {
- .queryctrl = tda9875_queryctrl,
- .g_ctrl = tda9875_g_ctrl,
- .s_ctrl = tda9875_s_ctrl,
-};
-
-static const struct v4l2_subdev_ops tda9875_ops = {
- .core = &tda9875_core_ops,
-};
-
-/* ----------------------------------------------------------------------- */
-
-
-/* *********************** *
- * i2c interface functions *
- * *********************** */
-
-static int tda9875_checkit(struct i2c_client *client, int addr)
-{
- int dic, rev;
-
- dic = i2c_read_register(client, addr, 254);
- rev = i2c_read_register(client, addr, 255);
-
- if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
- v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
- dic == 0 ? "" : "A", rev, addr << 1);
- return 1;
- }
- v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
- addr << 1, dic, rev);
- return 0;
-}
-
-static int tda9875_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct tda9875 *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- if (!tda9875_checkit(client, client->addr))
- return -ENODEV;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
-
- do_tda9875_init(sd);
- return 0;
-}
-
-static int tda9875_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
- do_tda9875_init(sd);
- v4l2_device_unregister_subdev(sd);
- kfree(to_state(sd));
- return 0;
-}
-
-static const struct i2c_device_id tda9875_id[] = {
- { "tda9875", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tda9875_id);
-
-static struct i2c_driver tda9875_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "tda9875",
- },
- .probe = tda9875_probe,
- .remove = tda9875_remove,
- .id_table = tda9875_id,
-};
-
-static __init int init_tda9875(void)
-{
- return i2c_add_driver(&tda9875_driver);
-}
-
-static __exit void exit_tda9875(void)
-{
- i2c_del_driver(&tda9875_driver);
-}
-
-module_init(init_tda9875);
-module_exit(exit_tda9875);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a1ffe18640fe..df33a1d188bb 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -512,19 +512,20 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
int buf_size, gfp_t gfp_flags,
usb_complete_t complete_fn, void *context)
{
- struct urb *urb;
- void *mem;
- int i;
+ int i = 0;
- for (i = 0; i < num; i++) {
- urb = usb_alloc_urb(0, gfp_flags);
+ for (; i < num; i++) {
+ void *mem;
+ struct urb *urb = usb_alloc_urb(0, gfp_flags);
if (urb == NULL)
return i;
mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
&urb->transfer_dma);
- if (mem == NULL)
+ if (mem == NULL) {
+ usb_free_urb(urb);
return i;
+ }
usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
mem, buf_size, complete_fn, context);
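The tlg2300 hunk above fixes a leak: when usb_alloc_coherent() fails, the URB allocated just before it must be freed. A self-contained sketch of that allocate-or-roll-back rule, using generic names rather than the driver's own:

/*
 * Sketch only: allocate one bulk URB plus its coherent buffer, freeing
 * the URB if the buffer allocation fails.  "udev", "ep_addr" and the
 * buffer size are placeholders.
 */
static struct urb *alloc_one_bulk_urb(struct usb_device *udev, int ep_addr,
				      int buf_size, gfp_t gfp,
				      usb_complete_t complete_fn, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, gfp);
	void *mem;

	if (!urb)
		return NULL;
	mem = usb_alloc_coherent(udev, buf_size, gfp, &urb->transfer_dma);
	if (!mem) {
		usb_free_urb(urb);	/* do not leak the URB */
		return NULL;
	}
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
			  mem, buf_size, complete_fn, ctx);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	return urb;
}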
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index e63b40f5a706..c799e4eb6fcd 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -789,7 +789,7 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
* Get the value of a TVP7002 decoder device register.
* Returns zero when successful, -EINVAL if register read fails or
* access to I2C client fails, -EPERM if the call is not allowed
- * by diabled CAP_SYS_ADMIN.
+ * by disabled CAP_SYS_ADMIN.
*/
static int tvp7002_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 3f0871b550ad..810eef43c216 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -407,18 +407,6 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
- if (sd) {
- /* We return errors from v4l2_subdev_call only if we have the
- callback as the .s_config is not mandatory */
- int err = v4l2_subdev_call(sd, core, s_config,
- info->irq, info->platform_data);
-
- if (err && err != -ENOIOCTLCMD) {
- v4l2_device_unregister_subdev(sd);
- sd = NULL;
- }
- }
-
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -428,9 +416,8 @@ error:
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter, const char *client_type,
- int irq, void *platform_data,
u8 addr, const unsigned short *probe_addrs)
{
struct i2c_board_info info;
@@ -440,12 +427,10 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
memset(&info, 0, sizeof(info));
strlcpy(info.type, client_type, sizeof(info.type));
info.addr = addr;
- info.irq = irq;
- info.platform_data = platform_data;
return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 8f81efcfcf56..ef66d2af0c57 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -569,7 +569,7 @@ static int user_to_new(struct v4l2_ext_control *c,
int ret;
u32 size;
- ctrl->has_new = 1;
+ ctrl->is_new = 1;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER64:
ctrl->val64 = c->value64;
@@ -1280,8 +1280,12 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (ctrl->done)
continue;
- for (i = 0; i < master->ncontrols; i++)
- cur_to_new(master->cluster[i]);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ }
+ }
/* Skip button controls and read-only controls. */
if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
@@ -1340,12 +1344,15 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
ctrl = ref->ctrl;
memset(qc, 0, sizeof(*qc));
- qc->id = ctrl->id;
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
qc->minimum = ctrl->minimum;
qc->maximum = ctrl->maximum;
qc->default_value = ctrl->default_value;
- if (qc->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU)
qc->step = 1;
else
qc->step = ctrl->step;
@@ -1645,7 +1652,7 @@ static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
if (ctrl == NULL)
continue;
- if (ctrl->has_new) {
+ if (ctrl->is_new) {
/* Double check this: it may have changed since the
last check in try_or_set_ext_ctrls(). */
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
@@ -1719,13 +1726,13 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_lock(ctrl);
- /* Reset the 'has_new' flags of the cluster */
+ /* Reset the 'is_new' flags of the cluster */
for (j = 0; j < master->ncontrols; j++)
if (master->cluster[j])
- master->cluster[j]->has_new = 0;
+ master->cluster[j]->is_new = 0;
/* Copy the new caller-supplied control values.
- user_to_new() sets 'has_new' to 1. */
+ user_to_new() sets 'is_new' to 1. */
ret = cluster_walk(i, cs, helpers, user_to_new);
if (!ret)
@@ -1820,15 +1827,18 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
int ret;
int i;
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
v4l2_ctrl_lock(ctrl);
- /* Reset the 'has_new' flags of the cluster */
+ /* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
- master->cluster[i]->has_new = 0;
+ master->cluster[i]->is_new = 0;
ctrl->val = *val;
- ctrl->has_new = 1;
+ ctrl->is_new = 1;
ret = try_or_set_control_cluster(master, false);
if (!ret)
ret = try_or_set_control_cluster(master, true);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 359e23290a7e..341764a3a990 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -419,6 +419,10 @@ static int get_index(struct video_device *vdev)
* The registration code assigns minor numbers and device node numbers
* based on the requested type and registers the new device node with
* the kernel.
+ *
+ * This function assumes that struct video_device was zeroed when it
+ * was allocated and does not contain any stale data.
+ *
* An error is returned if no free minor or device node number could be
* found, or if the registration of the device node failed.
*
@@ -440,7 +444,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
int minor_offset = 0;
int minor_cnt = VIDEO_NUM_DEVICES;
const char *name_base;
- void *priv = vdev->dev.p;
/* A minor value of -1 marks this video device as never
having been registered */
@@ -559,10 +562,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
}
/* Part 4: register the device with sysfs */
- memset(&vdev->dev, 0, sizeof(vdev->dev));
- /* The memset above cleared the device's device_private, so
- put back the copy we made earlier. */
- vdev->dev.p = priv;
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
if (vdev->parent)
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 7fe6f92af480..ce64fe16bc60 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -100,6 +100,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
is a platform bus, then it is never deleted. */
if (client)
i2c_unregister_device(client);
+ continue;
}
#endif
#if defined(CONFIG_SPI)
@@ -108,6 +109,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
if (spi)
spi_unregister_device(spi);
+ continue;
}
#endif
}
@@ -126,11 +128,19 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
WARN_ON(sd->v4l2_dev != NULL);
if (!try_module_get(sd->owner))
return -ENODEV;
+ sd->v4l2_dev = v4l2_dev;
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+ if (err)
+ return err;
+ }
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
- if (err)
+ if (err) {
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
return err;
- sd->v4l2_dev = v4l2_dev;
+ }
spin_lock(&v4l2_dev->lock);
list_add_tail(&sd->list, &v4l2_dev->subdevs);
spin_unlock(&v4l2_dev->lock);
@@ -146,6 +156,8 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
spin_lock(&sd->v4l2_dev->lock);
list_del(&sd->list);
spin_unlock(&sd->v4l2_dev->lock);
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
sd->v4l2_dev = NULL;
module_put(sd->owner);
}
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7e47f15f350d..f51327ef6757 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1659,20 +1659,24 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dbg_register *p = arg;
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else if (ops->vidioc_g_register)
- ret = ops->vidioc_g_register(file, fh, p);
+ if (ops->vidioc_g_register) {
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_g_register(file, fh, p);
+ }
break;
}
case VIDIOC_DBG_S_REGISTER:
{
struct v4l2_dbg_register *p = arg;
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else if (ops->vidioc_s_register)
- ret = ops->vidioc_s_register(file, fh, p);
+ if (ops->vidioc_s_register) {
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_s_register(file, fh, p);
+ }
break;
}
#endif
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index e25aca5759fb..2f973cd56408 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -13,14 +13,12 @@
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
#include <media/videobuf-dma-sg.h>
-#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm_qos_params.h>
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 019ee206cbee..fa35639d0c15 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -937,6 +937,7 @@ static void w9966_term(struct w9966 *cam)
parport_unregister_device(cam->pdev);
w9966_set_state(cam, W9966_STATE_PDEV, 0);
}
+ memset(cam, 0, sizeof(*cam));
}
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 9cdc3bb15b15..9f2bac519647 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1041,7 +1041,7 @@ zr36057_init (struct zoran *zr)
/* allocate memory *before* doing anything to the hardware
* in case allocation fails */
zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
- zr->video_dev = kmalloc(sizeof(struct video_device), GFP_KERNEL);
+ zr->video_dev = video_device_alloc();
if (!zr->stat_com || !zr->video_dev) {
dprintk(1,
KERN_ERR
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index c00fe8253c51..8c1d85e27be4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -465,6 +465,7 @@ static void memstick_check(struct work_struct *work)
if (!host->card) {
host->card = card;
if (device_register(&card->dev)) {
+ put_device(&card->dev);
kfree(host->card);
host->card = NULL;
}
@@ -510,14 +511,18 @@ int memstick_add_host(struct memstick_host *host)
{
int rc;
- if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
- return -ENOMEM;
+ while (1) {
+ if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
+ return -ENOMEM;
- spin_lock(&memstick_host_lock);
- rc = idr_get_new(&memstick_host_idr, host, &host->id);
- spin_unlock(&memstick_host_lock);
- if (rc)
- return rc;
+ spin_lock(&memstick_host_lock);
+ rc = idr_get_new(&memstick_host_idr, host, &host->id);
+ spin_unlock(&memstick_host_lock);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ return rc;
+ }
dev_set_name(&host->dev, "memstick%u", host->id);
@@ -616,7 +621,7 @@ static int __init memstick_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("kmemstick");
+ workqueue = create_freezable_workqueue("kmemstick");
if (!workqueue)
return -ENOMEM;
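The memstick_add_host() change above adopts the retry loop that the pre-3.9 IDR API expects: idr_get_new() may return -EAGAIN when the preallocated layer has been consumed by a concurrent caller, in which case idr_pre_get() and idr_get_new() simply have to be retried. A generic sketch of that loop, with placeholder names:

/* Sketch of the old idr_pre_get()/idr_get_new() retry pattern. */
static int example_get_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id, rc;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;		/* preallocation failed */
		spin_lock(lock);
		rc = idr_get_new(idr, ptr, &id);
		spin_unlock(lock);
	} while (rc == -EAGAIN);		/* lost the race, try again */

	return rc ? rc : id;
}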
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 02362eccc588..57b42bfc7d23 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -23,7 +23,6 @@
#define DRIVER_NAME "mspro_block"
-static DEFINE_MUTEX(mspro_block_mutex);
static int major;
module_param(major, int, 0644);
@@ -160,6 +159,13 @@ struct mspro_block_data {
int (*mrq_handler)(struct memstick_dev *card,
struct memstick_request **mrq);
+
+ /* Default request setup function for the data access method preferred
+ * by this host instance.
+ */
+ void (*setup_transfer)(struct memstick_dev *card,
+ u64 offset, size_t length);
+
struct attribute_group attr_group;
struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS];
@@ -181,7 +187,6 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
struct mspro_block_data *msb = disk->private_data;
int rc = -ENXIO;
- mutex_lock(&mspro_block_mutex);
mutex_lock(&mspro_block_disk_lock);
if (msb && msb->card) {
@@ -193,7 +198,6 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
}
mutex_unlock(&mspro_block_disk_lock);
- mutex_unlock(&mspro_block_mutex);
return rc;
}
@@ -225,11 +229,7 @@ static int mspro_block_disk_release(struct gendisk *disk)
static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
{
- int ret;
- mutex_lock(&mspro_block_mutex);
- ret = mspro_block_disk_release(disk);
- mutex_unlock(&mspro_block_mutex);
- return ret;
+ return mspro_block_disk_release(disk);
}
static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -663,14 +663,43 @@ has_int_reg:
}
}
+/*** Transfer setup functions for different access methods. ***/
+
+/** Setup data transfer request for SET_CMD TPC with arguments in card
+ * registers.
+ *
+ * @card Current media instance
+ * @offset Target data offset in bytes
+ * @length Required transfer length in bytes.
+ */
+static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
+ size_t length)
+{
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct mspro_param_register param = {
+ .system = msb->system,
+ .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)),
+ /* ISO C90 warning precludes direct initialization for now. */
+ .data_address = 0,
+ .tpc_param = 0
+ };
+
+ do_div(offset, msb->page_size);
+ param.data_address = cpu_to_be32((uint32_t)offset);
+
+ card->next_request = h_mspro_block_req_init;
+ msb->mrq_handler = h_mspro_block_transfer_data;
+ memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+ &param, sizeof(param));
+}
+
/*** Data transfer ***/
static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- sector_t t_sec;
+ u64 t_off;
unsigned int count;
- struct mspro_param_register param;
try_again:
while (chunk) {
@@ -685,30 +714,17 @@ try_again:
continue;
}
- t_sec = blk_rq_pos(msb->block_req) << 9;
- sector_div(t_sec, msb->page_size);
-
+ t_off = blk_rq_pos(msb->block_req);
+ t_off <<= 9;
count = blk_rq_bytes(msb->block_req);
- count /= msb->page_size;
- param.system = msb->system;
- param.data_count = cpu_to_be16(count);
- param.data_address = cpu_to_be32((uint32_t)t_sec);
- param.tpc_param = 0;
+ msb->setup_transfer(card, t_off, count);
msb->data_dir = rq_data_dir(msb->block_req);
msb->transfer_cmd = msb->data_dir == READ
? MSPRO_CMD_READ_DATA
: MSPRO_CMD_WRITE_DATA;
- dev_dbg(&card->dev, "data transfer: cmd %x, "
- "lba %x, count %x\n", msb->transfer_cmd,
- be32_to_cpu(param.data_address), count);
-
- card->next_request = h_mspro_block_req_init;
- msb->mrq_handler = h_mspro_block_transfer_data;
- memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
- &param, sizeof(param));
memstick_new_req(card->host);
return 0;
}
@@ -963,18 +979,16 @@ try_again:
static int mspro_block_read_attributes(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct mspro_param_register param = {
- .system = msb->system,
- .data_count = cpu_to_be16(1),
- .data_address = 0,
- .tpc_param = 0
- };
struct mspro_attribute *attr = NULL;
struct mspro_sys_attr *s_attr = NULL;
unsigned char *buffer = NULL;
int cnt, rc, attr_count;
- unsigned int addr;
- unsigned short page_count;
+ /* While physical device offsets such as attr_offset and attr_len
+ * would normally need a large numeric type, the attributes are
+ * located close enough to the beginning of the device that an
+ * unsigned int is sufficient here.
+ */
+ unsigned int addr, attr_offset = 0, attr_len = msb->page_size;
attr = kmalloc(msb->page_size, GFP_KERNEL);
if (!attr)
@@ -987,10 +1001,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
msb->data_dir = READ;
msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
- card->next_request = h_mspro_block_req_init;
- msb->mrq_handler = h_mspro_block_transfer_data;
- memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
- sizeof(param));
+ msb->setup_transfer(card, attr_offset, attr_len);
+
memstick_new_req(card->host);
wait_for_completion(&card->mrq_complete);
if (card->current_mrq.error) {
@@ -1021,13 +1033,12 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
}
msb->attr_group.name = "media_attributes";
- buffer = kmalloc(msb->page_size, GFP_KERNEL);
+ buffer = kmalloc(attr_len, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
goto out_free_attr;
}
- memcpy(buffer, (char *)attr, msb->page_size);
- page_count = 1;
+ memcpy(buffer, (char *)attr, attr_len);
for (cnt = 0; cnt < attr_count; ++cnt) {
s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
@@ -1038,9 +1049,10 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr;
addr = be32_to_cpu(attr->entries[cnt].address);
- rc = be32_to_cpu(attr->entries[cnt].size);
+ s_attr->size = be32_to_cpu(attr->entries[cnt].size);
dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, "
- "size %x\n", cnt, attr->entries[cnt].id, addr, rc);
+ "size %zx\n", cnt, attr->entries[cnt].id, addr,
+ s_attr->size);
s_attr->id = attr->entries[cnt].id;
if (mspro_block_attr_name(s_attr->id))
snprintf(s_attr->name, sizeof(s_attr->name), "%s",
@@ -1054,57 +1066,47 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
s_attr->dev_attr.attr.mode = S_IRUGO;
s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
- if (!rc)
+ if (!s_attr->size)
continue;
- s_attr->size = rc;
- s_attr->data = kmalloc(rc, GFP_KERNEL);
+ s_attr->data = kmalloc(s_attr->size, GFP_KERNEL);
if (!s_attr->data) {
rc = -ENOMEM;
goto out_free_buffer;
}
- if (((addr / msb->page_size)
- == be32_to_cpu(param.data_address))
- && (((addr + rc - 1) / msb->page_size)
- == be32_to_cpu(param.data_address))) {
+ if (((addr / msb->page_size) == (attr_offset / msb->page_size))
+ && (((addr + s_attr->size - 1) / msb->page_size)
+ == (attr_offset / msb->page_size))) {
memcpy(s_attr->data, buffer + addr % msb->page_size,
- rc);
+ s_attr->size);
continue;
}
- if (page_count <= (rc / msb->page_size)) {
+ attr_offset = (addr / msb->page_size) * msb->page_size;
+
+ if ((attr_offset + attr_len) < (addr + s_attr->size)) {
kfree(buffer);
- page_count = (rc / msb->page_size) + 1;
- buffer = kmalloc(page_count * msb->page_size,
- GFP_KERNEL);
+ attr_len = (((addr + s_attr->size) / msb->page_size)
+ + 1) * msb->page_size - attr_offset;
+ buffer = kmalloc(attr_len, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
goto out_free_attr;
}
}
- param.system = msb->system;
- param.data_count = cpu_to_be16((rc / msb->page_size) + 1);
- param.data_address = cpu_to_be32(addr / msb->page_size);
- param.tpc_param = 0;
-
- sg_init_one(&msb->req_sg[0], buffer,
- be16_to_cpu(param.data_count) * msb->page_size);
+ sg_init_one(&msb->req_sg[0], buffer, attr_len);
msb->seg_count = 1;
msb->current_seg = 0;
msb->current_page = 0;
msb->data_dir = READ;
msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
- dev_dbg(&card->dev, "reading attribute pages %x, %x\n",
- be32_to_cpu(param.data_address),
- be16_to_cpu(param.data_count));
+ dev_dbg(&card->dev, "reading attribute range %x, %x\n",
+ attr_offset, attr_len);
- card->next_request = h_mspro_block_req_init;
- msb->mrq_handler = h_mspro_block_transfer_data;
- memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
- (char *)&param, sizeof(param));
+ msb->setup_transfer(card, attr_offset, attr_len);
memstick_new_req(card->host);
wait_for_completion(&card->mrq_complete);
if (card->current_mrq.error) {
@@ -1112,7 +1114,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
goto out_free_buffer;
}
- memcpy(s_attr->data, buffer + addr % msb->page_size, rc);
+ memcpy(s_attr->data, buffer + addr % msb->page_size,
+ s_attr->size);
}
rc = 0;
@@ -1130,6 +1133,8 @@ static int mspro_block_init_card(struct memstick_dev *card)
int rc = 0;
msb->system = MEMSTICK_SYS_SERIAL;
+ msb->setup_transfer = h_mspro_block_setup_cmd;
+
card->reg_addr.r_offset = offsetof(struct mspro_register, status);
card->reg_addr.r_length = sizeof(struct ms_status_register);
card->reg_addr.w_offset = offsetof(struct mspro_register, param);
@@ -1206,10 +1211,12 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->page_size = be16_to_cpu(sys_info->unit_size);
- if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL))
+ mutex_lock(&mspro_block_disk_lock);
+ if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
+ mutex_unlock(&mspro_block_disk_lock);
return -ENOMEM;
+ }
- mutex_lock(&mspro_block_disk_lock);
rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
mutex_unlock(&mspro_block_disk_lock);
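The new h_mspro_block_setup_cmd() above converts a byte offset into a page number with do_div(), which divides a u64 in place (returning the remainder) and so avoids a raw 64/32 division that would not link on 32-bit builds. A minimal sketch of just that conversion, with an illustrative helper name:

#include <asm/div64.h>

/* Sketch: convert a 64-bit byte offset to a 32-bit page number. */
static u32 example_offset_to_page(u64 offset, unsigned int page_size)
{
	do_div(offset, page_size);	/* offset now holds offset / page_size */
	return (u32)offset;
}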
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f2b894cd8b02..d89d925caecf 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -61,6 +61,7 @@ struct jmb38x_ms_host {
struct memstick_request *req;
unsigned char cmd_flags;
unsigned char io_pos;
+ unsigned char ifmode;
unsigned int io_word[2];
};
@@ -136,15 +137,14 @@ struct jmb38x_ms {
#define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
#define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
+#define CLOCK_CONTROL_BY_MMIO 0x00000008
#define CLOCK_CONTROL_40MHZ 0x00000001
-#define CLOCK_CONTROL_50MHZ 0x0000000a
-#define CLOCK_CONTROL_60MHZ 0x00000008
-#define CLOCK_CONTROL_62_5MHZ 0x0000000c
+#define CLOCK_CONTROL_50MHZ 0x00000002
+#define CLOCK_CONTROL_60MHZ 0x00000010
+#define CLOCK_CONTROL_62_5MHZ 0x00000004
#define CLOCK_CONTROL_OFF 0x00000000
#define PCI_CTL_CLOCK_DLY_ADDR 0x000000b0
-#define PCI_CTL_CLOCK_DLY_MASK_A 0x00000f00
-#define PCI_CTL_CLOCK_DLY_MASK_B 0x0000f000
enum {
CMD_READY = 0x01,
@@ -390,8 +390,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
if (host->req->data_dir == READ)
cmd |= TPC_DIR;
- if (host->req->need_card_int)
- cmd |= TPC_WAIT_INT;
+
+ if (host->req->need_card_int) {
+ if (host->ifmode == MEMSTICK_SERIAL)
+ cmd |= TPC_GET_INT;
+ else
+ cmd |= TPC_WAIT_INT;
+ }
data = host->req->data;
@@ -529,7 +534,10 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
if (irq_status & INT_STATUS_ANY_ERR) {
if (irq_status & INT_STATUS_CRC_ERR)
host->req->error = -EILSEQ;
- else
+ else if (irq_status & INT_STATUS_TPC_ERR) {
+ dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
+ jmb38x_ms_complete_cmd(msh, 0);
+ } else
host->req->error = -ETIME;
} else {
if (host->cmd_flags & DMA_DATA) {
@@ -644,7 +652,6 @@ static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
ndelay(20);
}
dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
- /* return -EIO; */
reset_next:
writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
@@ -675,7 +682,7 @@ static int jmb38x_ms_set_param(struct memstick_host *msh,
{
struct jmb38x_ms_host *host = memstick_priv(msh);
unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
- unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
+ unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
int rc = 0;
switch (param) {
@@ -687,9 +694,7 @@ static int jmb38x_ms_set_param(struct memstick_host *msh,
host_ctl = 7;
host_ctl |= HOST_CONTROL_POWER_EN
- | HOST_CONTROL_CLOCK_EN
- | HOST_CONTROL_HW_OC_P
- | HOST_CONTROL_TDELAY_EN;
+ | HOST_CONTROL_CLOCK_EN;
writel(host_ctl, host->addr + HOST_CONTROL);
writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
@@ -712,46 +717,88 @@ static int jmb38x_ms_set_param(struct memstick_host *msh,
return -EINVAL;
break;
case MEMSTICK_INTERFACE:
+ dev_dbg(&host->chip->pdev->dev,
+ "Set Host Interface Mode to %d\n", value);
+ host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
+ HOST_CONTROL_REO);
+ host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
- pci_read_config_dword(host->chip->pdev,
- PCI_CTL_CLOCK_DLY_ADDR,
- &clock_delay);
- clock_delay &= host->id ? ~PCI_CTL_CLOCK_DLY_MASK_B
- : ~PCI_CTL_CLOCK_DLY_MASK_A;
if (value == MEMSTICK_SERIAL) {
- host_ctl &= ~HOST_CONTROL_FAST_CLK;
- host_ctl &= ~HOST_CONTROL_REO;
host_ctl |= HOST_CONTROL_IF_SERIAL
<< HOST_CONTROL_IF_SHIFT;
host_ctl |= HOST_CONTROL_REI;
- clock_ctl = CLOCK_CONTROL_40MHZ;
+ clock_ctl |= CLOCK_CONTROL_40MHZ;
+ clock_delay = 0;
} else if (value == MEMSTICK_PAR4) {
- host_ctl |= HOST_CONTROL_FAST_CLK | HOST_CONTROL_REO;
+ host_ctl |= HOST_CONTROL_FAST_CLK;
host_ctl |= HOST_CONTROL_IF_PAR4
<< HOST_CONTROL_IF_SHIFT;
- host_ctl &= ~HOST_CONTROL_REI;
- clock_ctl = CLOCK_CONTROL_40MHZ;
- clock_delay |= host->id ? (4 << 12) : (4 << 8);
+ host_ctl |= HOST_CONTROL_REO;
+ clock_ctl |= CLOCK_CONTROL_40MHZ;
+ clock_delay = 4;
} else if (value == MEMSTICK_PAR8) {
host_ctl |= HOST_CONTROL_FAST_CLK;
host_ctl |= HOST_CONTROL_IF_PAR8
<< HOST_CONTROL_IF_SHIFT;
- host_ctl &= ~(HOST_CONTROL_REI | HOST_CONTROL_REO);
- clock_ctl = CLOCK_CONTROL_50MHZ;
+ clock_ctl |= CLOCK_CONTROL_50MHZ;
+ clock_delay = 0;
} else
return -EINVAL;
writel(host_ctl, host->addr + HOST_CONTROL);
+ writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
writel(clock_ctl, host->addr + CLOCK_CONTROL);
- pci_write_config_dword(host->chip->pdev,
- PCI_CTL_CLOCK_DLY_ADDR,
- clock_delay);
+ pci_write_config_byte(host->chip->pdev,
+ PCI_CTL_CLOCK_DLY_ADDR + 1,
+ clock_delay);
+ host->ifmode = value;
break;
};
return 0;
}
+#define PCI_PMOS0_CONTROL 0xae
+#define PMOS0_ENABLE 0x01
+#define PMOS0_OVERCURRENT_LEVEL_2_4V 0x06
+#define PMOS0_EN_OVERCURRENT_DEBOUNCE 0x40
+#define PMOS0_SW_LED_POLARITY_ENABLE 0x80
+#define PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
+ PMOS0_OVERCURRENT_LEVEL_2_4V)
+#define PCI_PMOS1_CONTROL 0xbd
+#define PMOS1_ACTIVE_BITS 0x4a
+#define PCI_CLOCK_CTL 0xb9
+
+static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
+{
+ unsigned char val;
+
+ pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
+ if (flag)
+ val |= PMOS0_ACTIVE_BITS;
+ else
+ val &= ~PMOS0_ACTIVE_BITS;
+ pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
+ dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);
+
+ if (pci_resource_flags(pdev, 1)) {
+ pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
+ if (flag)
+ val |= PMOS1_ACTIVE_BITS;
+ else
+ val &= ~PMOS1_ACTIVE_BITS;
+ pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
+ dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
+ }
+
+ pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
+ pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
+ pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
+ dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
@@ -784,8 +831,7 @@ static int jmb38x_ms_resume(struct pci_dev *dev)
return rc;
pci_set_master(dev);
- pci_read_config_dword(dev, 0xac, &rc);
- pci_write_config_dword(dev, 0xac, rc | 0x00470000);
+ jmb38x_ms_pmos(dev, 1);
for (rc = 0; rc < jm->host_cnt; ++rc) {
if (!jm->hosts[rc])
@@ -894,8 +940,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
goto err_out;
}
- pci_read_config_dword(pdev, 0xac, &rc);
- pci_write_config_dword(pdev, 0xac, rc | 0x00470000);
+ jmb38x_ms_pmos(pdev, 1);
cnt = jmb38x_ms_count_slots(pdev);
if (!cnt) {
@@ -976,6 +1021,8 @@ static void jmb38x_ms_remove(struct pci_dev *dev)
jmb38x_ms_free_host(jm->hosts[cnt]);
}
+ jmb38x_ms_pmos(dev, 0);
+
pci_set_drvdata(dev, NULL);
pci_release_regions(dev);
pci_disable_device(dev);
@@ -983,8 +1030,9 @@ static void jmb38x_ms_remove(struct pci_dev *dev)
}
static struct pci_device_id jmb38x_ms_id_tbl [] = {
- { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
{ }
};
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 691620dbedd2..8b04810df469 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -268,7 +268,7 @@
/* Compatibility Error : IR Disabled */
#define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED (0x00010030)
-/* Compatibility Error : Inquiry Comand failed */
+/* Compatibility Error : Inquiry Command failed */
#define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED (0x00010031)
/* Compatibility Error : Device not direct access device */
#define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS (0x00010032)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3e57b61ca446..3358c0af3466 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7977,7 +7977,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
NULL, /* 2Eh */
NULL, /* 2Fh */
"Compatibility Error: IR Disabled", /* 30h */
- "Compatibility Error: Inquiry Comand Failed", /* 31h */
+ "Compatibility Error: Inquiry Command Failed", /* 31h */
"Compatibility Error: Device not Direct Access "
"Device ", /* 32h */
"Compatibility Error: Removable Device Found", /* 33h */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f22948477..1735c84ff757 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed90aef..e8deb8ed0499 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
}
static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+ fasync_helper(-1, filep, 0, &async_queue);
+ return 0;
+}
+
+static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
+ .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d48c2c6058e1..8aefb1829fcd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1146,7 +1146,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
*
* This function will delete scheduled target reset from the list and
* try to send next target reset. This will be called from completion
- * context of any Task managment command.
+ * context of any Task management command.
*/
void
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53d1ece..0d9b82a44540 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
}
out:
- printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+ SCpnt, SCpnt->serial_number);
return retval;
}
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = SUCCESS;
+ retval = 0;
goto out;
}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f87a9d405a5e..ae7cad185898 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -309,7 +309,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
* @ireq: I2O block request
* @mptr: message body pointer
*
- * Builds the SG list and map it to be accessable by the controller.
+ *	Builds the SG list and maps it to be accessible by the controller.
*
* Returns 0 on failure or 1 on success.
*/
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 20895e7a99c9..793300c554b4 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -361,12 +361,6 @@ static struct pm860x_irq_data pm860x_irqs[] = {
},
};
-static inline struct pm860x_irq_data *irq_to_pm860x(struct pm860x_chip *chip,
- int irq)
-{
- return &pm860x_irqs[irq - chip->irq_base];
-}
-
static irqreturn_t pm860x_irq(int irq, void *data)
{
struct pm860x_chip *chip = data;
@@ -388,16 +382,16 @@ static irqreturn_t pm860x_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void pm860x_irq_lock(unsigned int irq)
+static void pm860x_irq_lock(struct irq_data *data)
{
- struct pm860x_chip *chip = get_irq_chip_data(irq);
+ struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
mutex_lock(&chip->irq_lock);
}
-static void pm860x_irq_sync_unlock(unsigned int irq)
+static void pm860x_irq_sync_unlock(struct irq_data *data)
{
- struct pm860x_chip *chip = get_irq_chip_data(irq);
+ struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
struct pm860x_irq_data *irq_data;
struct i2c_client *i2c;
static unsigned char cached[3] = {0x0, 0x0, 0x0};
@@ -439,25 +433,25 @@ static void pm860x_irq_sync_unlock(unsigned int irq)
mutex_unlock(&chip->irq_lock);
}
-static void pm860x_irq_enable(unsigned int irq)
+static void pm860x_irq_enable(struct irq_data *data)
{
- struct pm860x_chip *chip = get_irq_chip_data(irq);
- pm860x_irqs[irq - chip->irq_base].enable
- = pm860x_irqs[irq - chip->irq_base].offs;
+ struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+ pm860x_irqs[data->irq - chip->irq_base].enable
+ = pm860x_irqs[data->irq - chip->irq_base].offs;
}
-static void pm860x_irq_disable(unsigned int irq)
+static void pm860x_irq_disable(struct irq_data *data)
{
- struct pm860x_chip *chip = get_irq_chip_data(irq);
- pm860x_irqs[irq - chip->irq_base].enable = 0;
+ struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+ pm860x_irqs[data->irq - chip->irq_base].enable = 0;
}
static struct irq_chip pm860x_irq_chip = {
.name = "88pm860x",
- .bus_lock = pm860x_irq_lock,
- .bus_sync_unlock = pm860x_irq_sync_unlock,
- .enable = pm860x_irq_enable,
- .disable = pm860x_irq_disable,
+ .irq_bus_lock = pm860x_irq_lock,
+ .irq_bus_sync_unlock = pm860x_irq_sync_unlock,
+ .irq_enable = pm860x_irq_enable,
+ .irq_disable = pm860x_irq_disable,
};
static int __devinit device_gpadc_init(struct pm860x_chip *chip,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index da9d2971102e..fd018366d670 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -496,13 +496,13 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500
+ depends on GENERIC_HARDIRQS && ABX500_CORE
select MFD_CORE
help
Select this option to enable access to AB8500 power management
- chip. This connects to U8500 either on the SSP/SPI bus
- or the I2C bus via PRCMU. It also adds the irq_chip
- parts for handling the Mixed Signal chip events.
+ chip. This connects to U8500 either on the SSP/SPI bus (deprecated
+ since hardware version v1.0) or the I2C bus via PRCMU. It also adds
+ the irq_chip parts for handling the Mixed Signal chip events.
	  This chip embeds various other multimedia functionalities as well.
config AB8500_I2C_CORE
@@ -537,6 +537,14 @@ config AB3550_CORE
LEDs, vibrator, system power and temperature, power management
and ALSA sound.
+config MFD_CS5535
+ tristate "Support for CS5535 and CS5536 southbridge core functions"
+ select MFD_CORE
+ depends on PCI
+ ---help---
+ This is the core driver for CS5535/CS5536 MFD functions. This is
+ necessary for using the board's GPIO and MFGPT functionality.
+
config MFD_TIMBERDALE
tristate "Support for the Timberdale FPGA"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 848e7eac75aa..a54e2c7c6a1c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
@@ -82,3 +82,4 @@ obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
obj-$(CONFIG_MFD_VX855) += vx855.o
obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
+obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 8a98739e6d9c..5fbca346b998 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1159,15 +1159,16 @@ static void ab3550_mask_work(struct work_struct *work)
}
}
-static void ab3550_mask(unsigned int irq)
+static void ab3550_mask(struct irq_data *data)
{
unsigned long flags;
struct ab3550 *ab;
struct ab3550_platform_data *plf_data;
+ int irq;
- ab = get_irq_chip_data(irq);
+ ab = irq_data_get_irq_chip_data(data);
plf_data = ab->i2c_client[0]->dev.platform_data;
- irq -= plf_data->irq.base;
+ irq = data->irq - plf_data->irq.base;
spin_lock_irqsave(&ab->event_lock, flags);
ab->event_mask[irq / 8] |= BIT(irq % 8);
@@ -1176,15 +1177,16 @@ static void ab3550_mask(unsigned int irq)
schedule_work(&ab->mask_work);
}
-static void ab3550_unmask(unsigned int irq)
+static void ab3550_unmask(struct irq_data *data)
{
unsigned long flags;
struct ab3550 *ab;
struct ab3550_platform_data *plf_data;
+ int irq;
- ab = get_irq_chip_data(irq);
+ ab = irq_data_get_irq_chip_data(data);
plf_data = ab->i2c_client[0]->dev.platform_data;
- irq -= plf_data->irq.base;
+ irq = data->irq - plf_data->irq.base;
spin_lock_irqsave(&ab->event_lock, flags);
ab->event_mask[irq / 8] &= ~BIT(irq % 8);
@@ -1193,20 +1195,16 @@ static void ab3550_unmask(unsigned int irq)
schedule_work(&ab->mask_work);
}
-static void noop(unsigned int irq)
+static void noop(struct irq_data *data)
{
}
static struct irq_chip ab3550_irq_chip = {
.name = "ab3550-core", /* Keep the same name as the request */
- .startup = NULL, /* defaults to enable */
- .shutdown = NULL, /* defaults to disable */
- .enable = NULL, /* defaults to unmask */
- .disable = ab3550_mask, /* No default to mask in chip.c */
- .ack = noop,
- .mask = ab3550_mask,
- .unmask = ab3550_unmask,
- .end = NULL,
+ .irq_disable = ab3550_mask, /* No default to mask in chip.c */
+ .irq_ack = noop,
+ .irq_mask = ab3550_mask,
+ .irq_unmask = ab3550_unmask,
};
struct ab_family_id {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d9640a623ff4..b6887014d687 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -52,6 +52,7 @@
#define AB8500_IT_LATCH8_REG 0x27
#define AB8500_IT_LATCH9_REG 0x28
#define AB8500_IT_LATCH10_REG 0x29
+#define AB8500_IT_LATCH12_REG 0x2B
#define AB8500_IT_LATCH19_REG 0x32
#define AB8500_IT_LATCH20_REG 0x33
#define AB8500_IT_LATCH21_REG 0x34
@@ -98,13 +99,17 @@
* offset 0.
*/
static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
- 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
+ 0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21,
};
static int ab8500_get_chip_id(struct device *dev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return (int)ab8500->chip_id;
+ struct ab8500 *ab8500;
+
+ if (!dev)
+ return -EINVAL;
+ ab8500 = dev_get_drvdata(dev->parent);
+ return ab8500 ? (int)ab8500->chip_id : -EINVAL;
}
static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -228,16 +233,16 @@ static struct abx500_ops ab8500_ops = {
.startup_irq_enabled = NULL,
};
-static void ab8500_irq_lock(unsigned int irq)
+static void ab8500_irq_lock(struct irq_data *data)
{
- struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500->irq_lock);
}
-static void ab8500_irq_sync_unlock(unsigned int irq)
+static void ab8500_irq_sync_unlock(struct irq_data *data)
{
- struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
@@ -248,6 +253,10 @@ static void ab8500_irq_sync_unlock(unsigned int irq)
if (new == old)
continue;
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+ if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+ continue;
+
ab8500->oldmask[i] = new;
reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
@@ -257,20 +266,20 @@ static void ab8500_irq_sync_unlock(unsigned int irq)
mutex_unlock(&ab8500->irq_lock);
}
-static void ab8500_irq_mask(unsigned int irq)
+static void ab8500_irq_mask(struct irq_data *data)
{
- struct ab8500 *ab8500 = get_irq_chip_data(irq);
- int offset = irq - ab8500->irq_base;
+ struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+ int offset = data->irq - ab8500->irq_base;
int index = offset / 8;
int mask = 1 << (offset % 8);
ab8500->mask[index] |= mask;
}
-static void ab8500_irq_unmask(unsigned int irq)
+static void ab8500_irq_unmask(struct irq_data *data)
{
- struct ab8500 *ab8500 = get_irq_chip_data(irq);
- int offset = irq - ab8500->irq_base;
+ struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+ int offset = data->irq - ab8500->irq_base;
int index = offset / 8;
int mask = 1 << (offset % 8);
@@ -279,10 +288,10 @@ static void ab8500_irq_unmask(unsigned int irq)
static struct irq_chip ab8500_irq_chip = {
.name = "ab8500",
- .bus_lock = ab8500_irq_lock,
- .bus_sync_unlock = ab8500_irq_sync_unlock,
- .mask = ab8500_irq_mask,
- .unmask = ab8500_irq_unmask,
+ .irq_bus_lock = ab8500_irq_lock,
+ .irq_bus_sync_unlock = ab8500_irq_sync_unlock,
+ .irq_mask = ab8500_irq_mask,
+ .irq_unmask = ab8500_irq_unmask,
};
static irqreturn_t ab8500_irq(int irq, void *dev)
@@ -297,6 +306,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
int status;
u8 value;
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+ if (regoffset == 11 && ab8500->chip_id < 0x20)
+ continue;
+
status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
AB8500_IT_LATCH1_REG + regoffset, &value);
if (status < 0 || value == 0)
@@ -393,13 +406,195 @@ static struct resource ab8500_poweronkey_db_resources[] = {
},
};
+static struct resource ab8500_bm_resources[] = {
+ {
+ .name = "MAIN_EXT_CH_NOT_OK",
+ .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "BATT_OVV",
+ .start = AB8500_INT_BATT_OVV,
+ .end = AB8500_INT_BATT_OVV,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_UNPLUG_DET",
+ .start = AB8500_INT_MAIN_CH_UNPLUG_DET,
+ .end = AB8500_INT_MAIN_CH_UNPLUG_DET,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CHARGE_PLUG_DET",
+ .start = AB8500_INT_MAIN_CH_PLUG_DET,
+ .end = AB8500_INT_MAIN_CH_PLUG_DET,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS_DET_F",
+ .start = AB8500_INT_VBUS_DET_F,
+ .end = AB8500_INT_VBUS_DET_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS_DET_R",
+ .start = AB8500_INT_VBUS_DET_R,
+ .end = AB8500_INT_VBUS_DET_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "BAT_CTRL_INDB",
+ .start = AB8500_INT_BAT_CTRL_INDB,
+ .end = AB8500_INT_BAT_CTRL_INDB,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CH_WD_EXP",
+ .start = AB8500_INT_CH_WD_EXP,
+ .end = AB8500_INT_CH_WD_EXP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS_OVV",
+ .start = AB8500_INT_VBUS_OVV,
+ .end = AB8500_INT_VBUS_OVV,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "NCONV_ACCU",
+ .start = AB8500_INT_CCN_CONV_ACC,
+ .end = AB8500_INT_CCN_CONV_ACC,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "LOW_BAT_F",
+ .start = AB8500_INT_LOW_BAT_F,
+ .end = AB8500_INT_LOW_BAT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "LOW_BAT_R",
+ .start = AB8500_INT_LOW_BAT_R,
+ .end = AB8500_INT_LOW_BAT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "BTEMP_LOW",
+ .start = AB8500_INT_BTEMP_LOW,
+ .end = AB8500_INT_BTEMP_LOW,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "BTEMP_HIGH",
+ .start = AB8500_INT_BTEMP_HIGH,
+ .end = AB8500_INT_BTEMP_HIGH,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKR",
+ .start = AB8500_INT_USB_CHARGER_NOT_OK,
+ .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGE_DET_DONE",
+ .start = AB8500_INT_USB_CHG_DET_DONE,
+ .end = AB8500_INT_USB_CHG_DET_DONE,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CH_TH_PROT_R",
+ .start = AB8500_INT_USB_CH_TH_PROT_R,
+ .end = AB8500_INT_USB_CH_TH_PROT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_R",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKF",
+ .start = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource ab8500_debug_resources[] = {
+ {
+ .name = "IRQ_FIRST",
+ .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IRQ_LAST",
+ .start = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource ab8500_usb_resources[] = {
+ {
+ .name = "ID_WAKEUP_R",
+ .start = AB8500_INT_ID_WAKEUP_R,
+ .end = AB8500_INT_ID_WAKEUP_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ID_WAKEUP_F",
+ .start = AB8500_INT_ID_WAKEUP_F,
+ .end = AB8500_INT_ID_WAKEUP_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS_DET_F",
+ .start = AB8500_INT_VBUS_DET_F,
+ .end = AB8500_INT_VBUS_DET_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS_DET_R",
+ .start = AB8500_INT_VBUS_DET_R,
+ .end = AB8500_INT_VBUS_DET_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_LINK_STATUS",
+ .start = AB8500_INT_USB_LINK_STATUS,
+ .end = AB8500_INT_USB_LINK_STATUS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource ab8500_temp_resources[] = {
+ {
+ .name = "AB8500_TEMP_WARM",
+ .start = AB8500_INT_TEMP_WARM,
+ .end = AB8500_INT_TEMP_WARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
+ .num_resources = ARRAY_SIZE(ab8500_debug_resources),
+ .resources = ab8500_debug_resources,
},
#endif
{
+ .name = "ab8500-sysctrl",
+ },
+ {
+ .name = "ab8500-regulator",
+ },
+ {
.name = "ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
.resources = ab8500_gpadc_resources,
@@ -410,6 +605,22 @@ static struct mfd_cell ab8500_devs[] = {
.resources = ab8500_rtc_resources,
},
{
+ .name = "ab8500-bm",
+ .num_resources = ARRAY_SIZE(ab8500_bm_resources),
+ .resources = ab8500_bm_resources,
+ },
+ { .name = "ab8500-codec", },
+ {
+ .name = "ab8500-usb",
+ .num_resources = ARRAY_SIZE(ab8500_usb_resources),
+ .resources = ab8500_usb_resources,
+ },
+ {
+ .name = "ab8500-poweron-key",
+ .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+ .resources = ab8500_poweronkey_db_resources,
+ },
+ {
.name = "ab8500-pwm",
.id = 1,
},
@@ -421,17 +632,37 @@ static struct mfd_cell ab8500_devs[] = {
.name = "ab8500-pwm",
.id = 3,
},
- { .name = "ab8500-charger", },
- { .name = "ab8500-audio", },
- { .name = "ab8500-usb", },
- { .name = "ab8500-regulator", },
+ { .name = "ab8500-leds", },
{
- .name = "ab8500-poweron-key",
- .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
- .resources = ab8500_poweronkey_db_resources,
+ .name = "ab8500-denc",
+ },
+ {
+ .name = "ab8500-temp",
+ .num_resources = ARRAY_SIZE(ab8500_temp_resources),
+ .resources = ab8500_temp_resources,
},
};
+static ssize_t show_chip_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ab8500 *ab8500;
+
+ ab8500 = dev_get_drvdata(dev);
+ return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+
+static struct attribute *ab8500_sysfs_entries[] = {
+ &dev_attr_chip_id.attr,
+ NULL,
+};
+
+static struct attribute_group ab8500_attr_group = {
+ .attrs = ab8500_sysfs_entries,
+};
+
int __devinit ab8500_init(struct ab8500 *ab8500)
{
struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
@@ -454,8 +685,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
* 0x0 - Early Drop
* 0x10 - Cut 1.0
* 0x11 - Cut 1.1
+ * 0x20 - Cut 2.0
*/
- if (value == 0x0 || value == 0x10 || value == 0x11) {
+ if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
ab8500->revision = value;
dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
} else {
@@ -468,18 +700,16 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
plat->init(ab8500);
/* Clear and mask all interrupts */
- for (i = 0; i < 10; i++) {
- get_register_interruptible(ab8500, AB8500_INTERRUPT,
- AB8500_IT_LATCH1_REG + i, &value);
- set_register_interruptible(ab8500, AB8500_INTERRUPT,
- AB8500_IT_MASK1_REG + i, 0xff);
- }
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+ if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+ continue;
- for (i = 18; i < 24; i++) {
get_register_interruptible(ab8500, AB8500_INTERRUPT,
- AB8500_IT_LATCH1_REG + i, &value);
+ AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[i],
+ &value);
set_register_interruptible(ab8500, AB8500_INTERRUPT,
- AB8500_IT_MASK1_REG + i, 0xff);
+ AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i], 0xff);
}
ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
@@ -495,7 +725,8 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
return ret;
ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
- IRQF_ONESHOT, "ab8500", ab8500);
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
goto out_removeirq;
}
@@ -506,6 +737,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
if (ret)
goto out_freeirq;
+ ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group);
+ if (ret)
+ dev_err(ab8500->dev, "error creating sysfs entries\n");
+
return ret;
out_freeirq:
@@ -519,6 +754,7 @@ out_removeirq:
int __devexit ab8500_exit(struct ab8500 *ab8500)
{
+ sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
mfd_remove_devices(ab8500->dev);
if (ab8500->irq_base) {
free_irq(ab8500->irq, ab8500);
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d1e05a39815..3c1541ae7223 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -24,9 +24,9 @@ static u32 debug_address;
* @perm: access permissions for the range
*/
struct ab8500_reg_range {
- u8 first;
- u8 last;
- u8 perm;
+ u8 first;
+ u8 last;
+ u8 perm;
};
/**
@@ -36,9 +36,9 @@ struct ab8500_reg_range {
* @range: the list of register ranges
*/
struct ab8500_i2c_ranges {
- u8 num_ranges;
- u8 bankid;
- const struct ab8500_reg_range *range;
+ u8 num_ranges;
+ u8 bankid;
+ const struct ab8500_reg_range *range;
};
#define AB8500_NAME_STRING "ab8500"
@@ -47,521 +47,521 @@ struct ab8500_i2c_ranges {
#define AB8500_REV_REG 0x80
static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
- [0x0] = {
- .num_ranges = 0,
- .range = 0,
- },
- [AB8500_SYS_CTRL1_BLOCK] = {
- .num_ranges = 3,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x02,
- },
- {
- .first = 0x42,
- .last = 0x42,
- },
- {
- .first = 0x80,
- .last = 0x81,
- },
- },
- },
- [AB8500_SYS_CTRL2_BLOCK] = {
- .num_ranges = 4,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0D,
- },
- {
- .first = 0x0F,
- .last = 0x17,
- },
- {
- .first = 0x30,
- .last = 0x30,
- },
- {
- .first = 0x32,
- .last = 0x33,
- },
- },
- },
- [AB8500_REGU_CTRL1] = {
- .num_ranges = 3,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x00,
- },
- {
- .first = 0x03,
- .last = 0x10,
- },
- {
- .first = 0x80,
- .last = 0x84,
- },
- },
- },
- [AB8500_REGU_CTRL2] = {
- .num_ranges = 5,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x15,
- },
- {
- .first = 0x17,
- .last = 0x19,
- },
- {
- .first = 0x1B,
- .last = 0x1D,
- },
- {
- .first = 0x1F,
- .last = 0x22,
- },
- {
- .first = 0x40,
- .last = 0x44,
- },
- /* 0x80-0x8B is SIM registers and should
- * not be accessed from here */
- },
- },
- [AB8500_USB] = {
- .num_ranges = 2,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x80,
- .last = 0x83,
- },
- {
- .first = 0x87,
- .last = 0x8A,
- },
- },
- },
- [AB8500_TVOUT] = {
- .num_ranges = 9,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x12,
- },
- {
- .first = 0x15,
- .last = 0x17,
- },
- {
- .first = 0x19,
- .last = 0x21,
- },
- {
- .first = 0x27,
- .last = 0x2C,
- },
- {
- .first = 0x41,
- .last = 0x41,
- },
- {
- .first = 0x45,
- .last = 0x5B,
- },
- {
- .first = 0x5D,
- .last = 0x5D,
- },
- {
- .first = 0x69,
- .last = 0x69,
- },
- {
- .first = 0x80,
- .last = 0x81,
- },
- },
- },
- [AB8500_DBI] = {
- .num_ranges = 0,
- .range = 0,
- },
- [AB8500_ECI_AV_ACC] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x80,
- .last = 0x82,
- },
- },
- },
- [0x9] = {
- .num_ranges = 0,
- .range = 0,
- },
- [AB8500_GPADC] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x08,
- },
- },
- },
- [AB8500_CHARGER] = {
- .num_ranges = 8,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x03,
- },
- {
- .first = 0x05,
- .last = 0x05,
- },
- {
- .first = 0x40,
- .last = 0x40,
- },
- {
- .first = 0x42,
- .last = 0x42,
- },
- {
- .first = 0x44,
- .last = 0x44,
- },
- {
- .first = 0x50,
- .last = 0x55,
- },
- {
- .first = 0x80,
- .last = 0x82,
- },
- {
- .first = 0xC0,
- .last = 0xC2,
- },
- },
- },
- [AB8500_GAS_GAUGE] = {
- .num_ranges = 3,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x00,
- },
- {
- .first = 0x07,
- .last = 0x0A,
- },
- {
- .first = 0x10,
- .last = 0x14,
- },
- },
- },
- [AB8500_AUDIO] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x6F,
- },
- },
- },
- [AB8500_INTERRUPT] = {
- .num_ranges = 0,
- .range = 0,
- },
- [AB8500_RTC] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x0F,
- },
- },
- },
- [AB8500_MISC] = {
- .num_ranges = 8,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x00,
- .last = 0x05,
- },
- {
- .first = 0x10,
- .last = 0x15,
- },
- {
- .first = 0x20,
- .last = 0x25,
- },
- {
- .first = 0x30,
- .last = 0x35,
- },
- {
- .first = 0x40,
- .last = 0x45,
- },
- {
- .first = 0x50,
- .last = 0x50,
- },
- {
- .first = 0x60,
- .last = 0x67,
- },
- {
- .first = 0x80,
- .last = 0x80,
- },
- },
- },
- [0x11] = {
- .num_ranges = 0,
- .range = 0,
- },
- [0x12] = {
- .num_ranges = 0,
- .range = 0,
- },
- [0x13] = {
- .num_ranges = 0,
- .range = 0,
- },
- [0x14] = {
- .num_ranges = 0,
- .range = 0,
- },
- [AB8500_OTP_EMUL] = {
- .num_ranges = 1,
- .range = (struct ab8500_reg_range[]) {
- {
- .first = 0x01,
- .last = 0x0F,
- },
- },
- },
+ [0x0] = {
+ .num_ranges = 0,
+ .range = 0,
+ },
+ [AB8500_SYS_CTRL1_BLOCK] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x02,
+ },
+ {
+ .first = 0x42,
+ .last = 0x42,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ },
+ },
+ },
+ [AB8500_SYS_CTRL2_BLOCK] = {
+ .num_ranges = 4,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0D,
+ },
+ {
+ .first = 0x0F,
+ .last = 0x17,
+ },
+ {
+ .first = 0x30,
+ .last = 0x30,
+ },
+ {
+ .first = 0x32,
+ .last = 0x33,
+ },
+ },
+ },
+ [AB8500_REGU_CTRL1] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ {
+ .first = 0x03,
+ .last = 0x10,
+ },
+ {
+ .first = 0x80,
+ .last = 0x84,
+ },
+ },
+ },
+ [AB8500_REGU_CTRL2] = {
+ .num_ranges = 5,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x15,
+ },
+ {
+ .first = 0x17,
+ .last = 0x19,
+ },
+ {
+ .first = 0x1B,
+ .last = 0x1D,
+ },
+ {
+ .first = 0x1F,
+ .last = 0x22,
+ },
+ {
+ .first = 0x40,
+ .last = 0x44,
+ },
+			/* 0x80-0x8B are SIM registers and should
+ * not be accessed from here */
+ },
+ },
+ [AB8500_USB] = {
+ .num_ranges = 2,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x80,
+ .last = 0x83,
+ },
+ {
+ .first = 0x87,
+ .last = 0x8A,
+ },
+ },
+ },
+ [AB8500_TVOUT] = {
+ .num_ranges = 9,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x12,
+ },
+ {
+ .first = 0x15,
+ .last = 0x17,
+ },
+ {
+ .first = 0x19,
+ .last = 0x21,
+ },
+ {
+ .first = 0x27,
+ .last = 0x2C,
+ },
+ {
+ .first = 0x41,
+ .last = 0x41,
+ },
+ {
+ .first = 0x45,
+ .last = 0x5B,
+ },
+ {
+ .first = 0x5D,
+ .last = 0x5D,
+ },
+ {
+ .first = 0x69,
+ .last = 0x69,
+ },
+ {
+ .first = 0x80,
+ .last = 0x81,
+ },
+ },
+ },
+ [AB8500_DBI] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [AB8500_ECI_AV_ACC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x80,
+ .last = 0x82,
+ },
+ },
+ },
+ [0x9] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [AB8500_GPADC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x08,
+ },
+ },
+ },
+ [AB8500_CHARGER] = {
+ .num_ranges = 8,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x03,
+ },
+ {
+ .first = 0x05,
+ .last = 0x05,
+ },
+ {
+ .first = 0x40,
+ .last = 0x40,
+ },
+ {
+ .first = 0x42,
+ .last = 0x42,
+ },
+ {
+ .first = 0x44,
+ .last = 0x44,
+ },
+ {
+ .first = 0x50,
+ .last = 0x55,
+ },
+ {
+ .first = 0x80,
+ .last = 0x82,
+ },
+ {
+ .first = 0xC0,
+ .last = 0xC2,
+ },
+ },
+ },
+ [AB8500_GAS_GAUGE] = {
+ .num_ranges = 3,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ {
+ .first = 0x07,
+ .last = 0x0A,
+ },
+ {
+ .first = 0x10,
+ .last = 0x14,
+ },
+ },
+ },
+ [AB8500_AUDIO] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x6F,
+ },
+ },
+ },
+ [AB8500_INTERRUPT] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [AB8500_RTC] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0F,
+ },
+ },
+ },
+ [AB8500_MISC] = {
+ .num_ranges = 8,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x05,
+ },
+ {
+ .first = 0x10,
+ .last = 0x15,
+ },
+ {
+ .first = 0x20,
+ .last = 0x25,
+ },
+ {
+ .first = 0x30,
+ .last = 0x35,
+ },
+ {
+ .first = 0x40,
+ .last = 0x45,
+ },
+ {
+ .first = 0x50,
+ .last = 0x50,
+ },
+ {
+ .first = 0x60,
+ .last = 0x67,
+ },
+ {
+ .first = 0x80,
+ .last = 0x80,
+ },
+ },
+ },
+ [0x11] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [0x12] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [0x13] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [0x14] = {
+ .num_ranges = 0,
+ .range = NULL,
+ },
+ [AB8500_OTP_EMUL] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x01,
+ .last = 0x0F,
+ },
+ },
+ },
};
static int ab8500_registers_print(struct seq_file *s, void *p)
{
- struct device *dev = s->private;
- unsigned int i;
- u32 bank = debug_bank;
-
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
-
- seq_printf(s, " bank %u:\n", bank);
- for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
- u32 reg;
-
- for (reg = debug_ranges[bank].range[i].first;
- reg <= debug_ranges[bank].range[i].last;
- reg++) {
- u8 value;
- int err;
-
- err = abx500_get_register_interruptible(dev,
- (u8)bank, (u8)reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
-
- err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
- reg, value);
- if (err < 0) {
- dev_err(dev, "seq_printf overflow\n");
- /* Error is not returned here since
- * the output is wanted in any case */
- return 0;
- }
- }
- }
- return 0;
+ struct device *dev = s->private;
+ unsigned int i;
+ u32 bank = debug_bank;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ seq_printf(s, " bank %u:\n", bank);
+ for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
+ u32 reg;
+
+ for (reg = debug_ranges[bank].range[i].first;
+ reg <= debug_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+ int err;
+
+ err = abx500_get_register_interruptible(dev,
+ (u8)bank, (u8)reg, &value);
+ if (err < 0) {
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+ }
+
+ err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
+ reg, value);
+ if (err < 0) {
+ dev_err(dev, "seq_printf overflow\n");
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ return 0;
+ }
+ }
+ }
+ return 0;
}
static int ab8500_registers_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_registers_print, inode->i_private);
+ return single_open(file, ab8500_registers_print, inode->i_private);
}
static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
+ .open = ab8500_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
};
static int ab8500_bank_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "%d\n", debug_bank);
+ return seq_printf(s, "%d\n", debug_bank);
}
static int ab8500_bank_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_bank_print, inode->i_private);
+ return single_open(file, ab8500_bank_print, inode->i_private);
}
static ssize_t ab8500_bank_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
- unsigned long user_bank;
- int err;
-
- /* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
- if (err)
- return -EINVAL;
-
- if (user_bank >= AB8500_NUM_BANKS) {
- dev_err(dev, "debugfs error input > number of banks\n");
- return -EINVAL;
- }
-
- debug_bank = user_bank;
-
- return buf_size;
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB8500_NUM_BANKS) {
+ dev_err(dev, "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ debug_bank = user_bank;
+
+ return buf_size;
}
static int ab8500_address_print(struct seq_file *s, void *p)
{
- return seq_printf(s, "0x%02X\n", debug_address);
+ return seq_printf(s, "0x%02X\n", debug_address);
}
static int ab8500_address_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_address_print, inode->i_private);
+ return single_open(file, ab8500_address_print, inode->i_private);
}
static ssize_t ab8500_address_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
- unsigned long user_address;
- int err;
-
- /* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
- if (err)
- return -EINVAL;
- if (user_address > 0xff) {
- dev_err(dev, "debugfs error input > 0xff\n");
- return -EINVAL;
- }
- debug_address = user_address;
- return buf_size;
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(dev, "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ debug_address = user_address;
+ return buf_size;
}
static int ab8500_val_print(struct seq_file *s, void *p)
{
- struct device *dev = s->private;
- int ret;
- u8 regvalue;
-
- ret = abx500_get_register_interruptible(dev,
- (u8)debug_bank, (u8)debug_address, &regvalue);
- if (ret < 0) {
- dev_err(dev, "abx500_get_reg fail %d, %d\n",
- ret, __LINE__);
- return -EINVAL;
- }
- seq_printf(s, "0x%02X\n", regvalue);
-
- return 0;
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)debug_bank, (u8)debug_address, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
}
static int ab8500_val_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_val_print, inode->i_private);
+ return single_open(file, ab8500_val_print, inode->i_private);
}
static ssize_t ab8500_val_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
- unsigned long user_val;
- int err;
-
- /* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
- if (err)
- return -EINVAL;
- if (user_val > 0xff) {
- dev_err(dev, "debugfs error input > 0xff\n");
- return -EINVAL;
- }
- err = abx500_set_register_interruptible(dev,
- (u8)debug_bank, debug_address, (u8)user_val);
- if (err < 0) {
- printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
- return -EINVAL;
- }
-
- return buf_size;
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(dev, "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = abx500_set_register_interruptible(dev,
+ (u8)debug_bank, debug_address, (u8)user_val);
+ if (err < 0) {
+ printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
+ return -EINVAL;
+ }
+
+ return buf_size;
}
static const struct file_operations ab8500_bank_fops = {
- .open = ab8500_bank_open,
- .write = ab8500_bank_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
+ .open = ab8500_bank_open,
+ .write = ab8500_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
};
static const struct file_operations ab8500_address_fops = {
- .open = ab8500_address_open,
- .write = ab8500_address_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
+ .open = ab8500_address_open,
+ .write = ab8500_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
};
static const struct file_operations ab8500_val_fops = {
- .open = ab8500_val_open,
- .write = ab8500_val_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
+ .open = ab8500_val_open,
+ .write = ab8500_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
};
static struct dentry *ab8500_dir;
@@ -572,77 +572,77 @@ static struct dentry *ab8500_val_file;
static int __devinit ab8500_debug_probe(struct platform_device *plf)
{
- debug_bank = AB8500_MISC;
- debug_address = AB8500_REV_REG & 0x00FF;
+ debug_bank = AB8500_MISC;
+ debug_address = AB8500_REV_REG & 0x00FF;
- ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
- if (!ab8500_dir)
- goto exit_no_debugfs;
+ ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
+ if (!ab8500_dir)
+ goto exit_no_debugfs;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ ab8500_reg_file = debugfs_create_file("all-bank-registers",
+ S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!ab8500_reg_file)
+ goto exit_destroy_dir;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ ab8500_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!ab8500_bank_file)
+ goto exit_destroy_reg;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ ab8500_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+ &ab8500_address_fops);
+ if (!ab8500_address_file)
+ goto exit_destroy_bank;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ ab8500_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!ab8500_val_file)
+ goto exit_destroy_address;
- return 0;
+ return 0;
exit_destroy_address:
- debugfs_remove(ab8500_address_file);
+ debugfs_remove(ab8500_address_file);
exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
+ debugfs_remove(ab8500_bank_file);
exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
+ debugfs_remove(ab8500_reg_file);
exit_destroy_dir:
- debugfs_remove(ab8500_dir);
+ debugfs_remove(ab8500_dir);
exit_no_debugfs:
- dev_err(&plf->dev, "failed to create debugfs entries.\n");
- return -ENOMEM;
+ dev_err(&plf->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
}
static int __devexit ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
+ debugfs_remove(ab8500_val_file);
+ debugfs_remove(ab8500_address_file);
+ debugfs_remove(ab8500_bank_file);
+ debugfs_remove(ab8500_reg_file);
+ debugfs_remove(ab8500_dir);
- return 0;
+ return 0;
}
static struct platform_driver ab8500_debug_driver = {
- .driver = {
- .name = "ab8500-debug",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_debug_probe,
- .remove = __devexit_p(ab8500_debug_remove)
+ .driver = {
+ .name = "ab8500-debug",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_debug_probe,
+ .remove = __devexit_p(ab8500_debug_remove)
};
static int __init ab8500_debug_init(void)
{
- return platform_driver_register(&ab8500_debug_driver);
+ return platform_driver_register(&ab8500_debug_driver);
}
static void __exit ab8500_debug_exit(void)
{
- platform_driver_unregister(&ab8500_debug_driver);
+ platform_driver_unregister(&ab8500_debug_driver);
}
subsys_initcall(ab8500_debug_init);
module_exit(ab8500_debug_exit);
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
deleted file mode 100644
index b1653421edb5..000000000000
--- a/drivers/mfd/ab8500-spi.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ab8500.h>
-
-/*
- * This funtion writes to any AB8500 registers using
- * SPI protocol & before it writes it packs the data
- * in the below 24 bit frame format
- *
- * *|------------------------------------|
- * *| 23|22...18|17.......10|9|8|7......0|
- * *| r/w bank adr data |
- * * ------------------------------------
- *
- * This function shouldn't be called from interrupt
- * context
- */
-static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
- struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
- dev);
- unsigned long spi_data = addr << 10 | data;
- struct spi_transfer xfer;
- struct spi_message msg;
-
- ab8500->tx_buf[0] = spi_data;
- ab8500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab8500->tx_buf;
- xfer.rx_buf = NULL;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(spi, &msg);
-}
-
-static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
-{
- struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
- dev);
- unsigned long spi_data = 1 << 23 | addr << 10;
- struct spi_transfer xfer;
- struct spi_message msg;
- int ret;
-
- ab8500->tx_buf[0] = spi_data;
- ab8500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab8500->tx_buf;
- xfer.rx_buf = ab8500->rx_buf;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- ret = spi_sync(spi, &msg);
- if (!ret)
- /*
- * Only the 8 lowermost bytes are
- * defined with value, the rest may
- * vary depending on chip/board noise.
- */
- ret = ab8500->rx_buf[0] & 0xFFU;
-
- return ret;
-}
-
-static int __devinit ab8500_spi_probe(struct spi_device *spi)
-{
- struct ab8500 *ab8500;
- int ret;
-
- spi->bits_per_word = 24;
- ret = spi_setup(spi);
- if (ret < 0)
- return ret;
-
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
- if (!ab8500)
- return -ENOMEM;
-
- ab8500->dev = &spi->dev;
- ab8500->irq = spi->irq;
-
- ab8500->read = ab8500_spi_read;
- ab8500->write = ab8500_spi_write;
-
- spi_set_drvdata(spi, ab8500);
-
- ret = ab8500_init(ab8500);
- if (ret)
- kfree(ab8500);
-
- return ret;
-}
-
-static int __devexit ab8500_spi_remove(struct spi_device *spi)
-{
- struct ab8500 *ab8500 = spi_get_drvdata(spi);
-
- ab8500_exit(ab8500);
- kfree(ab8500);
-
- return 0;
-}
-
-static struct spi_driver ab8500_spi_driver = {
- .driver = {
- .name = "ab8500-spi",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_spi_probe,
- .remove = __devexit_p(ab8500_spi_remove)
-};
-
-static int __init ab8500_spi_init(void)
-{
- return spi_register_driver(&ab8500_spi_driver);
-}
-subsys_initcall(ab8500_spi_init);
-
-static void __exit ab8500_spi_exit(void)
-{
- spi_unregister_driver(&ab8500_spi_driver);
-}
-module_exit(ab8500_spi_exit);
-
-MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
-MODULE_DESCRIPTION("AB8500 SPI");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 7de708d15d72..c45e6305b26f 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -57,7 +57,7 @@ struct asic3_clk {
.rate = _rate, \
}
-struct asic3_clk asic3_clk_init[] __initdata = {
+static struct asic3_clk asic3_clk_init[] __initdata = {
INIT_CDEX(SPI, 0),
INIT_CDEX(OWM, 5000000),
INIT_CDEX(PWM0, 0),
@@ -102,7 +102,7 @@ static inline u32 asic3_read_register(struct asic3 *asic,
(reg >> asic->bus_shift));
}
-void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
+static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
{
unsigned long flags;
u32 val;
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
unsigned long flags;
struct asic3 *asic;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
- asic = desc->handler_data;
+ asic = get_irq_data(irq);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
@@ -226,14 +226,14 @@ static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
return (irq - asic->irq_base) & 0xf;
}
-static void asic3_mask_gpio_irq(unsigned int irq)
+static void asic3_mask_gpio_irq(struct irq_data *data)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 val, bank, index;
unsigned long flags;
- bank = asic3_irq_to_bank(asic, irq);
- index = asic3_irq_to_index(asic, irq);
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
spin_lock_irqsave(&asic->lock, flags);
val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -242,9 +242,9 @@ static void asic3_mask_gpio_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_mask_irq(unsigned int irq)
+static void asic3_mask_irq(struct irq_data *data)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
int regval;
unsigned long flags;
@@ -254,7 +254,7 @@ static void asic3_mask_irq(unsigned int irq)
ASIC3_INTR_INT_MASK);
regval &= ~(ASIC3_INTMASK_MASK0 <<
- (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+ (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
asic3_write_register(asic,
ASIC3_INTR_BASE +
@@ -263,14 +263,14 @@ static void asic3_mask_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_unmask_gpio_irq(unsigned int irq)
+static void asic3_unmask_gpio_irq(struct irq_data *data)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 val, bank, index;
unsigned long flags;
- bank = asic3_irq_to_bank(asic, irq);
- index = asic3_irq_to_index(asic, irq);
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
spin_lock_irqsave(&asic->lock, flags);
val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -279,9 +279,9 @@ static void asic3_unmask_gpio_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_unmask_irq(unsigned int irq)
+static void asic3_unmask_irq(struct irq_data *data)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
int regval;
unsigned long flags;
@@ -291,7 +291,7 @@ static void asic3_unmask_irq(unsigned int irq)
ASIC3_INTR_INT_MASK);
regval |= (ASIC3_INTMASK_MASK0 <<
- (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+ (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
asic3_write_register(asic,
ASIC3_INTR_BASE +
@@ -300,15 +300,15 @@ static void asic3_unmask_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
u32 bank, index;
u16 trigger, level, edge, bit;
unsigned long flags;
- bank = asic3_irq_to_bank(asic, irq);
- index = asic3_irq_to_index(asic, irq);
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
bit = 1<<index;
spin_lock_irqsave(&asic->lock, flags);
@@ -318,7 +318,7 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
bank + ASIC3_GPIO_EDGE_TRIGGER);
trigger = asic3_read_register(asic,
bank + ASIC3_GPIO_TRIGGER_TYPE);
- asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
+ asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
if (type == IRQ_TYPE_EDGE_RISING) {
trigger |= bit;
@@ -328,11 +328,11 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
edge &= ~bit;
} else if (type == IRQ_TYPE_EDGE_BOTH) {
trigger |= bit;
- if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base))
+ if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
edge &= ~bit;
else
edge |= bit;
- asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
+ asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
} else if (type == IRQ_TYPE_LEVEL_LOW) {
trigger &= ~bit;
level &= ~bit;
@@ -359,17 +359,17 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
static struct irq_chip asic3_gpio_irq_chip = {
.name = "ASIC3-GPIO",
- .ack = asic3_mask_gpio_irq,
- .mask = asic3_mask_gpio_irq,
- .unmask = asic3_unmask_gpio_irq,
- .set_type = asic3_gpio_irq_type,
+ .irq_ack = asic3_mask_gpio_irq,
+ .irq_mask = asic3_mask_gpio_irq,
+ .irq_unmask = asic3_unmask_gpio_irq,
+ .irq_set_type = asic3_gpio_irq_type,
};
static struct irq_chip asic3_irq_chip = {
.name = "ASIC3",
- .ack = asic3_mask_irq,
- .mask = asic3_mask_irq,
- .unmask = asic3_unmask_irq,
+ .irq_ack = asic3_mask_irq,
+ .irq_mask = asic3_mask_irq,
+ .irq_unmask = asic3_unmask_irq,
};
static int __init asic3_irq_probe(struct platform_device *pdev)
@@ -635,7 +635,7 @@ static struct resource ds1wm_resources[] = {
},
{
.start = ASIC3_IRQ_OWM,
- .start = ASIC3_IRQ_OWM,
+ .end = ASIC3_IRQ_OWM,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
new file mode 100644
index 000000000000..59ca6f151e78
--- /dev/null
+++ b/drivers/mfd/cs5535-mfd.c
@@ -0,0 +1,151 @@
+/*
+ * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges
+ *
+ * The CS5535 and CS5536 have an ISA bridge on the PCI bus that is
+ * used for accessing GPIOs, MFGPTs, ACPI, etc. Each subdevice has
+ * an IO range that's specified in a single BAR. The BAR order is
+ * hardcoded in the CS553x specifications.
+ *
+ * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define DRV_NAME "cs5535-mfd"
+
+enum cs5535_mfd_bars {
+ SMB_BAR = 0,
+ GPIO_BAR = 1,
+ MFGPT_BAR = 2,
+ PMS_BAR = 4,
+ ACPI_BAR = 5,
+ NR_BARS,
+};
+
+static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
+
+static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
+ {
+ .id = SMB_BAR,
+ .name = "cs5535-smb",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[SMB_BAR],
+ },
+ {
+ .id = GPIO_BAR,
+ .name = "cs5535-gpio",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[GPIO_BAR],
+ },
+ {
+ .id = MFGPT_BAR,
+ .name = "cs5535-mfgpt",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[MFGPT_BAR],
+ },
+ {
+ .id = PMS_BAR,
+ .name = "cs5535-pms",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[PMS_BAR],
+ },
+ {
+ .id = ACPI_BAR,
+ .name = "cs5535-acpi",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[ACPI_BAR],
+ },
+};
+
+static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err, i;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ /* fill in IO range for each cell; subdrivers handle the region */
+ for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
+ int bar = cs5535_mfd_cells[i].id;
+ struct resource *r = &cs5535_mfd_resources[bar];
+
+ r->flags = IORESOURCE_IO;
+ r->start = pci_resource_start(pdev, bar);
+ r->end = pci_resource_end(pdev, bar);
+
+ /* id is used for temporarily storing BAR; unset it now */
+ cs5535_mfd_cells[i].id = 0;
+ }
+
+ err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
+ ARRAY_SIZE(cs5535_mfd_cells), NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
+ goto err_disable;
+ }
+
+ dev_info(&pdev->dev, "%zu devices registered.\n",
+ ARRAY_SIZE(cs5535_mfd_cells));
+
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
+
+static struct pci_driver cs5535_mfd_drv = {
+ .name = DRV_NAME,
+ .id_table = cs5535_mfd_pci_tbl,
+ .probe = cs5535_mfd_probe,
+ .remove = __devexit_p(cs5535_mfd_remove),
+};
+
+static int __init cs5535_mfd_init(void)
+{
+ return pci_register_driver(&cs5535_mfd_drv);
+}
+
+static void __exit cs5535_mfd_exit(void)
+{
+ pci_unregister_driver(&cs5535_mfd_drv);
+}
+
+module_init(cs5535_mfd_init);
+module_exit(cs5535_mfd_exit);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
+MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
+MODULE_LICENSE("GPL");
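
Since cs5535_mfd_probe() hands each cell a single IO resource copied from its BAR, a cell driver only has to look that range up from its platform device. A hedged sketch of that lookup for a hypothetical "cs5535-xyz" cell driver; the name and probe body are illustrative and not part of this patch:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __devinit cs5535_xyz_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* the IO range the MFD core copied out of the BAR */
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		return -ENODEV;

	/* the parent only forwards the range; the cell driver owns it */
	if (!request_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	/* ... program the hardware via inb()/outb() relative to res->start ... */
	return 0;
}
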
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d215c7..fdd8a1b8bc67 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
- cell->name = "davinci_vcif";
+ cell->name = "davinci-vcif";
cell->driver_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
- cell->name = "cq93vc";
+ cell->name = "cq93vc-codec";
cell->driver_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index c2b698d69a93..9e2d8dd5f9e5 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -144,26 +144,26 @@ int pcap_to_irq(struct pcap_chip *pcap, int irq)
}
EXPORT_SYMBOL_GPL(pcap_to_irq);
-static void pcap_mask_irq(unsigned int irq)
+static void pcap_mask_irq(struct irq_data *d)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
- pcap->msr |= 1 << irq_to_pcap(pcap, irq);
+ pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
queue_work(pcap->workqueue, &pcap->msr_work);
}
-static void pcap_unmask_irq(unsigned int irq)
+static void pcap_unmask_irq(struct irq_data *d)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
- pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
+ pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
queue_work(pcap->workqueue, &pcap->msr_work);
}
static struct irq_chip pcap_irq_chip = {
- .name = "pcap",
- .mask = pcap_mask_irq,
- .unmask = pcap_unmask_irq,
+ .name = "pcap",
+ .irq_mask = pcap_mask_irq,
+ .irq_unmask = pcap_unmask_irq,
};
static void pcap_msr_work(struct work_struct *work)
@@ -199,8 +199,7 @@ static void pcap_isr_work(struct work_struct *work)
if (service & 1) {
struct irq_desc *desc = irq_to_desc(irq);
- if (WARN(!desc, KERN_WARNING
- "Invalid PCAP IRQ %d\n", irq))
+ if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
break;
if (desc->status & IRQ_DISABLED)
@@ -218,7 +217,7 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct pcap_chip *pcap = get_irq_data(irq);
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
queue_work(pcap->workqueue, &pcap->isr_work);
return;
}
@@ -282,7 +281,7 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
mutex_lock(&pcap->adc_mutex);
req = pcap->adc_queue[pcap->adc_head];
- if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+ if (WARN(!req, "adc irq without pending request\n")) {
mutex_unlock(&pcap->adc_mutex);
return IRQ_HANDLED;
}
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index d3e74f8585e0..d00b6d1a69e5 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -70,31 +70,32 @@ static inline void ack_irqs(struct egpio_info *ei)
ei->ack_write, ei->ack_register << ei->bus_shift);
}
-static void egpio_ack(unsigned int irq)
+static void egpio_ack(struct irq_data *data)
{
}
/* There does not appear to be a way to proactively mask interrupts
* on the egpio chip itself. So, we simply ignore interrupts that
* aren't desired. */
-static void egpio_mask(unsigned int irq)
+static void egpio_mask(struct irq_data *data)
{
- struct egpio_info *ei = get_irq_chip_data(irq);
- ei->irqs_enabled &= ~(1 << (irq - ei->irq_start));
- pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled);
+ struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+ ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start));
+ pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled);
}
-static void egpio_unmask(unsigned int irq)
+
+static void egpio_unmask(struct irq_data *data)
{
- struct egpio_info *ei = get_irq_chip_data(irq);
- ei->irqs_enabled |= 1 << (irq - ei->irq_start);
- pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled);
+ struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+ ei->irqs_enabled |= 1 << (data->irq - ei->irq_start);
+ pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled);
}
static struct irq_chip egpio_muxed_chip = {
- .name = "htc-egpio",
- .ack = egpio_ack,
- .mask = egpio_mask,
- .unmask = egpio_unmask,
+ .name = "htc-egpio",
+ .irq_ack = egpio_ack,
+ .irq_mask = egpio_mask,
+ .irq_unmask = egpio_unmask,
};
static void egpio_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 594c9a8e25e1..296ad1562f69 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -82,25 +82,25 @@ struct htcpld_data {
/* There does not appear to be a way to proactively mask interrupts
* on the htcpld chip itself. So, we simply ignore interrupts that
* aren't desired. */
-static void htcpld_mask(unsigned int irq)
+static void htcpld_mask(struct irq_data *data)
{
- struct htcpld_chip *chip = get_irq_chip_data(irq);
- chip->irqs_enabled &= ~(1 << (irq - chip->irq_start));
- pr_debug("HTCPLD mask %d %04x\n", irq, chip->irqs_enabled);
+ struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+ chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start));
+ pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled);
}
-static void htcpld_unmask(unsigned int irq)
+static void htcpld_unmask(struct irq_data *data)
{
- struct htcpld_chip *chip = get_irq_chip_data(irq);
- chip->irqs_enabled |= 1 << (irq - chip->irq_start);
- pr_debug("HTCPLD unmask %d %04x\n", irq, chip->irqs_enabled);
+ struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+ chip->irqs_enabled |= 1 << (data->irq - chip->irq_start);
+ pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled);
}
-static int htcpld_set_type(unsigned int irq, unsigned int flags)
+static int htcpld_set_type(struct irq_data *data, unsigned int flags)
{
- struct irq_desc *d = irq_to_desc(irq);
+ struct irq_desc *d = irq_to_desc(data->irq);
if (!d) {
- pr_err("HTCPLD invalid IRQ: %d\n", irq);
+ pr_err("HTCPLD invalid IRQ: %d\n", data->irq);
return -EINVAL;
}
@@ -118,10 +118,10 @@ static int htcpld_set_type(unsigned int irq, unsigned int flags)
}
static struct irq_chip htcpld_muxed_chip = {
- .name = "htcpld",
- .mask = htcpld_mask,
- .unmask = htcpld_unmask,
- .set_type = htcpld_set_type,
+ .name = "htcpld",
+ .irq_mask = htcpld_mask,
+ .irq_unmask = htcpld_unmask,
+ .irq_set_type = htcpld_set_type,
};
/* To properly dispatch IRQ events, we need to read from the
@@ -235,7 +235,7 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
* and that work is scheduled in the set routine. The kernel can then run
* the I2C functions, which will sleep, in process context.
*/
-void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
+static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct i2c_client *client;
struct htcpld_chip *chip_data;
@@ -259,7 +259,7 @@ void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
schedule_work(&(chip_data->set_val_work));
}
-void htcpld_chip_set_ni(struct work_struct *work)
+static void htcpld_chip_set_ni(struct work_struct *work)
{
struct htcpld_chip *chip_data;
struct i2c_client *client;
@@ -269,7 +269,7 @@ void htcpld_chip_set_ni(struct work_struct *work)
i2c_smbus_read_byte_data(client, chip_data->cache_out);
}
-int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
{
struct htcpld_chip *chip_data;
int val = 0;
@@ -316,7 +316,7 @@ static int htcpld_direction_input(struct gpio_chip *chip,
return (offset < chip->ngpio) ? 0 : -EINVAL;
}
-int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct htcpld_chip *chip_data;
@@ -328,7 +328,7 @@ int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
-void htcpld_chip_reset(struct i2c_client *client)
+static void htcpld_chip_reset(struct i2c_client *client)
{
struct htcpld_chip *chip_data = i2c_get_clientdata(client);
if (!chip_data)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 9dd1b33f2275..0cc59795f600 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -84,31 +84,30 @@ static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
spin_unlock_irqrestore(&adc->lock, flags);
}
-static void jz4740_adc_irq_mask(unsigned int irq)
+static void jz4740_adc_irq_mask(struct irq_data *data)
{
- struct jz4740_adc *adc = get_irq_chip_data(irq);
- jz4740_adc_irq_set_masked(adc, irq, true);
+ struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+ jz4740_adc_irq_set_masked(adc, data->irq, true);
}
-static void jz4740_adc_irq_unmask(unsigned int irq)
+static void jz4740_adc_irq_unmask(struct irq_data *data)
{
- struct jz4740_adc *adc = get_irq_chip_data(irq);
- jz4740_adc_irq_set_masked(adc, irq, false);
+ struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+ jz4740_adc_irq_set_masked(adc, data->irq, false);
}
-static void jz4740_adc_irq_ack(unsigned int irq)
+static void jz4740_adc_irq_ack(struct irq_data *data)
{
- struct jz4740_adc *adc = get_irq_chip_data(irq);
-
- irq -= adc->irq_base;
+ struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq - adc->irq_base;
writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
}
static struct irq_chip jz4740_adc_irq_chip = {
.name = "jz4740-adc",
- .mask = jz4740_adc_irq_mask,
- .unmask = jz4740_adc_irq_unmask,
- .ack = jz4740_adc_irq_ack,
+ .irq_mask = jz4740_adc_irq_mask,
+ .irq_unmask = jz4740_adc_irq_unmask,
+ .irq_ack = jz4740_adc_irq_ack,
};
static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 44695f5a1800..0e998dc4e7d8 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -407,16 +407,16 @@ static irqreturn_t max8925_tsc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void max8925_irq_lock(unsigned int irq)
+static void max8925_irq_lock(struct irq_data *data)
{
- struct max8925_chip *chip = get_irq_chip_data(irq);
+ struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
mutex_lock(&chip->irq_lock);
}
-static void max8925_irq_sync_unlock(unsigned int irq)
+static void max8925_irq_sync_unlock(struct irq_data *data)
{
- struct max8925_chip *chip = get_irq_chip_data(irq);
+ struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
struct max8925_irq_data *irq_data;
static unsigned char cache_chg[2] = {0xff, 0xff};
static unsigned char cache_on[2] = {0xff, 0xff};
@@ -492,25 +492,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
mutex_unlock(&chip->irq_lock);
}
-static void max8925_irq_enable(unsigned int irq)
+static void max8925_irq_enable(struct irq_data *data)
{
- struct max8925_chip *chip = get_irq_chip_data(irq);
- max8925_irqs[irq - chip->irq_base].enable
- = max8925_irqs[irq - chip->irq_base].offs;
+ struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+ max8925_irqs[data->irq - chip->irq_base].enable
+ = max8925_irqs[data->irq - chip->irq_base].offs;
}
-static void max8925_irq_disable(unsigned int irq)
+static void max8925_irq_disable(struct irq_data *data)
{
- struct max8925_chip *chip = get_irq_chip_data(irq);
- max8925_irqs[irq - chip->irq_base].enable = 0;
+ struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+ max8925_irqs[data->irq - chip->irq_base].enable = 0;
}
static struct irq_chip max8925_irq_chip = {
.name = "max8925",
- .bus_lock = max8925_irq_lock,
- .bus_sync_unlock = max8925_irq_sync_unlock,
- .enable = max8925_irq_enable,
- .disable = max8925_irq_disable,
+ .irq_bus_lock = max8925_irq_lock,
+ .irq_bus_sync_unlock = max8925_irq_sync_unlock,
+ .irq_enable = max8925_irq_enable,
+ .irq_disable = max8925_irq_disable,
};
static int max8925_irq_init(struct max8925_chip *chip, int irq,
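
max8925 is the first of several interrupt controllers in this series that sit behind a slow bus (I2C or SPI), so enable/disable only update a cached value and the cache is flushed to the hardware in irq_bus_sync_unlock(), where sleeping is allowed. A hedged sketch of that pattern for a hypothetical "baz" chip; the register write is stubbed out, where a real driver would issue I2C or SPI I/O:

#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct baz_chip {
	struct mutex irq_lock;
	unsigned int irq_base;
	u8 mask_cur;		/* cached mask, updated in atomic context */
	u8 mask_hw;		/* last value written to the device */
};

/* hypothetical accessor; stands in for a sleeping bus transfer */
static int baz_write_mask(struct baz_chip *baz, u8 val)
{
	baz->mask_hw = val;
	return 0;
}

static void baz_irq_bus_lock(struct irq_data *data)
{
	struct baz_chip *baz = irq_data_get_irq_chip_data(data);

	mutex_lock(&baz->irq_lock);
}

static void baz_irq_bus_sync_unlock(struct irq_data *data)
{
	struct baz_chip *baz = irq_data_get_irq_chip_data(data);

	/* write back the cached mask outside of atomic context */
	if (baz->mask_cur != baz->mask_hw)
		baz_write_mask(baz, baz->mask_cur);
	mutex_unlock(&baz->irq_lock);
}

static void baz_irq_mask(struct irq_data *data)
{
	struct baz_chip *baz = irq_data_get_irq_chip_data(data);

	baz->mask_cur |= 1 << (data->irq - baz->irq_base);
}

static void baz_irq_unmask(struct irq_data *data)
{
	struct baz_chip *baz = irq_data_get_irq_chip_data(data);

	baz->mask_cur &= ~(1 << (data->irq - baz->irq_base));
}

static struct irq_chip baz_irq_chip = {
	.name			= "baz",
	.irq_bus_lock		= baz_irq_bus_lock,
	.irq_bus_sync_unlock	= baz_irq_bus_sync_unlock,
	.irq_mask		= baz_irq_mask,
	.irq_unmask		= baz_irq_unmask,
};
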
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 45bfe77b639b..3903e1fbb334 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -102,16 +102,16 @@ irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
return &max8998_irqs[irq - max8998->irq_base];
}
-static void max8998_irq_lock(unsigned int irq)
+static void max8998_irq_lock(struct irq_data *data)
{
- struct max8998_dev *max8998 = get_irq_chip_data(irq);
+ struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
mutex_lock(&max8998->irqlock);
}
-static void max8998_irq_sync_unlock(unsigned int irq)
+static void max8998_irq_sync_unlock(struct irq_data *data)
{
- struct max8998_dev *max8998 = get_irq_chip_data(irq);
+ struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) {
@@ -129,28 +129,30 @@ static void max8998_irq_sync_unlock(unsigned int irq)
mutex_unlock(&max8998->irqlock);
}
-static void max8998_irq_unmask(unsigned int irq)
+static void max8998_irq_unmask(struct irq_data *data)
{
- struct max8998_dev *max8998 = get_irq_chip_data(irq);
- struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+ struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+ data->irq);
max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void max8998_irq_mask(unsigned int irq)
+static void max8998_irq_mask(struct irq_data *data)
{
- struct max8998_dev *max8998 = get_irq_chip_data(irq);
- struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+ struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+ struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+ data->irq);
max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
static struct irq_chip max8998_irq_chip = {
.name = "max8998",
- .bus_lock = max8998_irq_lock,
- .bus_sync_unlock = max8998_irq_sync_unlock,
- .mask = max8998_irq_mask,
- .unmask = max8998_irq_unmask,
+ .irq_bus_lock = max8998_irq_lock,
+ .irq_bus_sync_unlock = max8998_irq_sync_unlock,
+ .irq_mask = max8998_irq_mask,
+ .irq_unmask = max8998_irq_unmask,
};
static irqreturn_t max8998_irq_thread(int irq, void *data)
@@ -181,6 +183,13 @@ static irqreturn_t max8998_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+int max8998_irq_resume(struct max8998_dev *max8998)
+{
+ if (max8998->irq && max8998->irq_base)
+ max8998_irq_thread(max8998->irq_base, max8998);
+ return 0;
+}
+
int max8998_irq_init(struct max8998_dev *max8998)
{
int i;
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bb9977bebe78..bbfe86732602 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -25,6 +25,8 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8998.h>
@@ -40,6 +42,14 @@ static struct mfd_cell max8998_devs[] = {
},
};
+static struct mfd_cell lp3974_devs[] = {
+ {
+ .name = "lp3974-pmic",
+ }, {
+ .name = "lp3974-rtc",
+ },
+};
+
int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
@@ -135,6 +145,7 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
if (pdata) {
max8998->ono = pdata->ono;
max8998->irq_base = pdata->irq_base;
+ max8998->wakeup = pdata->wakeup;
}
mutex_init(&max8998->iolock);
@@ -143,9 +154,23 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
max8998_irq_init(max8998);
- ret = mfd_add_devices(max8998->dev, -1,
- max8998_devs, ARRAY_SIZE(max8998_devs),
- NULL, 0);
+ pm_runtime_set_active(max8998->dev);
+
+ switch (id->driver_data) {
+ case TYPE_LP3974:
+ ret = mfd_add_devices(max8998->dev, -1,
+ lp3974_devs, ARRAY_SIZE(lp3974_devs),
+ NULL, 0);
+ break;
+ case TYPE_MAX8998:
+ ret = mfd_add_devices(max8998->dev, -1,
+ max8998_devs, ARRAY_SIZE(max8998_devs),
+ NULL, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
if (ret < 0)
goto err;
@@ -178,10 +203,113 @@ static const struct i2c_device_id max8998_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
+static int max8998_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+ if (max8998->wakeup)
+ set_irq_wake(max8998->irq, 1);
+ return 0;
+}
+
+static int max8998_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+ if (max8998->wakeup)
+ set_irq_wake(max8998->irq, 0);
+ /*
+ * In LP3974, if IRQ registers are not "read & clear"
+ * when it's set during sleep, the interrupt becomes
+ * disabled.
+ */
+ return max8998_irq_resume(i2c_get_clientdata(i2c));
+}
+
+struct max8998_reg_dump {
+ u8 addr;
+ u8 val;
+};
+#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, }
+struct max8998_reg_dump max8998_dump[] = {
+ SAVE_ITEM(MAX8998_REG_IRQM1),
+ SAVE_ITEM(MAX8998_REG_IRQM2),
+ SAVE_ITEM(MAX8998_REG_IRQM3),
+ SAVE_ITEM(MAX8998_REG_IRQM4),
+ SAVE_ITEM(MAX8998_REG_STATUSM1),
+ SAVE_ITEM(MAX8998_REG_STATUSM2),
+ SAVE_ITEM(MAX8998_REG_CHGR1),
+ SAVE_ITEM(MAX8998_REG_CHGR2),
+ SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+ SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+ SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3),
+ SAVE_ITEM(MAX8998_REG_ONOFF1),
+ SAVE_ITEM(MAX8998_REG_ONOFF2),
+ SAVE_ITEM(MAX8998_REG_ONOFF3),
+ SAVE_ITEM(MAX8998_REG_ONOFF4),
+ SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1),
+ SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2),
+ SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3),
+ SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4),
+ SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1),
+ SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2),
+ SAVE_ITEM(MAX8998_REG_LDO2_LDO3),
+ SAVE_ITEM(MAX8998_REG_LDO4),
+ SAVE_ITEM(MAX8998_REG_LDO5),
+ SAVE_ITEM(MAX8998_REG_LDO6),
+ SAVE_ITEM(MAX8998_REG_LDO7),
+ SAVE_ITEM(MAX8998_REG_LDO8_LDO9),
+ SAVE_ITEM(MAX8998_REG_LDO10_LDO11),
+ SAVE_ITEM(MAX8998_REG_LDO12),
+ SAVE_ITEM(MAX8998_REG_LDO13),
+ SAVE_ITEM(MAX8998_REG_LDO14),
+ SAVE_ITEM(MAX8998_REG_LDO15),
+ SAVE_ITEM(MAX8998_REG_LDO16),
+ SAVE_ITEM(MAX8998_REG_LDO17),
+ SAVE_ITEM(MAX8998_REG_BKCHR),
+ SAVE_ITEM(MAX8998_REG_LBCNFG1),
+ SAVE_ITEM(MAX8998_REG_LBCNFG2),
+};
+/* Save registers before hibernation */
+static int max8998_freeze(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+ max8998_read_reg(i2c, max8998_dump[i].addr,
+ &max8998_dump[i].val);
+
+ return 0;
+}
+
+/* Restore registers after hibernation */
+static int max8998_restore(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+ max8998_write_reg(i2c, max8998_dump[i].addr,
+ max8998_dump[i].val);
+
+ return 0;
+}
+
+const struct dev_pm_ops max8998_pm = {
+ .suspend = max8998_suspend,
+ .resume = max8998_resume,
+ .freeze = max8998_freeze,
+ .restore = max8998_restore,
+};
+
static struct i2c_driver max8998_i2c_driver = {
.driver = {
.name = "max8998",
.owner = THIS_MODULE,
+ .pm = &max8998_pm,
},
.probe = max8998_i2c_probe,
.remove = max8998_i2c_remove,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index a2ac2ed6d64c..b9fcaf0004da 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -749,7 +749,7 @@ static int mc13xxx_probe(struct spi_device *spi)
if (ret) {
err_mask:
err_revision:
- mutex_unlock(&mc13xxx->lock);
+ mc13xxx_unlock(mc13xxx);
dev_set_drvdata(&spi->dev, NULL);
kfree(mc13xxx);
return ret;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index ec99f681e773..d83ad0f141af 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
static int mfd_add_device(struct device *parent, int id,
@@ -82,6 +83,9 @@ static int mfd_add_device(struct device *parent, int id,
if (ret)
goto fail_res;
+ if (cell->pm_runtime_no_callbacks)
+ pm_runtime_no_callbacks(&pdev->dev);
+
kfree(res);
return 0;
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f93af9d..0a7df44a93c0 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+ /*
+ * All SDHI blocks support SDIO IRQ signalling.
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc9275c12133..5de3a760ea1e 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -26,7 +26,7 @@
#include <linux/sm501-regs.h>
#include <linux/serial_8250.h>
-#include <asm/io.h>
+#include <linux/io.h>
struct sm501_device {
struct list_head list;
@@ -745,11 +745,8 @@ static int sm501_register_device(struct sm501_devdata *sm,
int ret;
for (ptr = 0; ptr < pdev->num_resources; ptr++) {
- printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n",
- pdev->name, ptr,
- pdev->resource[ptr].flags,
- (unsigned long long)pdev->resource[ptr].start,
- (unsigned long long)pdev->resource[ptr].end);
+ printk(KERN_DEBUG "%s[%d] %pR\n",
+ pdev->name, ptr, &pdev->resource[ptr]);
}
ret = platform_device_register(pdev);
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index b11487f1e1cb..3e5732b58c49 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -699,16 +699,16 @@ static irqreturn_t stmpe_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void stmpe_irq_lock(unsigned int irq)
+static void stmpe_irq_lock(struct irq_data *data)
{
- struct stmpe *stmpe = get_irq_chip_data(irq);
+ struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
mutex_lock(&stmpe->irq_lock);
}
-static void stmpe_irq_sync_unlock(unsigned int irq)
+static void stmpe_irq_sync_unlock(struct irq_data *data)
{
- struct stmpe *stmpe = get_irq_chip_data(irq);
+ struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
struct stmpe_variant_info *variant = stmpe->variant;
int num = DIV_ROUND_UP(variant->num_irqs, 8);
int i;
@@ -727,20 +727,20 @@ static void stmpe_irq_sync_unlock(unsigned int irq)
mutex_unlock(&stmpe->irq_lock);
}
-static void stmpe_irq_mask(unsigned int irq)
+static void stmpe_irq_mask(struct irq_data *data)
{
- struct stmpe *stmpe = get_irq_chip_data(irq);
- int offset = irq - stmpe->irq_base;
+ struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+ int offset = data->irq - stmpe->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
stmpe->ier[regoffset] &= ~mask;
}
-static void stmpe_irq_unmask(unsigned int irq)
+static void stmpe_irq_unmask(struct irq_data *data)
{
- struct stmpe *stmpe = get_irq_chip_data(irq);
- int offset = irq - stmpe->irq_base;
+ struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+ int offset = data->irq - stmpe->irq_base;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -749,10 +749,10 @@ static void stmpe_irq_unmask(unsigned int irq)
static struct irq_chip stmpe_irq_chip = {
.name = "stmpe",
- .bus_lock = stmpe_irq_lock,
- .bus_sync_unlock = stmpe_irq_sync_unlock,
- .mask = stmpe_irq_mask,
- .unmask = stmpe_irq_unmask,
+ .irq_bus_lock = stmpe_irq_lock,
+ .irq_bus_sync_unlock = stmpe_irq_sync_unlock,
+ .irq_mask = stmpe_irq_mask,
+ .irq_unmask = stmpe_irq_unmask,
};
static int __devinit stmpe_irq_init(struct stmpe *stmpe)
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 006c121f3f0d..9caeb4ac6ea6 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -199,37 +199,37 @@ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(irq_base + i);
}
-static void t7l66xb_irq_mask(unsigned int irq)
+static void t7l66xb_irq_mask(struct irq_data *data)
{
- struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+ struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&t7l66xb->lock, flags);
imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
- imr |= 1 << (irq - t7l66xb->irq_base);
+ imr |= 1 << (data->irq - t7l66xb->irq_base);
tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
spin_unlock_irqrestore(&t7l66xb->lock, flags);
}
-static void t7l66xb_irq_unmask(unsigned int irq)
+static void t7l66xb_irq_unmask(struct irq_data *data)
{
- struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+ struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&t7l66xb->lock, flags);
imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
- imr &= ~(1 << (irq - t7l66xb->irq_base));
+ imr &= ~(1 << (data->irq - t7l66xb->irq_base));
tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
spin_unlock_irqrestore(&t7l66xb->lock, flags);
}
static struct irq_chip t7l66xb_chip = {
- .name = "t7l66xb",
- .ack = t7l66xb_irq_mask,
- .mask = t7l66xb_irq_mask,
- .unmask = t7l66xb_irq_unmask,
+ .name = "t7l66xb",
+ .irq_ack = t7l66xb_irq_mask,
+ .irq_mask = t7l66xb_irq_mask,
+ .irq_unmask = t7l66xb_irq_unmask,
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1ea80d8ad915..9a238633a54d 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -527,41 +527,41 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
}
}
-static void tc6393xb_irq_ack(unsigned int irq)
+static void tc6393xb_irq_ack(struct irq_data *data)
{
}
-static void tc6393xb_irq_mask(unsigned int irq)
+static void tc6393xb_irq_mask(struct irq_data *data)
{
- struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&tc6393xb->lock, flags);
imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
- imr |= 1 << (irq - tc6393xb->irq_base);
+ imr |= 1 << (data->irq - tc6393xb->irq_base);
tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
-static void tc6393xb_irq_unmask(unsigned int irq)
+static void tc6393xb_irq_unmask(struct irq_data *data)
{
- struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
unsigned long flags;
u8 imr;
spin_lock_irqsave(&tc6393xb->lock, flags);
imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
- imr &= ~(1 << (irq - tc6393xb->irq_base));
+ imr &= ~(1 << (data->irq - tc6393xb->irq_base));
tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
static struct irq_chip tc6393xb_chip = {
- .name = "tc6393xb",
- .ack = tc6393xb_irq_ack,
- .mask = tc6393xb_irq_mask,
- .unmask = tc6393xb_irq_unmask,
+ .name = "tc6393xb",
+ .irq_ack = tc6393xb_irq_ack,
+ .irq_mask = tc6393xb_irq_mask,
+ .irq_unmask = tc6393xb_irq_unmask,
};
static void tc6393xb_attach_irq(struct platform_device *dev)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 90187fe33e04..93d5fdf020c7 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -34,7 +34,7 @@
#include <linux/i2c/tps65010.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
/*-------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b4931ab34929..e9018d1394ee 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -46,8 +46,6 @@
/* device id */
#define TPS6586X_VERSIONCRC 0xcd
-#define TPS658621A_VERSIONCRC 0x15
-#define TPS658621C_VERSIONCRC 0x2c
struct tps6586x_irq_data {
u8 mask_reg;
@@ -152,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
static inline int __tps6586x_writes(struct i2c_client *client, int reg,
int len, uint8_t *val)
{
- int ret;
+ int ret, i;
- ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
- return ret;
+ for (i = 0; i < len; i++) {
+ ret = __tps6586x_write(client, reg + i, *(val + i));
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -325,37 +323,37 @@ static int tps6586x_remove_subdevs(struct tps6586x *tps6586x)
return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
}
-static void tps6586x_irq_lock(unsigned int irq)
+static void tps6586x_irq_lock(struct irq_data *data)
{
- struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
mutex_lock(&tps6586x->irq_lock);
}
-static void tps6586x_irq_enable(unsigned int irq)
+static void tps6586x_irq_enable(struct irq_data *irq_data)
{
- struct tps6586x *tps6586x = get_irq_chip_data(irq);
- unsigned int __irq = irq - tps6586x->irq_base;
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+ unsigned int __irq = irq_data->irq - tps6586x->irq_base;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
tps6586x->irq_en |= (1 << __irq);
}
-static void tps6586x_irq_disable(unsigned int irq)
+static void tps6586x_irq_disable(struct irq_data *irq_data)
{
- struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->irq - tps6586x->irq_base;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
tps6586x->irq_en &= ~(1 << __irq);
}
-static void tps6586x_irq_sync_unlock(unsigned int irq)
+static void tps6586x_irq_sync_unlock(struct irq_data *data)
{
- struct tps6586x *tps6586x = get_irq_chip_data(irq);
+ struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
@@ -421,10 +419,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
tps6586x->irq_base = irq_base;
tps6586x->irq_chip.name = "tps6586x";
- tps6586x->irq_chip.enable = tps6586x_irq_enable;
- tps6586x->irq_chip.disable = tps6586x_irq_disable;
- tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
- tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+ tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
+ tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
+ tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
+ tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
int __irq = i + tps6586x->irq_base;
@@ -498,11 +496,7 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return -EIO;
}
- if ((ret != TPS658621A_VERSIONCRC) &&
- (ret != TPS658621C_VERSIONCRC)) {
- dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
- return -ENODEV;
- }
+ dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
if (tps6586x == NULL)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 12abd5b924b3..a35fa7dcbf53 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1003,7 +1003,7 @@ static int twl_remove(struct i2c_client *client)
}
/* NOTE: this driver only handles a single twl4030/tps659x0 chip */
-static int __init
+static int __devinit
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int status;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d3a1478004b..63a30e88908f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -599,38 +599,38 @@ static void twl4030_sih_do_edge(struct work_struct *work)
* completion, potentially including some re-ordering, of these requests.
*/
-static void twl4030_sih_mask(unsigned irq)
+static void twl4030_sih_mask(struct irq_data *data)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ struct sih_agent *sih = irq_data_get_irq_chip_data(data);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
- sih->imr |= BIT(irq - sih->irq_base);
+ sih->imr |= BIT(data->irq - sih->irq_base);
sih->imr_change_pending = true;
queue_work(wq, &sih->mask_work);
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static void twl4030_sih_unmask(unsigned irq)
+static void twl4030_sih_unmask(struct irq_data *data)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ struct sih_agent *sih = irq_data_get_irq_chip_data(data);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
- sih->imr &= ~BIT(irq - sih->irq_base);
+ sih->imr &= ~BIT(data->irq - sih->irq_base);
sih->imr_change_pending = true;
queue_work(wq, &sih->mask_work);
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
- struct irq_desc *desc = irq_to_desc(irq);
+ struct sih_agent *sih = irq_data_get_irq_chip_data(data);
+ struct irq_desc *desc = irq_to_desc(data->irq);
unsigned long flags;
if (!desc) {
- pr_err("twl4030: Invalid IRQ: %d\n", irq);
+ pr_err("twl4030: Invalid IRQ: %d\n", data->irq);
return -EINVAL;
}
@@ -641,7 +641,7 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
desc->status &= ~IRQ_TYPE_SENSE_MASK;
desc->status |= trigger;
- sih->edge_change |= BIT(irq - sih->irq_base);
+ sih->edge_change |= BIT(data->irq - sih->irq_base);
queue_work(wq, &sih->edge_work);
}
spin_unlock_irqrestore(&sih_agent_lock, flags);
@@ -650,9 +650,9 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
static struct irq_chip twl4030_sih_irq_chip = {
.name = "twl4030",
- .mask = twl4030_sih_mask,
- .unmask = twl4030_sih_unmask,
- .set_type = twl4030_sih_set_type,
+ .irq_mask = twl4030_sih_mask,
+ .irq_unmask = twl4030_sih_unmask,
+ .irq_set_type = twl4030_sih_set_type,
};
/*----------------------------------------------------------------------*/
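
Several of the converted chips (asic3, htcpld, twl4030, wm831x) also carry an .irq_set_type hook, which keeps its int return value but now receives the struct irq_data. A hedged sketch for a hypothetical "qux" chip, handling edge types only:

#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/types.h>

/* "qux" is hypothetical; only the callback shape matters here */
struct qux_chip {
	unsigned int irq_base;
	u32 rising, falling;
};

static int qux_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct qux_chip *qux = irq_data_get_irq_chip_data(data);
	u32 bit = 1 << (data->irq - qux->irq_base);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		qux->rising |= bit;
		qux->falling &= ~bit;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		qux->rising &= ~bit;
		qux->falling |= bit;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		qux->rising |= bit;
		qux->falling |= bit;
		break;
	default:
		return -EINVAL;	/* level triggers unsupported in this sketch */
	}

	/* a real driver would program its trigger registers here */
	return 0;
}
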
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 06c8955907e9..4082ed73613f 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -332,7 +332,7 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
*/
twl6030_irq_chip = dummy_irq_chip;
twl6030_irq_chip.name = "twl6030";
- twl6030_irq_chip.set_type = NULL;
+ twl6030_irq_chip.irq_set_type = NULL;
for (i = irq_base; i < irq_end; i++) {
set_irq_chip_and_handler(i, &twl6030_irq_chip,
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb414a78a..92b85e28a15e 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->close = ucb1x00_ts_close;
__set_bit(EV_ABS, idev->evbit);
- __set_bit(ABS_X, idev->absbit);
- __set_bit(ABS_Y, idev->absbit);
- __set_bit(ABS_PRESSURE, idev->absbit);
input_set_drvdata(idev, ts);
+ ucb1x00_adc_enable(ts->ucb);
+ ts->x_res = ucb1x00_ts_read_xres(ts);
+ ts->y_res = ucb1x00_ts_read_yres(ts);
+ ucb1x00_adc_disable(ts->ucb);
+
+ input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
err = input_register_device(idev);
if (err)
goto fail;
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index ebb059765edd..348052aa5dbf 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -112,7 +112,7 @@ out:
return ret;
}
-static void vx855_remove(struct pci_dev *pdev)
+static void __devexit vx855_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
pci_disable_device(pdev);
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 76cadcf3b1fe..3fe9a58fe6c7 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1541,6 +1541,12 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev);
break;
+ case WM8326:
+ parent = WM8326;
+ wm831x->num_gpio = 12;
+ dev_info(wm831x->dev, "WM8326 revision %c\n", 'A' + rev);
+ break;
+
default:
dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
ret = -EINVAL;
@@ -1610,18 +1616,9 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
break;
case WM8320:
- ret = mfd_add_devices(wm831x->dev, -1,
- wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, 0);
- break;
-
case WM8321:
- ret = mfd_add_devices(wm831x->dev, -1,
- wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, 0);
- break;
-
case WM8325:
+ case WM8326:
ret = mfd_add_devices(wm831x->dev, -1,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, wm831x->irq_base);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 156b19859e81..3853fa8e7cc2 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -94,9 +94,9 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+static int wm831x_i2c_suspend(struct device *dev)
{
- struct wm831x *wm831x = i2c_get_clientdata(i2c);
+ struct wm831x *wm831x = dev_get_drvdata(dev);
return wm831x_device_suspend(wm831x);
}
@@ -108,19 +108,23 @@ static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8320", WM8320 },
{ "wm8321", WM8321 },
{ "wm8325", WM8325 },
+ { "wm8326", WM8326 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
+static const struct dev_pm_ops wm831x_pm_ops = {
+ .suspend = wm831x_i2c_suspend,
+};
static struct i2c_driver wm831x_i2c_driver = {
.driver = {
- .name = "wm831x",
- .owner = THIS_MODULE,
+ .name = "wm831x",
+ .owner = THIS_MODULE,
+ .pm = &wm831x_pm_ops,
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
- .suspend = wm831x_i2c_suspend,
.id_table = wm831x_i2c_id,
};
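
The wm831x-i2c change above is the standard move from the legacy i2c-level suspend callback to dev_pm_ops. When both suspend and resume handlers exist, the same conversion is usually spelled with the SIMPLE_DEV_PM_OPS helper; a hedged sketch for a hypothetical "bar" I2C driver (probe/remove/id_table omitted):

#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int bar_suspend(struct device *dev)
{
	dev_dbg(dev, "suspending\n");	/* device-specific save would go here */
	return 0;
}

static int bar_resume(struct device *dev)
{
	dev_dbg(dev, "resuming\n");	/* device-specific restore would go here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume);

static struct i2c_driver bar_i2c_driver = {
	.driver = {
		.name	= "bar",
		.owner	= THIS_MODULE,
		.pm	= &bar_pm_ops,
	},
};
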
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 294183b6260b..f7192d438aab 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -345,16 +345,16 @@ static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
return &wm831x_irqs[irq - wm831x->irq_base];
}
-static void wm831x_irq_lock(unsigned int irq)
+static void wm831x_irq_lock(struct irq_data *data)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
mutex_lock(&wm831x->irq_lock);
}
-static void wm831x_irq_sync_unlock(unsigned int irq)
+static void wm831x_irq_sync_unlock(struct irq_data *data)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -371,28 +371,30 @@ static void wm831x_irq_sync_unlock(unsigned int irq)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(unsigned int irq)
+static void wm831x_irq_unmask(struct irq_data *data)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
- struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+ struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+ data->irq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(unsigned int irq)
+static void wm831x_irq_mask(struct irq_data *data)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
- struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+ struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+ data->irq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
-static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
- int val;
+ struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+ int val, irq;
- irq = irq - wm831x->irq_base;
+ irq = data->irq - wm831x->irq_base;
if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
/* Ignore internal-only IRQs */
@@ -421,12 +423,12 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
}
static struct irq_chip wm831x_irq_chip = {
- .name = "wm831x",
- .bus_lock = wm831x_irq_lock,
- .bus_sync_unlock = wm831x_irq_sync_unlock,
- .mask = wm831x_irq_mask,
- .unmask = wm831x_irq_unmask,
- .set_type = wm831x_irq_set_type,
+ .name = "wm831x",
+ .irq_bus_lock = wm831x_irq_lock,
+ .irq_bus_sync_unlock = wm831x_irq_sync_unlock,
+ .irq_mask = wm831x_irq_mask,
+ .irq_unmask = wm831x_irq_unmask,
+ .irq_set_type = wm831x_irq_set_type,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -515,6 +517,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
+ /* Try to flag /IRQ as a wake source; there are a number of
+ * unconditional wake sources in the PMIC so this isn't
+ * conditional but we don't actually care *too* much if it
+ * fails.
+ */
+ ret = enable_irq_wake(irq);
+ if (ret != 0) {
+ dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
+ ret);
+ }
+
wm831x->irq = irq;
wm831x->irq_base = pdata->irq_base;
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 2789b151b0f9..0a8f772be88c 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -81,6 +81,8 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
type = WM8321;
else if (strcmp(spi->modalias, "wm8325") == 0)
type = WM8325;
+ else if (strcmp(spi->modalias, "wm8326") == 0)
+ type = WM8326;
else {
dev_err(&spi->dev, "Unknown device type\n");
return -EINVAL;
@@ -184,6 +186,17 @@ static struct spi_driver wm8325_spi_driver = {
.suspend = wm831x_spi_suspend,
};
+static struct spi_driver wm8326_spi_driver = {
+ .driver = {
+ .name = "wm8326",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm831x_spi_probe,
+ .remove = __devexit_p(wm831x_spi_remove),
+ .suspend = wm831x_spi_suspend,
+};
+
static int __init wm831x_spi_init(void)
{
int ret;
@@ -212,12 +225,17 @@ static int __init wm831x_spi_init(void)
if (ret != 0)
pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
+ ret = spi_register_driver(&wm8326_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register WM8326 SPI driver: %d\n", ret);
+
return 0;
}
subsys_initcall(wm831x_spi_init);
static void __exit wm831x_spi_exit(void)
{
+ spi_unregister_driver(&wm8326_spi_driver);
spi_unregister_driver(&wm8325_spi_driver);
spi_unregister_driver(&wm8321_spi_driver);
spi_unregister_driver(&wm8320_spi_driver);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index f56c9adf9493..5839966ebd85 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -417,16 +417,16 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data)
return IRQ_HANDLED;
}
-static void wm8350_irq_lock(unsigned int irq)
+static void wm8350_irq_lock(struct irq_data *data)
{
- struct wm8350 *wm8350 = get_irq_chip_data(irq);
+ struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
mutex_lock(&wm8350->irq_lock);
}
-static void wm8350_irq_sync_unlock(unsigned int irq)
+static void wm8350_irq_sync_unlock(struct irq_data *data)
{
- struct wm8350 *wm8350 = get_irq_chip_data(irq);
+ struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
@@ -442,28 +442,30 @@ static void wm8350_irq_sync_unlock(unsigned int irq)
mutex_unlock(&wm8350->irq_lock);
}
-static void wm8350_irq_enable(unsigned int irq)
+static void wm8350_irq_enable(struct irq_data *data)
{
- struct wm8350 *wm8350 = get_irq_chip_data(irq);
- struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+ struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+ struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+ data->irq);
wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask;
}
-static void wm8350_irq_disable(unsigned int irq)
+static void wm8350_irq_disable(struct irq_data *data)
{
- struct wm8350 *wm8350 = get_irq_chip_data(irq);
- struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+ struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+ struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+ data->irq);
wm8350->irq_masks[irq_data->reg] |= irq_data->mask;
}
static struct irq_chip wm8350_irq_chip = {
- .name = "wm8350",
- .bus_lock = wm8350_irq_lock,
- .bus_sync_unlock = wm8350_irq_sync_unlock,
- .disable = wm8350_irq_disable,
- .enable = wm8350_irq_enable,
+ .name = "wm8350",
+ .irq_bus_lock = wm8350_irq_lock,
+ .irq_bus_sync_unlock = wm8350_irq_sync_unlock,
+ .irq_disable = wm8350_irq_disable,
+ .irq_enable = wm8350_irq_enable,
};
int wm8350_irq_init(struct wm8350 *wm8350, int irq,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index b3b2aaf89dbe..f4016a075fd6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
@@ -169,8 +170,16 @@ out:
EXPORT_SYMBOL_GPL(wm8994_set_bits);
static struct mfd_cell wm8994_regulator_devs[] = {
- { .name = "wm8994-ldo", .id = 1 },
- { .name = "wm8994-ldo", .id = 2 },
+ {
+ .name = "wm8994-ldo",
+ .id = 1,
+ .pm_runtime_no_callbacks = true,
+ },
+ {
+ .name = "wm8994-ldo",
+ .id = 2,
+ .pm_runtime_no_callbacks = true,
+ },
};
static struct resource wm8994_codec_resources[] = {
@@ -200,6 +209,7 @@ static struct mfd_cell wm8994_devs[] = {
.name = "wm8994-gpio",
.num_resources = ARRAY_SIZE(wm8994_gpio_resources),
.resources = wm8994_gpio_resources,
+ .pm_runtime_no_callbacks = true,
},
};
@@ -218,12 +228,34 @@ static const char *wm8994_main_supplies[] = {
"SPKVDD2",
};
+static const char *wm8958_main_supplies[] = {
+ "DBVDD1",
+ "DBVDD2",
+ "DBVDD3",
+ "DCVDD",
+ "AVDD1",
+ "AVDD2",
+ "CPVDD",
+ "SPKVDD1",
+ "SPKVDD2",
+};
+
#ifdef CONFIG_PM
-static int wm8994_device_suspend(struct device *dev)
+static int wm8994_suspend(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* Don't actually go through with the suspend if the CODEC is
+ * still active (eg, for audio passthrough from CP). */
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & WM8994_VMID_SEL_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
/* GPIO configuration state is saved here since we may be configuring
* the GPIO alternate functions even if we're not using the gpiolib
* driver for them.
@@ -239,7 +271,9 @@ static int wm8994_device_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
- ret = regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+ wm8994->suspended = true;
+
+ ret = regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(dev, "Failed to disable supplies: %d\n", ret);
@@ -249,12 +283,16 @@ static int wm8994_device_suspend(struct device *dev)
return 0;
}
-static int wm8994_device_resume(struct device *dev)
+static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+ /* We may have lied to the PM core about suspending */
+ if (!wm8994->suspended)
+ return 0;
+
+ ret = regulator_bulk_enable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(dev, "Failed to enable supplies: %d\n", ret);
@@ -276,6 +314,8 @@ static int wm8994_device_resume(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+ wm8994->suspended = false;
+
return 0;
}
#endif
@@ -305,9 +345,10 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
/*
* Instantiate the generic non-control parts of the device.
*/
-static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
+static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ const char *devname;
int ret, i;
mutex_init(&wm8994->io_lock);
@@ -323,25 +364,48 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
goto err;
}
+ switch (wm8994->type) {
+ case WM8994:
+ wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
+ break;
+ case WM8958:
+ wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
+ break;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
- ARRAY_SIZE(wm8994_main_supplies),
+ wm8994->num_supplies,
GFP_KERNEL);
if (!wm8994->supplies) {
ret = -ENOMEM;
goto err;
}
- for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
- wm8994->supplies[i].supply = wm8994_main_supplies[i];
-
- ret = regulator_bulk_get(wm8994->dev, ARRAY_SIZE(wm8994_main_supplies),
+ switch (wm8994->type) {
+ case WM8994:
+ for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
+ wm8994->supplies[i].supply = wm8994_main_supplies[i];
+ break;
+ case WM8958:
+ for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
+ wm8994->supplies[i].supply = wm8958_main_supplies[i];
+ break;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+
+ ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
goto err_supplies;
}
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+ ret = regulator_bulk_enable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
@@ -353,7 +417,22 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
dev_err(wm8994->dev, "Failed to read ID register\n");
goto err_enable;
}
- if (ret != 0x8994) {
+ switch (ret) {
+ case 0x8994:
+ devname = "WM8994";
+ if (wm8994->type != WM8994)
+ dev_warn(wm8994->dev, "Device registered as type %d\n",
+ wm8994->type);
+ wm8994->type = WM8994;
+ break;
+ case 0x8958:
+ devname = "WM8958";
+ if (wm8994->type != WM8958)
+ dev_warn(wm8994->dev, "Device registered as type %d\n",
+ wm8994->type);
+ wm8994->type = WM8958;
+ break;
+ default:
dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
ret);
ret = -EINVAL;
@@ -370,14 +449,16 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
switch (ret) {
case 0:
case 1:
- dev_warn(wm8994->dev, "revision %c not fully supported\n",
- 'A' + ret);
+ if (wm8994->type == WM8994)
+ dev_warn(wm8994->dev,
+ "revision %c not fully supported\n",
+ 'A' + ret);
break;
default:
- dev_info(wm8994->dev, "revision %c\n", 'A' + ret);
break;
}
+ dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
if (pdata) {
wm8994->irq_base = pdata->irq_base;
@@ -418,15 +499,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
goto err_irq;
}
+ pm_runtime_enable(wm8994->dev);
+ pm_runtime_resume(wm8994->dev);
+
return 0;
err_irq:
wm8994_irq_exit(wm8994);
err_enable:
- regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+ regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
err_get:
- regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+ regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err_supplies:
kfree(wm8994->supplies);
err:
@@ -437,11 +521,12 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
+ pm_runtime_disable(wm8994->dev);
mfd_remove_devices(wm8994->dev);
wm8994_irq_exit(wm8994);
- regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+ regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
- regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+ regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
kfree(wm8994->supplies);
kfree(wm8994);
}
@@ -506,8 +591,9 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->read_dev = wm8994_i2c_read_device;
wm8994->write_dev = wm8994_i2c_write_device;
wm8994->irq = i2c->irq;
+ wm8994->type = id->driver_data;
- return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
+ return wm8994_device_init(wm8994, i2c->irq);
}
static int wm8994_i2c_remove(struct i2c_client *i2c)
@@ -519,36 +605,23 @@ static int wm8994_i2c_remove(struct i2c_client *i2c)
return 0;
}
-#ifdef CONFIG_PM
-static int wm8994_i2c_suspend(struct i2c_client *i2c, pm_message_t state)
-{
- return wm8994_device_suspend(&i2c->dev);
-}
-
-static int wm8994_i2c_resume(struct i2c_client *i2c)
-{
- return wm8994_device_resume(&i2c->dev);
-}
-#else
-#define wm8994_i2c_suspend NULL
-#define wm8994_i2c_resume NULL
-#endif
-
static const struct i2c_device_id wm8994_i2c_id[] = {
- { "wm8994", 0 },
+ { "wm8994", WM8994 },
+ { "wm8958", WM8958 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
+UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
+
static struct i2c_driver wm8994_i2c_driver = {
.driver = {
- .name = "wm8994",
- .owner = THIS_MODULE,
+ .name = "wm8994",
+ .owner = THIS_MODULE,
+ .pm = &wm8994_pm_ops,
},
.probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
- .suspend = wm8994_i2c_suspend,
- .resume = wm8994_i2c_resume,
.id_table = wm8994_i2c_id,
};
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 8400eb1ee5db..29e8faf9c01c 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -156,16 +156,16 @@ static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
return &wm8994_irqs[irq - wm8994->irq_base];
}
-static void wm8994_irq_lock(unsigned int irq)
+static void wm8994_irq_lock(struct irq_data *data)
{
- struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
mutex_lock(&wm8994->irq_lock);
}
-static void wm8994_irq_sync_unlock(unsigned int irq)
+static void wm8994_irq_sync_unlock(struct irq_data *data)
{
- struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
int i;
for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
@@ -182,28 +182,30 @@ static void wm8994_irq_sync_unlock(unsigned int irq)
mutex_unlock(&wm8994->irq_lock);
}
-static void wm8994_irq_unmask(unsigned int irq)
+static void wm8994_irq_unmask(struct irq_data *data)
{
- struct wm8994 *wm8994 = get_irq_chip_data(irq);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+ struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+ data->irq);
wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm8994_irq_mask(unsigned int irq)
+static void wm8994_irq_mask(struct irq_data *data)
{
- struct wm8994 *wm8994 = get_irq_chip_data(irq);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+ struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+ data->irq);
wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
static struct irq_chip wm8994_irq_chip = {
- .name = "wm8994",
- .bus_lock = wm8994_irq_lock,
- .bus_sync_unlock = wm8994_irq_sync_unlock,
- .mask = wm8994_irq_mask,
- .unmask = wm8994_irq_unmask,
+ .name = "wm8994",
+ .irq_bus_lock = wm8994_irq_lock,
+ .irq_bus_sync_unlock = wm8994_irq_sync_unlock,
+ .irq_mask = wm8994_irq_mask,
+ .irq_unmask = wm8994_irq_unmask,
};
/* The processing of the primary interrupt occurs in a thread so that
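As the comment above notes (and as the switch of the irq_chip callbacks to struct irq_data also reflects), all register access for this interrupt controller goes over a sleeping bus, so the primary interrupt is handled in a thread. A minimal, illustrative sketch of that pattern with hypothetical names (foo_chip, foo_irq_thread) — not the wm8994 code itself:

#include <linux/interrupt.h>

/* Hypothetical device type, used only for this sketch. */
struct foo_chip {
        int dummy;
};

static irqreturn_t foo_irq_thread(int irq, void *data)
{
        struct foo_chip *chip = data;

        /*
         * Runs in process context, so sleeping I2C/SPI reads of the
         * interrupt status registers are allowed here.
         */
        (void)chip;
        return IRQ_HANDLED;
}

static int foo_irq_init(struct foo_chip *chip, int irq)
{
        /* NULL primary handler: all handling happens in the thread. */
        return request_threaded_irq(irq, NULL, foo_irq_thread,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "foo", chip);
}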
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4d073f1e4502..cc8e49db45fe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -64,7 +64,7 @@ config ATMEL_PWM
config AB8500_PWM
bool "AB8500 PWM support"
- depends on AB8500_CORE
+ depends on AB8500_CORE && ARCH_U8500
select HAVE_PWM
help
This driver exports functions to enable/disable/config/free Pulse
@@ -402,7 +402,7 @@ config TI_DAC7512
DAC7512 16-bit digital-to-analog converter.
This driver can also be built as a module. If so, the module
- will be calles ti_dac7512.
+ will be called ti_dac7512.
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index 9e3879ef58f2..fe8616a8d287 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -313,7 +313,7 @@ static int __init charlcd_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
schedule_delayed_work(&lcd->init_work, 0);
- dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n",
+ dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
lcd->phybase);
return 0;
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1a5315..b6e1c9a6679e 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
{ "bmp085", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
static struct i2c_driver bmp085_driver = {
.driver = {
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 6f6218061b0d..d02d302ee6d5 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -16,12 +16,11 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/cs5535.h>
#include <linux/slab.h>
#define DRV_NAME "cs5535-mfgpt"
-#define MFGPT_BAR 2
static int mfgpt_reset_timers;
module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
@@ -37,7 +36,7 @@ static struct cs5535_mfgpt_chip {
DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
resource_size_t base;
- struct pci_dev *pdev;
+ struct platform_device *pdev;
spinlock_t lock;
int initialized;
} cs5535_mfgpt_chip;
@@ -290,10 +289,10 @@ static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
return timers;
}
-static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
{
- int err, t;
+ struct resource *res;
+ int err = -EIO, t;
/* There are two ways to get the MFGPT base address; one is by
* fetching it from MSR_LBAR_MFGPT, the other is by reading the
@@ -302,29 +301,27 @@ static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
* it turns out to be unreliable in the face of crappy BIOSes, we
* can always go back to using MSRs.. */
- err = pci_enable_device_io(pdev);
- if (err) {
- dev_err(&pdev->dev, "can't enable device IO\n");
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
goto done;
}
- err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
+ if (!request_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "can't request region\n");
goto done;
}
/* set up the driver-specific struct */
- cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
+ cs5535_mfgpt_chip.base = res->start;
cs5535_mfgpt_chip.pdev = pdev;
spin_lock_init(&cs5535_mfgpt_chip.lock);
- dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
- (unsigned long long) cs5535_mfgpt_chip.base);
+ dev_info(&pdev->dev, "reserved resource region %pR\n", res);
/* detect the available timers */
t = scan_timers(&cs5535_mfgpt_chip);
- dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
+ dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
cs5535_mfgpt_chip.initialized = 1;
return 0;
@@ -332,47 +329,18 @@ done:
return err;
}
-static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
- { 0, },
+static struct platform_driver cs5535_mfgpt_drv = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = cs5535_mfgpt_probe,
};
-MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
-/*
- * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
- * registration stuff. It only allows only one driver to bind to each PCI
- * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
- * device. Instead, we manually scan for the PCI device, request a single
- * region, and keep track of the devices that we're using.
- */
-
-static int __init cs5535_mfgpt_scan_pci(void)
-{
- struct pci_dev *pdev;
- int err = -ENODEV;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
- pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
- cs5535_mfgpt_pci_tbl[i].device, NULL);
- if (pdev) {
- err = cs5535_mfgpt_probe(pdev,
- &cs5535_mfgpt_pci_tbl[i]);
- if (err)
- pci_dev_put(pdev);
-
- /* we only support a single CS5535/6 southbridge */
- break;
- }
- }
-
- return err;
-}
static int __init cs5535_mfgpt_init(void)
{
- return cs5535_mfgpt_scan_pci();
+ return platform_driver_register(&cs5535_mfgpt_drv);
}
module_init(cs5535_mfgpt_init);
@@ -380,3 +348,4 @@ module_init(cs5535_mfgpt_init);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
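With the manual PCI scanning removed, the probe above only works if something else registers a "cs5535-mfgpt" platform device carrying the MFGPT port range as an IORESOURCE_IO resource (presumably the CS5535/CS5536 southbridge/MFD support). A hedged sketch of how such a device could be created; the names and the base/size values below are placeholders, not the real registration code:

#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Placeholder I/O range; the real base comes from the southbridge LBAR. */
static struct resource foo_mfgpt_resource = {
        .start = 0x6200,
        .end   = 0x6200 + 0x3f,
        .flags = IORESOURCE_IO,
};

static struct platform_device *foo_register_mfgpt(void)
{
        return platform_device_register_simple("cs5535-mfgpt", -1,
                                               &foo_mfgpt_resource, 1);
}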
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 559b0b3c16c3..ab1ad41786d1 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -20,6 +20,7 @@
#include <linux/log2.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
@@ -457,6 +458,27 @@ static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static void at24_get_ofdata(struct i2c_client *client,
+ struct at24_platform_data *chip)
+{
+ const __be32 *val;
+ struct device_node *node = client->dev.of_node;
+
+ if (node) {
+ if (of_get_property(node, "read-only", NULL))
+ chip->flags |= AT24_FLAG_READONLY;
+ val = of_get_property(node, "pagesize", NULL);
+ if (val)
+ chip->page_size = be32_to_cpup(val);
+ }
+}
+#else
+static void at24_get_ofdata(struct i2c_client *client,
+ struct at24_platform_data *chip)
+{ }
+#endif /* CONFIG_OF */
+
static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
@@ -485,6 +507,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
*/
chip.page_size = 1;
+ /* update chipdata if OF is present */
+ at24_get_ofdata(client, &chip);
+
chip.setup = NULL;
chip.context = NULL;
}
@@ -492,6 +517,11 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!is_power_of_2(chip.byte_len))
dev_warn(&client->dev,
"byte_len looks suspicious (no power of 2)!\n");
+ if (!chip.page_size) {
+ dev_err(&client->dev, "page_size must not be 0!\n");
+ err = -EINVAL;
+ goto err_out;
+ }
if (!is_power_of_2(chip.page_size))
dev_warn(&client->dev,
"page_size looks suspicious (no power of 2)!\n");
@@ -597,19 +627,15 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, at24);
- dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
+ dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
at24->bin.size, client->name,
- writable ? "(writable)" : "(read-only)");
+ writable ? "writable" : "read-only", at24->write_max);
if (use_smbus == I2C_SMBUS_WORD_DATA ||
use_smbus == I2C_SMBUS_BYTE_DATA) {
dev_notice(&client->dev, "Falling back to %s reads, "
"performance will suffer\n", use_smbus ==
I2C_SMBUS_WORD_DATA ? "word" : "byte");
}
- dev_dbg(&client->dev,
- "page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
- chip.page_size, num_addresses,
- at24->write_max, use_smbus);
/* export data to kernel code */
if (chip.setup)
@@ -660,6 +686,11 @@ static struct i2c_driver at24_driver = {
static int __init at24_init(void)
{
+ if (!io_limit) {
+ pr_err("at24: io_limit must not be 0!\n");
+ return -EINVAL;
+ }
+
io_limit = rounddown_pow_of_two(io_limit);
return i2c_add_driver(&at24_driver);
}
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852dff40b..44d4475a09dd 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("tifm");
+ workqueue = create_freezable_workqueue("tifm");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71aa..6df5a55da110 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
* fear that guest will need it. Host may reject some pages, we need to
* check the return value and maybe submit a different page.
*/
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+ unsigned int *hv_status)
{
unsigned long status, dummy;
u32 pfn32;
@@ -326,7 +327,7 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
STATS_INC(b->stats.lock);
- status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+ *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
if (vmballoon_check_status(b, status))
return true;
@@ -410,6 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
{
struct page *page;
gfp_t flags;
+ unsigned int hv_status;
bool locked = false;
do {
@@ -429,11 +431,12 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
}
/* inform monitor */
- locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
if (!locked) {
STATS_INC(b->stats.refused_alloc);
- if (b->reset_required) {
+ if (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
__free_page(page);
return -EIO;
}
@@ -782,7 +785,7 @@ static int __init vmballoon_init(void)
if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
- vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+ vmballoon_wq = create_freezable_workqueue("vmmemctl");
if (!vmballoon_wq) {
pr_err("failed to create workqueue\n");
return -ENOMEM;
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416b9ef0..2a876c4099cd 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@ config MMC_BLOCK
config MMC_BLOCK_MINORS
int "Number of minors per block device"
+ depends on MMC_BLOCK
range 4 256
default 8
help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 217f82037fc1..bfc8a8ae55df 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -257,7 +257,7 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
- printk(KERN_ERR "%s: error %d sending status comand",
+ printk(KERN_ERR "%s: error %d sending status command",
req->rq_disk->disk_name, err);
return cmd.resp[0];
}
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef8..ef103871517f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
This option sets a default which can be overridden by the
module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+ bool "MMC host clock gating (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This will attempt to aggressively gate the clock to the MMC card.
+ This is done to save power due to gating off the logic and bus
+ noise when the MMC card is not in use. Your host driver has to
+ support handling this in order for it to be of any use.
+
+ If unsure, say N.
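When this option is enabled, the core gates the clock by calling the host's set_ios() with ios->clock set to 0 and later restores the old frequency (see the mmc_gate_clock()/mmc_ungate_clock() helpers added further down). A minimal sketch of what "handling this" could look like in a host driver, using a hypothetical foo_host with a clk handle rather than any particular driver:

#include <linux/clk.h>
#include <linux/mmc/host.h>

/* Hypothetical host driver state, for illustration only. */
struct foo_host {
        struct clk *clk;
        bool clk_enabled;
};

static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct foo_host *host = mmc_priv(mmc);

        if (ios->clock == 0 && host->clk_enabled) {
                /* Core is gating the clock: stop the block clock. */
                clk_disable(host->clk);
                host->clk_enabled = false;
        } else if (ios->clock != 0 && !host->clk_enabled) {
                /* Core is ungating: restore the requested frequency. */
                clk_set_rate(host->clk, ios->clock);
                clk_enable(host->clk);
                host->clk_enabled = true;
        }
        /* bus width, power mode etc. handled as before */
}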
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a2a317..63667a8f140c 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@ int mmc_add_card(struct mmc_card *card)
type, card->rca);
}
- ret = device_add(&card->dev);
- if (ret)
- return ret;
-
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ ret = device_add(&card->dev);
+ if (ret)
+ return ret;
+
mmc_card_set_present(card);
return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780faf85a..6625c057be05 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
+
+ mmc_host_clk_gate(host);
}
}
@@ -190,6 +193,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
+ mmc_host_clk_ungate(host);
host->ops->request(host, mrq);
}
@@ -295,8 +299,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
- timeout_us += data->timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
+ if (mmc_host_clk_rate(card->host))
+ timeout_us += data->timeout_clks * 1000 /
+ (mmc_host_clk_rate(card->host) / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
@@ -614,6 +619,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
+ if (ios->clock > 0)
+ mmc_set_ungated(host);
host->ops->set_ios(host, ios);
}
@@ -641,6 +648,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_old = host->ios.clock;
+ host->ios.clock = 0;
+ host->clk_gated = true;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+ /*
+ * We should previously have gated the clock, so the clock shall
+ * be 0 here! The clock may however be 0 during initialization,
+ * when some request operations are performed before setting
+ * the frequency. When ungate is requested in that situation
+ * we just ignore the call.
+ */
+ if (host->clk_old) {
+ BUG_ON(host->ios.clock);
+ /* This call will also set host->clk_gated to false */
+ mmc_set_clock(host, host->clk_old);
+ }
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /*
+ * We've been given a new frequency while the clock is gated,
+ * so make sure we regard this as ungating it.
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_gated = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
@@ -1424,35 +1486,57 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+ host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+#endif
+ mmc_power_up(host);
+ sdio_reset(host);
+ mmc_go_idle(host);
+
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ /* Order's important: probe SDIO, then SD, then MMC */
+ if (!mmc_attach_sdio(host))
+ return 0;
+ if (!mmc_attach_sd(host))
+ return 0;
+ if (!mmc_attach_mmc(host))
+ return 0;
+
+ mmc_power_off(host);
+ return -EIO;
+}
+
void mmc_rescan(struct work_struct *work)
{
+ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
- u32 ocr;
- int err;
- unsigned long flags;
int i;
- const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
-
- spin_lock_irqsave(&host->lock, flags);
- if (host->rescan_disable) {
- spin_unlock_irqrestore(&host->lock, flags);
+ if (host->rescan_disable)
return;
- }
-
- spin_unlock_irqrestore(&host->lock, flags);
-
mmc_bus_get(host);
- /* if there is a card registered, check whether it is still present */
- if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+ /*
+ * if there is a _removable_ card registered, check whether it is
+ * still present
+ */
+ if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+ && mmc_card_is_removable(host))
host->bus_ops->detect(host);
+ /*
+ * Let mmc_bus_put() free the bus/bus_ops if we've found that
+ * the card is no longer present.
+ */
mmc_bus_put(host);
-
-
mmc_bus_get(host);
/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@ void mmc_rescan(struct work_struct *work)
goto out;
}
- /* detect a newly inserted card */
-
/*
* Only we can add a new handler, so it's safe to
* release the lock here.
@@ -1472,72 +1554,16 @@ void mmc_rescan(struct work_struct *work)
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
+ mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- mmc_claim_host(host);
-
- if (freqs[i] >= host->f_min)
- host->f_init = freqs[i];
- else if (!i || freqs[i-1] > host->f_min)
- host->f_init = host->f_min;
- else {
- mmc_release_host(host);
- goto out;
- }
-#ifdef CONFIG_MMC_DEBUG
- pr_info("%s: %s: trying to init card at %u Hz\n",
- mmc_hostname(host), __func__, host->f_init);
-#endif
- mmc_power_up(host);
- sdio_reset(host);
- mmc_go_idle(host);
-
- mmc_send_if_cond(host, host->ocr_avail);
-
- /*
- * First we search for SDIO...
- */
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sdio(host, ocr)) {
- mmc_claim_host(host);
- /*
- * Try SDMEM (but not MMC) even if SDIO
- * is broken.
- */
- if (mmc_send_app_op_cond(host, 0, &ocr))
- goto out_fail;
-
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- }
- goto out;
- }
-
- /*
- * ...then normal SD...
- */
- err = mmc_send_app_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
- /*
- * ...and finally MMC.
- */
- err = mmc_send_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_mmc(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
-out_fail:
- mmc_release_host(host);
- mmc_power_off(host);
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ break;
+ if (freqs[i] < host->f_min)
+ break;
}
-out:
+ mmc_release_host(host);
+
+ out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
@@ -1721,6 +1747,18 @@ int mmc_resume_host(struct mmc_host *host)
if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
mmc_power_up(host);
mmc_select_voltage(host, host->ocr);
+ /*
+ * Tell runtime PM core we just powered up the card,
+ * since it still believes the card is powered off.
+ * Note that currently runtime PM is only enabled
+ * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+ */
+ if (mmc_card_sdio(host->card) &&
+ (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_enable(&host->card->dev);
+ }
}
BUG_ON(!host->bus_ops->resume);
err = host->bus_ops->resume(host);
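The mmc_set_data_timeout() hunk above now derives the per-clock part of the timeout from mmc_host_clk_rate(), which reports the cached frequency even while the clock is gated, and skips the term entirely when no frequency has been programmed yet. A worked example with illustrative numbers:

/*
 * timeout_clks = 100 and a 25 MHz host clock give
 *
 *      timeout_us += 100 * 1000 / (25000000 / 1000)
 *                  = 100000 / 25000
 *                  = 4 us
 *
 * While the clock is gated, mmc_host_clk_rate() still reports the
 * cached pre-gating frequency, so the new check only skips the term
 * (avoiding a divide by zero) before any frequency has been set.
 */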
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd11bcf..ca1fdde29df6 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
/* Module parameters */
extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405fd742..998797ed67a6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&mmc_clock_fops))
goto err_node;
+#ifdef CONFIG_MMC_CLKGATE
+ if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+ root, &host->clk_delay))
+ goto err_node;
+#endif
return;
err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af27e03a..b3ac6c5bc5c6 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "core.h"
#include "host.h"
@@ -50,6 +52,205 @@ void mmc_unregister_host_class(void)
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+ unsigned long tick_ns;
+ unsigned long freq = host->ios.clock;
+ unsigned long flags;
+
+ if (!freq) {
+ pr_debug("%s: frequency set to 0 in disable function, "
+ "this means the clock is already disabled.\n",
+ mmc_hostname(host));
+ return;
+ }
+ /*
+ * New requests may have appeared while we were scheduling,
+ * then there is no reason to delay the check before
+ * clk_disable().
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+
+ /*
+ * Delay n bus cycles (at least 8 from MMC spec) before attempting
+ * to disable the MCI block clock. The reference count may have
+ * gone up again after this delay due to rescheduling!
+ */
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ tick_ns = DIV_ROUND_UP(1000000000, freq);
+ ndelay(host->clk_delay * tick_ns);
+ } else {
+ /* New users appeared while waiting for this work */
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return;
+ }
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ /* This will set host->ios.clock to 0 */
+ mmc_gate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+ struct mmc_host *host = container_of(work, struct mmc_host,
+ clk_gate_work);
+
+ mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ * mmc_host_clk_ungate - ungate hardware MCI clocks
+ * @host: host to ungate.
+ *
+ * Makes sure the host ios.clock is restored to a non-zero value
+ * past this call. Increase clock reference count and ungate clock
+ * if we're the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_ungate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+ }
+ host->clk_requests++;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_may_gate_card - check if this card may be gated
+ * @card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ /* If there is no card we may gate it */
+ if (!card)
+ return true;
+ /*
+ * Don't gate SDIO cards! These need to be clocked at all times
+ * since they may be independent systems generating interrupts
+ * and other events. The clock requests counter from the core will
+ * go down to zero since the core does not need it, but we will not
+ * gate the clock, because there is somebody out there that may still
+ * be using it.
+ */
+ if (mmc_card_sdio(card))
+ return false;
+
+ return true;
+}
+
+/**
+ * mmc_host_clk_gate - gate off hardware MCI clocks
+ * @host: host to gate.
+ *
+ * Calls the host driver with ios.clock set to zero as often as possible
+ * in order to gate off hardware MCI clocks. Decrease clock reference
+ * count and schedule disabling of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_requests--;
+ if (mmc_host_may_gate_card(host->card) &&
+ !host->clk_requests)
+ schedule_work(&host->clk_gate_work);
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ * mmc_host_clk_rate - get current clock frequency setting
+ * @host: host to get the clock frequency for.
+ *
+ * Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ unsigned long freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated)
+ freq = host->clk_old;
+ else
+ freq = host->ios.clock;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return freq;
+}
+
+/**
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+ host->clk_requests = 0;
+ /* Hold MCI clock for 8 cycles by default */
+ host->clk_delay = 8;
+ host->clk_gated = false;
+ INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ spin_lock_init(&host->clk_lock);
+ mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+ /*
+ * Wait for any outstanding gate and then make sure we're
+ * ungated before exiting.
+ */
+ if (cancel_work_sync(&host->clk_gate_work))
+ mmc_host_clk_gate_delayed(host);
+ if (host->clk_gated)
+ mmc_host_clk_ungate(host);
+ /* There should be only one user now */
+ WARN_ON(host->clk_requests > 1);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
@@ -82,6 +283,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
+ mmc_host_clk_init(host);
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
+
+ mmc_host_clk_exit(host);
}
EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@ void mmc_free_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_free_host);
-
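mmc_host_clk_gate_delayed() above waits host->clk_delay bus cycles (8 by default, tunable through the new debugfs "clk_delay" file) before actually gating, so that back-to-back requests do not bounce the clock. The resulting delay scales with the current frequency; for example, at two illustrative rates:

/*
 *      400 kHz: tick_ns = DIV_ROUND_UP(1000000000, 400000) = 2500
 *               ndelay(8 * 2500)  ->  roughly 20 us before gating
 *      50 MHz:  tick_ns = DIV_ROUND_UP(1000000000, 50000000) = 20
 *               ndelay(8 * 20)    ->  roughly 160 ns before gating
 *
 * Raising clk_delay stretches this window proportionally.
 */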
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a34..de199f911928 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+#endif
+
void mmc_host_deeper_disable(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3b8808..16006ef153fe 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
- unsigned ext_csd_bit, bus_width;
-
- if (host->caps & MMC_CAP_8_BIT_DATA) {
- if (ddr)
- ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
- else
- ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
- bus_width = MMC_BUS_WIDTH_8;
- } else {
- if (ddr)
- ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
- else
- ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
- bus_width = MMC_BUS_WIDTH_4;
+ static unsigned ext_csd_bits[][2] = {
+ { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+ { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+ { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1
+ };
+ unsigned idx, bus_width = 0;
+
+ if (host->caps & MMC_CAP_8_BIT_DATA)
+ idx = 0;
+ else
+ idx = 1;
+ for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+ bus_width = bus_widths[idx];
+ if (bus_width == MMC_BUS_WIDTH_1)
+ ddr = 0; /* no DDR for 1-bit width */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx][0]);
+ if (!err) {
+ mmc_set_bus_width_ddr(card->host,
+ bus_width, MMC_SDR_MODE);
+ /*
+ * If controller can't handle bus width test,
+ * use the highest bus width to maintain
+ * compatibility with previous MMC behavior.
+ */
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ break;
+ err = mmc_bus_test(card, bus_width);
+ if (!err)
+ break;
+ }
}
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH, ext_csd_bit);
-
- if (err && err != -EBADMSG)
- goto free_card;
-
+ if (!err && ddr) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx][1]);
+ }
if (err) {
printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
- "failed\n", mmc_hostname(card->host),
- 1 << bus_width, ddr);
- err = 0;
- } else {
- if (ddr)
- mmc_card_set_ddr_mode(card);
- else
- ddr = MMC_SDR_MODE;
-
+ "failed\n", mmc_hostname(card->host),
+ 1 << bus_width, ddr);
+ goto free_card;
+ } else if (ddr) {
+ mmc_card_set_ddr_mode(card);
mmc_set_bus_width_ddr(card->host, bus_width, ddr);
}
}
@@ -737,14 +755,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for MMC card init.
*/
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
{
int err;
+ u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_attach_bus_ops(host);
+ if (host->ocr_avail_mmc)
+ host->ocr_avail = host->ocr_avail_mmc;
/*
* We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
-
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+ mmc_release_host(host);
mmc_remove_card(host->card);
- host->card = NULL;
mmc_claim_host(host);
+ host->card = NULL;
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede8..60842f878ded 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
return 0;
}
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+ u8 len)
+{
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct scatterlist sg;
+ u8 *data_buf;
+ u8 *test_buf;
+ int i, err;
+ static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+ static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+ /* dma onto stack is unsafe/nonportable, but callers to this
+ * routine normally provide temporary on-stack buffers ...
+ */
+ data_buf = kmalloc(len, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ if (len == 8)
+ test_buf = testdata_8bit;
+ else if (len == 4)
+ test_buf = testdata_4bit;
+ else {
+ printk(KERN_ERR "%s: Invalid bus_width %d\n",
+ mmc_hostname(host), len);
+ kfree(data_buf);
+ return -EINVAL;
+ }
+
+ if (opcode == MMC_BUS_TEST_W)
+ memcpy(data_buf, test_buf, len);
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ if (opcode == MMC_BUS_TEST_R)
+ data.flags = MMC_DATA_READ;
+ else
+ data.flags = MMC_DATA_WRITE;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, len);
+ mmc_wait_for_req(host, &mrq);
+ err = 0;
+ if (opcode == MMC_BUS_TEST_R) {
+ for (i = 0; i < len / 4; i++)
+ if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+ err = -EIO;
+ break;
+ }
+ }
+ kfree(data_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+ int err, width;
+
+ if (bus_width == MMC_BUS_WIDTH_8)
+ width = 8;
+ else if (bus_width == MMC_BUS_WIDTH_4)
+ width = 4;
+ else if (bus_width == MMC_BUS_WIDTH_1)
+ return 0; /* no need for test */
+ else
+ return -EINVAL;
+
+ /*
+ * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
+ * is a problem. This improves chances that the test will work.
+ */
+ mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+ err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+ return err;
+}
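The read-back comparison in mmc_send_bus_test() relies on the card returning the complement of the written test pattern on each working data line, which is why a healthy byte XORs with the written one to 0xff. A byte-level example:

/*
 *      written  0x55 = 0101 0101
 *      returned 0xaa = 1010 1010   (every data line toggled)
 *      0x55 ^ 0xaa   = 0xff        ->  passes
 *
 * A data line stuck high or low leaves the corresponding bit equal in
 * both bytes, the XOR falls short of 0xff and the test returns -EIO.
 */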
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e84178..e6d44b8a18db 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4dffd28e..d18c32bca99b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for SD card init.
*/
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
{
int err;
+ u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_sd_attach_bus_ops(host);
+ if (host->ocr_avail_sd)
+ host->ocr_avail = host->ocr_avail_sd;
/*
* We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
ocr &= ~0x7F;
}
- if (ocr & MMC_VDD_165_195) {
+ if ((ocr & MMC_VDD_165_195) &&
+ !(host->ocr_avail_sd & MMC_VDD_165_195)) {
printk(KERN_WARNING "%s: SD card claims to support the "
"incompletely defined 'low voltage range'. This "
"will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
-
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+ mmc_release_host(host);
mmc_remove_card(host->card);
host->card = NULL;
mmc_claim_host(host);
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f94ac42..5c4a54d9b6a4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
static int mmc_sdio_resume(struct mmc_host *host)
{
- int i, err;
+ int i, err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
/* Basic card reinitialization. */
mmc_claim_host(host);
- err = mmc_sdio_init_card(host, host->ocr, host->card,
+
+ /* No need to reinitialize powered-resumed nonremovable cards */
+ if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+ err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
+ else if (mmc_card_is_powered_resumed(host)) {
+ /* We may have switched to 1-bit mode during suspend */
+ err = sdio_enable_4bit_bus(host->card);
+ if (err > 0) {
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
+
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -690,16 +702,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
/*
* Starting point for SDIO card init.
*/
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
{
- int err;
- int i, funcs;
+ int err, i, funcs;
+ u32 ocr;
struct mmc_card *card;
BUG_ON(!host);
WARN_ON(!host->claimed);
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
mmc_attach_bus(host, &mmc_sdio_ops);
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
/*
* Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
pm_runtime_enable(&card->sdio_func[i]->dev);
}
- mmc_release_host(host);
-
/*
* First add the card to the driver model...
*/
+ mmc_release_host(host);
err = mmc_add_card(host->card);
+ mmc_claim_host(host);
if (err)
goto remove_added;
@@ -792,15 +810,17 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
remove_added:
/* Remove without lock if the device has been added. */
+ mmc_release_host(host);
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
/* And with lock if it hasn't been added. */
+ mmc_release_host(host);
if (host->card)
mmc_sdio_remove(host);
+ mmc_claim_host(host);
err:
mmc_detach_bus(host);
- mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
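Whether mmc_card_is_powered_resumed() is true in the resume path above depends on the function driver having asked for it: a driver that wants the card kept powered across suspend requests MMC_PM_KEEP_POWER from its suspend handler. Roughly, as a sketch with a hypothetical driver and the standard SDIO PM helpers:

#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical SDIO function driver suspend handler. */
static int foo_sdio_suspend(struct device *dev)
{
        struct sdio_func *func = dev_to_sdio_func(dev);

        if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
                return -ENOSYS;

        /* Keep the card powered so resume can skip reinitialization. */
        return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}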
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da443e339..d29b9c36919a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@ out:
#ifdef CONFIG_PM_RUNTIME
-static int sdio_bus_pm_prepare(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
-
- /*
- * Resume an SDIO device which was suspended at run time at this
- * point, in order to allow standard SDIO suspend/resume paths
- * to keep working as usual.
- *
- * Ultimately, the SDIO driver itself will decide (in its
- * suspend handler, or lack thereof) whether the card should be
- * removed or kept, and if kept, at what power state.
- *
- * At this point, PM core have increased our use count, so it's
- * safe to directly resume the device. After system is resumed
- * again, PM core will drop back its runtime PM use count, and if
- * needed device will be suspended again.
- *
- * The end result is guaranteed to be a power state that is
- * coherent with the device's runtime PM use count.
- *
- * The return value of pm_runtime_resume is deliberately unchecked
- * since there is little point in failing system suspend if a
- * device can't be resumed.
- */
- if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_resume(dev);
-
- return 0;
-}
-
static const struct dev_pm_ops sdio_bus_pm_ops = {
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
- .prepare = sdio_bus_pm_prepare,
};
#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d618e8673996..afe8c6fa166a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,7 +83,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_OF
tristate "SDHCI support on OpenFirmware platforms"
- depends on MMC_SDHCI && PPC_OF
+ depends on MMC_SDHCI && OF
help
This selects the OF support for Secure Digital Host Controller
Interfaces.
@@ -93,6 +93,7 @@ config MMC_SDHCI_OF
config MMC_SDHCI_OF_ESDHC
bool "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_OF
+ depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
This selects the Freescale eSDHC controller support.
@@ -102,6 +103,7 @@ config MMC_SDHCI_OF_ESDHC
config MMC_SDHCI_OF_HLWD
bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
depends on MMC_SDHCI_OF
+ depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
This selects the Secure Digital Host Controller Interface (SDHCI)
@@ -140,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
If unsure, say N.
+config MMC_SDHCI_DOVE
+ bool "SDHCI support on Marvell's Dove SoC"
+ depends on ARCH_DOVE
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface in
+ Marvell's Dove SoC.
+
+ If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+ depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Tegra SD/MMC controller. If you have a Tegra
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -458,11 +481,27 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
help
If you say yes here SD-Cards may work on the EZkit.
+config MMC_DW
+ tristate "Synopsys DesignWare Memory Card Interface"
+ depends on ARM
+ help
+ This selects support for the Synopsys DesignWare Mobile Storage IP
+ block; this provides host support for SD and MMC interfaces, in both
+ PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+ bool "Internal DMAC interface"
+ depends on MMC_DW
+ help
+ This selects support for the internal DMAC block within the Synopsys
+ Designware Mobile Storage IP block. This disables the external DMA
+ interface.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
help
- This selects the MMC Host Interface controler (MMCIF).
+ This selects the MMC Host Interface controller (MMCIF).
This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff43b30..e834fb223e9a 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +40,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
sdhci-platform-y := sdhci-pltfm.o
sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 41e5a60493ad..ef72e874ca36 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
au_sync();
- /* Send the stop commmand */
+ /* Send the stop command */
au_writel(STOP_CMD, HOST_CMD(host));
}
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index bac7d62866b7..0371bf502249 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
goto out;
}
- mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547cf701f..0076c7448fe6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
#define DAVINCI_MMCBLNC 0x60
#define DAVINCI_SDIOCTL 0x64
#define DAVINCI_SDIOST0 0x68
-#define DAVINCI_SDIOEN 0x6C
-#define DAVINCI_SDIOST 0x70
+#define DAVINCI_SDIOIEN 0x6C
+#define DAVINCI_SDIOIST 0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT BIT(0)
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
unsigned int mmc_input_clk;
void __iomem *base;
struct resource *mem_res;
- int irq;
+ int mmc_irq, sdio_irq;
unsigned char bus_mode;
#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
u32 rxdma, txdma;
bool use_dma;
bool do_dma;
+ bool sdio_int;
/* Scatterlist DMA uses one or more parameter RAM entries:
* the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
struct scatterlist *sg;
unsigned sg_len;
unsigned bytes_left = host->bytes_left;
- const unsigned shift = ffs(rw_threshold) - 1;;
+ const unsigned shift = ffs(rw_threshold) - 1;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
host->data = NULL;
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ /*
+ * SDIO Interrupt Detection work-around as suggested by
+ * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+ * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+ */
+ if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+ SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ }
+
if (host->do_dma) {
davinci_abort_dma(host);
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
mmc_davinci_reset_ctrl(host, 0);
}
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = dev_id;
+ unsigned int status;
+
+ status = readl(host->base + DAVINCI_SDIOIST);
+ if (status & SDIOIST_IOINT) {
+ dev_dbg(mmc_dev(host->mmc),
+ "SDIO interrupt status %x\n", status);
+ writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ return IRQ_HANDLED;
+}
+
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
return config->get_ro(pdev->id);
}
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ if (enable) {
+ if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ } else {
+ host->sdio_int = true;
+ writel(readl(host->base + DAVINCI_SDIOIEN) |
+ SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+ }
+ } else {
+ host->sdio_int = false;
+ writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+ host->base + DAVINCI_SDIOIEN);
+ }
+}
+
static struct mmc_host_ops mmc_davinci_ops = {
.request = mmc_davinci_request,
.set_ios = mmc_davinci_set_ios,
.get_cd = mmc_davinci_get_cd,
.get_ro = mmc_davinci_get_ro,
+ .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->nr_sg = MAX_NR_SG;
host->use_dma = use_dma;
- host->irq = irq;
+ host->mmc_irq = irq;
+ host->sdio_irq = platform_get_irq(pdev, 1);
if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
if (ret)
goto out;
+ if (host->sdio_irq >= 0) {
+ ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+ mmc_hostname(mmc), host);
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ }
+
rename_region(mem, mmc_hostname(mmc));
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
mmc_davinci_cpufreq_deregister(host);
mmc_remove_host(host->mmc);
- free_irq(host->irq, host);
+ free_irq(host->mmc_irq, host);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ free_irq(host->sdio_irq, host);
davinci_release_dma_channels(host);
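With the second platform IRQ wired up, the host now advertises MMC_CAP_SDIO_IRQ and the errata workaround above forwards card interrupts through mmc_signal_sdio_irq(). On the consumer side, an SDIO function driver receives those interrupts through the ordinary sdio_claim_irq() path; a hedged sketch with a hypothetical function driver:

#include <linux/mmc/sdio_func.h>

/* Hypothetical SDIO function driver. */
static void foo_card_irq(struct sdio_func *func)
{
        /* Read and acknowledge function-specific interrupt status here. */
}

static int foo_enable_card_irq(struct sdio_func *func)
{
        int ret;

        sdio_claim_host(func);
        ret = sdio_claim_irq(func, foo_card_irq);
        sdio_release_host(func);
        return ret;
}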
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..2fcc82577c1b
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+ SDMMC_INT_HTO | SDMMC_INT_SBE | \
+ SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+ SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
+ DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS 1
+#define DW_MCI_RECV_STATUS 2
+#define DW_MCI_DMA_THRESHOLD 16
+
+#ifdef CONFIG_MMC_DW_IDMAC
+struct idmac_desc {
+ u32 des0; /* Control Descriptor */
+#define IDMAC_DES0_DIC BIT(1)
+#define IDMAC_DES0_LD BIT(2)
+#define IDMAC_DES0_FD BIT(3)
+#define IDMAC_DES0_CH BIT(4)
+#define IDMAC_DES0_ER BIT(5)
+#define IDMAC_DES0_CES BIT(30)
+#define IDMAC_DES0_OWN BIT(31)
+
+ u32 des1; /* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+ ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+
+ u32 des2; /* buffer 1 physical address */
+
+ u32 des3; /* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+ struct mmc_host *mmc;
+ struct dw_mci *host;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned long flags;
+#define DW_MMC_CARD_PRESENT 0
+#define DW_MMC_CARD_NEED_INIT 1
+ int id;
+ int last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+ struct dw_mci_slot *slot = s->private;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_bh(&slot->host->lock);
+ mrq = slot->mrq;
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+ cmd->resp[3], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+ stop->resp[3], stop->error);
+ }
+
+ spin_unlock_bh(&slot->host->lock);
+
+ return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+ struct dw_mci *host = s->private;
+
+ seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+ seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+ seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+ seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+ seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+ seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
+
+ return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+ struct dw_mci *host = slot->host;
+ struct dentry *root;
+ struct dentry *node;
+
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &dw_mci_regs_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_file("req", S_IRUSR, root, slot,
+ &dw_mci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void dw_mci_set_timeout(struct dw_mci *host)
+{
+ /* timeout (maximum) */
+ mci_writel(host, TMOUT, 0xffffffff);
+}
+
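+/*
+ * Illustrative mapping: a single-block read (CMD17 with an R1 response)
+ * becomes opcode 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
+ * SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP.
+ */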
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ u32 cmdr;
+ cmd->error = -EINPROGRESS;
+
+ cmdr = cmd->opcode;
+
+ if (cmdr == MMC_STOP_TRANSMISSION)
+ cmdr |= SDMMC_CMD_STOP;
+ else
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ /* We expect a response, so set this bit */
+ cmdr |= SDMMC_CMD_RESP_EXP;
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= SDMMC_CMD_RESP_LONG;
+ }
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdr |= SDMMC_CMD_RESP_CRC;
+
+ data = cmd->data;
+ if (data) {
+ cmdr |= SDMMC_CMD_DAT_EXP;
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= SDMMC_CMD_STRM_MODE;
+ if (data->flags & MMC_DATA_WRITE)
+ cmdr |= SDMMC_CMD_DAT_WR;
+ }
+
+ return cmdr;
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+ struct mmc_command *cmd, u32 cmd_flags)
+{
+ host->cmd = cmd;
+ dev_vdbg(&host->pdev->dev,
+ "start command: ARGR=0x%08x CMDR=0x%08x\n",
+ cmd->arg, cmd_flags);
+
+ mci_writel(host, CMDARG, cmd->arg);
+ wmb();
+
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+{
+ dw_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+ if (host->use_dma) {
+ host->dma_ops->stop(host);
+ host->dma_ops->cleanup(host);
+ } else {
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ }
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+ u32 temp;
+
+ /* Disable and reset the IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_USE_IDMAC;
+ temp |= SDMMC_CTRL_DMA_RESET;
+ mci_writel(host, CTRL, temp);
+
+ /* Stop the IDMAC running */
+ temp = mci_readl(host, BMOD);
+ temp &= ~SDMMC_IDMAC_ENABLE;
+ mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+ host->dma_ops->cleanup(host);
+
+ /*
+ * If the card was removed, data will be NULL. No point in trying to
+ * send the stop command or waiting for NBUSY in this case.
+ */
+ if (data) {
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+ unsigned int sg_len)
+{
+ int i;
+ struct idmac_desc *desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++, desc++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+ u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+ /* Set the OWN bit and disable interrupts for this descriptor */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+ /* Physical address to DMA to/from */
+ desc->des2 = mem_addr;
+ }
+
+ /* Set first descriptor */
+ desc = host->sg_cpu;
+ desc->des0 |= IDMAC_DES0_FD;
+
+ /* Set last descriptor */
+ desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+ desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc->des0 |= IDMAC_DES0_LD;
+
+ wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+ u32 temp;
+
+ dw_mci_translate_sglist(host, host->data, sg_len);
+
+ /* Select IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_USE_IDMAC;
+ mci_writel(host, CTRL, temp);
+
+ wmb();
+
+ /* Enable the IDMAC */
+ temp = mci_readl(host, BMOD);
+ temp |= SDMMC_IDMAC_ENABLE;
+ mci_writel(host, BMOD, temp);
+
+ /* Start it running */
+ mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+ struct idmac_desc *p;
+ int i;
+
+ /* Number of descriptors in the ring buffer */
+ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
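+ /* e.g. with 4 KiB pages and 16-byte descriptors: a 256-entry ring */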
+
+ /* Forward link the descriptor list */
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+ p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+ /* Set the last descriptor as the end-of-ring descriptor */
+ p->des3 = host->sg_dma;
+ p->des0 = IDMAC_DES0_ER;
+
+ /* Mask out interrupts - get Tx & Rx complete only */
+ mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+ SDMMC_IDMAC_INT_TI);
+
+ /* Set the descriptor base address */
+ mci_writel(host, DBADDR, host->sg_dma);
+ return 0;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+ .complete = dw_mci_idmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ unsigned int i, direction, sg_len;
+ u32 temp;
+
+ /* If we don't have a channel, we can't do DMA */
+ if (!host->use_dma)
+ return -ENODEV;
+
+ /*
+ * We don't do DMA on "complex" transfers, i.e. with
+ * non-word-aligned buffers or lengths. Also, we don't bother
+ * with all the DMA setup overhead for short transfers.
+ */
+ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+ return -EINVAL;
+ if (data->blksz & 3)
+ return -EINVAL;
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3)
+ return -EINVAL;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+ direction);
+
+ dev_vdbg(&host->pdev->dev,
+ "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+ (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+ sg_len);
+
+ /* Enable the DMA interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+
+ /* Disable RX/TX IRQs, let DMA handle it */
+ temp = mci_readl(host, INTMASK);
+ temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+ mci_writel(host, INTMASK, temp);
+
+ host->dma_ops->start(host, sg_len);
+
+ return 0;
+}
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+ u32 temp;
+
+ data->error = -EINPROGRESS;
+
+ WARN_ON(host->data);
+ host->sg = NULL;
+ host->data = data;
+
+ if (dw_mci_submit_data_dma(host, data)) {
+ host->sg = data->sg;
+ host->pio_offset = 0;
+ if (data->flags & MMC_DATA_READ)
+ host->dir_status = DW_MCI_RECV_STATUS;
+ else
+ host->dir_status = DW_MCI_SEND_STATUS;
+
+ temp = mci_readl(host, INTMASK);
+ temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+ mci_writel(host, INTMASK, temp);
+
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+ }
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+ struct dw_mci *host = slot->host;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int cmd_status = 0;
+
+ mci_writel(host, CMDARG, arg);
+ wmb();
+ mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+ while (time_before(jiffies, timeout)) {
+ cmd_status = mci_readl(host, CMD);
+ if (!(cmd_status & SDMMC_CMD_START))
+ return;
+ }
+ dev_err(&slot->mmc->class_dev,
+ "Timeout sending command (cmd %#x arg %#x status %#x)\n",
+ cmd, arg, cmd_status);
+}
+
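+/*
+ * Reprogram the card clock. The CIU only picks up new CLKDIV/CLKENA
+ * values when a clock-update command (SDMMC_CMD_UPD_CLK) is issued, so
+ * the sequence is disable clock, update, set divider, update, enable
+ * clock, update.
+ */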
+static void dw_mci_setup_bus(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ u32 div;
+
+ if (slot->clock != host->current_speed) {
+ if (host->bus_hz % slot->clock)
+ /*
+ * Round up (the + 1 comes after the divide) so we
+ * never over-clock the card.
+ */
+ div = ((host->bus_hz / slot->clock) >> 1) + 1;
+ else
+ div = (host->bus_hz / slot->clock) >> 1;
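+ /*
+ * Worked example: bus_hz = 50 MHz, requested clock = 20 MHz:
+ * ((50 / 20) >> 1) + 1 = 2, so CLKDIV = 2 and the card runs at
+ * 50 / (2 * 2) = 12.5 MHz. Adding the 1 before the shift would
+ * give CLKDIV = 1, i.e. 25 MHz, over-clocking the card.
+ */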
+
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
+ " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+ div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+ /* disable clock */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ /* set clock to desired speed */
+ mci_writel(host, CLKDIV, div);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ /* enable clock */
+ mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+
+ /* inform CIU */
+ mci_send_cmd(slot,
+ SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+ host->current_speed = slot->clock;
+ }
+
+ /* Set the current slot bus width */
+ mci_writel(host, CTYPE, slot->ctype);
+}
+
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ u32 cmdflags;
+
+ mrq = slot->mrq;
+ if (host->pdata->select_slot)
+ host->pdata->select_slot(slot->id);
+
+ /* Slot specific timing and width adjustment */
+ dw_mci_setup_bus(slot);
+
+ host->cur_slot = slot;
+ host->mrq = mrq;
+
+ host->pending_events = 0;
+ host->completed_events = 0;
+ host->data_status = 0;
+
+ data = mrq->data;
+ if (data) {
+ dw_mci_set_timeout(host);
+ mci_writel(host, BYTCNT, data->blksz*data->blocks);
+ mci_writel(host, BLKSIZ, data->blksz);
+ }
+
+ cmd = mrq->cmd;
+ cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+ /* this is the first command, send the initialization clock */
+ if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+ cmdflags |= SDMMC_CMD_INIT;
+
+ if (data) {
+ dw_mci_submit_data(host, data);
+ wmb();
+ }
+
+ dw_mci_start_command(host, cmd, cmdflags);
+
+ if (mrq->stop)
+ host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+}
+
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+ struct mmc_request *mrq)
+{
+ dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+ host->state);
+
+ spin_lock_bh(&host->lock);
+ slot->mrq = mrq;
+
+ if (host->state == STATE_IDLE) {
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ list_add_tail(&slot->queue_node, &host->queue);
+ }
+
+ spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ WARN_ON(slot->mrq);
+
+ if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* We don't support multiple blocks of weird lengths. */
+ dw_mci_queue_request(host, slot, mrq);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+
+ /* set default 1 bit mode */
+ slot->ctype = SDMMC_CTYPE_1BIT;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ slot->ctype = SDMMC_CTYPE_1BIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ slot->ctype = SDMMC_CTYPE_4BIT;
+ break;
+ }
+
+ if (ios->clock) {
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
+ }
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+ break;
+ default:
+ break;
+ }
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+ int read_only;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_board *brd = slot->host->pdata;
+
+ /* Use platform get_ro function, else try on board write protect */
+ if (brd->get_ro)
+ read_only = brd->get_ro(slot->id);
+ else
+ read_only =
+ mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+ dev_dbg(&mmc->class_dev, "card is %s\n",
+ read_only ? "read-only" : "read-write");
+
+ return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+ int present;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_board *brd = slot->host->pdata;
+
+ /* Use platform get_cd function, else try onboard card detect */
+ if (brd->get_cd)
+ present = !brd->get_cd(slot->id);
+ else
+ present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+ == 0 ? 1 : 0;
+
+ if (present)
+ dev_dbg(&mmc->class_dev, "card is present\n");
+ else
+ dev_dbg(&mmc->class_dev, "card is not present\n");
+
+ return present;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+ .request = dw_mci_request,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+ __releases(&host->lock)
+ __acquires(&host->lock)
+{
+ struct dw_mci_slot *slot;
+ struct mmc_host *prev_mmc = host->cur_slot->mmc;
+
+ WARN_ON(host->cmd || host->data);
+
+ host->cur_slot->mrq = NULL;
+ host->mrq = NULL;
+ if (!list_empty(&host->queue)) {
+ slot = list_entry(host->queue.next,
+ struct dw_mci_slot, queue_node);
+ list_del(&slot->queue_node);
+ dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+ mmc_hostname(slot->mmc));
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ dev_vdbg(&host->pdev->dev, "list empty\n");
+ host->state = STATE_IDLE;
+ }
+
+ spin_unlock(&host->lock);
+ mmc_request_done(prev_mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+ u32 status = host->cmd_status;
+
+ host->cmd_status = 0;
+
+ /* Read the response from the card (up to 16 bytes) */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = mci_readl(host, RESP0);
+ cmd->resp[2] = mci_readl(host, RESP1);
+ cmd->resp[1] = mci_readl(host, RESP2);
+ cmd->resp[0] = mci_readl(host, RESP3);
+ } else {
+ cmd->resp[0] = mci_readl(host, RESP0);
+ cmd->resp[1] = 0;
+ cmd->resp[2] = 0;
+ cmd->resp[3] = 0;
+ }
+ }
+
+ if (status & SDMMC_INT_RTO)
+ cmd->error = -ETIMEDOUT;
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+ cmd->error = -EILSEQ;
+ else if (status & SDMMC_INT_RESP_ERR)
+ cmd->error = -EIO;
+ else
+ cmd->error = 0;
+
+ if (cmd->error) {
+ /* newer ip versions need a delay between retries */
+ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+ mdelay(20);
+
+ if (cmd->data) {
+ host->data = NULL;
+ dw_mci_stop_dma(host);
+ }
+ }
+}
+
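+/*
+ * Request state machine driven by the tasklet below (sketch):
+ *
+ * IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
+ *
+ * Requests without data (or with a failed command) finish after
+ * SENDING_CMD, and data errors detour through DATA_ERROR before
+ * DATA_BUSY.
+ */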
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+ struct dw_mci *host = (struct dw_mci *)priv;
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+ enum dw_mci_state state;
+ enum dw_mci_state prev_state;
+ u32 status;
+
+ spin_lock(&host->lock);
+
+ state = host->state;
+ data = host->data;
+
+ do {
+ prev_state = state;
+
+ switch (state) {
+ case STATE_IDLE:
+ break;
+
+ case STATE_SENDING_CMD:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ cmd = host->cmd;
+ host->cmd = NULL;
+ set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+ dw_mci_command_complete(host, host->mrq->cmd);
+ if (!host->mrq->data || cmd->error) {
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
+ prev_state = state = STATE_SENDING_DATA;
+ /* fall through */
+
+ case STATE_SENDING_DATA:
+ if (test_and_clear_bit(EVENT_DATA_ERROR,
+ &host->pending_events)) {
+ dw_mci_stop_dma(host);
+ if (data->stop)
+ send_stop_cmd(host, data);
+ state = STATE_DATA_ERROR;
+ break;
+ }
+
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+ prev_state = state = STATE_DATA_BUSY;
+ /* fall through */
+
+ case STATE_DATA_BUSY:
+ if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+ &host->pending_events))
+ break;
+
+ host->data = NULL;
+ set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+ status = host->data_status;
+
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ if (status & SDMMC_INT_DTO) {
+ dev_err(&host->pdev->dev,
+ "data timeout error\n");
+ data->error = -ETIMEDOUT;
+ } else if (status & SDMMC_INT_DCRC) {
+ dev_err(&host->pdev->dev,
+ "data CRC error\n");
+ data->error = -EILSEQ;
+ } else {
+ dev_err(&host->pdev->dev,
+ "data FIFO error "
+ "(status=%08x)\n",
+ status);
+ data->error = -EIO;
+ }
+ } else {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
+ }
+
+ if (!data->stop) {
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
+ prev_state = state = STATE_SENDING_STOP;
+ if (!data->error)
+ send_stop_cmd(host, data);
+ /* fall through */
+
+ case STATE_SENDING_STOP:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ host->cmd = NULL;
+ dw_mci_command_complete(host, host->mrq->stop);
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+
+ case STATE_DATA_ERROR:
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ state = STATE_DATA_BUSY;
+ break;
+ }
+ } while (state != prev_state);
+
+ host->state = state;
+unlock:
+ spin_unlock(&host->lock);
+}
+
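+/*
+ * PIO FIFO accessors: one push/pull pair per host data width (16, 32 or
+ * 64 bits). Probe selects the pair matching HCON and sets
+ * host->data_shift to 1, 2 or 3 accordingly.
+ */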
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ u16 *pdata = (u16 *)buf;
+
+ WARN_ON(cnt % 2 != 0);
+
+ cnt = cnt >> 1;
+ while (cnt > 0) {
+ mci_writew(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ u16 *pdata = (u16 *)buf;
+
+ WARN_ON(cnt % 2 != 0);
+
+ cnt = cnt >> 1;
+ while (cnt > 0) {
+ *pdata++ = mci_readw(host, DATA);
+ cnt--;
+ }
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+ u32 *pdata = (u32 *)buf;
+
+ WARN_ON(cnt % 4 != 0);
+ WARN_ON((unsigned long)pdata & 0x3);
+
+ cnt = cnt >> 2;
+ while (cnt > 0) {
+ mci_writel(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+ u32 *pdata = (u32 *)buf;
+
+ WARN_ON(cnt % 4 != 0);
+ WARN_ON((unsigned long)pdata & 0x3);
+
+ cnt = cnt >> 2;
+ while (cnt > 0) {
+ *pdata++ = mci_readl(host, DATA);
+ cnt--;
+ }
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+ u64 *pdata = (u64 *)buf;
+
+ WARN_ON(cnt % 8 != 0);
+
+ cnt = cnt >> 3;
+ while (cnt > 0) {
+ mci_writeq(host, DATA, *pdata++);
+ cnt--;
+ }
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+ u64 *pdata = (u64 *)buf;
+
+ WARN_ON(cnt % 8 != 0);
+
+ cnt = cnt >> 3;
+ while (cnt > 0) {
+ *pdata++ = mci_readq(host, DATA);
+ cnt--;
+ }
+}
+
+static void dw_mci_read_data_pio(struct dw_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len, old_len, count = 0;
+
+ do {
+ len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+ if (count == 0)
+ old_len = len;
+
+ if (offset + len <= sg->length) {
+ host->pull_data(host, (void *)(buf + offset), len);
+
+ offset += len;
+ nbytes += len;
+
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
+ host->pull_data(host, (void *)(buf + offset),
+ remaining);
+ nbytes += remaining;
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = len - remaining;
+ buf = sg_virt(sg);
+ host->pull_data(host, buf, offset);
+ nbytes += offset;
+ }
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+ tasklet_schedule(&host->tasklet);
+ return;
+ }
+ count++;
+ } while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
+ len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+ return;
+
+done:
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len;
+
+ do {
+ len = SDMMC_FIFO_SZ -
+ (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+ if (offset + len <= sg->length) {
+ host->push_data(host, (void *)(buf + offset), len);
+
+ offset += len;
+ nbytes += len;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
+
+ host->push_data(host, (void *)(buf + offset),
+ remaining);
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+ if (!sg)
+ goto done;
+
+ offset = len - remaining;
+ buf = sg_virt(sg);
+ host->push_data(host, (void *)buf, offset);
+ nbytes += offset;
+ }
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+
+ smp_wmb();
+
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+ tasklet_schedule(&host->tasklet);
+ return;
+ }
+ } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+
+ return;
+
+done:
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+ if (!host->cmd_status)
+ host->cmd_status = status;
+
+ smp_wmb();
+
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+ struct dw_mci *host = dev_id;
+ u32 status, pending;
+ unsigned int pass_count = 0;
+
+ do {
+ status = mci_readl(host, RINTSTS);
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+
+ if (!pending)
+ break;
+
+ if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+ mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+ host->cmd_status = status;
+ smp_wmb();
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ /* if there is an error report DATA_ERROR */
+ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+ host->data_status = status;
+ smp_wmb();
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_DATA_OVER) {
+ mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+ if (!host->data_status)
+ host->data_status = status;
+ smp_wmb();
+ if (host->dir_status == DW_MCI_RECV_STATUS) {
+ if (host->sg != NULL)
+ dw_mci_read_data_pio(host);
+ }
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_RXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (host->sg)
+ dw_mci_read_data_pio(host);
+ }
+
+ if (pending & SDMMC_INT_TXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (host->sg)
+ dw_mci_write_data_pio(host);
+ }
+
+ if (pending & SDMMC_INT_CMD_DONE) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+ dw_mci_cmd_interrupt(host, status);
+ }
+
+ if (pending & SDMMC_INT_CD) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CD);
+ tasklet_schedule(&host->card_tasklet);
+ }
+
+ } while (pass_count++ < 5);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ /* Handle DMA interrupts */
+ pending = mci_readl(host, IDSTS);
+ if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ host->dma_ops->complete(host);
+ }
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static void dw_mci_tasklet_card(unsigned long data)
+{
+ struct dw_mci *host = (struct dw_mci *)data;
+ int i;
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ struct mmc_host *mmc = slot->mmc;
+ struct mmc_request *mrq;
+ int present;
+ u32 ctrl;
+
+ present = dw_mci_get_cd(mmc);
+ while (present != slot->last_detect_state) {
+ spin_lock(&host->lock);
+
+ dev_dbg(&slot->mmc->class_dev, "card %s\n",
+ present ? "inserted" : "removed");
+
+ /* Card change detected */
+ slot->last_detect_state = present;
+
+ /* Power up slot */
+ if (present != 0) {
+ if (host->pdata->setpower)
+ host->pdata->setpower(slot->id,
+ mmc->ocr_avail);
+
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ }
+
+ /* Clean up queue if present */
+ mrq = slot->mrq;
+ if (mrq) {
+ if (mrq == host->mrq) {
+ host->data = NULL;
+ host->cmd = NULL;
+
+ switch (host->state) {
+ case STATE_IDLE:
+ break;
+ case STATE_SENDING_CMD:
+ mrq->cmd->error = -ENOMEDIUM;
+ if (!mrq->data)
+ break;
+ /* fall through */
+ case STATE_SENDING_DATA:
+ mrq->data->error = -ENOMEDIUM;
+ dw_mci_stop_dma(host);
+ break;
+ case STATE_DATA_BUSY:
+ case STATE_DATA_ERROR:
+ if (mrq->data->error == -EINPROGRESS)
+ mrq->data->error = -ENOMEDIUM;
+ if (!mrq->stop)
+ break;
+ /* fall through */
+ case STATE_SENDING_STOP:
+ mrq->stop->error = -ENOMEDIUM;
+ break;
+ }
+
+ dw_mci_request_end(host, mrq);
+ } else {
+ list_del(&slot->queue_node);
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->data)
+ mrq->data->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+
+ spin_unlock(&host->lock);
+ mmc_request_done(slot->mmc, mrq);
+ spin_lock(&host->lock);
+ }
+ }
+
+ /* Power down slot */
+ if (present == 0) {
+ if (host->pdata->setpower)
+ host->pdata->setpower(slot->id, 0);
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+ /*
+ * Clearing the FIFO below can generate a block
+ * interrupt, so NULL the scatter-gather pointer
+ * first to keep the ISR out of the PIO path.
+ */
+ host->sg = NULL;
+
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= SDMMC_CTRL_FIFO_RESET;
+ mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ ctrl = mci_readl(host, BMOD);
+ ctrl |= SDMMC_IDMAC_SWRESET; /* Software reset of DMA */
+ mci_writel(host, BMOD, ctrl);
+#endif
+
+ }
+
+ spin_unlock(&host->lock);
+ present = dw_mci_get_cd(mmc);
+ }
+
+ mmc_detect_change(slot->mmc,
+ msecs_to_jiffies(host->pdata->detect_delay_ms));
+ }
+}
+
+static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+ struct mmc_host *mmc;
+ struct dw_mci_slot *slot;
+
+ mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ slot = mmc_priv(mmc);
+ slot->id = id;
+ slot->mmc = mmc;
+ slot->host = host;
+
+ mmc->ops = &dw_mci_ops;
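+ /*
+ * f_min: the slowest card clock, with the 8-bit CLKDIV divider at
+ * its maximum of 255, is bus_hz / (2 * 255) = bus_hz / 510.
+ */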
+ mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+ mmc->f_max = host->bus_hz;
+
+ if (host->pdata->get_ocr)
+ mmc->ocr_avail = host->pdata->get_ocr(id);
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ /*
+ * Start with slot power disabled, it will be enabled when a card
+ * is detected.
+ */
+ if (host->pdata->setpower)
+ host->pdata->setpower(id, 0);
+
+ mmc->caps = 0;
+ if (host->pdata->get_bus_wd)
+ if (host->pdata->get_bus_wd(slot->id) >= 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65536;
+ mmc->max_blk_count = host->ring_size;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+#else
+ if (host->pdata->blk_settings) {
+ mmc->max_segs = host->pdata->blk_settings->max_segs;
+ mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+ mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+ mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+ mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+ } else {
+ /* Useful defaults if platform data is unset. */
+ mmc->max_segs = 64;
+ mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+ mmc->max_blk_count = 512;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+ }
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+ if (dw_mci_get_cd(mmc))
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+ host->slot[id] = slot;
+ mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+ dw_mci_init_debugfs(slot);
+#endif
+
+ /* Card initially undetected */
+ slot->last_detect_state = 0;
+
+ return 0;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+ /* Shutdown detect IRQ */
+ if (slot->host->pdata->exit)
+ slot->host->pdata->exit(id);
+
+ /* Debugfs stuff is cleaned up by mmc core */
+ mmc_remove_host(slot->mmc);
+ slot->host->slot[id] = NULL;
+ mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+ /* Alloc memory for sg translation */
+ host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+ __func__);
+ goto no_dma;
+ }
+
+ /* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+ host->dma_ops = &dw_mci_idmac_ops;
+ dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+ if (!host->dma_ops)
+ goto no_dma;
+
+ if (host->dma_ops->init) {
+ if (host->dma_ops->init(host)) {
+ dev_err(&host->pdev->dev, "%s: Unable to initialize "
+ "DMA Controller.\n", __func__);
+ goto no_dma;
+ }
+ } else {
+ dev_err(&host->pdev->dev, "DMA initialization not found.\n");
+ goto no_dma;
+ }
+
+ host->use_dma = 1;
+ return;
+
+no_dma:
+ dev_info(&host->pdev->dev, "Using PIO mode.\n");
+ host->use_dma = 0;
+ return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int ctrl;
+
+ mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET));
+
+ /* wait till resets clear */
+ do {
+ ctrl = mci_readl(host, CTRL);
+ if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+ SDMMC_CTRL_DMA_RESET)))
+ return true;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+ return false;
+}
+
+static int dw_mci_probe(struct platform_device *pdev)
+{
+ struct dw_mci *host;
+ struct resource *regs;
+ struct dw_mci_board *pdata;
+ int irq, ret, i, width;
+ u32 fifo_size;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+ host->pdata = pdata = pdev->dev.platform_data;
+ if (!pdata || !pdata->init) {
+ dev_err(&pdev->dev,
+ "Platform data must supply init function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->select_slot && pdata->num_slots > 1) {
+ dev_err(&pdev->dev,
+ "Platform data must supply select_slot function\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ if (!pdata->bus_hz) {
+ dev_err(&pdev->dev,
+ "Platform data must supply bus speed\n");
+ ret = -ENODEV;
+ goto err_freehost;
+ }
+
+ host->bus_hz = pdata->bus_hz;
+ host->quirks = pdata->quirks;
+
+ spin_lock_init(&host->lock);
+ INIT_LIST_HEAD(&host->queue);
+
+ ret = -ENOMEM;
+ host->regs = ioremap(regs->start, resource_size(regs));
+ if (!host->regs)
+ goto err_freehost;
+
+ host->dma_ops = pdata->dma_ops;
+ dw_mci_init_dma(host);
+
+ /*
+ * Get the host data width - this assumes that HCON has been set with
+ * the correct values.
+ */
+ i = (mci_readl(host, HCON) >> 7) & 0x7;
+ if (!i) {
+ host->push_data = dw_mci_push_data16;
+ host->pull_data = dw_mci_pull_data16;
+ width = 16;
+ host->data_shift = 1;
+ } else if (i == 2) {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ width = 64;
+ host->data_shift = 3;
+ } else {
+ /* Check for a reserved value, and warn if it is */
+ WARN((i != 1),
+ "HCON reports a reserved host data width!\n"
+ "Defaulting to 32-bit access.\n");
+ host->push_data = dw_mci_push_data32;
+ host->pull_data = dw_mci_pull_data32;
+ width = 32;
+ host->data_shift = 2;
+ }
+
+ /* Reset all blocks */
+ if (!mci_wait_reset(&pdev->dev, host)) {
+ ret = -ENODEV;
+ goto err_dmaunmap;
+ }
+
+ /* Clear the interrupts for the host controller */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+ /*
+ * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
+ * TX mark = fifo_size / 2, DMA size = 8
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = (fifo_size >> 16) & 0x7ff;
+ mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+ ((fifo_size/2) << 0)));
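+ /*
+ * E.g. a FIFO depth reported as 32 makes the write above program an
+ * RX watermark of 15, a TX watermark of 16 and the DMA size of 8
+ * encoded by 0x2 << 28.
+ */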
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+ tasklet_init(&host->card_tasklet,
+ dw_mci_tasklet_card, (unsigned long)host);
+
+ ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
+ if (ret)
+ goto err_dmaunmap;
+
+ platform_set_drvdata(pdev, host);
+
+ if (host->pdata->num_slots)
+ host->num_slots = host->pdata->num_slots;
+ else
+ host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+ /* We need at least one slot to succeed */
+ for (i = 0; i < host->num_slots; i++) {
+ ret = dw_mci_init_slot(host, i);
+ if (ret) {
+ ret = -ENODEV;
+ goto err_init_slot;
+ }
+ }
+
+ /*
+ * Enable interrupts for command done, data over, data empty, card det,
+ * receive ready and error such as transmit, receive timeout, crc error
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(&pdev->dev, "DW MMC controller at irq %d, "
+ "%d bit host data width\n", irq, width);
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+ dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+
+ return 0;
+
+err_init_slot:
+ /* De-init any initialized slots */
+ while (i > 0) {
+ i--;
+ if (host->slot[i])
+ dw_mci_cleanup_slot(host->slot[i], i);
+ }
+ free_irq(irq, host);
+
+err_dmaunmap:
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+ dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+ host->sg_cpu, host->sg_dma);
+ iounmap(host->regs);
+
+err_freehost:
+ kfree(host);
+ return ret;
+}
+
+static int __exit dw_mci_remove(struct platform_device *pdev)
+{
+ struct dw_mci *host = platform_get_drvdata(pdev);
+ int i;
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ platform_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < host->num_slots; i++) {
+ dev_dbg(&pdev->dev, "remove slot %d\n", i);
+ if (host->slot[i])
+ dw_mci_cleanup_slot(host->slot[i], i);
+ }
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
+ iounmap(host->regs);
+
+ kfree(host);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ int i, ret;
+ struct dw_mci *host = platform_get_drvdata(pdev);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_suspend_host(slot->mmc);
+ if (ret < 0) {
+ while (--i >= 0) {
+ slot = host->slot[i];
+ if (slot)
+ mmc_resume_host(host->slot[i]->mmc);
+ }
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw_mci_resume(struct platform_device *pdev)
+{
+ int i, ret;
+ struct dw_mci *host = platform_get_drvdata(pdev);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ ret = mmc_resume_host(host->slot[i]->mmc);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define dw_mci_suspend NULL
+#define dw_mci_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver dw_mci_driver = {
+ .remove = __exit_p(dw_mci_remove),
+ .suspend = dw_mci_suspend,
+ .resume = dw_mci_resume,
+ .driver = {
+ .name = "dw_mmc",
+ },
+};
+
+static int __init dw_mci_init(void)
+{
+ return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
+}
+
+static void __exit dw_mci_exit(void)
+{
+ platform_driver_unregister(&dw_mci_driver);
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..5dd55a75233d
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL 0x000
+#define SDMMC_PWREN 0x004
+#define SDMMC_CLKDIV 0x008
+#define SDMMC_CLKSRC 0x00c
+#define SDMMC_CLKENA 0x010
+#define SDMMC_TMOUT 0x014
+#define SDMMC_CTYPE 0x018
+#define SDMMC_BLKSIZ 0x01c
+#define SDMMC_BYTCNT 0x020
+#define SDMMC_INTMASK 0x024
+#define SDMMC_CMDARG 0x028
+#define SDMMC_CMD 0x02c
+#define SDMMC_RESP0 0x030
+#define SDMMC_RESP1 0x034
+#define SDMMC_RESP2 0x038
+#define SDMMC_RESP3 0x03c
+#define SDMMC_MINTSTS 0x040
+#define SDMMC_RINTSTS 0x044
+#define SDMMC_STATUS 0x048
+#define SDMMC_FIFOTH 0x04c
+#define SDMMC_CDETECT 0x050
+#define SDMMC_WRTPRT 0x054
+#define SDMMC_GPIO 0x058
+#define SDMMC_TCBCNT 0x05c
+#define SDMMC_TBBCNT 0x060
+#define SDMMC_DEBNCE 0x064
+#define SDMMC_USRID 0x068
+#define SDMMC_VERID 0x06c
+#define SDMMC_HCON 0x070
+#define SDMMC_BMOD 0x080
+#define SDMMC_PLDMND 0x084
+#define SDMMC_DBADDR 0x088
+#define SDMMC_IDSTS 0x08c
+#define SDMMC_IDINTEN 0x090
+#define SDMMC_DSCADDR 0x094
+#define SDMMC_BUFADDR 0x098
+#define SDMMC_DATA 0x100
+#define SDMMC_DATA_ADR 0x100
+
+/* shift bit field */
+#define _SBF(f, v) ((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
+#define SDMMC_CTRL_SEND_CCSD BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
+#define SDMMC_CTRL_READ_WAIT BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE BIT(5)
+#define SDMMC_CTRL_INT_ENABLE BIT(4)
+#define SDMMC_CTRL_DMA_RESET BIT(2)
+#define SDMMC_CTRL_FIFO_RESET BIT(1)
+#define SDMMC_CTRL_RESET BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR BIT(16)
+#define SDMMC_CLKEN_ENABLE BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK 0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT BIT(16)
+#define SDMMC_CTYPE_4BIT BIT(0)
+#define SDMMC_CTYPE_1BIT 0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO BIT(16)
+#define SDMMC_INT_EBE BIT(15)
+#define SDMMC_INT_ACD BIT(14)
+#define SDMMC_INT_SBE BIT(13)
+#define SDMMC_INT_HLE BIT(12)
+#define SDMMC_INT_FRUN BIT(11)
+#define SDMMC_INT_HTO BIT(10)
+#define SDMMC_INT_DTO BIT(9)
+#define SDMMC_INT_RTO BIT(8)
+#define SDMMC_INT_DCRC BIT(7)
+#define SDMMC_INT_RCRC BIT(6)
+#define SDMMC_INT_RXDR BIT(5)
+#define SDMMC_INT_TXDR BIT(4)
+#define SDMMC_INT_DATA_OVER BIT(3)
+#define SDMMC_INT_CMD_DONE BIT(2)
+#define SDMMC_INT_RESP_ERR BIT(1)
+#define SDMMC_INT_CD BIT(0)
+#define SDMMC_INT_ERROR 0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START BIT(31)
+#define SDMMC_CMD_CCS_EXP BIT(23)
+#define SDMMC_CMD_CEATA_RD BIT(22)
+#define SDMMC_CMD_UPD_CLK BIT(21)
+#define SDMMC_CMD_INIT BIT(15)
+#define SDMMC_CMD_STOP BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
+#define SDMMC_CMD_SEND_STOP BIT(12)
+#define SDMMC_CMD_STRM_MODE BIT(11)
+#define SDMMC_CMD_DAT_WR BIT(10)
+#define SDMMC_CMD_DAT_EXP BIT(9)
+#define SDMMC_CMD_RESP_CRC BIT(8)
+#define SDMMC_CMD_RESP_LONG BIT(7)
+#define SDMMC_CMD_RESP_EXP BIT(6)
+#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
+/* Status register defines */
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ 32
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI BIT(9)
+#define SDMMC_IDMAC_INT_NI BIT(8)
+#define SDMMC_IDMAC_INT_CES BIT(5)
+#define SDMMC_IDMAC_INT_DU BIT(4)
+#define SDMMC_IDMAC_INT_FBE BIT(2)
+#define SDMMC_IDMAC_INT_RI BIT(1)
+#define SDMMC_IDMAC_INT_TI BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE BIT(7)
+#define SDMMC_IDMAC_FB BIT(1)
+#define SDMMC_IDMAC_SWRESET BIT(0)
+
+/* Register access macros */
+#define mci_readl(dev, reg) \
+ __raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value) \
+ __raw_writel((value), dev->regs + SDMMC_##reg)
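+
+/*
+ * For example, mci_readl(host, RINTSTS) expands to
+ * __raw_readl(host->regs + SDMMC_RINTSTS), i.e. a 32-bit read of the
+ * raw interrupt status register at offset 0x044.
+ */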
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg) \
+ __raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value) \
+ __raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg) \
+ __raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value) \
+ __raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We would assume that none of these architectures would configure
+ * the IP block with a 64bit FIFO width, so this code will never be
+ * executed on those machines. Defining these macros here keeps the
+ * rest of the code free from ifdefs.
+ */
+#define mci_readq(dev, reg) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value) \
+ (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3a0ab0e4c2b..74218ad677e4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
*/
#include <linux/mmc/host.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -827,8 +828,8 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
}
host->clk = clk_get(&pdev->dev, "mmc");
- if (!host->clk) {
- ret = -ENOENT;
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
dev_err(&pdev->dev, "Failed to get mmc clock\n");
goto err_free_host;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 563022825667..2d6de3e03e2d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -14,6 +14,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -46,10 +47,6 @@ static unsigned int fmax = 515633;
* is asserted (likewise for RX)
* @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
* is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- * and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- * using DMA.
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
*/
@@ -59,8 +56,6 @@ struct variant_data {
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
- bool broken_blockend;
- bool broken_blockend_dma;
bool sdio;
bool st_clkdiv;
};
@@ -76,7 +71,6 @@ static struct variant_data variant_u300 = {
.fifohalfsize = 8 * 4,
.clkreg_enable = 1 << 13, /* HWFCEN */
.datalength_bits = 16,
- .broken_blockend_dma = true,
.sdio = true,
};
@@ -86,7 +80,6 @@ static struct variant_data variant_ux500 = {
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = 1 << 14, /* HWFCEN */
.datalength_bits = 24,
- .broken_blockend = true,
.sdio = true,
.st_clkdiv = true,
};
@@ -210,8 +203,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
host->data_xfered = 0;
- host->blockend = false;
- host->dataend = false;
mmci_init_sg(host, data);
@@ -288,21 +279,26 @@ static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
- struct variant_data *variant = host->variant;
-
/* First check for errors */
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ u32 remain, success;
+
+ /* Calculate how far we are into the transfer */
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
- if (status & MCI_DATACRCFAIL)
+ if (status & MCI_DATACRCFAIL) {
+ /* Last block was not successful */
+ host->data_xfered = round_down(success - 1, data->blksz);
data->error = -EILSEQ;
- else if (status & MCI_DATATIMEOUT)
+ } else if (status & MCI_DATATIMEOUT) {
+ host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
- else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+ } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ host->data_xfered = round_down(success, data->blksz);
data->error = -EIO;
-
- /* Force-complete the transaction */
- host->blockend = true;
- host->dataend = true;
+ }
/*
* We hit an error condition. Ensure that any data
@@ -321,61 +317,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
}
}
- /*
- * On ARM variants in PIO mode, MCI_DATABLOCKEND
- * is always sent first, and we increase the
- * transfered number of bytes for that IRQ. Then
- * MCI_DATAEND follows and we conclude the transaction.
- *
- * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
- * doesn't seem to immediately clear from the status,
- * so we can't use it keep count when only one irq is
- * used because the irq will hit for other reasons, and
- * then the flag is still up. So we use the MCI_DATAEND
- * IRQ at the end of the entire transfer because
- * MCI_DATABLOCKEND is broken.
- *
- * In the U300, the IRQs can arrive out-of-order,
- * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
- * so for this case we use the flags "blockend" and
- * "dataend" to make sure both IRQs have arrived before
- * concluding the transaction. (This does not apply
- * to the Ux500 which doesn't fire MCI_DATABLOCKEND
- * at all.) In DMA mode it suffers from the same problem
- * as the Ux500.
- */
- if (status & MCI_DATABLOCKEND) {
- /*
- * Just being a little over-cautious, we do not
- * use this progressive update if the hardware blockend
- * flag is unreliable: since it can stay high between
- * IRQs it will corrupt the transfer counter.
- */
- if (!variant->broken_blockend)
- host->data_xfered += data->blksz;
- host->blockend = true;
- }
-
- if (status & MCI_DATAEND)
- host->dataend = true;
+ if (status & MCI_DATABLOCKEND)
+ dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
- /*
- * On variants with broken blockend we shall only wait for dataend,
- * on others we must sync with the blockend signal since they can
- * appear out-of-order.
- */
- if (host->dataend && (host->blockend || variant->broken_blockend)) {
+ if (status & MCI_DATAEND || data->error) {
mmci_stop_data(host);
- /* Reset these flags */
- host->blockend = false;
- host->dataend = false;
-
- /*
- * Variants with broken blockend flags need to handle the
- * end of the entire transfer here.
- */
- if (variant->broken_blockend && !data->error)
+ if (!data->error)
+ /* The error clause is handled above, success! */
host->data_xfered += data->blksz * data->blocks;
if (!data->stop) {
@@ -394,15 +343,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
host->cmd = NULL;
- cmd->resp[0] = readl(base + MMCIRESPONSE0);
- cmd->resp[1] = readl(base + MMCIRESPONSE1);
- cmd->resp[2] = readl(base + MMCIRESPONSE2);
- cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else {
+ cmd->resp[0] = readl(base + MMCIRESPONSE0);
+ cmd->resp[1] = readl(base + MMCIRESPONSE1);
+ cmd->resp[2] = readl(base + MMCIRESPONSE2);
+ cmd->resp[3] = readl(base + MMCIRESPONSE3);
}
if (!cmd->data || cmd->error) {
@@ -770,7 +719,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
- unsigned int mask;
int ret;
/* must have platform data */
@@ -951,12 +899,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
goto irq0_free;
}
- mask = MCI_IRQENABLE;
- /* Don't use the datablockend flag if it's broken */
- if (variant->broken_blockend)
- mask &= ~MCI_DATABLOCKEND;
-
- writel(mask, host->base + MMCIMASK0);
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01aac89..c1df7b82d36c 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
#define MCI_IRQENABLE \
(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
#define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
- bool blockend;
- bool dataend;
-
/* pio stuff */
struct sg_mapping_iter sg_miter;
unsigned int size;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5decfd0bd61d..153ab977a013 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -383,14 +383,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->curr.user_pages = 0;
box = &nc->cmd[0];
- for (i = 0; i < host->dma.num_ents; i++) {
- box->cmd = CMD_MODE_BOX;
- /* Initialize sg dma address */
- sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
- + sg->offset;
+ /* location of command block must be 64 bit aligned */
+ BUG_ON(host->dma.cmd_busaddr & 0x07);
- if (i == (host->dma.num_ents - 1))
+ nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+ host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+ DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+ host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+ n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+ host->dma.num_ents, host->dma.dir);
+ if (n == 0) {
+ printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+ mmc_hostname(host->mmc));
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOMEM;
+ }
+
+ for_each_sg(host->dma.sg, sg, n, i) {
+
+ box->cmd = CMD_MODE_BOX;
+
+ if (i == n - 1)
box->cmd |= CMD_LC;
rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
box->cmd |= CMD_DST_CRCI(crci);
}
box++;
- sg++;
- }
-
- /* location of command block must be 64 bit aligned */
- BUG_ON(host->dma.cmd_busaddr & 0x07);
-
- nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
- host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
- DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
- host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
- n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
- host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
- if (n != host->dma.num_ents) {
- printk(KERN_ERR "%s: Unable to map in all sg elements\n",
- mmc_hostname(host->mmc));
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOMEM;
}
return 0;
@@ -1331,9 +1326,6 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->timer.function)
pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
-#if BUSCLK_PWRSAVE
- msmsdcc_disable_clocks(host, 1);
-#endif
return 0;
cmd_irq_free:
free_irq(cmd_irqres->start, host);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb87cba..4428594261c5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <asm/dma.h>
#include <asm/irq.h>
@@ -141,10 +142,49 @@ struct mxcmci_host {
struct work_struct datawork;
spinlock_t lock;
+
+ struct regulator *vcc;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+ host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+ if (IS_ERR(host->vcc)) {
+ host->vcc = NULL;
+ } else {
+ host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+ if (host->pdata && host->pdata->ocr_avail)
+ dev_warn(mmc_dev(host->mmc),
+ "pdata->ocr_avail will not be used\n");
+ }
+
+ if (host->vcc == NULL) {
+ /* fall-back to platform data */
+ if (host->pdata && host->pdata->ocr_avail)
+ host->mmc->ocr_avail = host->pdata->ocr_avail;
+ else
+ host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+ unsigned char power_mode,
+ unsigned int vdd)
+{
+ if (host->vcc) {
+ if (power_mode == MMC_POWER_UP)
+ mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ else if (power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ }
+
+ if (host->pdata && host->pdata->setpower)
+ host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
return host->do_dma;
@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
if (host->power_mode != ios->power_mode) {
- if (host->pdata && host->pdata->setpower)
- host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+ mxcmci_set_power(host, ios->power_mode, ios->vdd);
host->power_mode = ios->power_mode;
+
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMD_DAT_CONT_INIT;
}
@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->pdata = pdev->dev.platform_data;
spin_lock_init(&host->lock);
- if (host->pdata && host->pdata->ocr_avail)
- mmc->ocr_avail = host->pdata->ocr_avail;
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mxcmci_init_ocr(host);
if (host->pdata && host->pdata->dat3_card_detect)
host->default_irq_mask =
@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
+ if (host->vcc)
+ regulator_put(host->vcc);
+
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev)
clk_put(host->clk);
release_mem_region(host->res->start, resource_size(host->res));
- release_resource(host->res);
mmc_free_host(mmc);
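Editor's note: the mxcmmc change above routes OCR selection through the regulator framework, with platform data and a 3.2-3.4 V default as fallbacks. A minimal standalone sketch of only that selection order; pick_ocr() is a hypothetical helper, and the MMC_VDD values mirror the OCR bit layout in linux/mmc/host.h:

#include <stdio.h>

#define MMC_VDD_32_33  0x00100000
#define MMC_VDD_33_34  0x00200000

/* Selection order used by mxcmci_init_ocr(): a regulator-provided mask
 * wins, then platform data, then the 3.2-3.4 V default.  Zero stands in
 * for "not available" here. */
static unsigned int pick_ocr(unsigned int regulator_mask, unsigned int pdata_mask)
{
    if (regulator_mask)
        return regulator_mask;
    if (pdata_mask)
        return pdata_mask;
    return MMC_VDD_32_33 | MMC_VDD_33_34;
}

int main(void)
{
    printf("no regulator, no pdata -> 0x%08x\n", pick_ocr(0, 0));
    printf("pdata only             -> 0x%08x\n", pick_ocr(0, MMC_VDD_33_34));
    return 0;
}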
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000000..2aeef4ffed8c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ * Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+
+ switch (reg) {
+ case SDHCI_HOST_VERSION:
+ case SDHCI_SLOT_INT_STATUS:
+ /* those registers don't exist */
+ return 0;
+ default:
+ ret = readw(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ switch (reg) {
+ case SDHCI_CAPABILITIES:
+ ret = readl(host->ioaddr + reg);
+ /* Mask the support for 3.0V */
+ ret &= ~SDHCI_CAN_VDD_300;
+ break;
+ default:
+ ret = readl(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
+};
+
+struct sdhci_pltfm_data sdhci_dove_pdata = {
+ .ops = &sdhci_dove_ops,
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_FORCE_DMA,
+};
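Editor's note: the Dove accessor above intercepts reads of SDHCI_CAPABILITIES and clears the 3.0 V bit so the core never advertises that voltage, and returns 0 for registers the SoC does not implement. A minimal standalone sketch of the capability masking, with the bit values taken from sdhci.h:

#include <stdio.h>

#define SDHCI_CAN_VDD_330  0x01000000
#define SDHCI_CAN_VDD_300  0x02000000
#define SDHCI_CAN_VDD_180  0x04000000

/* Model of sdhci_dove_readl() for the capabilities register: whatever
 * the silicon reports, the 3.0 V capability is filtered out. */
static unsigned int dove_filter_caps(unsigned int raw_caps)
{
    return raw_caps & ~SDHCI_CAN_VDD_300;
}

int main(void)
{
    unsigned int raw = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_180;

    printf("raw caps      0x%08x\n", raw);
    printf("filtered caps 0x%08x\n", dove_filter_caps(raw)); /* 3.0 V bit gone */
    return 0;
}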
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index c51b71174c1d..dd84124f4209 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
* your option) any later version.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -20,8 +21,12 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/mmc/host.h>
+#ifdef CONFIG_PPC
#include <asm/machdep.h>
+#endif
#include "sdhci-of.h"
#include "sdhci.h"
@@ -112,7 +117,11 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
return true;
/* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+ return false;
+#endif
}
static int __devinit sdhci_of_probe(struct platform_device *ofdev,
@@ -122,7 +131,7 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev,
struct sdhci_of_data *sdhci_of_data = match->data;
struct sdhci_host *host;
struct sdhci_of_host *of_host;
- const u32 *clk;
+ const __be32 *clk;
int size;
int ret;
@@ -166,7 +175,7 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev,
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
- of_host->clock = *clk;
+ of_host->clock = be32_to_cpup(clk);
ret = sdhci_add_host(host);
if (ret)
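Editor's note: the sdhci-of change above treats the "clock-frequency" property as big-endian and converts it with be32_to_cpup() instead of dereferencing the raw pointer. A minimal standalone sketch of that conversion from the four property bytes, with be32_to_cpu_bytes() as a hypothetical userspace stand-in:

#include <stdio.h>
#include <stdint.h>

/* Assemble a host-order value from the four big-endian bytes of a
 * device-tree property, as be32_to_cpup() does in the kernel. */
static uint32_t be32_to_cpu_bytes(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    /* "clock-frequency = <133000000>" as stored in the flattened tree */
    const uint8_t prop[4] = { 0x07, 0xed, 0x6b, 0x40 };

    printf("clock-frequency = %u Hz\n", (unsigned)be32_to_cpu_bytes(prop));
    return 0;
}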
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c2460d437..0dc905b20eee 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u8 scratch;
+
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_8220:
+ case PCI_DEVICE_ID_O2_8221:
+ case PCI_DEVICE_ID_O2_8320:
+ case PCI_DEVICE_ID_O2_8321:
+ /* This extra setup is required due to broken ADMA. */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set Multi 3 to VCC3V# */
+ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+ /* Disable CLK_REQ# support after media DET */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+ /* Choose capabilities, enable SDMA. We have to write 0x01
+ * to the capabilities register first to unlock it.
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+ /* Disable ADMA1/2 */
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+ /* Disable the infinite transfer mode */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ }
+
+ return 0;
+}
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
static int jmicron_probe(struct sdhci_pci_chip *chip)
{
int ret;
+ u16 mmcdev = 0;
if (chip->pdev->revision == 0) {
chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
* 2. The MMC interface has a lower subfunction number
* than the SD interface.
*/
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+ else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+ if (mmcdev) {
struct pci_dev *sd_dev;
sd_dev = NULL;
while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
- PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+ mmcdev, sd_dev)) != NULL) {
if ((PCI_SLOT(chip->pdev->devfn) ==
PCI_SLOT(sd_dev->devfn)) &&
(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
}
+ /* JM388 MMC doesn't support 1.8V while SD supports it */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+ slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31 |
+ MMC_VDD_165_195; /* allow 1.8V */
+ slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+ }
+
/*
* The secondary interface requires a bit set to get the
* interrupts.
*/
- if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 1);
+ slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
return 0;
}
@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
if (dead)
return;
- if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 0);
}
@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
{
int i;
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
{
int ret, i;
- if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 1);
}
@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
return 0;
}
+static const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = o2_probe,
+};
+
static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe,
@@ -510,6 +603,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_SYSKONNECT,
.device = 0x8000,
.subvendor = PCI_ANY_ID,
@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
},
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8120,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8220,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8221,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8320,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8321,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
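Editor's note: the o2_probe() routine above follows an unlock/modify/re-lock pattern: clear bit 7 of O2_SD_LOCK_WP, adjust several vendor registers by read-modify-write, then set bit 7 again. A minimal standalone sketch of that sequence against a hypothetical in-memory configuration space; cfg_read()/cfg_write() stand in for pci_read_config_byte()/pci_write_config_byte():

#include <stdio.h>
#include <stdint.h>

#define O2_SD_LOCK_WP  0xD3   /* register offsets from the patch */
#define O2_SD_CLKREQ   0xEC

/* 256-byte stand-in for the function's PCI configuration space. */
static uint8_t cfg[256];

static uint8_t cfg_read(uint8_t reg)             { return cfg[reg]; }
static void    cfg_write(uint8_t reg, uint8_t v) { cfg[reg] = v; }

int main(void)
{
    uint8_t scratch;

    cfg[O2_SD_LOCK_WP] = 0x80;           /* registers locked on power-up */

    scratch = cfg_read(O2_SD_LOCK_WP);   /* unlock: clear bit 7 */
    cfg_write(O2_SD_LOCK_WP, scratch & 0x7f);

    scratch = cfg_read(O2_SD_CLKREQ);    /* example tweak: set bit 5 */
    cfg_write(O2_SD_CLKREQ, scratch | 0x20);

    scratch = cfg_read(O2_SD_LOCK_WP);   /* re-lock: set bit 7 again */
    cfg_write(O2_SD_LOCK_WP, scratch | 0x80);

    printf("LOCK_WP=0x%02x CLKREQ=0x%02x\n",
           (unsigned)cfg_read(O2_SD_LOCK_WP), (unsigned)cfg_read(O2_SD_CLKREQ));
    return 0;
}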
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89f662b..dbab0407f4b6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
#endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+ { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+ { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
{ },
};
MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48af56a..ea2e44d9be5e 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@ struct sdhci_pltfm_host {
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8a..5309ab95aada 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
if (!clksrc)
return UINT_MAX;
+ /*
+	 * When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL, the clock divider's
+	 * step is 1, unlike the host controller's internal divider.
+ */
+ if (ourhost->pdata->clk_type) {
+ rate = clk_round_rate(clksrc, wanted);
+ return wanted - rate;
+ }
+
rate = clk_get_rate(clksrc);
for (div = 1; div < 256; div *= 2) {
@@ -232,10 +241,79 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
return min;
}
+/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+ return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+ /*
+	 * The initial clock must be in the 100 kHz-400 kHz range,
+	 * so round against 400 kHz as the maximum.
+ */
+ return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change.*/
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+
+ /* don't bother if the clock is going off */
+ if (clock == 0)
+ return;
+
+ sdhci_s3c_set_clock(host, clock);
+
+ clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+ host->clock = clock;
+}
+
+/**
+ * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * The controller supports an 8-bit bus width but is not a v3 controller,
+ * so platform_8bit_width() is added to enable 8-bit support.
+ */
+static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ default:
+ break;
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
+ .platform_8bit_width = sdhci_s3c_platform_8bit_width,
};
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -361,6 +439,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
clks++;
sc->clk_bus[ptr] = clk;
+
+ /*
+ * save current clock index to know which clock bus
+ * is used later in overriding functions.
+ */
+ sc->cur_clk = ptr;
+
clk_enable(clk);
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -421,12 +506,29 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->mmc->caps = MMC_CAP_NONREMOVABLE;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+ /*
+ * If controller does not have internal clock divider,
+ * we can use overriding functions instead of default.
+ */
+ if (pdata->clk_type) {
+ sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+ }
+
+ /* It supports additional host capabilities if needed */
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
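Editor's note: the new platform_8bit_width() callback above flips the 4-bit/8-bit bits of SDHCI_HOST_CONTROL so the core can drive an 8-bit bus on a pre-v3 controller. A minimal standalone sketch of that bit juggling applied to a plain byte, with the register bit values taken from sdhci.h and the MMC_BUS_WIDTH_* values from the MMC core:

#include <stdio.h>
#include <stdint.h>

#define SDHCI_CTRL_4BITBUS 0x02
#define SDHCI_CTRL_8BITBUS 0x20

enum { MMC_BUS_WIDTH_1 = 0, MMC_BUS_WIDTH_4 = 2, MMC_BUS_WIDTH_8 = 3 };

/* Same switch as sdhci_s3c_platform_8bit_width(): at most one of the
 * bus-width bits ends up set (neither for a 1-bit bus). */
static uint8_t set_bus_width(uint8_t ctrl, int width)
{
    switch (width) {
    case MMC_BUS_WIDTH_8:
        ctrl |= SDHCI_CTRL_8BITBUS;
        ctrl &= ~SDHCI_CTRL_4BITBUS;
        break;
    case MMC_BUS_WIDTH_4:
        ctrl |= SDHCI_CTRL_4BITBUS;
        ctrl &= ~SDHCI_CTRL_8BITBUS;
        break;
    default:
        break;
    }
    return ctrl;
}

int main(void)
{
    printf("4-bit: 0x%02x\n", (unsigned)set_bus_width(0x00, MMC_BUS_WIDTH_4)); /* 0x02 */
    printf("8-bit: 0x%02x\n", (unsigned)set_bus_width(0x02, MMC_BUS_WIDTH_8)); /* 0x20 */
    return 0;
}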
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..4823ee94a63f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <mach/gpio.h>
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val;
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ /* Use wp_gpio here instead? */
+ val = readl(host->ioaddr + reg);
+ return val | SDHCI_WRITE_PROTECT;
+ }
+
+ return readl(host->ioaddr + reg);
+}
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+ /* Erratum: Version register is invalid in HW. */
+ return SDHCI_SPEC_200;
+ }
+
+ return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ /* Seems like we're getting spurious timeout and crc errors, so
+ * disable signalling of them. In case of real errors software
+ * timers should take care of eventually detecting them.
+ */
+ if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+ val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+ writel(val, host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_INT_ENABLE)) {
+ /* Erratum: Must enable block gap interrupt detection */
+ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ if (val & SDHCI_INT_CARD_INT)
+ gap_ctrl |= 0x8;
+ else
+ gap_ctrl &= ~0x8;
+ writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ }
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+ struct tegra_sdhci_platform_data *plat;
+
+ plat = pdev->dev.platform_data;
+
+ if (!gpio_is_valid(plat->wp_gpio))
+ return -1;
+
+ return gpio_get_value(plat->wp_gpio);
+}
+
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+ struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+ tasklet_schedule(&sdhost->card_tasklet);
+ return IRQ_HANDLED;
+};
+
+static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+ u32 ctrl;
+
+ plat = pdev->dev.platform_data;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (bus_width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ return 0;
+}
+
+
+static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
+ struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+ struct clk *clk;
+ int rc;
+
+ plat = pdev->dev.platform_data;
+ if (plat == NULL) {
+ dev_err(mmc_dev(host->mmc), "missing platform data\n");
+ return -ENXIO;
+ }
+
+ if (gpio_is_valid(plat->power_gpio)) {
+ rc = gpio_request(plat->power_gpio, "sdhci_power");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate power gpio\n");
+ goto out;
+ }
+ tegra_gpio_enable(plat->power_gpio);
+ gpio_direction_output(plat->power_gpio, 1);
+ }
+
+ if (gpio_is_valid(plat->cd_gpio)) {
+ rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate cd gpio\n");
+ goto out_power;
+ }
+ tegra_gpio_enable(plat->cd_gpio);
+ gpio_direction_input(plat->cd_gpio);
+
+ rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ mmc_hostname(host->mmc), host);
+
+ if (rc) {
+ dev_err(mmc_dev(host->mmc), "request irq error\n");
+ goto out_cd;
+ }
+
+ }
+
+ if (gpio_is_valid(plat->wp_gpio)) {
+ rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+ if (rc) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate wp gpio\n");
+ goto out_cd;
+ }
+ tegra_gpio_enable(plat->wp_gpio);
+ gpio_direction_input(plat->wp_gpio);
+ }
+
+ clk = clk_get(mmc_dev(host->mmc), NULL);
+ if (IS_ERR(clk)) {
+ dev_err(mmc_dev(host->mmc), "clk err\n");
+ rc = PTR_ERR(clk);
+ goto out_wp;
+ }
+ clk_enable(clk);
+ pltfm_host->clk = clk;
+
+ if (plat->is_8bit)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ return 0;
+
+out_wp:
+ if (gpio_is_valid(plat->wp_gpio)) {
+ tegra_gpio_disable(plat->wp_gpio);
+ gpio_free(plat->wp_gpio);
+ }
+
+out_cd:
+ if (gpio_is_valid(plat->cd_gpio)) {
+ tegra_gpio_disable(plat->cd_gpio);
+ gpio_free(plat->cd_gpio);
+ }
+
+out_power:
+ if (gpio_is_valid(plat->power_gpio)) {
+ tegra_gpio_disable(plat->power_gpio);
+ gpio_free(plat->power_gpio);
+ }
+
+out:
+ return rc;
+}
+
+static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct tegra_sdhci_platform_data *plat;
+
+ plat = pdev->dev.platform_data;
+
+ if (gpio_is_valid(plat->wp_gpio)) {
+ tegra_gpio_disable(plat->wp_gpio);
+ gpio_free(plat->wp_gpio);
+ }
+
+ if (gpio_is_valid(plat->cd_gpio)) {
+ tegra_gpio_disable(plat->cd_gpio);
+ gpio_free(plat->cd_gpio);
+ }
+
+ if (gpio_is_valid(plat->power_gpio)) {
+ tegra_gpio_disable(plat->power_gpio);
+ gpio_free(plat->power_gpio);
+ }
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .platform_8bit_width = tegra_sdhci_8bit,
+};
+
+struct sdhci_pltfm_data sdhci_tegra_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .ops = &tegra_sdhci_ops,
+ .init = tegra_sdhci_pltfm_init,
+ .exit = tegra_sdhci_pltfm_exit,
+};
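Editor's note: the Tegra write hook above works around an erratum by mirroring the card-interrupt enable bit into bit 3 of BLOCK_GAP_CONTROL whenever SDHCI_INT_ENABLE is written. A minimal standalone sketch of that mirroring; update_gap_ctrl() is a hypothetical helper and the interrupt bit value is taken from sdhci.h:

#include <stdio.h>
#include <stdint.h>

#define SDHCI_INT_CARD_INT 0x00000100

/* Mirror of the workaround in tegra_sdhci_writel(): bit 3 of the
 * block-gap control register must track the card-interrupt enable. */
static uint8_t update_gap_ctrl(uint8_t gap_ctrl, uint32_t int_enable)
{
    if (int_enable & SDHCI_INT_CARD_INT)
        gap_ctrl |= 0x8;
    else
        gap_ctrl &= ~0x8;
    return gap_ctrl;
}

int main(void)
{
    printf("card irq on : 0x%02x\n", (unsigned)update_gap_ctrl(0x00, SDHCI_INT_CARD_INT));
    printf("card irq off: 0x%02x\n", (unsigned)update_gap_ctrl(0x08, 0));
    return 0;
}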
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db426c910..9e15f41f87be 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
#include <linux/leds.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include "sdhci.h"
@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
sdhci_readw(host, SDHCI_ACMD12_ERR),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
+ printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
+ sdhci_readl(host, SDHCI_CAPABILITIES_1));
+ printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT)
host->data->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+ else if (intmask & SDHCI_INT_DATA_END_BIT)
+ host->data->error = -EILSEQ;
+ else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ != MMC_BUS_TEST_R)
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- unsigned int caps;
+ unsigned int caps, ocr_avail;
int ret;
WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
- mmc->ocr_avail = 0;
+ ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
- mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+ ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
if (caps & SDHCI_CAN_VDD_300)
- mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+ ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
if (caps & SDHCI_CAN_VDD_180)
- mmc->ocr_avail |= MMC_VDD_165_195;
+ ocr_avail |= MMC_VDD_165_195;
+
+ mmc->ocr_avail = ocr_avail;
+ mmc->ocr_avail_sdio = ocr_avail;
+ if (host->ocr_avail_sdio)
+ mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+ mmc->ocr_avail_sd = ocr_avail;
+ if (host->ocr_avail_sd)
+ mmc->ocr_avail_sd &= host->ocr_avail_sd;
+ else /* normal SD controllers don't support 1.8V */
+ mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+ mmc->ocr_avail_mmc = ocr_avail;
+ if (host->ocr_avail_mmc)
+ mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host)
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
- if (host->flags & SDHCI_USE_ADMA)
- mmc->max_seg_size = 65536;
- else
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ mmc->max_seg_size = 65535;
+ else
+ mmc->max_seg_size = 65536;
+ } else {
mmc->max_seg_size = mmc->max_req_size;
+ }
/*
* Maximum block size. This varies from controller to controller and
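Editor's note: the sdhci_add_host() hunk above derives a base OCR mask from the capability bits and then narrows it per bus type: drivers may AND in their own ocr_avail_sd/mmc/sdio masks, and plain SD drops 1.8 V by default. A minimal standalone sketch of that derivation, with the constants copied from sdhci.h and linux/mmc/host.h:

#include <stdio.h>
#include <stdint.h>

#define SDHCI_CAN_VDD_330  0x01000000
#define SDHCI_CAN_VDD_300  0x02000000
#define SDHCI_CAN_VDD_180  0x04000000

#define MMC_VDD_165_195    0x00000080
#define MMC_VDD_29_30      0x00020000
#define MMC_VDD_30_31      0x00040000
#define MMC_VDD_32_33      0x00100000
#define MMC_VDD_33_34      0x00200000

/* Base OCR mask derived from the controller capability bits. */
static uint32_t caps_to_ocr(uint32_t caps)
{
    uint32_t ocr = 0;

    if (caps & SDHCI_CAN_VDD_330)
        ocr |= MMC_VDD_32_33 | MMC_VDD_33_34;
    if (caps & SDHCI_CAN_VDD_300)
        ocr |= MMC_VDD_29_30 | MMC_VDD_30_31;
    if (caps & SDHCI_CAN_VDD_180)
        ocr |= MMC_VDD_165_195;
    return ocr;
}

int main(void)
{
    uint32_t caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180;
    uint32_t ocr_avail = caps_to_ocr(caps);
    uint32_t host_ocr_avail_sd = 0;      /* 0 means "no driver override" */

    /* Per-bus narrowing as done at the end of the hunk. */
    uint32_t ocr_avail_sd = ocr_avail;
    if (host_ocr_avail_sd)
        ocr_avail_sd &= host_ocr_avail_sd;
    else                                 /* normal SD controllers: no 1.8 V */
        ocr_avail_sd &= ~MMC_VDD_165_195;

    printf("ocr_avail    = 0x%08x\n", (unsigned)ocr_avail);
    printf("ocr_avail_sd = 0x%08x\n", (unsigned)ocr_avail_sd);
    return 0;
}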
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f00c060..6e0969e40650 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
#define SDHCI_RESPONSE 0x10
@@ -165,7 +166,7 @@
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000
-/* 44-47 reserved for more caps */
+#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48
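Editor's note: the new SDHCI_GET_CMD() macro above recovers the opcode from the command register so the data-CRC handler can ignore the CRC error that BUS_TEST_R legitimately produces. A minimal standalone round-trip of the two macros, assuming MMC_BUS_TEST_R is opcode 14 as in linux/mmc/mmc.h (the macro bodies are written out with full parentheses here):

#include <stdio.h>
#include <stdint.h>

#define SDHCI_MAKE_CMD(c, f) ((((c) & 0xff) << 8) | ((f) & 0xff))
#define SDHCI_GET_CMD(c)     (((c) >> 8) & 0x3f)

#define MMC_BUS_TEST_R 14    /* opcode from linux/mmc/mmc.h */

int main(void)
{
    uint16_t cmd_reg = SDHCI_MAKE_CMD(MMC_BUS_TEST_R, 0x1a); /* flags are arbitrary here */

    /* The data-CRC special case in sdhci_data_irq() keys off this decode. */
    if (SDHCI_GET_CMD(cmd_reg) == MMC_BUS_TEST_R)
        printf("CRC error during CMD%u would be ignored\n",
               (unsigned)SDHCI_GET_CMD(cmd_reg));
    return 0;
}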
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f472c2714eb8..bbc298fd2a15 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -446,7 +446,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
mmc->max_seg_size = 1024 * 512;
mmc->max_blk_size = 512;
- /* reset the controler */
+ /* reset the controller */
if (sdricoh_reset(host)) {
dev_dbg(dev, "could not reset\n");
result = -EIO;
@@ -478,7 +478,7 @@ static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
- /* search pci cardbus bridge that contains the mmc controler */
+ /* search pci cardbus bridge that contains the mmc controller */
/* the io region is already claimed by yenta_socket... */
while ((pci_dev =
pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a89593e..e3c6ef208391 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
* double buffer support
*
*/
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/device.h>
+
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND 0x00000001
+#define TMIO_STAT_DATAEND 0x00000004
+#define TMIO_STAT_CARD_REMOVE 0x00000008
+#define TMIO_STAT_CARD_INSERT 0x00000010
+#define TMIO_STAT_SIGSTATE 0x00000020
+#define TMIO_STAT_WRPROTECT 0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A 0x00000400
+#define TMIO_STAT_CMD_IDX_ERR 0x00010000
+#define TMIO_STAT_CRCFAIL 0x00020000
+#define TMIO_STAT_STOPBIT_ERR 0x00040000
+#define TMIO_STAT_DATATIMEOUT 0x00080000
+#define TMIO_STAT_RXOVERFLOW 0x00100000
+#define TMIO_STAT_TXUNDERRUN 0x00200000
+#define TMIO_STAT_CMDTIMEOUT 0x00400000
+#define TMIO_STAT_RXRDY 0x01000000
+#define TMIO_STAT_TXRQ 0x02000000
+#define TMIO_STAT_ILL_FUNC 0x20000000
+#define TMIO_STAT_CMD_BUSY 0x40000000
+#define TMIO_STAT_ILL_ACCESS 0x80000000
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ 0x0001
+#define TMIO_SDIO_STAT_EXPUB52 0x4000
+#define TMIO_SDIO_STAT_EXWT 0x8000
+#define TMIO_SDIO_MASK_ALL 0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL 0x837f031d
+#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+#define enable_mmc_irqs(host, i) \
+ do { \
+ u32 mask;\
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+ mask &= ~((i) & TMIO_MASK_IRQ); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+ } while (0)
+
+#define disable_mmc_irqs(host, i) \
+ do { \
+ u32 mask;\
+ mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+ mask |= ((i) & TMIO_MASK_IRQ); \
+ sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+ } while (0)
+
+#define ack_mmc_irqs(host, i) \
+ do { \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+ } while (0)
+
+/* This is arbitrary, just no one has needed any higher alignment yet */
+#define MAX_ALIGN 4
+
+struct tmio_mmc_host {
+ void __iomem *ctl;
+ unsigned long bus_shift;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ int irq;
+ unsigned int sdio_irq_enabled;
+
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
+ /* pio related stuff */
+ struct scatterlist *sg_ptr;
+ struct scatterlist *sg_orig;
+ unsigned int sg_len;
+ unsigned int sg_off;
+
+ struct platform_device *pdev;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+ unsigned int dma_sglen;
+ u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+ struct scatterlist bounce_sg;
+#endif
+
+ /* Track lost interrupts */
+ struct delayed_work delayed_reset_work;
+ spinlock_t lock;
+ unsigned long last_req_ts;
+};
+
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
-#include "tmio_mmc.h"
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+ host->sg_len = data->sg_len;
+ host->sg_ptr = data->sg;
+ host->sg_orig = data->sg;
+ host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+ host->sg_ptr = sg_next(host->sg_ptr);
+ host->sg_off = 0;
+ return --host->sg_len;
+}
+
+static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+{
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a) \
+ do { \
+ if (status & TMIO_STAT_##a) \
+ printk(#a); \
+ } while (0)
+
+void pr_debug_status(u32 status)
+{
+ printk(KERN_DEBUG "status: %08x = ", status);
+ STATUS_TO_TEXT(CARD_REMOVE);
+ STATUS_TO_TEXT(CARD_INSERT);
+ STATUS_TO_TEXT(SIGSTATE);
+ STATUS_TO_TEXT(WRPROTECT);
+ STATUS_TO_TEXT(CARD_REMOVE_A);
+ STATUS_TO_TEXT(CARD_INSERT_A);
+ STATUS_TO_TEXT(SIGSTATE_A);
+ STATUS_TO_TEXT(CMD_IDX_ERR);
+ STATUS_TO_TEXT(STOPBIT_ERR);
+ STATUS_TO_TEXT(ILL_FUNC);
+ STATUS_TO_TEXT(CMD_BUSY);
+ STATUS_TO_TEXT(CMDRESPEND);
+ STATUS_TO_TEXT(DATAEND);
+ STATUS_TO_TEXT(CRCFAIL);
+ STATUS_TO_TEXT(DATATIMEOUT);
+ STATUS_TO_TEXT(CMDTIMEOUT);
+ STATUS_TO_TEXT(RXOVERFLOW);
+ STATUS_TO_TEXT(TXUNDERRUN);
+ STATUS_TO_TEXT(RXRDY);
+ STATUS_TO_TEXT(TXRQ);
+ STATUS_TO_TEXT(ILL_ACCESS);
+ printk("\n");
+}
+
+#else
+#define pr_debug_status(s) do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (enable) {
+ host->sdio_irq_enabled = 1;
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+ (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+ } else {
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+ host->sdio_irq_enabled = 0;
+ }
+}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
+ /*
+ * Testing on sh-mobile showed that SDIO IRQs are unmasked when
+ * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
+ * device IRQ here and restore the SDIO IRQ mask before
+ * re-enabling the device IRQ.
+ */
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+ enable_irq(host->irq);
+ }
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
+ /* see comment in tmio_mmc_clk_stop above */
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+ enable_irq(host->irq);
+ }
}
static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
msleep(10);
}
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ delayed_reset_work.work);
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->mrq;
+
+ /* request already finished */
+ if (!mrq
+ || time_is_after_jiffies(host->last_req_ts +
+ msecs_to_jiffies(2000))) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ dev_warn(&host->pdev->dev,
+ "timeout waiting for hardware interrupt (CMD%u)\n",
+ mrq->cmd->opcode);
+
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+ else if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
+ host->cmd = NULL;
+ host->data = NULL;
+ host->mrq = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ reset(host);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
struct mmc_request *mrq = host->mrq;
+ if (!mrq)
+ return;
+
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
+ cancel_delayed_work(&host->delayed_reset_work);
+
mmc_request_done(host->mmc, mrq);
}
@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
+/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
if (data->flags & MMC_DATA_READ) {
if (!host->chan_rx)
disable_mmc_irqs(host, TMIO_MASK_READOP);
+ else
+ tmio_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq);
} else {
@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
- struct mmc_data *data = host->data;
+ struct mmc_data *data;
+ spin_lock(&host->lock);
+ data = host->data;
if (!data)
- return;
+ goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
/*
@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
} else {
tmio_mmc_do_data_irq(host);
}
+out:
+ spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
struct mmc_command *cmd = host->cmd;
int i, addr;
+ spin_lock(&host->lock);
+
if (!host->cmd) {
pr_debug("Spurious CMD irq\n");
- return;
+ goto out;
}
host->cmd = NULL;
@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
if (!host->chan_rx)
enable_mmc_irqs(host, TMIO_MASK_READOP);
} else {
- struct dma_chan *chan = host->chan_tx;
- if (!chan)
+ if (!host->chan_tx)
enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else
tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
tmio_mmc_finish_request(host);
}
+out:
+ spin_unlock(&host->lock);
+
return;
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
unsigned int ireg, irq_mask, status;
+ unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
pr_debug("MMC IRQ begin\n");
@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+ sdio_ireg = 0;
+ if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+ sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+ if (sdio_ireg && !host->sdio_irq_enabled) {
+ pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+ sdio_status, sdio_irq_mask, sdio_ireg);
+ tmio_mmc_enable_sdio_irq(host->mmc, 0);
+ goto out;
+ }
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+ sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ if (sdio_ireg)
+ goto out;
+ }
+
pr_debug_status(status);
pr_debug_status(ireg);
@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
*/
/* Command completion */
- if (ireg & TMIO_MASK_CMD) {
- ack_mmc_irqs(host, TMIO_MASK_CMD);
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status);
}
@@ -407,6 +761,16 @@ out:
}
#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+ if (host->sg_ptr == &host->bounce_sg) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ }
+}
+
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg)
enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
- struct scatterlist *sg = host->sg_ptr;
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
- int ret;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ align >= MAX_ALIGN)) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+	/* A single sg element may be unaligned; in that case use our bounce buffer */
+ if (!aligned) {
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0) {
@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
if (desc) {
- host->desc = desc;
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- host->cookie = desc->tx_submit(desc);
- if (host->cookie < 0) {
- host->desc = NULL;
- ret = host->cookie;
+ cookie = desc->tx_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
} else {
chan->device->device_issue_pending(chan);
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, host->cookie, host->mrq);
+ __func__, host->sg_len, ret, cookie, host->mrq);
- if (!host->desc) {
+pio:
+ if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
- reset(host);
- /* Fail this request, let above layers recover */
- host->mrq->cmd->error = ret;
- tmio_mmc_finish_request(host);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
- desc, host->cookie, host->sg_len);
-
- return ret > 0 ? 0 : ret;
+ desc, cookie, host->sg_len);
}
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
- struct scatterlist *sg = host->sg_ptr;
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
- int ret;
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ align >= MAX_ALIGN)) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+	/* A single sg element may be unaligned; in that case use our bounce buffer */
+ if (!aligned) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0) {
@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
if (desc) {
- host->desc = desc;
desc->callback = tmio_dma_complete;
desc->callback_param = host;
- host->cookie = desc->tx_submit(desc);
- if (host->cookie < 0) {
- host->desc = NULL;
- ret = host->cookie;
+ cookie = desc->tx_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, host->cookie, host->mrq);
+ __func__, host->sg_len, ret, cookie, host->mrq);
- if (!host->desc) {
+pio:
+ if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
- reset(host);
- /* Fail this request, let above layers recover */
- host->mrq->cmd->error = ret;
- tmio_mmc_finish_request(host);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
- desc, host->cookie);
-
- return ret > 0 ? 0 : ret;
+ desc, cookie);
}
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
- return tmio_mmc_start_dma_rx(host);
+ tmio_mmc_start_dma_rx(host);
} else {
if (host->chan_tx)
- return tmio_mmc_start_dma_tx(host);
+ tmio_mmc_start_dma_tx(host);
}
-
- return 0;
}
static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
static void tmio_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data)
+ goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg)
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
}
/* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
- host->cookie = -EINVAL;
- host->desc = NULL;
-
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
host->chan_rx = NULL;
dma_release_channel(chan);
}
-
- host->cookie = -EINVAL;
- host->desc = NULL;
}
#else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
- return 0;
}
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
- return tmio_mmc_start_dma(host, data);
+ tmio_mmc_start_dma(host, data);
+
+ return 0;
}
/* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (host->mrq)
pr_debug("request not null\n");
+ host->last_req_ts = jiffies;
+ wmb();
host->mrq = mrq;
if (mrq->data) {
@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
ret = tmio_mmc_start_command(host, mrq->cmd);
- if (!ret)
+ if (!ret) {
+ schedule_delayed_work(&host->delayed_reset_work,
+ msecs_to_jiffies(2000));
return;
+ }
fail:
+ host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
}
@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = tmio_mmc_get_cd,
+ .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
#ifdef CONFIG_PM
@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto host_free;
mmc->ops = &tmio_mmc_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA;
- mmc->caps |= pdata->capabilities;
+ mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_segs;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
if (pdata->ocr_mask)
mmc->ocr_avail = pdata->ocr_mask;
else
@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto cell_disable;
disable_mmc_irqs(host, TMIO_MASK_ALL);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ tmio_mmc_enable_sdio_irq(mmc, 0);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
goto cell_disable;
+ spin_lock_init(&host->lock);
+
+ /* Init delayed work for request timeouts */
+ INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
/* See if we also get DMA */
tmio_mmc_request_dma(host, pdata);
@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
+ cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
free_irq(host->irq, host);
if (cell->disable)
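Editor's note: both DMA start paths added above scan the scatterlist before mapping it: a misaligned length forces PIO, and a misaligned offset is only tolerated for a single small entry, which is then routed through the bounce buffer. A minimal standalone sketch of that decision; dma_strategy() is a hypothetical helper, the MAX_ALIGN guard is omitted for brevity, and an alignment_shift of 1 (2-byte alignment) stands in for pdata->dma->alignment_shift:

#include <stdio.h>
#include <stdbool.h>

struct sg_entry { unsigned int offset, length; };

/* Decision logic mirrored from tmio_mmc_start_dma_rx()/_tx():
 * returns 0 = DMA as-is, 1 = DMA via bounce buffer, -1 = fall back to PIO. */
static int dma_strategy(const struct sg_entry *sg, int sg_len,
                        unsigned int alignment_shift, unsigned int max_seg)
{
    unsigned int align = (1u << alignment_shift) - 1;
    bool aligned = true, multiple = true;
    int i;

    for (i = 0; i < sg_len; i++) {
        if (sg[i].offset & align)
            aligned = false;
        if (sg[i].length & align) {
            multiple = false;
            break;
        }
    }

    if (!multiple)
        return -1;                       /* a length is not a multiple: PIO */
    if (aligned)
        return 0;                        /* nothing to fix up */
    if (sg_len > 1 || sg[0].length > max_seg)
        return -1;                       /* cannot bounce more than one segment */
    return 1;                            /* single unaligned entry: bounce */
}

int main(void)
{
    struct sg_entry ok[]  = { { 0, 512 } };
    struct sg_entry odd[] = { { 1, 512 } };

    printf("aligned entry   -> %d\n", dma_strategy(ok, 1, 1, 4096));
    printf("unaligned entry -> %d\n", dma_strategy(odd, 1, 1, 4096));
    return 0;
}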
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78e3ea5..000000000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Definitons for use with the tmio_mmc.c
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- * (c) 2007 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND 0x00000001
-#define TMIO_STAT_DATAEND 0x00000004
-#define TMIO_STAT_CARD_REMOVE 0x00000008
-#define TMIO_STAT_CARD_INSERT 0x00000010
-#define TMIO_STAT_SIGSTATE 0x00000020
-#define TMIO_STAT_WRPROTECT 0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A 0x00000400
-#define TMIO_STAT_CMD_IDX_ERR 0x00010000
-#define TMIO_STAT_CRCFAIL 0x00020000
-#define TMIO_STAT_STOPBIT_ERR 0x00040000
-#define TMIO_STAT_DATATIMEOUT 0x00080000
-#define TMIO_STAT_RXOVERFLOW 0x00100000
-#define TMIO_STAT_TXUNDERRUN 0x00200000
-#define TMIO_STAT_CMDTIMEOUT 0x00400000
-#define TMIO_STAT_RXRDY 0x01000000
-#define TMIO_STAT_TXRQ 0x02000000
-#define TMIO_STAT_ILL_FUNC 0x20000000
-#define TMIO_STAT_CMD_BUSY 0x40000000
-#define TMIO_STAT_ILL_ACCESS 0x80000000
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL 0x837f031d
-#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-
-#define enable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define disable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask |= ((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define ack_mmc_irqs(host, i) \
- do { \
- sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
- } while (0)
-
-
-struct tmio_mmc_host {
- void __iomem *ctl;
- unsigned long bus_shift;
- struct mmc_command *cmd;
- struct mmc_request *mrq;
- struct mmc_data *data;
- struct mmc_host *mmc;
- int irq;
-
- /* Callbacks for clock / power control */
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-
- /* pio related stuff */
- struct scatterlist *sg_ptr;
- unsigned int sg_len;
- unsigned int sg_off;
-
- struct platform_device *pdev;
-
- /* DMA support */
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct tasklet_struct dma_complete;
- struct tasklet_struct dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
- struct dma_async_tx_descriptor *desc;
- unsigned int dma_sglen;
- dma_cookie_t cookie;
-#endif
-};
-
-#include <linux/io.h>
-
-static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
- u16 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
- u32 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-#include <linux/scatterlist.h>
-#include <linux/blkdev.h>
-
-static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
- host->sg_len = data->sg_len;
- host->sg_ptr = data->sg;
- host->sg_off = 0;
-}
-
-static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
- host->sg_ptr = sg_next(host->sg_ptr);
- host->sg_off = 0;
- return --host->sg_len;
-}
-
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
- unsigned long *flags)
-{
- local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(void *virt,
- unsigned long *flags)
-{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
- local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a) \
- do { \
- if (status & TMIO_STAT_##a) \
- printk(#a); \
- } while (0)
-
-void pr_debug_status(u32 status)
-{
- printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE);
- STATUS_TO_TEXT(CARD_INSERT);
- STATUS_TO_TEXT(SIGSTATE);
- STATUS_TO_TEXT(WRPROTECT);
- STATUS_TO_TEXT(CARD_REMOVE_A);
- STATUS_TO_TEXT(CARD_INSERT_A);
- STATUS_TO_TEXT(SIGSTATE_A);
- STATUS_TO_TEXT(CMD_IDX_ERR);
- STATUS_TO_TEXT(STOPBIT_ERR);
- STATUS_TO_TEXT(ILL_FUNC);
- STATUS_TO_TEXT(CMD_BUSY);
- STATUS_TO_TEXT(CMDRESPEND);
- STATUS_TO_TEXT(DATAEND);
- STATUS_TO_TEXT(CRCFAIL);
- STATUS_TO_TEXT(DATATIMEOUT);
- STATUS_TO_TEXT(CMDTIMEOUT);
- STATUS_TO_TEXT(RXOVERFLOW);
- STATUS_TO_TEXT(TXUNDERRUN);
- STATUS_TO_TEXT(RXRDY);
- STATUS_TO_TEXT(TXRQ);
- STATUS_TO_TEXT(ILL_ACCESS);
- printk("\n");
-}
-
-#else
-#define pr_debug_status(s) do { } while (0)
-#endif
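
The header block removed above is mostly register plumbing: the controller's 32-bit status and mask registers are accessed as two 16-bit halves spaced by bus_shift, and interrupts are enabled or disabled with a read-modify-write of CTL_IRQ_MASK (enable clears mask bits, disable sets them). As a reading aid, here is a small self-contained C model of that pattern against an in-memory register file; the offset and mask values are placeholders, not the real TMIO register map.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t iomem[256];              /* stand-in for the ioremapped controller */
static const unsigned bus_shift = 1;    /* 16-bit registers land 4 bytes apart */

#define CTL_IRQ_MASK   0x20             /* illustrative offset only */
#define TMIO_MASK_IRQ  0x037f031dU      /* illustrative union of IRQ bits */

static uint16_t read16(unsigned addr)
{
	uint16_t v;
	memcpy(&v, iomem + (addr << bus_shift), sizeof(v));
	return v;
}

static void write16(unsigned addr, uint16_t v)
{
	memcpy(iomem + (addr << bus_shift), &v, sizeof(v));
}

/* 32-bit access composed of two 16-bit halves, like sd_ctrl_read32/write32 */
static uint32_t read32(unsigned addr)
{
	return read16(addr) | (uint32_t)read16(addr + 2) << 16;
}

static void write32(unsigned addr, uint32_t v)
{
	write16(addr, v & 0xffff);
	write16(addr + 2, v >> 16);
}

/* enable = clear mask bits, disable = set mask bits (a masked IRQ is off) */
static void enable_irqs(uint32_t i)
{
	write32(CTL_IRQ_MASK, read32(CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ));
}

static void disable_irqs(uint32_t i)
{
	write32(CTL_IRQ_MASK, read32(CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ));
}

int main(void)
{
	write32(CTL_IRQ_MASK, 0xffffffff);      /* reset state: everything masked */
	enable_irqs(0x00000001 | 0x00000004);   /* unmask two status bits */
	printf("mask = %08x\n", (unsigned)read32(CTL_IRQ_MASK));
	disable_irqs(0x00000004);
	printf("mask = %08x\n", (unsigned)read32(CTL_IRQ_MASK));
	return 0;
}
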
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f8f65df9b017..f08f944ac53c 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/kernel.h>
-#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1e2cbf5d9aa1..77414702cb00 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@ config MTD_PARTITIONS
devices. Partitioning on NFTL 'devices' is a different - that's the
'normal' form of partitioning used on a block device.
+if MTD_PARTITIONS
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
- depends on MTD_PARTITIONS
---help---
RedBoot is a ROM monitor and bootloader which deals with multiple
'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@ config MTD_REDBOOT_PARTS
SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
example.
+if MTD_REDBOOT_PARTS
+
config MTD_REDBOOT_DIRECTORY_BLOCK
int "Location of RedBoot partition table"
- depends on MTD_REDBOOT_PARTS
default "-1"
---help---
This option is the Linux counterpart to the
@@ -91,18 +93,18 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
config MTD_REDBOOT_PARTS_UNALLOCATED
bool "Include unallocated flash regions"
- depends on MTD_REDBOOT_PARTS
help
If you need to register each unallocated flash region as a MTD
'partition', enable this option.
config MTD_REDBOOT_PARTS_READONLY
bool "Force read-only for RedBoot system images"
- depends on MTD_REDBOOT_PARTS
help
If you need to force read-only for 'RedBoot', 'RedBoot Config' and
'FIS directory' images, enable this option.
+endif # MTD_REDBOOT_PARTS
+
config MTD_CMDLINE_PARTS
bool "Command line partition table parsing"
depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@ config MTD_CMDLINE_PARTS
config MTD_AFS_PARTS
tristate "ARM Firmware Suite partition parsing"
- depends on ARM && MTD_PARTITIONS
+ depends on ARM
---help---
The ARM Firmware Suite allows the user to divide flash devices into
multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@ config MTD_AFS_PARTS
example.
config MTD_OF_PARTS
- tristate "Flash partition map based on OF description"
- depends on (MICROBLAZE || PPC_OF) && MTD_PARTITIONS
+ def_bool y
+ depends on OF
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
@@ -167,10 +169,11 @@ config MTD_OF_PARTS
config MTD_AR7_PARTS
tristate "TI AR7 partitioning support"
- depends on MTD_PARTITIONS
---help---
TI AR7 partitioning support
+endif # MTD_PARTITIONS
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc533395..d4e7f25b1ebb 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o
mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
-obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b44416..a8c3e1c9b02a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
-static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@ static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
-static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
+static void fixup_intel_strataflash(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
-static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
+static void fixup_no_write_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@ static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
}
#endif
-static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
}
-static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
+static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
-static void fixup_use_point(struct mtd_info *mtd, void *param)
+static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@ static void fixup_use_point(struct mtd_info *mtd, void *param)
}
}
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
/*
* Some chips power-up with all sectors locked by default.
*/
-static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
}
static struct cfi_fixup cfi_fixup_table[] = {
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
- { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
- { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
+ { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
+ { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
- { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
- { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
- { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
+ { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
+ { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
+ { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
+ { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
/* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@ static struct cfi_fixup fixup_table[] = {
* well. This table is to pick all cases where
* we know that is the case.
*/
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
+ { 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d87977..f072fcfde04e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
-static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
+static void fixup_amd_bootblock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
}
#endif
-static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
}
/* Atmel chips don't use the same PRI format as AMD chips */
-static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
-static void fixup_use_secsi(struct mtd_info *mtd, void *param)
+static void fixup_use_secsi(struct mtd_info *mtd)
{
/* Setup for chips with a secsi area */
mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
-static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
+static void fixup_use_erase_chip(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
* Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
* locked by default.
*/
-static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@ static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
cfi->cfiq->NumEraseRegions = 1;
}
-static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_sst39vf(struct mtd_info *mtd, void *param)
cfi->addr_unlock2 = 0x2AAA;
}
-static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
cfi->sector_erase_cmd = CMD(0x50);
}
-static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
+static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- fixup_sst39vf_rev_b(mtd, param);
+ fixup_sst39vf_rev_b(mtd);
/*
* CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
-static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
}
}
-static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
+static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
- { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */
- { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */
- { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */
- { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */
- { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */
- { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */
- { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */
- { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */
- { 0, 0, NULL, NULL }
+ { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
+ { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
+ { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
+ { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
+ { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
+ { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
+ { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
+ { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
+ { 0, 0, NULL }
};
static struct cfi_fixup cfi_fixup_table[] = {
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
- { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
- { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
- { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
- { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
- { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */
- { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */
- { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */
- { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */
+ { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
+ { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
+ { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
+ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
+ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
+ { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
- { 0, 0, NULL, NULL }
+ { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
+ { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@ static struct cfi_fixup fixup_table[] = {
* well. This table is to pick all cases where
* we know that is the case.
*/
- { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
- { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
- { 0, 0, NULL, NULL }
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
+ { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
+ { 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_amdstd *extp)
{
- if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
- extp->MajorVersion == '0')
- extp->MajorVersion = '1';
+ if (cfi->mfr == CFI_MFR_SAMSUNG) {
+ if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ /*
+ * Samsung K8P2815UQB and K8D6x16UxM chips
+ * report major=0 / minor=0.
+ * K8D3x16UxC chips report major=3 / minor=3.
+ */
+ printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
+ " Extended Query version to 1.%c\n",
+ extp->MinorVersion);
+ extp->MajorVersion = '1';
+ }
+ }
+
/*
* SST 38VF640x chips report major=0xFF / minor=0xFF.
*/
@@ -428,6 +440,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
+ __func__, mtd->writebufsize);
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
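
The cfi_fixup_major_minor() change above deals with extended-query version numbers that are stored as two ASCII characters and that some Samsung parts report incorrectly ('0'/'0' or '3'/'3'); the driver rewrites the major digit to '1'. A compilable toy version of just that logic, with an invented struct and a plain flag instead of the real cfi_private data:

#include <stdio.h>

struct ext_query {
	char MajorVersion;
	char MinorVersion;
};

static void fixup_major_minor(int is_samsung, struct ext_query *extp)
{
	if (is_samsung &&
	    ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
	     (extp->MajorVersion == '3' && extp->MinorVersion == '3'))) {
		printf("fixing bogus version %c.%c -> 1.%c\n",
		       extp->MajorVersion, extp->MinorVersion,
		       extp->MinorVersion);
		extp->MajorVersion = '1';
	}
}

int main(void)
{
	struct ext_query q = { '3', '3' };   /* e.g. a chip reporting 3.3 */

	fixup_major_minor(1, &q);
	printf("now %c.%c\n", q.MajorVersion, q.MinorVersion);
	return 0;
}
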
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f5a370..c04b7658abe9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+ mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
map->fldrv = &cfi_staa_chipdrv;
__module_get(THIS_MODULE);
mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c637d2..6ae3d111e1e7 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
for (f=fixups; f->fixup; f++) {
if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
- f->fixup(mtd, f->param);
+ f->fixup(mtd);
}
}
}
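
The cfi_util.c hunk is the other half of the fixup-signature cleanup seen in the cmdset files: each table entry now carries only a manufacturer ID, a device ID and a handler taking the MTD device, the unused void *param having been dropped. A stand-alone sketch of that dispatch pattern (struct names, IDs and handlers below are invented, not from <linux/mtd/cfi.h>):

#include <stdio.h>

#define MFR_ANY 0xffff
#define ID_ANY  0xffff

struct fake_mtd { unsigned mfr, id; const char *name; };

struct fixup {
	unsigned mfr, id;
	void (*fixup)(struct fake_mtd *mtd);   /* no void *param any more */
};

static void fixup_use_write_buffers(struct fake_mtd *mtd)
{
	printf("%s: enabling buffered writes\n", mtd->name);
}

static void fixup_quirky_chip(struct fake_mtd *mtd)
{
	printf("%s: applying vendor quirk\n", mtd->name);
}

static const struct fixup fixup_table[] = {
	{ MFR_ANY, ID_ANY, fixup_use_write_buffers },
	{ 0x00c2,  0x2019, fixup_quirky_chip },
	{ 0, 0, NULL }                          /* sentinel, as in the driver */
};

static void apply_fixups(struct fake_mtd *mtd, const struct fixup *f)
{
	for (; f->fixup; f++)
		if ((f->mfr == MFR_ANY || f->mfr == mtd->mfr) &&
		    (f->id == ID_ANY || f->id == mtd->id))
			f->fixup(mtd);
}

int main(void)
{
	struct fake_mtd chip = { 0x00c2, 0x2019, "nor0" };

	apply_fixups(&chip, fixup_table);
	return 0;
}
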
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d18064977192..5e3cc80128aa 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
+static void fixup_use_fwh_lock(struct mtd_info *mtd)
{
printk(KERN_NOTICE "using fwh lock/unlock method\n");
/* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 2cf0cc6a4189..f29a6f9df6e7 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
- close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
kfree(dev);
@@ -234,6 +234,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -246,7 +247,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
return NULL;
/* Get a handle on the device */
- bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
+ bdev = blkdev_get_by_path(devname, mode, dev);
#ifndef MODULE
if (IS_ERR(bdev)) {
@@ -254,9 +255,8 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
to resolve the device name by other means. */
dev_t devt = name_to_dev_t(devname);
- if (devt) {
- bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
- }
+ if (devt)
+ bdev = blkdev_get_by_dev(devt, mode, dev);
}
#endif
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002209bd..e4eba6cc1b2e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
#define OPCODE_WRDI 0x04 /* Write disable */
#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
+/* Used for Macronix flashes only. */
+#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
+#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -62,7 +66,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define MAX_CMD_SIZE 4
+#define MAX_CMD_SIZE 5
#ifdef CONFIG_M25PXX_USE_FAST_READ
#define OPCODE_READ OPCODE_FAST_READ
@@ -152,6 +156,16 @@ static inline int write_disable(struct m25p *flash)
}
/*
+ * Enable/disable 4-byte addressing mode.
+ */
+static inline int set_4byte(struct m25p *flash, int enable)
+{
+ u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
+
+ return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
+
+/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
*/
@@ -207,6 +221,7 @@ static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
cmd[1] = addr >> (flash->addr_width * 8 - 8);
cmd[2] = addr >> (flash->addr_width * 8 - 16);
cmd[3] = addr >> (flash->addr_width * 8 - 24);
+ cmd[4] = addr >> (flash->addr_width * 8 - 32);
}
static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t actual;
int cmd_sz, ret;
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+ dev_name(&flash->spi->dev), __func__, "to",
+ (u32)to, len);
+
*retlen = 0;
/* sanity checks */
@@ -607,7 +626,6 @@ struct flash_info {
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .addr_width = 3, \
.flags = (_flags), \
})
@@ -635,7 +653,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
/* EON -- en25pxx */
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
@@ -653,6 +671,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
return &m25p_ids[tmp];
}
}
+ dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return ERR_PTR(-ENODEV);
}
@@ -883,7 +904,17 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
- flash->addr_width = info->addr_width;
+
+ if (info->addr_width)
+ flash->addr_width = info->addr_width;
+ else {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ if (flash->mtd.size > 0x1000000) {
+ flash->addr_width = 4;
+ set_4byte(flash, 1);
+ } else
+ flash->addr_width = 3;
+ }
dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
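
The m25p80 changes above teach the driver about parts larger than 16 MiB: such chips are switched into 4-byte address mode (OPCODE_EN4B/OPCODE_EX4B) and every command gains a fourth address byte, which is why MAX_CMD_SIZE grows from 4 to 5. The sketch below builds the opcode-plus-address buffer with a loop instead of the driver's unrolled m25p_addr2cmd(); everything else about it is a userspace toy.

#include <stdint.h>
#include <stdio.h>

#define OPCODE_READ   0x03
#define MAX_CMD_SIZE  5          /* opcode + up to 4 address bytes */

static unsigned build_read_cmd(unsigned addr_width, uint32_t addr,
			       uint8_t cmd[MAX_CMD_SIZE])
{
	unsigned i;

	cmd[0] = OPCODE_READ;
	/* address bytes, most significant first */
	for (i = 0; i < addr_width; i++)
		cmd[1 + i] = addr >> (8 * (addr_width - 1 - i));
	return 1 + addr_width;            /* same idea as m25p_cmdsz() */
}

int main(void)
{
	uint8_t cmd[MAX_CMD_SIZE];
	unsigned n, i;

	/* a 32 MiB part crosses the 16 MiB boundary, so it runs in
	 * 4-byte mode and every read command is 5 bytes long */
	n = build_read_cmd(4, 0x01234567, cmd);
	for (i = 0; i < n; i++)
		printf("%02x ", cmd[i]);
	printf("\n");

	n = build_read_cmd(3, 0x00123456, cmd);   /* classic <= 16 MiB part */
	for (i = 0; i < n; i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}
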
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a8a5ed..c163e619abc9 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@ out:
return ret;
}
-static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
+static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -375,7 +375,7 @@ static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __init sst25l_probe(struct spi_device *spi)
+static int __devinit sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a0dd7bba9481..5d37d315fa98 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -72,7 +72,7 @@ config MTD_PHYSMAP_BANKWIDTH
config MTD_PHYSMAP_OF
tristate "Flash device in physical memory map based on OF description"
- depends on (MICROBLAZE || PPC_OF) && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
+ depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
help
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92db0c46..77d64ce19e9f 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.16llx-0x%.16llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c120ee84..1f3049590d9e 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@ static int bcm963xx_probe(struct platform_device *pdev)
bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
if (!bcm963xx_mtd_info) {
dev_err(&pdev->dev, "failed to probe using CFI\n");
+ bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
+ if (bcm963xx_mtd_info)
+ goto probe_ok;
+ dev_err(&pdev->dev, "failed to probe using JEDEC\n");
err = -EIO;
goto err_probe;
}
+probe_ok:
bcm963xx_mtd_info->owner = THIS_MODULE;
/* This is mutually exclusive */
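
The bcm963xx-flash hunk adds a second detection attempt: if CFI probing fails, the driver retries with JEDEC probing before reporting -EIO. A stripped-down model of that fallback control flow, with stub probe functions standing in for do_map_probe():

#include <stdio.h>

struct mtd { const char *how; };

static struct mtd *probe_cfi(void)   { return NULL; }   /* pretend CFI fails */
static struct mtd *probe_jedec(void) { static struct mtd m = { "jedec" }; return &m; }

static int flash_probe(void)
{
	struct mtd *mtd = probe_cfi();

	if (!mtd) {
		fprintf(stderr, "CFI probe failed, falling back to JEDEC\n");
		mtd = probe_jedec();
		if (!mtd)
			return -1;              /* neither method worked */
	}
	printf("flash detected via %s\n", mtd->how);
	return 0;
}

int main(void)
{
	return flash_probe() ? 1 : 0;
}
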
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462bea9b5..5fdb7b26cea3 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
- " %s(): Unable to register resource"
- " 0x%.016llx-0x%.016llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ " %s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93dc1aad..4feb7507ab7c 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
- printk(KERN_DEBUG MOD_NAME
- ": %s(): Unable to register resource"
- " 0x%.08llx-0x%.08llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf243a74..1337a4191a0c 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
- printk(KERN_DEBUG MOD_NAME
- ": %s(): Unable to register resource"
- " 0x%.16llx-0x%.16llx - kernel bug?\n",
- __func__,
- (unsigned long long)window->rsrc.start,
- (unsigned long long)window->rsrc.end);
+ printk(KERN_DEBUG MOD_NAME ": "
+ "%s(): Unable to register resource %pR - kernel bug?\n",
+ __func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814aa027..8506578e6a35 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
continue;
}
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
err = -EBUSY;
res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391ebb736e..027e628a4f1d 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@ static int __init init_scx200_docflash(void)
outl(pmr, scx200_cb_base + SCx200_PMR);
}
- printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n",
- (unsigned long long)docmem.start,
- (unsigned long long)docmem.end, width);
+ printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
+ &docmem, width);
scx200_docflash_map.size = size;
if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 60146984f4be..c08e140d40ed 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@ static int __init init_tqm_mtd(void)
goto error_mem;
}
- map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+ map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
if (!map_banks[idx]->name) {
ret = -ENOMEM;
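
The tqm8xxl hunk only drops a redundant cast: kmalloc() returns void *, which converts implicitly to any object pointer in C, so the cast adds noise and can hide a missing prototype. The same rule in plain userspace C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *name = malloc(16);     /* no cast needed, same as the kernel change */

	if (!name)
		return 1;
	strcpy(name, "bank0");
	printf("%s\n", name);
	free(name);
	return 0;
}
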
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f511dd15fd31..145b3d0dc0db 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Only master mtd device must be used to control partitions */
- if (!mtd_is_master(mtd))
- return -EINVAL;
-
if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
return -EFAULT;
@@ -535,6 +531,10 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
switch (a.op) {
case BLKPG_ADD_PARTITION:
+ /* Only master mtd device must be used to add partitions */
+ if (mtd_is_partition(mtd))
+ return -EINVAL;
+
return mtd_add_partition(mtd, p.devname, p.start, p.length);
case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
}
case MEMGETINFO:
+ memset(&info, 0, sizeof(info));
info.type = mtd->type;
info.flags = mtd->flags;
info.size = mtd->size;
@@ -609,7 +610,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
info.oobsize = mtd->oobsize;
/* The below fields are obsolete */
info.ecctype = -1;
- info.eccsize = 0;
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
@@ -1134,7 +1134,7 @@ static const struct file_operations mtd_fops = {
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
+ return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
@@ -1201,7 +1201,7 @@ err_unregister_chdev:
static void __exit cleanup_mtdchar(void)
{
unregister_mtd_user(&mtdchar_notifier);
- mntput_long(mtd_inode_mnt);
+ mntput(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
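
The MEMGETINFO change above zeroes the whole structure before filling it in because the ioctl copies it to user space in one piece; any field or padding byte left unassigned would otherwise carry indeterminate kernel stack bytes, and the explicit eccsize = 0 becomes redundant. A toy illustration with an invented struct (the real layout is struct mtd_info_user in include/mtd/mtd-abi.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct info_user {
	uint8_t  type;
	/* implicit padding bytes usually live here */
	uint32_t size;
	uint32_t eccsize;   /* obsolete field, mirrors the one dropped above */
};

static void fill_info(struct info_user *info)
{
	memset(info, 0, sizeof(*info));      /* padding and unused fields -> 0 */
	info->type = 3;
	info->size = 64u * 1024 * 1024;
	/* no explicit info->eccsize = 0 needed any more, memset covered it */
}

int main(void)
{
	struct info_user info;

	fill_info(&info);
	printf("type=%u size=%u eccsize=%u\n",
	       (unsigned)info.type, (unsigned)info.size, (unsigned)info.eccsize);
	return 0;
}
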
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de0943103..5f5777bd3f75 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
concat->mtd.writesize = subdev[0]->writesize;
+ concat->mtd.writebufsize = subdev[0]->writebufsize;
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1ee72f3f0512..e3e40f440323 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -307,6 +307,11 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
unsigned long l1_cpy, l2_cpy;
char *dst;
+ if (reason != KMSG_DUMP_OOPS &&
+ reason != KMSG_DUMP_PANIC &&
+ reason != KMSG_DUMP_KEXEC)
+ return;
+
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
@@ -396,7 +401,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_scheduled_work();
+ flush_work_sync(&cxt->work_erase);
+ flush_work_sync(&cxt->work_write);
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689f1e16..0a4760174782 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
if (ops->datbuf && from + ops->len > mtd->size)
return -EINVAL;
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ /*
+ * If OOB is also requested, make sure that we do not read past the end
+ * of this partition.
+ */
+ if (ops->oobbuf) {
+ size_t len, pages;
+
+ if (ops->mode == MTD_OOB_AUTO)
+ len = mtd->oobavail;
+ else
+ len = mtd->oobsize;
+ pages = mtd_div_by_ws(mtd->size, mtd);
+ pages -= mtd_div_by_ws(from, mtd);
+ if (ops->ooboffs + ops->ooblen > pages * len)
+ return -EINVAL;
+ }
+
+ res = part->master->read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (res == -EUCLEAN)
mtd->ecc_stats.corrected++;
@@ -384,6 +401,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.flags = master->flags & ~part->mask_flags;
slave->mtd.size = part->size;
slave->mtd.writesize = master->writesize;
+ slave->mtd.writebufsize = master->writebufsize;
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
-int mtd_is_master(struct mtd_info *mtd)
+int mtd_is_partition(struct mtd_info *mtd)
{
struct mtd_part *part;
- int nopart = 0;
+ int ispart = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(part, &mtd_partitions, list)
if (&part->mtd == mtd) {
- nopart = 1;
+ ispart = 1;
break;
}
mutex_unlock(&mtd_partitions_mutex);
- return nopart;
+ return ispart;
}
-EXPORT_SYMBOL_GPL(mtd_is_master);
+EXPORT_SYMBOL_GPL(mtd_is_partition);
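
The part_read_oob() hunk adds a bounds check so an OOB read cannot run past the end of a partition: count the pages between 'from' and the partition end, multiply by the per-page OOB space, and reject requests larger than that. The arithmetic for the page-aligned case, with made-up geometry (the kernel uses mtd_div_by_ws() and picks oobavail or oobsize depending on the OOB mode):

#include <stdint.h>
#include <stdio.h>

struct part {
	uint64_t size;        /* partition size in bytes */
	uint32_t writesize;   /* page size */
	uint32_t oobsize;     /* OOB bytes available per page */
};

static int oob_read_ok(const struct part *p, uint64_t from,
		       uint32_t ooboffs, uint32_t ooblen)
{
	uint64_t pages = (p->size - from) / p->writesize;

	return (uint64_t)ooboffs + ooblen <= pages * p->oobsize;
}

int main(void)
{
	struct part p = { 4 * 1024 * 1024, 2048, 64 };

	/* last page of the partition: 64 OOB bytes fit, 65 do not */
	printf("%d\n", oob_read_ok(&p, p.size - 2048, 0, 64));
	printf("%d\n", oob_read_ok(&p, p.size - 2048, 0, 65));
	return 0;
}
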
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802b4346..c89592239bc7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@ config MTD_NAND_SPIA
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
depends on MACH_AMS_DELTA
+ default y
help
Support for NAND flash on Amstrad E3 (Delta).
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e1065bf8..a067d090cb31 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
* Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
*
* Derived from drivers/mtd/toto.c
+ * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ * Partially stolen from drivers/mtd/nand/plat_nand.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@ static struct mtd_partition partition_info[] = {
static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
- omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
- omap_writew(byte, this->IO_ADDR_W);
+ writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+ writew(byte, this->IO_ADDR_W);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
ndelay(40);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
{
u_char res;
struct nand_chip *this = mtd->priv;
+ void __iomem *io_base = this->priv;
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
ndelay(40);
- omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
- res = omap_readw(this->IO_ADDR_R);
+ writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+ res = readw(this->IO_ADDR_R);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
AMS_DELTA_LATCH2_NAND_NRE);
@@ -151,11 +155,16 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
/*
* Main initialization routine
*/
-static int __init ams_delta_init(void)
+static int __devinit ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *io_base;
int err = 0;
+ if (!res)
+ return -ENXIO;
+
/* Allocate memory for MTD device structure and private data */
ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@ static int __init ams_delta_init(void)
/* Link the private data with the MTD structure */
ams_delta_mtd->priv = this;
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
+ if (io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_release_io;
+ }
+
+ this->priv = io_base;
+
/* Set address of NAND IO lines */
- this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH);
- this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT);
+ this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+ this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
this->read_byte = ams_delta_read_byte;
this->write_buf = ams_delta_write_buf;
this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@ static int __init ams_delta_init(void)
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
+ platform_set_drvdata(pdev, io_base);
+
/* Set chip enabled, but */
ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@ static int __init ams_delta_init(void)
goto out;
out_mtd:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(io_base);
+out_release_io:
+ release_mem_region(res->start, resource_size(res));
+out_free:
kfree(ams_delta_mtd);
out:
return err;
}
-module_init(ams_delta_init);
-
/*
* Clean up routine
*/
-static void __exit ams_delta_cleanup(void)
+static int __devexit ams_delta_cleanup(struct platform_device *pdev)
{
+ void __iomem *io_base = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* Release resources, unregister device */
nand_release(ams_delta_mtd);
+ iounmap(io_base);
+ release_mem_region(res->start, resource_size(res));
+
/* Free the MTD device structure */
kfree(ams_delta_mtd);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_nand_driver = {
+ .probe = ams_delta_init,
+ .remove = __devexit_p(ams_delta_cleanup),
+ .driver = {
+ .name = "ams-delta-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ams_delta_nand_init(void)
+{
+ return platform_driver_register(&ams_delta_nand_driver);
+}
+module_init(ams_delta_nand_init);
+
+static void __exit ams_delta_nand_exit(void)
+{
+ platform_driver_unregister(&ams_delta_nand_driver);
}
-module_exit(ams_delta_cleanup);
+module_exit(ams_delta_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07b25d1..7a13d42cbabd 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
"page_addr: 0x%x, column: 0x%x.\n",
page_addr, column);
+ elbc_fcm_ctrl->column = column;
+ elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba25b0c..205b10b9f9b9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
}
};
-/*
- * Default partition tables to be used if the partition information not
- * provided through platform data
- */
-#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
+#ifdef CONFIG_MTD_PARTITIONS
/*
+ * Default partition tables to be used if the partition information not
+ * provided through platform data.
+ *
* Default partition layout for small page(= 512 bytes) devices
* Size for "Root file system" is updated in driver based on actual device size
*/
static struct mtd_partition partition_info_16KB_blk[] = {
- PARTITION("X-loader", 0, 4 * 0x4000),
- PARTITION("U-Boot", 0x10000, 20 * 0x4000),
- PARTITION("Kernel", 0x60000, 256 * 0x4000),
- PARTITION("Root File System", 0x460000, 0),
+ {
+ .name = "X-loader",
+ .offset = 0,
+ .size = 4*0x4000,
+ },
+ {
+ .name = "U-Boot",
+ .offset = 0x10000,
+ .size = 20*0x4000,
+ },
+ {
+ .name = "Kernel",
+ .offset = 0x60000,
+ .size = 256*0x4000,
+ },
+ {
+ .name = "Root File System",
+ .offset = 0x460000,
+ .size = 0,
+ },
};
/*
@@ -141,19 +157,37 @@ static struct mtd_partition partition_info_16KB_blk[] = {
* Size for "Root file system" is updated in driver based on actual device size
*/
static struct mtd_partition partition_info_128KB_blk[] = {
- PARTITION("X-loader", 0, 4 * 0x20000),
- PARTITION("U-Boot", 0x80000, 12 * 0x20000),
- PARTITION("Kernel", 0x200000, 48 * 0x20000),
- PARTITION("Root File System", 0x800000, 0),
+ {
+ .name = "X-loader",
+ .offset = 0,
+ .size = 4*0x20000,
+ },
+ {
+ .name = "U-Boot",
+ .offset = 0x80000,
+ .size = 12*0x20000,
+ },
+ {
+ .name = "Kernel",
+ .offset = 0x200000,
+ .size = 48*0x20000,
+ },
+ {
+ .name = "Root File System",
+ .offset = 0x800000,
+ .size = 0,
+ },
};
#ifdef CONFIG_MTD_CMDLINE_PARTS
const char *part_probes[] = { "cmdlinepart", NULL };
#endif
+#endif
/**
- * struct fsmc_nand_data - atructure for FSMC NAND device state
+ * struct fsmc_nand_data - structure for FSMC NAND device state
*
+ * @pid: Part ID on the AMBA PrimeCell format
* @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
* @partitions: Partition info for a NAND Flash.
@@ -169,6 +203,7 @@ const char *part_probes[] = { "cmdlinepart", NULL };
* @regs_va: FSMC regs base address.
*/
struct fsmc_nand_data {
+ u32 pid;
struct mtd_info mtd;
struct nand_chip nand;
struct mtd_partition *partitions;
@@ -508,7 +543,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
struct nand_chip *nand;
struct fsmc_regs *regs;
struct resource *res;
- int nr_parts, ret = 0;
+ int ret = 0;
+ u32 pid;
+ int i;
if (!pdata) {
dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (ret)
goto err_probe1;
+ /*
+ * This device ID is actually a common AMBA ID as used on the
+ * AMBA PrimeCell bus. However it is not a PrimeCell.
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ host->pid = pid;
+ dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+ "revision %02x, config %02x\n",
+ AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+ AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
host->bank = pdata->bank;
host->select_chip = pdata->select_bank;
regs = host->regs_va;
@@ -625,7 +674,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
- if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
nand->ecc.calculate = fsmc_read_hwecc_ecc4;
nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
goto err_probe;
}
- if (get_fsmc_version(host->regs_va) == FSMC_VER8) {
+ if (AMBA_REV_BITS(host->pid) >= 8) {
if (host->mtd.writesize == 512) {
nand->ecc.layout = &fsmc_ecc4_sp_layout;
host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Check if partition info passed via command line
*/
host->mtd.name = "nand";
- nr_parts = parse_mtd_partitions(&host->mtd, part_probes,
+ host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
&host->partitions, 0);
- if (nr_parts > 0) {
- host->nr_partitions = nr_parts;
- } else {
+ if (host->nr_partitions <= 0) {
#endif
/*
* Check if partition info passed via command line
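
The fsmc_nand probe now identifies the block by reading four peripheral-ID registers at the top of its window and packing one byte from each into a 32-bit ID, then splitting it into part, manufacturer, revision and configuration fields in the usual PrimeCell layout. A self-contained model of that assembly; the register contents are invented and the authoritative field macros live in include/linux/amba/bus.h:

#include <stdio.h>

#define PART_BITS(p)   ((p) & 0xfff)
#define MANF_BITS(p)   (((p) >> 12) & 0xff)
#define REV_BITS(p)    (((p) >> 20) & 0x0f)
#define CONFIG_BITS(p) (((p) >> 24) & 0xff)

int main(void)
{
	/* pretend contents of the four ID registers (low byte of each) */
	const unsigned idreg[4] = { 0x40, 0x08, 0x14, 0x00 };
	unsigned pid = 0;
	int i;

	for (i = 0; i < 4; i++)
		pid |= (idreg[i] & 255) << (i * 8);

	printf("pid %08x: part %03x manf %02x rev %x config %02x\n",
	       pid, PART_BITS(pid), MANF_BITS(pid),
	       REV_BITS(pid), CONFIG_BITS(pid));
	return 0;
}
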
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc31bd5..cea38a5d4ac5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
return 0;
}
-
-/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
- * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
- * into the eccpos array. */
-static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- unsigned int ecc_offset = chip->page_shift;
-
- /* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
-
- stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
- if (stat < 0)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += stat;
- }
- return 0;
-}
-
-/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
-static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
-{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
- unsigned int ecc_offset = chip->page_shift;
-
- for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
- chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
- }
-
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
-
#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = {"cmdline", NULL};
#endif
@@ -393,9 +341,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.size = 512;
chip->ecc.bytes = 9;
- chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first;
- chip->ecc.write_page = jz_nand_write_page_hwecc;
-
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
@@ -489,7 +434,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz_nand_driver = {
+static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
.remove = __devexit_p(jz_nand_remove),
.driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03afdd48..ef932ba55a0b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- int err = 0, nr_parts = 0;
+ int err = 0, __maybe_unused nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1f75a1b1f7c3..a9c6ce745767 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -821,7 +821,7 @@ retry:
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
- * an oops trough mtdoops.
+ * an oops through mtdoops.
*/
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
unsigned long timeo)
@@ -2865,20 +2865,24 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
/* check version */
val = le16_to_cpu(p->revision);
- if (val == 1 || val > (1 << 4)) {
- printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
- __func__, val);
- return 0;
- }
-
- if (val & (1 << 4))
+ if (val & (1 << 5))
+ chip->onfi_version = 23;
+ else if (val & (1 << 4))
chip->onfi_version = 22;
else if (val & (1 << 3))
chip->onfi_version = 21;
else if (val & (1 << 2))
chip->onfi_version = 20;
- else
+ else if (val & (1 << 1))
chip->onfi_version = 10;
+ else
+ chip->onfi_version = 0;
+
+ if (!chip->onfi_version) {
+ printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
+ __func__, val);
+ return 0;
+ }
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
@@ -2887,7 +2891,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
mtd->writesize = le32_to_cpu(p->byte_per_page);
mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
+ chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
busw = 0;
if (le16_to_cpu(p->features) & 1)
busw = NAND_BUSWIDTH_16;
@@ -3157,7 +3161,7 @@ ident_done:
printk(KERN_INFO "NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? type->name : chip->onfi_params.model);
+ chip->onfi_version ? chip->onfi_params.model : type->name);
return type;
}
@@ -3435,6 +3439,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->resume = nand_resume;
mtd->block_isbad = nand_block_isbad;
mtd->block_markbad = nand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
/* propagate ecc.layout to mtd_info */
mtd->ecclayout = chip->ecc.layout;
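
Two details from the nand_base hunks, modelled in plain C: the ONFI parameter-page revision word is a bit field (bit 1 marks 1.0 support, bits 2-5 mark 2.0, 2.1, 2.2 and 2.3), and the chip size must be computed in 64 bits because blocks_per_lun * erasesize overflows 32 bits on multi-gigabyte parts. The values in main() are examples only:

#include <stdint.h>
#include <stdio.h>

static int onfi_version(uint16_t val)
{
	if (val & (1 << 5))
		return 23;
	if (val & (1 << 4))
		return 22;
	if (val & (1 << 3))
		return 21;
	if (val & (1 << 2))
		return 20;
	if (val & (1 << 1))
		return 10;
	return 0;               /* unknown/unsupported revision word */
}

int main(void)
{
	uint32_t blocks_per_lun = 8192;
	uint32_t erasesize = 1024 * 1024;          /* 1 MiB erase blocks */
	uint64_t chipsize = (uint64_t)blocks_per_lun * erasesize;

	printf("ONFI 0x%04x -> version %d\n", 0x0002u, onfi_version(0x0002));
	printf("ONFI 0x%04x -> version %d\n", 0x003eu, onfi_version(0x003e));
	printf("chipsize = %llu bytes\n", (unsigned long long)chipsize);
	return 0;
}
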
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981f0e61..6ebd869993aa 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* verify_bbt_descr - verify the bad block description
- * @bd: the table to verify
+ * @mtd: MTD device structure
+ * @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aab1253..a5aa99f014ba 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG 0x00000004 /* start page programm */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
-#define STATE_CMD_SEQIN 0x00000009 /* sequential data imput */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
#define STATE_CMD_RESET 0x0000000C /* reset */
@@ -230,7 +230,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
#define STATE_ADDR_MASK 0x00000070 /* address states mask */
-/* Durind data input/output the simulator is in these states */
+/* During data input/output the simulator is in these states */
#define STATE_DATAIN 0x00000100 /* waiting for data input */
#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
@@ -248,7 +248,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
/* Simulator's actions bit masks */
#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE 0x00200000 /* programm the internal buffer to flash */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
#define ACTION_SECERASE 0x00300000 /* erase sector */
#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR 0x00000020 /* page number auto inctimentation is possible */
+#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
-/* Remove action bits ftom state */
+/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
/*
* Maximum previous states which need to be saved. Currently saving is
- * only needed for page programm operation with preceeded read command
+ * only needed for page program operation with preceded read command
* (which is only valid for 512-byte pages).
*/
#define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@ static struct nandsim_operations {
/* Read OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
- /* Programm page starting from the beginning */
+ /* Program page starting from the beginning */
{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm page starting from the beginning */
+ /* Program page starting from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm page starting from the second half */
+ /* Program page starting from the second half */
{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
- /* Programm OOB */
+ /* Program OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Erase sector */
@@ -470,7 +470,7 @@ static int alloc_device(struct nandsim *ns)
err = -EINVAL;
goto err_close;
}
- ns->pages_written = vmalloc(ns->geom.pgnum);
+ ns->pages_written = vzalloc(ns->geom.pgnum);
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
@@ -483,7 +483,6 @@ static int alloc_device(struct nandsim *ns)
goto err_free;
}
ns->cfile = cfile;
- memset(ns->pages_written, 0, ns->geom.pgnum);
return 0;
}
@@ -1171,9 +1170,9 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* of supported operations.
*
* Operation can be unknown because of the following.
- * 1. New command was accepted and this is the firs call to find the
+ * 1. New command was accepted and this is the first call to find the
* correspondent states chain. In this case ns->npstates = 0;
- * 2. There is several operations which begin with the same command(s)
+ * 2. There are several operations which begin with the same command(s)
* (for example program from the second half and read from the
* second half operations both begin with the READ1 command). In this
* case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
* zeroed).
*
- * If there are several maches, the current state is pushed to the
+ * If there are several matches, the current state is pushed to the
* ns->pstates.
*
* The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* operation is searched using the following pattern:
* ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
*
- * It is supposed that this pattern must either match one operation on
+ * It is supposed that this pattern must either match one operation or
* none. There can't be ambiguity in that case.
*
- * If no matches found, the functions does the following:
+ * If no matches found, the function does the following:
* 1. if there are saved states present, try to ignore them and search
* again only using the last command. If nothing was found, switch
* to the STATE_READY state.
@@ -1668,7 +1667,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
case ACTION_PRGPAGE:
/*
- * Programm page - move internal buffer data to the page.
+ * Program page - move internal buffer data to the page.
*/
if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
NS_DBG("read_byte: all bytes were read\n");
/*
- * The OPT_AUTOINCR allows to read next conseqitive pages without
+ * The OPT_AUTOINCR allows to read next consecutive pages without
* new read operation cycle.
*/
if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb2461d740..bb277a54986f 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
if (pasemi_nand_mtd)
return -ENODEV;
- pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);
+ pr_debug("pasemi_nand at %pR\n", &res);
/* Allocate memory for MTD device structure and private data */
pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518cc5eb..ea2c288df3f6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
/* set info fields needed to __readid */
info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
info->reg_ndcr = ndcr;
+ info->cmdset = &default_cmdset;
if (__readid(info, &id))
return -ENODEV;
@@ -915,7 +916,6 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
- info->cmdset = &default_cmdset;
return 0;
}
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
init_completion(&dev->dma_done);
- dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c0ef4a..ca270a4881a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
- chip->ecc.size = mtd->writesize;
- chip->ecc.bytes = 3 * (mtd->writesize / 256);
+ /* Hardware ECC: 6 ECC bytes per 512 bytes of data */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 6;
}
ret = nand_scan_tail(mtd);
}
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7798b..ac31f461cc1c 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <asm/mach/flash.h>
#include <plat/gpmc.h>
@@ -63,8 +64,13 @@ struct omap2_onenand {
int dma_channel;
int freq;
int (*setup)(void __iomem *base, int freq);
+ struct regulator *regulator;
};
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@ static void wait_warn(char *msg, int state, unsigned int ctrl,
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ struct onenand_chip *this = mtd->priv;
unsigned int intr = 0;
- unsigned int ctrl;
+ unsigned int ctrl, ctrl_mask;
unsigned long timeout;
u32 syscfg;
@@ -180,7 +187,8 @@ retry:
if (result == 0) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- if (ctrl & ONENAND_CTRL_ONGO) {
+ if (ctrl & ONENAND_CTRL_ONGO &&
+ !this->ongoing) {
/*
* The operation seems to be still going
* so give it some more time.
@@ -269,7 +277,11 @@ retry:
return -EIO;
}
- if (ctrl & 0xFE9F)
+ ctrl_mask = 0xFE9F;
+ if (this->ongoing)
+ ctrl_mask &= ~0x8000;
+
+ if (ctrl & ctrl_mask)
wait_warn("unexpected controller status", state, ctrl, intr);
return 0;
@@ -591,6 +603,30 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
+static int omap2_onenand_enable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_enable(c->regulator);
+ if (ret != 0)
+ dev_err(&c->pdev->dev, "can't enable regulator\n");
+
+ return ret;
+}
+
+static int omap2_onenand_disable(struct mtd_info *mtd)
+{
+ int ret;
+ struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+
+ ret = regulator_disable(c->regulator);
+ if (ret != 0)
+ dev_err(&c->pdev->dev, "can't disable regulator\n");
+
+ return ret;
+}
+
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
}
+ if (pdata->regulator_can_sleep) {
+ c->regulator = regulator_get(&pdev->dev, "vonenand");
+ if (IS_ERR(c->regulator)) {
+ dev_err(&pdev->dev, "Failed to get regulator\n");
+ goto err_release_dma;
+ }
+ c->onenand.enable = omap2_onenand_enable;
+ c->onenand.disable = omap2_onenand_disable;
+ }
+
if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_dma;
+ goto err_release_regulator;
switch ((c->onenand.version_id >> 4) & 0xf) {
case 0:
@@ -727,13 +773,15 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
- if (pdata->parts != NULL)
- r = add_mtd_partitions(&c->mtd, pdata->parts,
- pdata->nr_parts);
+ r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
+ if (r > 0)
+ r = add_mtd_partitions(&c->mtd, c->parts, r);
+ else if (pdata->parts != NULL)
+ r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
else
#endif
r = add_mtd_device(&c->mtd);
- if (r < 0)
+ if (r)
goto err_release_onenand;
platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
+err_release_regulator:
+ regulator_put(c->regulator);
err_release_dma:
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@ err_release_mem_region:
err_free_cs:
gpmc_cs_free(c->gpmc_cs);
err_kfree:
+ kfree(c->parts);
kfree(c);
return r;
@@ -766,18 +817,8 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
- BUG_ON(c == NULL);
-
-#ifdef CONFIG_MTD_PARTITIONS
- if (c->parts)
- del_mtd_partitions(&c->mtd);
- else
- del_mtd_device(&c->mtd);
-#else
- del_mtd_device(&c->mtd);
-#endif
-
onenand_release(&c->mtd);
+ regulator_put(c->regulator);
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
iounmap(c->onenand.base);
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
gpmc_cs_free(c->gpmc_cs);
+ kfree(c->parts);
kfree(c);
return 0;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875647c9..bac41caa8df7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
- ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -430,7 +429,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@@ -949,6 +948,8 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state)
if (this->state == FL_READY) {
this->state = new_state;
spin_unlock(&this->chip_lock);
+ if (new_state != FL_PM_SUSPENDED && this->enable)
+ this->enable(mtd);
break;
}
if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@ static void onenand_release_device(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
+ if (this->state != FL_PM_SUSPENDED && this->disable)
+ this->disable(mtd);
/* Release the chip */
spin_lock(&this->chip_lock);
this->state = FL_READY;
@@ -1353,7 +1356,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
@@ -1429,7 +1432,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@@ -1485,8 +1488,7 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
unsigned long timeout;
- unsigned int interrupt;
- unsigned int ctrl;
+ unsigned int interrupt, ctrl, ecc, addr1, addr8;
/* The 20 msec is enough */
timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
/* To get correct interrupt status in timeout case */
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
+ addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
if (interrupt & ONENAND_INT_READ) {
- int ecc = onenand_read_ecc(this);
+ ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
- printk(KERN_WARNING "%s: ecc error = 0x%04x, "
- "controller error 0x%04x\n",
- __func__, ecc, ctrl);
+ printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ecc, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
- printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
- __func__, ctrl, interrupt);
+ printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
+ "intr 0x%04x addr1 %#x addr8 %#x\n",
+ __func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_FATAL_ERROR;
}
/* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
- __func__, ctrl);
+ printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
+ "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ERROR;
}
@@ -1558,7 +1563,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
@@ -1612,7 +1617,7 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
u_char *oob_buf = this->oob_buf;
int status, i, readcmd;
- readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+ readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
const u_char *buf = ops->datbuf;
const u_char *oob = ops->oobbuf;
u_char *oobbuf;
- int ret = 0;
+ int ret = 0, cmd;
DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
ONENAND_SET_NEXT_BUFFERRAM(this);
}
- this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+ this->ongoing = 0;
+ cmd = ONENAND_CMD_PROG;
+
+ /* Exclude 1st OTP and OTP blocks for cache program feature */
+ if (ONENAND_IS_CACHE_PROGRAM(this) &&
+ likely(onenand_block(this, to) != 0) &&
+ ONENAND_IS_4KB_PAGE(this) &&
+ ((written + thislen) < len)) {
+ cmd = ONENAND_CMD_2X_CACHE_PROG;
+ this->ongoing = 1;
+ }
+
+ this->command(mtd, cmd, to, mtd->writesize);
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
oobbuf = this->oob_buf;
- oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+ oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
/* Loop until all data write */
while (written < len) {
@@ -2086,7 +2103,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
+ if (ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
- if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+ if (ONENAND_IS_4KB_PAGE(this) || region ||
+ instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
/* region is set for Flex-OneNAND (no mb erase) */
ret = onenand_block_by_block_erase(mtd, instr,
region, block_size);
@@ -3029,7 +3047,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
+ ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@@ -3377,8 +3395,10 @@ static void onenand_check_features(struct mtd_info *mtd)
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
- else if (numbufs == 1)
+ else if (numbufs == 1) {
this->options |= ONENAND_HAS_4KB_PAGE;
+ this->options |= ONENAND_HAS_CACHE_PROGRAM;
+ }
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ /* The MLC has 4KiB pagesize. */
+ if (ONENAND_IS_MLC(this))
+ this->options |= ONENAND_HAS_4KB_PAGE;
+
+ if (ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip has 2 plane\n");
if (this->options & ONENAND_HAS_4KB_PAGE)
printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
+ if (this->options & ONENAND_HAS_CACHE_PROGRAM)
+ printk(KERN_DEBUG "Chip has cache program feature\n");
}
/**
@@ -3831,7 +3857,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
- if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
+ if (ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->block_isbad = onenand_block_isbad;
mtd->block_markbad = onenand_block_markbad;
mtd->owner = THIS_MODULE;
+ mtd->writebufsize = mtd->writesize;
/* Unlock whole block */
this->unlock_all(mtd);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3c453b..fc2c16a0fd1c 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
for (j = 0; j < len; j++) {
/* No need to read pages fully,
* just read required OOB bytes */
- ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops);
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * this->writesize + bd->offs, &ops);
/* If it is a initial bad block, just ignore it */
if (ret == ONENAND_BBT_READ_FATAL_ERROR)
return -EIO;
- if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+ if (ret || check_short_pattern(&buf[j * scanlen],
+ scanlen, this->writesize, bd)) {
bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
- printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
- i >> 1, (unsigned int) from);
+ printk(KERN_INFO "OneNAND eraseblock %d is an "
+ "initial bad block\n", i >> 1);
mtd->ecc_stats.badblocks++;
break;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05e6de0..a4c74a9ba430 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
- int err, page_dma = 0;
+ int err, ofs, page_dma = 0;
struct device *dev = &onenand->pdev->dev;
p = this->base + area;
@@ -677,10 +677,13 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
if (!page)
goto normal;
+ /* Page offset */
+ ofs = ((size_t) buf & ~PAGE_MASK);
page_dma = 1;
+
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
- dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE);
+ dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
} else {
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf6c025..ac0d6a8613b5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezeable_workqueue("smflush");
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
if (IS_ERR(cache_flush_workqueue))
return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f65fe0b..0b8141fc5c26 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
/* Read both LEB 0 and LEB 1 into memory */
ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vmalloc(ubi->vtbl_size);
+ leb[seb->lnum] = vzalloc(ubi->vtbl_size);
if (!leb[seb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- memset(leb[seb->lnum], 0, ubi->vtbl_size);
err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
ubi->vtbl_size);
@@ -516,10 +515,9 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
int i;
struct ubi_vtbl_record *vtbl;
- vtbl = vmalloc(ubi->vtbl_size);
+ vtbl = vzalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
- memset(vtbl, 0, ubi->vtbl_size);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a28d2f..03823327db25 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
-config FEC2
- bool "Second FEC ethernet controller (on some ColdFire CPUs)"
- depends on FEC
- help
- Say Y here if you want to use the second built-in 10/100 Fast
- ethernet controller on some Motorola ColdFire processors.
-
config FEC_MPC52xx
tristate "MPC52xx FEC driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2871,7 +2864,7 @@ config MLX4_CORE
default n
config MLX4_DEBUG
- bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+ bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
depends on MLX4_CORE
default y
---help---
@@ -2970,6 +2963,7 @@ config TILE_NET
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
+ select XEN_XENBUS_FRONTEND
default y
help
The network device frontend driver allows the kernel to
@@ -3395,8 +3389,7 @@ config NETCONSOLE
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
- depends on NETCONSOLE && SYSFS
- select CONFIGFS_FS
+ depends on NETCONSOLE && SYSFS && CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
}
/**
- * ks8695_get_settings - Get device-specific settings.
+ * ks8695_wan_get_settings - Get device-specific settings.
* @ndev: The network device to read settings from
* @cmd: The ethtool structure to read into
*/
static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
SUPPORTED_TP | SUPPORTED_MII);
cmd->transceiver = XCVR_INTERNAL;
- /* Port specific extras */
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- cmd->phy_address = 0;
- /* not supported for HPNA */
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->phy_address = 0;
- /* BUG: Erm, dtype hpna implies no phy regs */
- /*
- ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
- cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- cmd->port = PORT_MII;
- cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
- cmd->phy_address = 0;
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if ((ctrl & WMC_WAND) == 0) {
+ /* auto-negotiation is enabled */
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (ctrl & WMC_WANA100F)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl & WMC_WANA100H)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (ctrl & WMC_WANA10F)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (ctrl & WMC_WANA10H)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (ctrl & WMC_WANAP)
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->autoneg = AUTONEG_ENABLE;
+
+ cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WDS) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ /* auto-negotiation is disabled */
+ cmd->autoneg = AUTONEG_DISABLE;
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- if ((ctrl & WMC_WAND) == 0) {
- /* auto-negotiation is enabled */
- cmd->advertising |= ADVERTISED_Autoneg;
- if (ctrl & WMC_WANA100F)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (ctrl & WMC_WANA100H)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (ctrl & WMC_WANA10F)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (ctrl & WMC_WANA10H)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (ctrl & WMC_WANAP)
- cmd->advertising |= ADVERTISED_Pause;
- cmd->autoneg = AUTONEG_ENABLE;
-
- cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WDS) ?
- DUPLEX_FULL : DUPLEX_HALF;
- } else {
- /* auto-negotiation is disabled */
- cmd->autoneg = AUTONEG_DISABLE;
-
- cmd->speed = (ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WANFF) ?
- DUPLEX_FULL : DUPLEX_HALF;
- }
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
+ cmd->speed = (ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WANFF) ?
+ DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/**
- * ks8695_set_settings - Set device-specific settings.
+ * ks8695_wan_set_settings - Set device-specific settings.
* @ndev: The network device to configure
* @cmd: The settings to configure
*/
static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
ADVERTISED_100baseT_Full)) == 0)
return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA does not support auto-negotiation. */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
- WMC_WANA10F | WMC_WANA10H);
- if (cmd->advertising & ADVERTISED_100baseT_Full)
- ctrl |= WMC_WANA100F;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
- ctrl |= WMC_WANA100H;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
- ctrl |= WMC_WANA10F;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
- ctrl |= WMC_WANA10H;
-
- /* force a re-negotiation */
- ctrl |= WMC_WANR;
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ ctrl |= WMC_WANA100F;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ ctrl |= WMC_WANA100H;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ ctrl |= WMC_WANA10F;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ ctrl |= WMC_WANA10H;
+
+ /* force a re-negotiation */
+ ctrl |= WMC_WANR;
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
} else {
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* BUG: dtype_hpna implies no phy registers */
- /*
- ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
-
- ctrl &= ~(HMC_HSS | HMC_HDS);
- if (cmd->speed == SPEED_100)
- ctrl |= HMC_HSS;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= HMC_HDS;
-
- __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* disable auto-negotiation */
- ctrl |= WMC_WAND;
- ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
- if (cmd->speed == SPEED_100)
- ctrl |= WMC_WANF100;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= WMC_WANFF;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* disable auto-negotiation */
+ ctrl |= WMC_WAND;
+ ctrl &= ~(WMC_WANF100 | WMC_WANFF);
+
+ if (cmd->speed == SPEED_100)
+ ctrl |= WMC_WANF100;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= WMC_WANFF;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
}
return 0;
}
/**
- * ks8695_nwayreset - Restart the autonegotiation on the port.
+ * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
* @ndev: The network device to restart autoneotiation on
*/
static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy means no autonegotiation on hpna */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- if ((ctrl & WMC_WAND) == 0)
- writel(ctrl | WMC_WANR,
- ksp->phyiface_regs + KS8695_WMC);
- else
- /* auto-negotiation not enabled */
- return -EINVAL;
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-/**
- * ks8695_get_link - Retrieve link status of network interface
- * @ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
+ if ((ctrl & WMC_WAND) == 0)
+ writel(ctrl | WMC_WANR,
+ ksp->phyiface_regs + KS8695_WMC);
+ else
+ /* auto-negotiation not enabled */
+ return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA always has link */
- return 1;
- case KS8695_DTYPE_WAN:
- /* WAN we can read the PHY for */
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- return ctrl & WMC_WLS;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
return 0;
}
/**
- * ks8695_get_pause - Retrieve network pause/flow-control advertising
+ * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
* @ndev: The device to retrieve settings from
* @param: The structure to fill out with the information
*/
static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy link on hpna to configure */
- return;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* advertise Pause */
- param->autoneg = (ctrl & WMC_WANAP);
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- /* current Rx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- param->rx_pause = (ctrl & DRXC_RFCE);
+ /* advertise Pause */
+ param->autoneg = (ctrl & WMC_WANAP);
- /* current Tx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- param->tx_pause = (ctrl & DTXC_TFCE);
- break;
- case KS8695_DTYPE_LAN:
- /* The LAN's "phy" is a direct-attached switch */
- return;
- }
-}
+ /* current Rx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ param->rx_pause = (ctrl & DRXC_RFCE);
-/**
- * ks8695_set_pause - Configure pause/flow-control
- * @ndev: The device to configure
- * @param: The pause parameters to set
- *
- * TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
- return -EOPNOTSUPP;
+ /* current Tx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ param->tx_pause = (ctrl & DTXC_TFCE);
}
/**
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
static const struct ethtool_ops ks8695_ethtool_ops = {
.get_msglevel = ks8695_get_msglevel,
.set_msglevel = ks8695_set_msglevel,
- .get_settings = ks8695_get_settings,
- .set_settings = ks8695_set_settings,
- .nway_reset = ks8695_nwayreset,
- .get_link = ks8695_get_link,
- .get_pauseparam = ks8695_get_pause,
- .set_pauseparam = ks8695_set_pause,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_settings = ks8695_wan_get_settings,
+ .set_settings = ks8695_wan_set_settings,
+ .nway_reset = ks8695_wan_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = ks8695_wan_get_pause,
.get_drvinfo = ks8695_get_drvinfo,
};
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev)
/* driver system setup */
ndev->netdev_ops = &ks8695_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
}
/* And bring up the net_device with the net core */
@@ -1742,7 +1644,7 @@ ks8695_cleanup(void)
module_init(ks8695_init);
module_exit(ks8695_cleanup);
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..3824382faecc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
/* required last entry */
{ 0 }
};
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = nonemb_cmd->va;
sge = nonembedded_sgl(wrb);
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
+err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..28a32a6c8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
if (adapter->link_up != link_up) {
adapter->link_speed = -1;
if (link_up) {
- netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
} else {
- netif_stop_queue(netdev);
netif_carrier_off(netdev);
printk(KERN_INFO "%s: Link down\n", netdev->name);
}
@@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
-
- netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_VERSION "1.1"
+#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -41,12 +46,7 @@
#include "bfin_mac.h"
-#define DRV_NAME "bfin_mac"
-#define DRV_VERSION "1.1"
-#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
/* allocate a new skb for next time receive */
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": init: low on mem - packet dropped\n");
+ pr_notice("init: low on mem - packet dropped\n");
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
init_error:
desc_list_free();
- printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+ pr_err("kmalloc failed\n");
return -ENOMEM;
}
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
udelay(1);
if (timeout_cnt-- < 0) {
- printk(KERN_ERR DRV_NAME
- ": wait MDC/MDIO transaction to complete timeout\n");
+ pr_err("wait MDC/MDIO transaction to complete timeout\n");
return -ETIMEDOUT;
}
}
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
opmode &= ~RMII_10;
break;
default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100!\n",
- DRV_NAME, phydev->speed);
+ netdev_warn(dev,
+ "Ack! Speed (%d) is not 10/100!\n",
+ phydev->speed);
break;
}
bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
/* now we are supposed to have a proper phydev, to attach to... */
if (!phydev) {
- printk(KERN_INFO "%s: Don't found any phy device at all\n",
- dev->name);
+ netdev_err(dev, "no phy device found\n");
return -ENODEV;
}
if (phy_mode != PHY_INTERFACE_MODE_RMII &&
phy_mode != PHY_INTERFACE_MODE_MII) {
- printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+ netdev_err(dev, "invalid phy interface mode\n");
return -EINVAL;
}
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
0, phy_mode);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "could not attach PHY\n");
return PTR_ERR(phydev);
}
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
lp->old_duplex = -1;
lp->phydev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
- "@sclk=%dMHz)\n",
- DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
- MDC_CLK, mdc_div, sclk/1000000);
+ pr_info("attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
};
/**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+ /* Set vlan regs to let 1522 bytes long packets pass through */
+ bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+ bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
/* Initialize the TX DMA channel registers */
bfin_write_DMA2_X_COUNT(0);
bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
udelay(1);
if (timeout_cnt == 0)
- printk(KERN_ERR DRV_NAME
- ": fails to timestamp the TX packet\n");
+ netdev_err(netdev, "timestamping the TX packet failed\n");
else {
struct skb_shared_hwtstamps shhwtstamps;
u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
* we which case we simply drop the packet
*/
if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: receive error - packet dropped\n");
+ netdev_notice(dev, "rx: receive error - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: low on mem - packet dropped\n");
+ netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
int ret;
u32 opmode;
- pr_debug("%s: %s\n", DRV_NAME, __func__);
+ pr_debug("%s\n", __func__);
/* Set RX DMA */
bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1287,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* skip non-multicast addresses */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc(ETH_ALEN, addrs);
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= 26;
if (crc & 0x20)
@@ -1323,7 +1313,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
u32 sysctl;
if (dev->flags & IFF_PROMISC) {
- printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+ netdev_info(dev, "set promisc mode\n");
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1383,7 @@ static int bfin_mac_open(struct net_device *dev)
* address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
*/
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+ netdev_warn(dev, "no valid ethernet hw addr\n");
return -EINVAL;
}
@@ -1527,6 +1517,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_mii_probe;
}
+ lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+ lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(ndev);
@@ -1558,7 +1551,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
bfin_mac_hwtstamp_init(ndev);
/* now, print out the card info, in a short format.. */
- dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+ netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
return 0;
@@ -1650,7 +1643,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
* so set the GPIO pins to Ethernet mode
*/
pin_req = mii_bus_pd->mac_peripherals;
- rc = peripheral_request_list(pin_req, DRV_NAME);
+ rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
if (rc) {
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
return rc;
@@ -1739,7 +1732,7 @@ static struct platform_driver bfin_mac_driver = {
.resume = bfin_mac_resume,
.suspend = bfin_mac_suspend,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
#include <linux/etherdevice.h>
#include <linux/bfin_mac.h>
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwise, corrupted RX packets will be sent up the stack
+ * without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
#define BFIN_MAC_CSUM_OFFLOAD
+#endif
#define TX_RECLAIM_JIFFIES (HZ / 5)
@@ -68,7 +75,6 @@ struct bfin_mac_local {
*/
struct net_device_stats stats;
- unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
struct timer_list tx_reclaim_timer;
struct net_device *ndev;
+ /* Data for EMAC_VLAN1 regs */
+ u16 vlan1_mask, vlan2_mask;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae91991..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
- memset(ioc_attr, 0, sizeof(*ioc_attr));
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..0ba59d5aeb7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7553,6 +7553,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
!(data & ETH_FLAG_RXVLAN))
return -EINVAL;
+ /* TSO with VLAN tag won't work with current firmware */
+ if (!(data & ETH_FLAG_TXVLAN))
+ return -EINVAL;
+
rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
ETH_FLAG_TXVLAN);
if (rc)
@@ -7962,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
/* AER (Advanced Error Reporting) hooks */
err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
- "failed 0x%x\n", err);
- /* non-fatal, continue */
- }
+ if (!err)
+ bp->flags |= BNX2_FLAG_AER_ENABLED;
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
if (bp->regview) {
iounmap(bp->regview);
@@ -8418,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
free_netdev(dev);
@@ -8535,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_PCIE))
+ if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..f459fb2f9add 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
+#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d6d86b..7897d114b290 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-3"
-#define DRV_MODULE_RELDATE "2010/12/21"
+#define DRV_MODULE_VERSION "1.62.00-6"
+#define DRV_MODULE_RELDATE "2011/01/30"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -636,6 +636,7 @@ struct bnx2x_common {
#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
int flash_size;
#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -1414,12 +1415,12 @@ struct bnx2x_func_init_params {
else
/* skip rx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
*/
#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
/* skip tx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
*/
#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
@@ -1612,19 +1613,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
-
-/* CMNG constants
- derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE 100
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC 100
-/* resolution of fairness algorithm in usecs -
- coefficient for calculating the actual t fair */
-#define T_FAIR_COEF 10000000
+#define RS_PERIODIC_TIMEOUT_USEC 100
/* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES 40000
-#define FAIR_MEM 2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm */
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of the fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
#define ATTN_NIG_FOR_FUNC (1L << 8)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..93798129061b 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#endif
}
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ * aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /* TPA aggregation won't have IP options or TCP options
+ * other than the timestamp.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+ /* Check if there was a TCP timestamp; if there is, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
+
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sk_buff *skb,
struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
+ u16 cqe_idx, u16 parsing_flags)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+ len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+ u16 parsing_flags =
+ le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
- if ((le16_to_cpu(cqe->fast_path_cqe.
- pars_flags.flags) & PARSING_FLAGS_VLAN))
+ &cqe->fast_path_cqe, cqe_idx,
+ parsing_flags)) {
+ if (parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
u16 line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
- u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- /* Calculate the current MAX line speed limit for the DCC
- * capable devices
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
*/
- if (IS_MF_SD(bp)) {
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
- } else /* IS_MF_SI(bp)) */
- line_speed = (line_speed * maxCfg) / 100;
+ }
}
return line_speed;
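A minimal standalone sketch of the two branches in bnx2x_get_mf_speed() above, assuming a 10G link and an example maxCfg of 25 (SI mode treats maxCfg as a percentage of the link speed; SD mode caps the speed at maxCfg * 100 Mbps):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the branch logic of bnx2x_get_mf_speed() for illustration. */
static uint16_t example_mf_speed(uint16_t line_speed, uint16_t max_cfg, int is_si)
{
	if (is_si)
		return (line_speed * max_cfg) / 100;

	/* SD mode: absolute cap of max_cfg * 100 Mbps */
	{
		uint16_t vn_max_rate = max_cfg * 100;

		return vn_max_rate < line_speed ? vn_max_rate : line_speed;
	}
}

int main(void)
{
	/* With a 10000 Mbps link and max_cfg = 25, both modes yield 2500 Mbps. */
	printf("SI: %u Mbps\n", example_mf_speed(10000, 25, 1));
	printf("SD: %u Mbps\n", example_mf_speed(10000, 25, 0));
	return 0;
}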
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..326ba44b3ded 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ BNX2X_ERR("Illegal configuration detected for Max BW - "
+ "using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
#endif /* BNX2X_CMN_H */
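A minimal standalone sketch of the mask-and-shift pattern used by bnx2x_extract_max_cfg() above; the 0xff000000 mask and shift of 24 are illustrative assumptions, not the real FUNC_MF_CFG_MAX_BW_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout only -- the real mask/shift come from the
 * firmware header and may differ.
 */
#define EX_MAX_BW_MASK	0xff000000
#define EX_MAX_BW_SHIFT	24

/* Same shape as bnx2x_extract_max_cfg(): mask out the MAX BW field and
 * fall back to 100 when the configured value is zero.
 */
static uint16_t example_extract_max_cfg(uint32_t mf_cfg)
{
	uint16_t max_cfg = (mf_cfg & EX_MAX_BW_MASK) >> EX_MAX_BW_SHIFT;

	if (!max_cfg)
		max_cfg = 100;	/* illegal configuration -> use 100 */

	return max_cfg;
}

int main(void)
{
	printf("max_cfg = %u\n", example_extract_max_cfg(0x32000000));	/* 0x32 = 50 */
	printf("max_cfg = %u\n", example_extract_max_cfg(0));		/* falls back to 100 */
	return 0;
}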
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
*/
@@ -17,53 +23,53 @@
#define BNX2X_DUMP_H
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define TSTORM_CAM_MODE 0x1b1440
+/* definitions */
+#define XSTORM_WAITP_ADDR 0x2b8a80
+#define TSTORM_WAITP_ADDR 0x1b8a80
+#define USTORM_WAITP_ADDR 0x338a80
+#define CSTORM_WAITP_ADDR 0x238a80
+#define TSTORM_CAM_MODE 0x1B1440
-#define RI_E1 0x1
-#define RI_E1H 0x2
+#define MAX_TIMER_PENDING 200
+#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1 0x1
+#define RI_E1H 0x2
#define RI_E2 0x4
-#define RI_ONLINE 0x100
+#define RI_ONLINE 0x100
#define RI_PATH0_DUMP 0x200
#define RI_PATH1_DUMP 0x400
-#define RI_E1_OFFLINE (RI_E1)
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE (RI_E1H)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE (RI_E2)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1_OFFLINE (RI_E1)
+#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE (RI_E1H)
+#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE (RI_E2)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+struct dump_sign {
+ u32 time_stamp;
+ u32 diag_ver;
+ u32 grc_dump_ver;
+};
struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+ u32 hdr_size; /* in dwords, excluding this field */
+ struct dump_sign dump_sign;
+ u32 xstorm_waitp;
+ u32 tstorm_waitp;
+ u32 ustorm_waitp;
+ u32 cstorm_waitp;
+ u16 info;
+ u8 idle_chk;
+ u8 reserved;
};
struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
u16 info;
};
-
-#define REGS_COUNT 558
+#define REGS_COUNT 834
static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
- { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
- { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
- { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
- { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
- { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
- { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
- { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
- { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
- { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
- { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
- { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
- { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
- { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
- { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
- { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
- { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
- { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
- { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
- { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
- { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
- { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
- { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+ { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+ { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+ { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+ { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+ { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+ { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+ { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+ { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+ { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+ { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+ { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+ { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+ { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+ { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+ { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+ { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+ { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+ { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+ { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+ { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+ { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+ { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+ { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+ { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+ { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+ { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+ { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+ { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+ { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+ { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+ { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+ { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+ { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+ { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+ { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+ { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+ { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+ { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+ { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+ { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+ { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+ { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+ { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+ { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+ { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+ { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+ { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+ { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+ { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+ { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+ { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+ { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+ { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+ { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+ { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+ { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+ { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+ { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+ { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+ { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
- { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
- { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+ { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+ { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+ { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+ { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+ { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+ { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+ { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+ { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+ { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+ { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+ { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+ { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+ { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
- { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+ { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+ { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+ { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+ { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+ { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+ { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
- { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
- { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
- { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
- { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
- { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+ { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+ { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+ { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+ { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+ { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+ { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
- { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
- { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+ { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+ { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+ { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+ { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
- { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+ { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+ { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+ { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+ { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+ { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+ { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+ { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+ { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+ { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+ { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
- { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+ { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+ { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+ { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
- { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+ { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+ { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+ { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+ { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
- { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
- { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
- { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
- { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
- { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
- { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
- { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
- { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
- { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
- { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
- { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
- { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
- { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
- { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
- { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
- { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
- { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
- { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
- { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
- { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
- { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
- { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
- { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
- { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
- { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
- { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
- { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
- { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
- { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
- { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
- { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
- { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
- { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
- { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
- { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
- { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
- { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
- { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
- { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
- { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
- { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
- { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
- { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
- { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
- { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
- { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
- { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+ { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+ { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+ { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+ { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+ { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+ { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+ { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+ { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+ { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+ { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+ { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+ { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+ { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+ { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+ { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+ { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+ { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+ { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+ { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+ { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+ { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+ { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+ { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+ { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+ { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+ { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+ { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+ { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+ { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+ { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+ { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+ { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+ { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+ { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+ { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+ { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+ { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+ { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+ { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+ { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+ { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+ { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+ { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+ { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+ { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+ { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+ { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+ { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+ { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+ { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
- { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+ { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
- { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
- { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
- { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
- { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
- { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+ { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+ { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+ { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+ { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+ { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+ { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+ { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+ { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+ { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+ { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+ { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+ { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+ { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+ { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+ { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+ { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+ { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+ { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+ { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+ { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+ { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+ { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+ { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+ { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+ { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+ { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
- { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
- { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
- { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
- { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+ { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+ { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+ { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+ { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+ { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+ { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+ { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+ { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+ { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+ { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+ { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+ { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+ { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+ { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+ { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+ { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+ { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
- { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
- { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
- { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
- { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
- { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
- { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
- { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
- { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
- { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
- { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
- { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
- { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
- { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
- { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
- { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+ { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+ { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+ { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+ { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+ { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+ { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+ { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+ { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+ { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+ { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+ { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+ { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+ { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+ { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+ { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+ { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+ { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+ { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+ { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+ { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+ { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+ { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+ { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+ { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+ { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+ { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+ { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+ { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+ { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+ { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+ { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+ { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+ { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+ { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+ { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+ { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+ { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+ { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+ { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+ { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+ { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+ { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+ { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+ { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+ { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+ { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+ { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+ { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+ { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+ { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+ { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+ { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+ { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+ { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+ { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+ { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+ { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+ { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+ { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+ { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+ { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+ { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+ { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+ { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+ { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+ { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+ { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+ { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+ { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+ { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+ { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+ { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+ { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+ { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+ { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+ { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+ { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+ { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+ { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+ { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+ { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+ { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+ { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+ { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+ { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+ { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+ { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+ { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+ { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+ { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+ { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+ { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+ { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+ { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+ { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+ { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+ { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+ { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+ { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+ { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+ { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+ { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+ { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+ { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+ { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+ { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+ { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+ { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+ { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+ { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+ { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+ { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+ { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+ { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+ { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+ { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+ { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+ { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+ { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+ { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+ { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+ { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+ { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+ { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+ { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+ { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+ { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+ { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
- { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
- { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
- { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
- { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+ { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+ { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+ { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+ { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+ { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+ { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+ { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+ { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+ { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+ { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+ { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+ { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+ { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+ { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+ { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+ { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+ { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+ { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+ { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+ { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+ { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+ { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+ { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+ { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+ { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+ { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+ { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+ { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+ { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+ { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+ { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+ { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+ { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+ { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+ { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+ { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+ { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+ { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+ { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+ { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+ { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+ { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
};
-
-#define IDLE_REGS_COUNT 277
+#define IDLE_REGS_COUNT 237
static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
- { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
- { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
- { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+ { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+ { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+ { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+ { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+ { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+ { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+ { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+ { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
- { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
- { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
- { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
- { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
- { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
- { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
- { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
- { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
- { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
- { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
- { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
- { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
- { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
- { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
- { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
- { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
- { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
- { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
- { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
- { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
- { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
- { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
- { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
- { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
- { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
- { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
- { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
- { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
- { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
- { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
- { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
- { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
- { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+ { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+ { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+ { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+ { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+ { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+ { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+ { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+ { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+ { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+ { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+ { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+ { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+ { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+ { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+ { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+ { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+ { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+ { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+ { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+ { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+ { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+ { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+ { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+ { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+ { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+ { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+ { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+ { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+ { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+ { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+ { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+ { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+ { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+ { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+ { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+ { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+ { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
- { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
- { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
- { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
- { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
- { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
- { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
- { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+ { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+ { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+ { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+ { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+ { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+ { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+ { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
- { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
- { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
- { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
- { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
- { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
- { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
- { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
- { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
- { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
- { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
- { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
- { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
- { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+ { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+ { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+ { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+ { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+ { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+ { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+ { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+ { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+ { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+ { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+ { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+ { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+ { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+ { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+ { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+ { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+ { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+ { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
- { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
- { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
- { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
- { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
- { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
- { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
- { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
- { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
- { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
- { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
- { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
- { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
- { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
- { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
- { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
- { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+ { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+ { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+ { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+ { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+ { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+ { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+ { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+ { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+ { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+ { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+ { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+ { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+ { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+ { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
- { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
- { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
{ 0x3380c0, 1, RI_ALL_ONLINE }
};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
};
-
#define WREGS_COUNT_E1H 1
static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
};
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
#define TIMER_REGS_COUNT_E1 2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x1640d0, 0x1640d4 };
#define TIMER_REGS_COUNT_E1H 2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2 2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x1640d0, 0x1640d4 };
+
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+ { 0x0, 0, RI_E1H_ONLINE } };
#define PAGE_MODE_VALUES_E2 2
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d894ca..ef2919987a10 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
/* Note: in the format strings below %s is replaced by the queue-name which is
* either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -237,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed |= (cmd->speed_hi << 16);
if (IS_MF_SI(bp)) {
- u32 param = 0;
+ u32 param = 0, part;
u32 line_speed = bp->link_vars.line_speed;
/* use 10G if no link detected */
@@ -250,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
- if (line_speed < speed) {
- BNX2X_DEV_INFO("New speed should be less or equal "
- "to actual line speed\n");
+ part = (speed * 100) / line_speed;
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
return -EINVAL;
}
/* load old values */
@@ -262,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
param &= FUNC_MF_CFG_MIN_BW_MASK;
/* set new MAX value */
- param |= (((speed * 100) / line_speed)
- << FUNC_MF_CFG_MAX_BW_SHIFT)
+ param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
& FUNC_MF_CFG_MAX_BW_MASK;
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
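The hunk above turns the bandwidth request into a percentage: the requested speed is divided by the current line speed, rejected unless it falls between 1% and 100%, and the resulting value is shifted into the MAX_BW field of the MF configuration word. A minimal user-space sketch of that arithmetic follows; the shift and mask below are placeholders, not the real FUNC_MF_CFG_* definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_BW_SHIFT	16			/* placeholder, not FUNC_MF_CFG_MAX_BW_SHIFT */
#define MAX_BW_MASK	(0xffu << MAX_BW_SHIFT)	/* placeholder, not FUNC_MF_CFG_MAX_BW_MASK */

static int encode_max_bw(uint32_t speed, uint32_t line_speed, uint32_t *param)
{
	uint32_t part;

	if (!line_speed)
		return -1;
	part = (speed * 100) / line_speed;
	/* Accept only 1%..100% of the actual line speed */
	if (line_speed < speed || !part)
		return -1;
	*param &= ~MAX_BW_MASK;
	*param |= (part << MAX_BW_SHIFT) & MAX_BW_MASK;
	return 0;
}

int main(void)
{
	uint32_t param = 0;

	if (!encode_max_bw(2500, 10000, &param))
		printf("param = 0x%x (25%% of line speed)\n", (unsigned int)param);
	return 0;
}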
@@ -472,7 +474,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
- int i;
+ int i, j, k;
if (CHIP_IS_E1(bp)) {
for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +504,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
regdump_len += wreg_addrs_e2[i].size *
(1 + wreg_addrs_e2[i].read_regs_count);
+
+ for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+ for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+ for (k = 0; k < PAGE_READ_REGS_E2; k++)
+ if (IS_E2_ONLINE(page_read_regs_e2[k].
+ info))
+ regdump_len +=
+ page_read_regs_e2[k].size;
+ }
}
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
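For the E2 paged registers, the loop added above iterates over every page value and every page-write register and sums the sizes of the online page-read ranges; the grand total is converted from dwords to bytes and the dump header is added afterwards. A rough standalone sketch of that size computation (the table contents and counts below are placeholders, not the real PAGE_* tables):

#include <stdio.h>

struct reg_range { unsigned int addr, size, online; };

static const struct reg_range page_read[] = {	/* placeholder ranges */
	{ 0x58000, 4608, 1 },
	{ 0x59000,  128, 0 },	/* offline: not counted */
};

int main(void)
{
	unsigned int pages = 2, write_regs = 1;	/* stand-ins for PAGE_MODE_VALUES_E2 etc. */
	unsigned int i, j, k, len = 0;

	for (i = 0; i < pages; i++)
		for (j = 0; j < write_regs; j++)
			for (k = 0; k < sizeof(page_read) / sizeof(page_read[0]); k++)
				if (page_read[k].online)
					len += page_read[k].size;

	printf("paged dump length: %u dwords (%u bytes)\n", len, len * 4);
	return 0;
}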
@@ -539,6 +550,12 @@ static void bnx2x_get_regs(struct net_device *dev,
if (!netif_running(bp->dev))
return;
+ /* Disable parity attentions while the following dump runs, since it
+ * may cause false alarms by reading registers that were never written.
+ * We will re-enable parity attentions right after the dump.
+ */
+ bnx2x_disable_blocks_parity(bp);
+
dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
dump_hdr.dump_sign = dump_sign_all;
dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +597,10 @@ static void bnx2x_get_regs(struct net_device *dev,
bnx2x_read_pages_regs_e2(bp, p);
}
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
}
#define PHY_FW_VER_LEN 20
@@ -1761,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x100, 0x350 }, /* manuf_info */
{ 0x450, 0xf0 }, /* feature_info */
{ 0x640, 0x64 }, /* upgrade_key_info */
- { 0x6a4, 0x64 },
{ 0x708, 0x70 }, /* manuf_key_info */
- { 0x778, 0x70 },
{ 0, 0 }
};
__be32 buf[0x350 / 4];
@@ -1913,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev,
buf[4] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bp->port.pmf)
- if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f63989..548f5631c0dc 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -352,6 +352,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
/* forced only */
#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ /* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
u32 external_phy_config;
#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a559..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
u64 next;
};
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+ u32 mask_addr;
+ u32 sts_clr_addr;
+ u32 en_mask; /* Mask to enable parity attentions */
+ struct {
+ u32 e1; /* 57710 */
+ u32 e1h; /* 57711 */
+ u32 e2; /* 57712 */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[7]; /* Block's longest name is 6 characters long
+ * (name + suffix)
+ */
+} bnx2x_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* The registers below control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are enabled;
+ * when they are cleared, attentions are disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+ int i;
+ u32 reg_val;
+
+ for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+ if (CHIP_IS_E1(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+ else if (CHIP_IS_E1H(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+ else
+ return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (dis_mask) {
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ DP(NETIF_MSG_HW, "Setting parity mask "
+ "for %s to\t\t0x%x\n",
+ bnx2x_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+ u32 reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ DP(NETIF_MSG_HW,
+ "Parity errors in %s: 0x%x\n",
+ bnx2x_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask)
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, true);
+}
+
+
#endif /* BNX2X_INIT_H */
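The helpers added to bnx2x_init.h above are driven by one table: disabling writes each block's full valid-bit mask to its PRTY_MASK register, clearing reads the read-clear PRTY_STS_CLR registers (logging whatever was latched), and enabling writes back the per-block en_mask. Below is a minimal user-space sketch of that same table walk against a fake register file; the addresses and mask values are invented for illustration and the chip-specific reg_mask selection is dropped.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x100];			/* fake register file */

static void reg_wr(uint32_t addr, uint32_t val) { regs[addr] = val; }
/* Status registers here are read-clear, like the *_PRTY_STS_CLR registers. */
static uint32_t reg_rd_clr(uint32_t addr) { uint32_t v = regs[addr]; regs[addr] = 0; return v; }

struct prty_block {
	uint32_t mask_addr, sts_clr_addr;
	uint32_t en_mask;	/* value written when (re-)enabling */
	uint32_t reg_mask;	/* all valid bits of the block */
	const char *name;
};

static const struct prty_block blocks[] = {	/* invented addresses/masks */
	{ 0x10, 0x14, 0x7, 0xf, "HC" },
	{ 0x20, 0x24, 0x0, 0x3, "DORQ" },
};

#define NBLOCKS (sizeof(blocks) / sizeof(blocks[0]))

int main(void)
{
	size_t i;

	regs[0x14] = 0x5;	/* pretend a parity error was latched in "HC" */

	for (i = 0; i < NBLOCKS; i++)		/* disable: mask every valid bit */
		reg_wr(blocks[i].mask_addr, blocks[i].reg_mask);

	for (i = 0; i < NBLOCKS; i++) {		/* clear: read-clear and report */
		uint32_t sts = reg_rd_clr(blocks[i].sts_clr_addr) & blocks[i].reg_mask;
		if (sts)
			printf("parity errors in %s: 0x%x\n", blocks[i].name, (unsigned int)sts);
	}

	for (i = 0; i < NBLOCKS; i++)		/* re-enable with the per-block value */
		reg_wr(blocks[i].mask_addr, blocks[i].en_mask & blocks[i].reg_mask);

	return 0;
}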
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f391..dd1210fddfff 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1573,7 +1573,7 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
offset = phy->addr + ser_lane;
if (CHIP_IS_E2(bp))
- aer_val = 0x2800 + offset - 1;
+ aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
CL45_WR_OVER_CL22(bp, phy,
@@ -3166,7 +3166,23 @@ u8 bnx2x_set_led(struct link_params *params,
if (!vars->link_up)
break;
case LED_MODE_ON:
- if (SINGLE_MEDIA_DIRECT(params)) {
+ if (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /**
+ * This is a work-around for E2+8727 Configurations
+ */
+ if (mode == LED_MODE_ON ||
+ speed == SPEED_10000){
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
/**
* This is a work-around for HW issue found when link
* is up in CL73
@@ -3854,11 +3870,14 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
pause_result);
}
}
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
struct bnx2x_phy *phy,
u8 port)
{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ u8 rc = 0;
+
/* Boot port from external ROM */
/* EDC grst */
bnx2x_cl45_write(bp, phy,
@@ -3888,56 +3907,45 @@ static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 120ms for code download via SPI port */
- msleep(120);
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
+
+ /* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+ msleep(1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
/* Clear ser_boot_ctl bit */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL1, 0x0000);
bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
- struct bnx2x_phy *phy)
-{
- u16 val;
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
-
- if (val == 0) {
- /* Mustn't set low power mode in 8073 A0 */
- return;
- }
-
- /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
- bnx2x_cl45_read(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val &= ~(1<<13);
- bnx2x_cl45_write(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
- /* PLL controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
- /* Tx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
- /* Rx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
- /* Enable PLL sequencer (use read-modify-write to set bit 13) */
- bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val |= (1<<13);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+ return rc;
}
/******************************************************************/
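The rewritten ROM boot above drops the fixed 120 ms sleep in favour of a bounded poll: it keeps re-reading the firmware version (and, for the 8073, the M8051 message-out register) about once per millisecond and gives up after 300 tries. A stripped-down user-space sketch of the same bounded-poll pattern, with a stub standing in for the MDIO reads:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stub for the MDIO read: reports 0 until the "download" finishes. */
static uint16_t read_fw_version(void)
{
	static int calls;
	return ++calls < 5 ? 0 : 0x0102;
}

static int wait_for_rom_boot(void)
{
	unsigned int count = 0;
	uint16_t fw_ver = 0;

	do {
		if (++count > 300) {
			fprintf(stderr, "download failed, fw version = 0x%x\n", fw_ver);
			return -1;
		}
		fw_ver = read_fw_version();
		usleep(1000);			/* ~1 ms, like msleep(1) */
	} while (fw_ver == 0 || fw_ver == 0x4321);	/* treated as "still booting" */

	printf("download complete, fw version = 0x%x\n", fw_ver);
	return 0;
}

int main(void)
{
	return wait_for_rom_boot() ? 1 : 0;
}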
@@ -4098,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_8073_set_pause_cl37(params, phy, vars);
- bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
@@ -4108,6 +4114,25 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+ /**
+ * If this is a forced speed, set to KR or KX (all others are not
+ * supported)
+ */
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -4314,8 +4339,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
}
if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /**
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it's in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+ "the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
@@ -5062,6 +5111,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
else
vars->line_speed = SPEED_10000;
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
@@ -5758,8 +5808,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "port %x: External link is down\n",
params->port);
}
- if (link_up)
+ if (link_up) {
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
if ((DUAL_MEDIA(params)) &&
(phy->req_line_speed == SPEED_1000)) {
@@ -5875,10 +5928,26 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_REG_8481_LED2_MASK,
0x18);
+ /* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED3_MASK,
- 0x0040);
+ 0x0006);
+
+ /* Select the closest activity blink rate to that in 10/100/1000 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK,
+ 0);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3 */
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -6126,6 +6195,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
/* Check link 10G */
if (val2 & (1<<11)) {
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
link_up = 1;
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
} else { /* Check Legacy speed link */
@@ -6405,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x80);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
}
break;
}
@@ -6489,6 +6571,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
&val2);
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
val2, (val2 & (1<<14)));
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -7605,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
- s8 port;
+ s8 port = 0;
s8 port_of_path = 0;
-
- bnx2x_ext_phy_hw_reset(bp, 0);
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7663,7 +7749,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
@@ -7671,19 +7756,9 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
-
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8073_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
/* Only set bit 10 = 1 (Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -7848,27 +7923,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8727_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
- }
+ }
return 0;
}
@@ -7916,6 +7981,7 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id)
{
u8 rc = 0;
+ u32 phy_ver;
u8 phy_index;
u32 ext_phy_type, ext_phy_config;
DP(NETIF_MSG_LINK, "Begin common phy init\n");
@@ -7923,6 +7989,16 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
if (CHIP_REV_IS_EMUL(bp))
return 0;
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
+ return 0;
+ }
+
/* Read the ext_phy_type for arbitrary port(0) */
for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
phy_index++) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512a04d..032ae184b605 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
vn_max_rate = 0;
} else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
- /* If min rate is zero - set it to 1 */
+ /* If fairness is enabled (not all min rates are zero) and
+ the current min rate is zero, set it to 1.
+ This is a requirement of the algorithm. */
if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
- vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg is a percentage of the link speed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is an absolute value in 100Mb/s units */
+ vn_max_rate = maxCfg * 100;
}
DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
m_fair_vn.vn_credit_delta =
max_t(u32, (vn_min_rate * (T_FAIR_COEF /
(8 * bp->vn_weight_sum))),
- (bp->cmng.fair_vars.fair_threshold * 2));
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2301,15 +2311,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
/* accept matched ucast */
drop_all_ucast = 0;
}
- if (filters & BNX2X_ACCEPT_MULTICAST) {
+ if (filters & BNX2X_ACCEPT_MULTICAST)
/* accept matched mcast */
drop_all_mcast = 0;
- if (IS_MF_SI(bp))
- /* since mcast addresses won't arrive with ovlan,
- * fw needs to accept all of them in
- * switch-independent mode */
- accp_all_mcast = 1;
- }
+
if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
/* accept all mcast */
drop_all_ucast = 0;
@@ -3152,7 +3157,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
* should be run under rtnl lock
@@ -3527,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
- if (bnx2x_chk_parity_attn(bp)) {
+ if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
bp->recovery_state = BNX2X_RECOVERY_INIT;
bnx2x_set_reset_in_progress(bp);
schedule_delayed_work(&bp->reset_task, 0);
@@ -4282,9 +4286,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_MULTICAST);
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_MULTICAST);
+ }
#endif
break;
@@ -4292,18 +4299,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_MULTICAST);
+ /*
+ * Prevent duplication of multicast packets by configuring FCoE
+ * L2 Client to receive only matched unicast frames.
+ */
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST);
+ }
#endif
break;
case BNX2X_RX_MODE_PROMISC:
def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_MULTICAST);
+ /*
+ * Prevent packet duplication by configuring DROP_ALL for FCoE
+ * L2 Client.
+ */
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ }
#endif
/* pass management unicast packets as well */
llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -4754,7 +4772,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
return 0; /* OK */
}
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
if (CHIP_IS_E2(bp))
@@ -4808,53 +4826,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
- REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
-static const struct {
- u32 addr;
- u32 mask;
-} bnx2x_parity_mask[] = {
- {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
- {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
- {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
- {HC_REG_HC_PRTY_MASK, 0x7},
- {MISC_REG_MISC_PRTY_MASK, 0x1},
- {QM_REG_QM_PRTY_MASK, 0x0},
- {DORQ_REG_DORQ_PRTY_MASK, 0x0},
- {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
- {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
- {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
- {CDU_REG_CDU_PRTY_MASK, 0x0},
- {CFC_REG_CFC_PRTY_MASK, 0x0},
- {DBG_REG_DBG_PRTY_MASK, 0x0},
- {DMAE_REG_DMAE_PRTY_MASK, 0x0},
- {BRB1_REG_BRB1_PRTY_MASK, 0x0},
- {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
- {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
- {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
- {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
- {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
- {USEM_REG_USEM_PRTY_MASK_0, 0x0},
- {USEM_REG_USEM_PRTY_MASK_1, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
- REG_WR(bp, bnx2x_parity_mask[i].addr,
- bnx2x_parity_mask[i].mask);
-}
-
-
static void bnx2x_reset_common(struct bnx2x *bp)
{
/* reset_common */
@@ -5082,7 +5056,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
memset(&ilt, 0, sizeof(struct bnx2x_ilt));
- /* initalize dummy TM client */
+ /* initialize dummy TM client */
ilt_cli.start = 0;
ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
ilt_cli.client_num = ILT_CLIENT_TM;
@@ -5341,18 +5315,14 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
}
}
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
-
bnx2x_setup_fan_failure_detection(bp);
/* clear PXP2 attentions */
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
- enable_blocks_attention(bp);
- if (CHIP_PARITY_SUPPORTED(bp))
- enable_blocks_parity(bp);
+ bnx2x_enable_blocks_attention(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -5548,9 +5518,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -8424,6 +8391,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /*
+ * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
+ * In MF mode, it is set to cover the self-test cases.
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
@@ -8751,13 +8729,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, "
"must load devices in order!\n");
- /* Set multi queue mode */
- if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
- ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
- "requested is not MSI-X\n");
- multi_mode = ETH_RSS_MODE_DISABLED;
- }
bp->multi_mode = multi_mode;
bp->int_mode = int_mode;
@@ -9560,9 +9531,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ /* Power on: we can't let the PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->reset_task);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b72906..e01330bb36c7 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
* WR - Write Clear (write 1 to clear the bit)
*
*/
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
#define BRB1_REG_BRB1_PRTY_MASK 0x60138
/* [R 4] Parity register #0 read */
#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
* address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
* BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
#define CCM_REG_CCM_INT_MASK 0xd01e4
/* [R 11] Interrupt register #0 read */
#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
/* [R 27] Parity register #0 read */
#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
#define CDU_REG_CDU_PRTY_MASK 0x10104c
/* [R 5] Parity register #0 read */
#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
/* [RC 32] logging of error data in case of a CDU load error:
{expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error;
type_error; actual_active; actual_compressed_context}; */
@@ -381,6 +391,8 @@
#define CFC_REG_CFC_PRTY_MASK 0x104118
/* [R 4] Parity register #0 read */
#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
#define CFC_REG_CID_CAM 0x104800
#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
/* [R 11] Parity register #0 read */
#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
#define CSDM_REG_ENABLE_IN1 0xc2238
#define CSDM_REG_ENABLE_IN2 0xc223c
#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
/* [R 32] Parity register #0 read */
#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
#define CSEM_REG_ENABLE_IN 0x2000a4
#define CSEM_REG_ENABLE_OUT 0x2000a8
/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
#define DBG_REG_DBG_PRTY_MASK 0xc0a8
/* [R 1] Parity register #0 read */
#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
* function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
* 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
#define DMAE_REG_DMAE_PRTY_MASK 0x102064
/* [R 4] Parity register #0 read */
#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
/* [RW 1] Command 0 go. */
#define DMAE_REG_GO_C0 0x102080
/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
#define DORQ_REG_DORQ_PRTY_MASK 0x170190
/* [R 2] Parity register #0 read */
#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
/* [RW 8] The address to write the DPM CID to STORM. */
#define DORQ_REG_DPM_CID_ADDR 0x170044
/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
/* [R 1] data available for error memory. If this bit is clear do not read
* from error_handling_memory. */
#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
/* [R 11] Parity register #0 read */
#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
/* [R 4] Debug: int_handle_fsm */
#define IGU_REG_INT_HANDLE_FSM 0x130050
#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
#define MISC_REG_MISC_PRTY_MASK 0xa398
/* [R 1] Parity register #0 read */
#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
#define MISC_REG_NIG_WOL_P0 0xa270
#define MISC_REG_NIG_WOL_P1 0xa274
/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1604,7 +1633,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 -
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2082,6 +2111,10 @@
#define PBF_REG_PBF_INT_MASK 0x1401d4
/* [R 5] Interrupt register #0 read */
#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
#define PB_REG_CONTROL 0
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
@@ -2091,6 +2124,8 @@
#define PB_REG_PB_PRTY_MASK 0x38
/* [R 4] Parity register #0 read */
#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2446,6 +2481,8 @@
#define PRS_REG_PRS_PRTY_MASK 0x401a4
/* [R 8] Parity register #0 read */
#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
request message */
#define PRS_REG_PURE_REGIONS 0x40024
@@ -2599,6 +2636,9 @@
/* [R 32] Parity register #0 read */
#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
indication about backpressure) */
#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -3001,6 +3041,8 @@
#define PXP_REG_PXP_PRTY_MASK 0x103094
/* [R 26] Parity register #0 read */
#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
/* [RW 4] The activity counter initial increment value sent in the load
request */
#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3157,6 +3199,8 @@
#define QM_REG_QM_PRTY_MASK 0x168454
/* [R 12] Parity register #0 read */
#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
#define QM_REG_QSTATUS_HIGH 0x16802c
/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
#define QM_REG_WRRWEIGHTS_9 0x168848
/* [R 6] Keep the fill level of the fifo from write client 1 */
#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
#define SRC_REG_COUNTFREE0 0x40500
/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
ports. If set the searcher supports 8 functions. */
@@ -3470,6 +3516,8 @@
#define SRC_REG_SRC_PRTY_MASK 0x404c8
/* [R 3] Parity register #0 read */
#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
#define TCM_REG_CAM_OCCUP 0x5017c
/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
#define TCM_REG_TCM_INT_MASK 0x501dc
/* [R 11] Interrupt register #0 read */
#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
/* [R 27] Parity register #0 read */
#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
#define TM_REG_TM_INT_MASK 0x1640fc
/* [R 1] Interrupt register #0 read */
#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
/* [RW 8] The event id for aggregated interrupt 0 */
#define TSDM_REG_AGG_INT_EVENT_0 0x42038
#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3835,6 +3891,8 @@
#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
/* [R 11] Parity register #0 read */
#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
/* [RW 8] List of free threads . There is a bit per thread. */
#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
/* [RW 3] The arbitration scheme of time_slot 0 */
#define TSEM_REG_TS_0_AS 0x180038
/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
#define UCM_REG_UCM_INT_STS 0xe01c8
/* [R 27] Parity register #0 read */
#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
#define USDM_REG_USDM_PRTY_MASK 0xc42c0
/* [R 11] Parity register #0 read */
#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
/* [RW 5] The number of time_slots in the arbitration cycle */
#define USEM_REG_ARB_CYCLE_SIZE 0x300034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
/* [R 32] Parity register #0 read */
#define USEM_REG_USEM_PRTY_STS_0 0x300124
#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
* VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4797,6 +4865,8 @@
#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
/* [R 11] Parity register #0 read */
#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
/* [R 32] Parity register #0 read */
#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -6121,7 +6194,11 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
#define IGU_FUNC_BASE 0x0400
@@ -6316,3 +6393,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
}
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b144cc4..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
spin_lock_bh(&bp->stats_lock);
+ if (bp->stats_pending) {
+ spin_unlock_bh(&bp->stats_lock);
+ return;
+ }
+
ramrod_data.drv_counter = bp->stats_counter++;
ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
for_each_eth_queue(bp, i)
@@ -1234,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
if (unlikely(bp->panic))
return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
/* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
-
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
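The two statistics hunks above do related things: the first makes bnx2x_storm_stats_post() return early, under the stats lock, when a previous statistics ramrod is still pending; the second moves the state-machine action call out of the state-change critical section. A user-space sketch of the pending-check pattern, using a pthread mutex in place of the bottom-half spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stats_pending;
static unsigned int stats_counter;

static void storm_stats_post(void)
{
	pthread_mutex_lock(&stats_lock);
	if (stats_pending) {			/* previous request not answered yet */
		pthread_mutex_unlock(&stats_lock);
		return;
	}
	stats_counter++;			/* build and fire the new request */
	stats_pending = true;
	pthread_mutex_unlock(&stats_lock);
	printf("posted stats request #%u\n", stats_counter);
}

int main(void)
{
	storm_stats_post();
	storm_stats_post();			/* dropped: previous one still pending */
	return 0;
}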
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 48cf24ff4e6f..1024ae158227 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -840,7 +840,7 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
- /* Note: source addres is set to be the member's PERMANENT address,
+ /* Note: source address is set to be the member's PERMANENT address,
because we use it to identify loopback lacpdus in receive. */
memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -881,7 +881,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
- /* Note: source addres is set to be the member's PERMANENT address,
+ /* Note: source address is set to be the member's PERMANENT address,
because we use it to identify loopback MARKERs in receive. */
memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -1916,7 +1916,7 @@ int bond_3ad_bind_slave(struct slave *slave)
return -1;
}
- //check that the slave has not been intialized yet.
+ //check that the slave has not been initialized yet.
if (SLAVE_AD_INFO(slave).port.slave != slave) {
// port initialization
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
if (!(dev->flags & IFF_MASTER))
goto out;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
goto out;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c65129..5c6fba802f2b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
goto out;
}
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
goto out;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf1..163e0b06eaa5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (!slave || !slave_do_arp_validate(bond, slave))
goto out_unlock;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out_unlock;
+
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto out_unlock;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4da384cc7603..31fe980e4e28 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -18,7 +18,6 @@
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/if_bonding.h>
-#include <linux/kobject.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
#include "bond_3ad.h"
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade9..5dec456fd4a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
As only the sending and receiving of CAN frames is implemented, this
driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -117,6 +117,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/usb/Kconfig"
+source "drivers/net/can/softing/Kconfig"
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f9..53c82a71778e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
obj-y += usb/
+obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -40,22 +41,23 @@
#include <mach/board.h>
-#define AT91_NAPI_WEIGHT 12
+#define AT91_NAPI_WEIGHT 11
/*
* RX/TX Mailbox split
* don't dare to touch
*/
-#define AT91_MB_RX_NUM 12
+#define AT91_MB_RX_NUM 11
#define AT91_MB_TX_SHIFT 2
-#define AT91_MB_RX_FIRST 0
+#define AT91_MB_RX_FIRST 1
#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
#define AT91_MB_RX_SPLIT 8
#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+ ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
struct clk *clk;
struct at91_can_data *pdata;
+
+ canid_t mb0_id;
};
static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
set_mb_mode_prio(priv, mb, mode, 0);
}
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+ u32 reg_mid;
+
+ if (can_id & CAN_EFF_FLAG)
+ reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ else
+ reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+ return reg_mid;
+}
+
/*
 * Switch transceiver on or off
*/
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
unsigned int i;
+ u32 reg_mid;
/*
- * The first 12 mailboxes are used as a reception FIFO. The
- * last mailbox is configured with overwrite option. The
- * overwrite flag indicates a FIFO overflow.
+ * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+ * mailbox is disabled. The next 11 mailboxes are used as a
+ * reception FIFO. The last mailbox is configured with
+ * overwrite option. The overwrite flag indicates a FIFO
+ * overflow.
*/
+ reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+ for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+ set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+ at91_write(priv, AT91_MID(i), reg_mid);
+ at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
+ }
+
for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
/* Reset tx and rx helper pointers */
- priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+ priv->tx_next = priv->tx_echo = 0;
+ priv->rx_next = AT91_MB_RX_FIRST;
}
static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
-
- if (cf->can_id & CAN_EFF_FLAG)
- reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
- else
- reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+ reg_mid = at91_can_id_to_reg_mid(cf->can_id);
reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
(cf->can_dlc << 16) | AT91_MCR_MTCR;
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* Theory of Operation:
*
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. we split
+ * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
*
* Like it or not, but the chip always saves a received CAN message
* into the first free mailbox it finds (starting with the
* lowest). This makes it very difficult to read the messages in the
* right order from the chip. This is how we work around that problem:
*
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
* scheduled. We read the mailbox, but do _not_ reenable the mb (to
* receive another message).
*
* lower mbxs upper
- * ______^______ __^__
- * / \ / \
+ * ____^______ __^__
+ * / \ / \
* +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
* +-+-+-+-+-+-+-+-++-+-+-+-+
* 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
* 0 1 2 3 4 5 6 7 8 9 0 1 / box
+ * ^
+ * |
+ * \
+ * unused, due to chip bug
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
"order of incoming frames cannot be guaranteed\n");
again:
- for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
- mb < AT91_MB_RX_NUM && quota > 0;
+ for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+ mb < AT91_MB_RX_LAST + 1 && quota > 0;
reg_sr = at91_read(priv, AT91_SR),
- mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+ mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
at91_read_msg(dev, mb);
/* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
- quota > 0 && mb >= AT91_MB_RX_NUM) {
- priv->rx_next = 0;
+ quota > 0 && mb > AT91_MB_RX_LAST) {
+ priv->rx_next = AT91_MB_RX_FIRST;
goto again;
}
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+ if (priv->mb0_id & CAN_EFF_FLAG)
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+ else
+ return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct at91_priv *priv = netdev_priv(ndev);
+ unsigned long can_id;
+ ssize_t ret;
+ int err;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_UP) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ err = strict_strtoul(buf, 0, &can_id);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
+ if (can_id & CAN_EFF_FLAG)
+ can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ can_id &= CAN_SFF_MASK;
+
+ priv->mb0_id = can_id;
+ ret = count;
+
+ out:
+ rtnl_unlock();
+ return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+ at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+ &dev_attr_mb0_id.attr,
+ NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+ .attrs = at91_sysfs_attrs,
+};
+
static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
dev->netdev_ops = &at91_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
+ dev->sysfs_groups[0] = &at91_sysfs_attr_group;
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
+ priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
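
For illustration only (not part of the patch above): the new mb0_id attribute is attached to the network device, and its store handler refuses changes while the interface is up, so the value is set while the interface is down. A minimal userspace sketch, assuming the interface is named can0 so the attribute appears as /sys/class/net/can0/mb0_id:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* path assumed: netdev sysfs attribute of an interface named can0 */
		int fd = open("/sys/class/net/can0/mb0_id", O_WRONLY);

		if (fd < 0) {
			perror("open mb0_id");
			return 1;
		}
		/* give the disabled mailbox 0 an extended id instead of the 0x7ff default */
		if (write(fd, "0x80000042\n", 11) != 11)
			perror("write mb0_id");
		close(fd);
		return 0;
	}

The next time the mailboxes are set up (at interface start), at91_setup_mailboxes() programs the disabled mailbox 0 with this id.
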
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a739..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
ican3_sysfs_set_term);
static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ priv->wq = create_freezable_workqueue("mcp251x_wq");
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+ depends on CAN_DEV && (PPC || M68K)
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
static struct can_bittiming_const pch_can_bittiming_const = {
.name = KBUILD_MODNAME,
- .tseg1_min = 1,
+ .tseg1_min = 2,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
struct pch_can_priv *priv = netdev_priv(ndev);
unregister_candev(priv->ndev);
- pci_iounmap(pdev, priv->regs);
if (priv->use_msi)
pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
+ pci_iounmap(pdev, priv->regs);
free_candev(priv->ndev);
}
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
priv->use_msi = 0;
} else {
netdev_err(ndev, "PCH CAN opened with MSI\n");
+ pci_set_master(pdev);
priv->use_msi = 1;
}
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..5de46a9a77bb
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+ tristate "Softing Gmbh CAN generic support"
+ depends on CAN_DEV && HAS_IOMEM
+ ---help---
+ Support for CAN cards from Softing GmbH and some cards
+ from Vector GmbH.
+ Softing GmbH CAN cards come with 1 or 2 physical busses.
+ Those cards typically use dual-port RAM to communicate
+ with the host CPU. The interface is then identical for PCI
+ and PCMCIA cards. This driver operates on a platform device,
+ which has been created by the softing_cs or softing_pci driver.
+ Warning:
+ The API of the card does not allow fine-grained control per bus;
+ it controls the 2 busses on the card together.
+ As such, some actions (start/stop/busoff recovery) on one bus
+ must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+ tristate "Softing Gmbh CAN pcmcia cards"
+ depends on PCMCIA
+ depends on CAN_SOFTING
+ ---help---
+ Support for PCMCIA cards from Softing GmbH and some cards
+ from Vector GmbH.
+ You need firmware for these, which you can get at
+ http://developer.berlios.de/projects/socketcan/
+ This version of the driver is written against
+ firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
+ In order to use the card as a CAN device, you need the Softing
+ generic support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+ struct can_priv can; /* must be the first member! */
+ struct net_device *netdev;
+ struct softing *card;
+ struct {
+ int pending;
+ /* variables which hold the circular buffer */
+ int echo_put;
+ int echo_get;
+ } tx;
+ struct can_bittiming_const btr_const;
+ int index;
+ uint8_t output;
+ uint16_t chip;
+};
+#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+ const struct softing_platform_data *pdat;
+ struct platform_device *pdev;
+ struct net_device *net[2];
+ spinlock_t spin; /* protect this structure & DPRAM access */
+ ktime_t ts_ref;
+ ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+ struct {
+ /* indication of firmware status */
+ int up;
+ /* protection of the 'up' variable */
+ struct mutex lock;
+ } fw;
+ struct {
+ int nr;
+ int requested;
+ int svc_count;
+ unsigned int dpram_position;
+ } irq;
+ struct {
+ int pending;
+ int last_bus;
+ /*
+ * keep the bus that last tx'd a message,
+ * in order to let every netdev queue resume
+ */
+ } tx;
+ __iomem uint8_t *dpram;
+ unsigned long dpram_phys;
+ unsigned long dpram_size;
+ struct {
+ uint16_t fw_version, hw_version, license, serial;
+ uint16_t chip[2];
+ unsigned int freq; /* remote cpu's operating frequency */
+ } id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+ const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX 0x0000
+ #define DPRAM_RX_SIZE 32
+ #define DPRAM_RX_CNT 16
+#define DPRAM_RX_RD 0x0201 /* uint8_t */
+#define DPRAM_RX_WR 0x0205 /* uint8_t */
+#define DPRAM_RX_LOST 0x0207 /* uint8_t */
+
+#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
+#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
+#define DPRAM_FCT_HOST 0x032b /* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
+#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
+#define DPRAM_RESET 0x0341 /* uint16_t */
+#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
+#define DPRAM_RESET_TIME 0x034d /* uint16_t */
+#define DPRAM_TIME 0x0350 /* uint64_t */
+#define DPRAM_WR_START 0x0358 /* uint8_t */
+#define DPRAM_WR_END 0x0359 /* uint8_t */
+#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
+#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
+
+#define DPRAM_TX 0x0400 /* uint16_t */
+ #define DPRAM_TX_SIZE 16
+ #define DPRAM_TX_CNT 32
+#define DPRAM_TX_RD 0x0601 /* uint8_t */
+#define DPRAM_TX_WR 0x0605 /* uint8_t */
+
+#define DPRAM_COMMAND 0x07e0 /* uint16_t */
+#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
+#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
+#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
+
+#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
+
+#define TXMAX (DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE 0
+#define RES_OK 1
+#define RES_NOK 2
+#define RES_UNKNOWN 3
+/* DPRAM flags */
+#define CMD_TX 0x01
+#define CMD_ACK 0x02
+#define CMD_XTD 0x04
+#define CMD_RTR 0x08
+#define CMD_ERR 0x10
+#define CMD_BUS2 0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF 0x80
+#define SF_MASK_EPASSIVE 0x60
+
+/* bus states */
+#define STATE_BUSOFF 2
+#define STATE_EPASSIVE 1
+#define STATE_EACTIVE 0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..c11bb4de8630
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+ .name = "CANcard",
+ .manf = 0x0168, .prod = 0x001,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-NEC",
+ .manf = 0x0168, .prod = 0x002,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-SJA",
+ .manf = 0x0168, .prod = 0x004,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-2",
+ .manf = 0x0168, .prod = 0x005,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "Vector-CANcard",
+ .manf = 0x0168, .prod = 0x081,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-SJA",
+ .manf = 0x0168, .prod = 0x084,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-2",
+ .manf = 0x0168, .prod = 0x085,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "EDICcard-NEC",
+ .manf = 0x0168, .prod = 0x102,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "EDICcard-2",
+ .manf = 0x0168, .prod = 0x105,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ 0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+ const struct softing_platform_data *lp;
+
+ for (lp = softingcs_platform_data; lp->manf; ++lp) {
+ if ((lp->manf == manf) && (lp->prod == prod))
+ return lp;
+ }
+ return NULL;
+}
+
+/*
+ * platformdata callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+ return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+ return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+ void *priv_data)
+{
+ struct softing_platform_data *pdat = priv_data;
+ struct resource *pres;
+ int memspeed = 0;
+
+ WARN_ON(!pdat);
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (resource_size(pres) < 0x1000)
+ return -ERANGE;
+
+ pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ if (pdat->generation < 2) {
+ pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+ memspeed = 3;
+ } else {
+ pres->flags |= WIN_DATA_WIDTH_16;
+ }
+ return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+ struct platform_device *pdev = pcmcia->priv;
+
+ /* free bits */
+ platform_device_unregister(pdev);
+ /* release pcmcia stuff */
+ pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+ int ret;
+ struct platform_device *pdev;
+ const struct softing_platform_data *pdat;
+ struct resource *pres;
+ struct dev {
+ struct platform_device pdev;
+ struct resource res[2];
+ } *dev;
+
+ /* find matching platform_data */
+ pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+ if (!pdat)
+ return -ENOTTY;
+
+ /* setup pcmcia device */
+ pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+ CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+ ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+ if (ret)
+ goto pcmcia_failed;
+
+ ret = pcmcia_enable_device(pcmcia);
+ if (ret < 0)
+ goto pcmcia_failed;
+
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (!pres) {
+ ret = -EBADF;
+ goto pcmcia_bad;
+ }
+
+ /* create softing platform device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto mem_failed;
+ }
+ dev->pdev.resource = dev->res;
+ dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+ dev->pdev.dev.release = softingcs_pdev_release;
+
+ pdev = &dev->pdev;
+ pdev->dev.platform_data = (void *)pdat;
+ pdev->dev.parent = &pcmcia->dev;
+ pcmcia->priv = pdev;
+
+ /* platform device resources */
+ pdev->resource[0].flags = IORESOURCE_MEM;
+ pdev->resource[0].start = pres->start;
+ pdev->resource[0].end = pres->end;
+
+ pdev->resource[1].flags = IORESOURCE_IRQ;
+ pdev->resource[1].start = pcmcia->irq;
+ pdev->resource[1].end = pdev->resource[1].start;
+
+ /* platform device setup */
+ spin_lock(&softingcs_index_lock);
+ pdev->id = softingcs_index++;
+ spin_unlock(&softingcs_index_lock);
+ pdev->name = "softing";
+ dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+ ret = platform_device_register(pdev);
+ if (ret < 0)
+ goto platform_failed;
+
+ dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+ return 0;
+
+platform_failed:
+ kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+ pcmcia_disable_device(pcmcia);
+ pcmcia->priv = NULL;
+ return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+ /* softing */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+ /* vector, manufacturer? */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+ /* EDIC */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+ .owner = THIS_MODULE,
+ .name = "softingcs",
+ .id_table = softingcs_ids,
+ .probe = softingcs_probe,
+ .remove = __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+ spin_lock_init(&softingcs_index_lock);
+ return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+ pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+ ", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
+ iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+ iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+ iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 1 * HZ;
+ /* wait for card */
+ do {
+ /* DPRAM_FCT_HOST is _not_ aligned */
+ ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+ (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ /* read return-value now */
+ return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+ if ((ret != vector) || time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (1);
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+ int ret;
+
+ ret = _softing_fct_cmd(card, cmd, 0, msg);
+ if (ret > 0) {
+ dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+ ret = -EIO;
+ }
+ return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
+ iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+ iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 3 * HZ;
+ /* wait for card */
+ do {
+ ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ return 0;
+ if (time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (!signal_pending(current));
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+ uint16_t *plen, const uint8_t **pdat)
+{
+ uint16_t checksum[2];
+ const uint8_t *mem;
+ const uint8_t *end;
+
+ /*
+ * firmware records are a binary, unaligned stream composed of:
+ * uint16_t type;
+ * uint32_t addr;
+ * uint16_t len;
+ * uint8_t dat[len];
+ * uint16_t checksum;
+ * all values in little endian.
+ * We could define a struct for this, with __attribute__((packed)),
+ * but would that solve the alignment in _all_ cases (e.g. the
+ * struct itself may sit at an odd address)?
+ *
+ * I chose to use leXX_to_cpup() since this solves both
+ * endianness & alignment.
+ */
+ mem = *pmem;
+ *ptype = le16_to_cpup((void *)&mem[0]);
+ *paddr = le32_to_cpup((void *)&mem[2]);
+ *plen = le16_to_cpup((void *)&mem[6]);
+ *pdat = &mem[8];
+ /* verify checksum */
+ end = &mem[8 + *plen];
+ checksum[0] = le16_to_cpup((void *)end);
+ for (checksum[1] = 0; mem < end; ++mem)
+ checksum[1] += *mem;
+ if (checksum[0] != checksum[1])
+ return -EINVAL;
+ /* increment */
+ *pmem += 10 + *plen;
+ return 0;
+}
+
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *dpram, unsigned int size, int offset)
+{
+ const struct firmware *fw;
+ int ret;
+ const uint8_t *mem, *end, *dat;
+ uint16_t type, len;
+ uint32_t addr;
+ uint8_t *buf = NULL;
+ int buflen = 0;
+ int8_t type_end = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret < 0)
+ return ret;
+ dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+ ", offset %c0x%04x\n",
+ card->pdat->name, file, (unsigned int)fw->size,
+ (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type != 0xffff)
+ goto failed;
+ if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type == 3) {
+ /* start address, not used here */
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if ((addr + len + offset) > size)
+ goto failed;
+ memcpy_toio(&dpram[addr + offset], dat, len);
+ /* be sure to flush caches from IO space */
+ mb();
+ if (len > buflen) {
+ /* align buflen */
+ buflen = (len + (1024-1)) & ~(1024-1);
+ buf = krealloc(buf, buflen, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+ }
+ /* verify record data */
+ memcpy_fromio(buf, &dpram[addr + offset], len);
+ if (memcmp(buf, dat, len)) {
+ /* is not ok */
+ dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end)
+ /* no end record seen */
+ goto failed;
+ ret = 0;
+failed:
+ kfree(buf);
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+ const struct firmware *fw;
+ const uint8_t *mem, *end, *dat;
+ int ret, j;
+ uint16_t type, len;
+ uint32_t addr, start_addr = 0;
+ unsigned int sum, rx_sum;
+ int8_t type_end = 0, type_entrypoint = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret) {
+ dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+ file, ret);
+ return ret;
+ }
+ dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+ file, (unsigned long)fw->size);
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+ ret = -EINVAL;
+ if (type != 0xffff) {
+ dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+ type);
+ goto failed;
+ }
+ if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+ dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+ len, dat);
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+
+ if (type == 3) {
+ /* start address */
+ start_addr = addr;
+ type_entrypoint = 1;
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ dev_alert(&card->pdev->dev,
+ "unknown record type 0x%04x\n", type);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* regular data */
+ for (sum = 0, j = 0; j < len; ++j)
+ sum += dat[j];
+ /* work in 16bit (target) */
+ sum &= 0xffff;
+
+ memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+ iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+ &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+ iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+ ret = softing_bootloader_command(card, 1, "loading app.");
+ if (ret < 0)
+ goto failed;
+ /* verify checksum */
+ rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+ if (rx_sum != sum) {
+ dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+ ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end || !type_entrypoint)
+ goto failed;
+ /* start application in card */
+ iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+ ret = softing_bootloader_command(card, 3, "start app.");
+ if (ret < 0)
+ goto failed;
+ ret = 0;
+failed:
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+ int ret;
+
+ do {
+ /* reset chip */
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+ iowrite8(1, &card->dpram[DPRAM_RESET]);
+ iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+ ret = softing_fct_cmd(card, 0, "reset_can");
+ if (!ret)
+ break;
+ if (signal_pending(current))
+ /* don't wait any longer */
+ break;
+ } while (1);
+ card->tx.pending = 0;
+ return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+ int ret;
+ /* sync */
+ ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+ if (ret < 0)
+ goto failed;
+
+ ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_reset_chip(card);
+ if (ret < 0)
+ goto failed;
+ /* get_serial */
+ ret = softing_fct_cmd(card, 43, "get_serial_number");
+ if (ret < 0)
+ goto failed;
+ card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+ /* get_version */
+ ret = softing_fct_cmd(card, 12, "get_version");
+ if (ret < 0)
+ goto failed;
+ card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+ card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+ card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+ card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+ card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+ return 0;
+failed:
+ return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+ uint64_t ovf;
+
+ card->ts_ref = ktime_get();
+
+ /* 16MHz is the reference */
+ ovf = 0x100000000ULL * 16;
+ do_div(ovf, card->pdat->freq ?: 16);
+
+ card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+ uint64_t rawl;
+ ktime_t now, real_offset;
+ ktime_t target;
+ ktime_t tmp;
+
+ now = ktime_get();
+ real_offset = ktime_sub(ktime_get_real(), now);
+
+ /* find nsec from card */
+ rawl = raw * 16;
+ do_div(rawl, card->pdat->freq ?: 16);
+ target = ktime_add_us(card->ts_ref, rawl);
+ /* test for overflows */
+ tmp = ktime_add(target, card->ts_overflow);
+ while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
+ card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+ target = tmp;
+ tmp = ktime_add(target, card->ts_overflow);
+ }
+ return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+ int ret;
+ struct softing *card;
+ struct softing_priv *priv;
+ struct net_device *netdev;
+ int bus_bitmask_start;
+ int j, error_reporting;
+ struct can_frame msg;
+ const struct can_bittiming *bt;
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+
+ if (!card->fw.up)
+ return -EIO;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return ret;
+
+ bus_bitmask_start = 0;
+ if (dev && up)
+ /* prepare to start this bus as well */
+ bus_bitmask_start |= (1 << priv->index);
+ /* bring netdevs down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+
+ if (dev != netdev)
+ netif_stop_queue(netdev);
+
+ if (netif_running(netdev)) {
+ if (dev != netdev)
+ bus_bitmask_start |= (1 << j);
+ priv->tx.pending = 0;
+ priv->tx.echo_put = 0;
+ priv->tx.echo_get = 0;
+ /*
+ * this bus may just have called open_candev(),
+ * so calling close_candev() right away looks odd,
+ * but we may also come here from busoff recovery,
+ * in which case the echo_skb _needs_ flushing too.
+ * Just be sure to call open_candev() again afterwards.
+ */
+ close_candev(netdev);
+ }
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+ card->tx.pending = 0;
+
+ softing_enable_irq(card, 0);
+ ret = softing_reset_chip(card);
+ if (ret)
+ goto failed;
+ if (!bus_bitmask_start)
+ /* no busses to be brought up */
+ goto card_done;
+
+ if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+ && (softing_error_reporting(card->net[0])
+ != softing_error_reporting(card->net[1]))) {
+ dev_alert(&card->pdev->dev,
+ "err_reporting flag differs for busses\n");
+ goto invalid;
+ }
+ error_reporting = 0;
+ if (bus_bitmask_start & 1) {
+ netdev = card->net[0];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip 1 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+ if (ret < 0)
+ goto failed;
+ /* set mode */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 3, "set_mode[0]");
+ if (ret < 0)
+ goto failed;
+ /* set filter */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 7, "set_filter[0]");
+ if (ret < 0)
+ goto failed;
+ /* set output control */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 5, "set_output[0]");
+ if (ret < 0)
+ goto failed;
+ }
+ if (bus_bitmask_start & 2) {
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip2 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+ if (ret < 0)
+ goto failed;
+ /* set mode2 */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 4, "set_mode[1]");
+ if (ret < 0)
+ goto failed;
+ /* set filter2 */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 8, "set_filter[1]");
+ if (ret < 0)
+ goto failed;
+ /* set output control2 */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 6, "set_output[1]");
+ if (ret < 0)
+ goto failed;
+ }
+ /* enable_error_frame */
+ /*
+ * Error reporting is switched off at the moment since
+ * receiving error frames is not yet 100% verified.
+ * This should be enabled sooner or later.
+ *
+ if (error_reporting) {
+ ret = softing_fct_cmd(card, 51, "enable_error_frame");
+ if (ret < 0)
+ goto failed;
+ }
+ */
+ /* initialize interface */
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+ ret = softing_fct_cmd(card, 17, "initialize_interface");
+ if (ret < 0)
+ goto failed;
+ /* enable_fifo */
+ ret = softing_fct_cmd(card, 36, "enable_fifo");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack */
+ ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack2 */
+ ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+ if (ret < 0)
+ goto failed;
+ /* start_chip */
+ ret = softing_fct_cmd(card, 11, "start_chip");
+ if (ret < 0)
+ goto failed;
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+ if (card->pdat->generation < 2) {
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ /* flush the DPRAM caches */
+ wmb();
+ }
+
+ softing_initialize_timestamp(card);
+
+ /*
+ * do socketcan notifications/status changes
+ * from here, no errors should occur, or the failed: part
+ * must be reviewed
+ */
+ memset(&msg, 0, sizeof(msg));
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+ msg.can_dlc = CAN_ERR_DLC;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!(bus_bitmask_start & (1 << j)))
+ continue;
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ open_candev(netdev);
+ if (dev != netdev) {
+ /* notify other busses on the restart */
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ ++priv->can.can_stats.restarts;
+ }
+ netif_wake_queue(netdev);
+ }
+
+ /* enable interrupts */
+ ret = softing_enable_irq(card, 1);
+ if (ret)
+ goto failed;
+card_done:
+ mutex_unlock(&card->fw.lock);
+ return 0;
+invalid:
+ ret = -EINVAL;
+failed:
+ softing_enable_irq(card, 0);
+ softing_reset_chip(card);
+ mutex_unlock(&card->fw.lock);
+ /* bring all other interfaces down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ dev_close(netdev);
+ }
+ return ret;
+}
+
+int softing_default_output(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+ struct softing *card = priv->card;
+
+ switch (priv->chip) {
+ case 1000:
+ return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+ case 5:
+ return 0x60;
+ default:
+ return 0x40;
+ }
+}
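
For illustration only (not part of the patch above): a self-contained sketch of one firmware record in the layout that fw_parse() expects, i.e. little-endian type, addr and len fields, the payload, and a 16-bit checksum equal to the byte sum of everything before it. The values used here are made up.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* type 0 (data), addr 0x000010, len 2, payload 0xde 0xad; all little endian */
		uint8_t rec[12] = {
			0x00, 0x00,			/* uint16_t type */
			0x10, 0x00, 0x00, 0x00,		/* uint32_t addr */
			0x02, 0x00,			/* uint16_t len */
			0xde, 0xad,			/* uint8_t dat[len] */
		};
		uint16_t sum = 0;
		int i;

		for (i = 0; i < 10; i++)		/* checksum covers header + payload */
			sum += rec[i];
		rec[10] = sum & 0xff;			/* uint16_t checksum, little endian */
		rec[11] = sum >> 8;
		printf("record checksum: 0x%04x\n", sum);
		return 0;
	}

fw_parse() walks such records back to back, which is why it advances the cursor by 10 + len bytes per record.
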
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..aeea9f9ff6e8
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
+
+/*
+ * test whether a specific CAN netdev
+ * is online (i.e. up and running, not sleeping, not busoff)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+ struct can_priv *can = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+ return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+/* trigger the tx queue-ing */
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct softing_priv *priv = netdev_priv(dev);
+ struct softing *card = priv->card;
+ int ret;
+ uint8_t *ptr;
+ uint8_t fifo_wr, fifo_rd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ uint8_t buf[DPRAM_TX_SIZE];
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&card->spin);
+
+ ret = NETDEV_TX_BUSY;
+ if (!card->fw.up ||
+ (card->tx.pending >= TXMAX) ||
+ (priv->tx.pending >= TX_ECHO_SKB_MAX))
+ goto xmit_done;
+ fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+ fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+ if (fifo_wr == fifo_rd)
+ /* fifo full */
+ goto xmit_done;
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ *ptr = CMD_TX;
+ if (cf->can_id & CAN_RTR_FLAG)
+ *ptr |= CMD_RTR;
+ if (cf->can_id & CAN_EFF_FLAG)
+ *ptr |= CMD_XTD;
+ if (priv->index)
+ *ptr |= CMD_BUS2;
+ ++ptr;
+ *ptr++ = cf->can_dlc;
+ *ptr++ = (cf->can_id >> 0);
+ *ptr++ = (cf->can_id >> 8);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ *ptr++ = (cf->can_id >> 16);
+ *ptr++ = (cf->can_id >> 24);
+ } else {
+ /* increment 1, not 2 as you might think */
+ ptr += 1;
+ }
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ memcpy(ptr, &cf->data[0], cf->can_dlc);
+ memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+ buf, DPRAM_TX_SIZE);
+ if (++fifo_wr >= DPRAM_TX_CNT)
+ fifo_wr = 0;
+ iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+ card->tx.last_bus = priv->index;
+ ++card->tx.pending;
+ ++priv->tx.pending;
+ can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ ++priv->tx.echo_put;
+ if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_put = 0;
+ /* can_put_echo_skb() saves the skb, safe to return TX_OK */
+ ret = NETDEV_TX_OK;
+xmit_done:
+ spin_unlock(&card->spin);
+ if (card->tx.pending >= TXMAX) {
+ int j;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->net[j])
+ netif_stop_queue(card->net[j]);
+ }
+ }
+ if (ret != NETDEV_TX_OK)
+ netif_stop_queue(dev);
+
+ return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+ memcpy(cf, msg, sizeof(*msg));
+ skb->tstamp = ktime;
+ return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue, and process
+ */
+static int softing_handle_1(struct softing *card)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ ktime_t ktime;
+ struct can_frame msg;
+ int cnt = 0, lost_msg;
+ uint8_t fifo_rd, fifo_wr, cmd;
+ uint8_t *ptr;
+ uint32_t tmp_u32;
+ uint8_t buf[DPRAM_RX_SIZE];
+
+ memset(&msg, 0, sizeof(msg));
+ /* test for lost msgs */
+ lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+ if (lost_msg) {
+ int j;
+ /* reset condition */
+ iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+ /* prepare msg */
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ msg.can_dlc = CAN_ERR_DLC;
+ msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ /*
+ * we don't know which bus this applies to, so notify all
+ * busses, but only service busses that are online
+ */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ if (!canif_is_active(netdev))
+ /* a dead bus has no overflows */
+ continue;
+ ++netdev->stats.rx_over_errors;
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ }
+ /* prepare for other use */
+ memset(&msg, 0, sizeof(msg));
+ ++cnt;
+ }
+
+ fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+ fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
+ if (++fifo_rd >= DPRAM_RX_CNT)
+ fifo_rd = 0;
+ if (fifo_wr == fifo_rd)
+ return cnt;
+
+ memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+ DPRAM_RX_SIZE);
+ mb();
+ /* trigger dual port RAM */
+ iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+ ptr = buf;
+ cmd = *ptr++;
+ if (cmd == 0xff)
+ /* not quite useful, probably the card has gone away */
+ return 0;
+ netdev = card->net[0];
+ if (cmd & CMD_BUS2)
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+
+ if (cmd & CMD_ERR) {
+ uint8_t can_state, state;
+
+ state = *ptr++;
+
+ msg.can_id = CAN_ERR_FLAG;
+ msg.can_dlc = CAN_ERR_DLC;
+
+ if (state & SF_MASK_BUSOFF) {
+ can_state = CAN_STATE_BUS_OFF;
+ msg.can_id |= CAN_ERR_BUSOFF;
+ state = STATE_BUSOFF;
+ } else if (state & SF_MASK_EPASSIVE) {
+ can_state = CAN_STATE_ERROR_PASSIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+ state = STATE_EPASSIVE;
+ } else {
+ can_state = CAN_STATE_ERROR_ACTIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ state = STATE_EACTIVE;
+ }
+ /* update DPRAM */
+ iowrite8(state, &card->dpram[priv->index ?
+ DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+
+ ++netdev->stats.rx_errors;
+ /* update internal status */
+ if (can_state != priv->can.state) {
+ priv->can.state = can_state;
+ if (can_state == CAN_STATE_ERROR_PASSIVE)
+ ++priv->can.can_stats.error_passive;
+ else if (can_state == CAN_STATE_BUS_OFF) {
+ /* this calls can_close_cleanup() */
+ can_bus_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ /* trigger socketcan */
+ softing_netdev_rx(netdev, &msg, ktime);
+ }
+
+ } else {
+ if (cmd & CMD_RTR)
+ msg.can_id |= CAN_RTR_FLAG;
+ msg.can_dlc = get_can_dlc(*ptr++);
+ if (cmd & CMD_XTD) {
+ msg.can_id |= CAN_EFF_FLAG;
+ msg.can_id |= le32_to_cpup((void *)ptr);
+ ptr += 4;
+ } else {
+ msg.can_id |= le16_to_cpup((void *)ptr);
+ ptr += 2;
+ }
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ memcpy(&msg.data[0], ptr, 8);
+ ptr += 8;
+ /* update socket */
+ if (cmd & CMD_ACK) {
+ /* acknowledge, was tx msg */
+ struct sk_buff *skb;
+ skb = priv->can.echo_skb[priv->tx.echo_get];
+ if (skb)
+ skb->tstamp = ktime;
+ can_get_echo_skb(netdev, priv->tx.echo_get);
+ ++priv->tx.echo_get;
+ if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_get = 0;
+ if (priv->tx.pending)
+ --priv->tx.pending;
+ if (card->tx.pending)
+ --card->tx.pending;
+ ++netdev->stats.tx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.tx_bytes += msg.can_dlc;
+ } else {
+ int ret;
+
+ ret = softing_netdev_rx(netdev, &msg, ktime);
+ if (ret == NET_RX_SUCCESS) {
+ ++netdev->stats.rx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.rx_bytes += msg.can_dlc;
+ } else {
+ ++netdev->stats.rx_dropped;
+ }
+ }
+ }
+ ++cnt;
+ return cnt;
+}
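
softing_handle_1() above implies the record layout used in the RX FIFO for data frames: a command byte, a dlc byte, a 16-bit or 32-bit little-endian CAN id depending on CMD_XTD, a 32-bit little-endian timestamp, then eight data bytes. Below is a self-contained sketch of decoding such a record with explicit little-endian assembly; XTD_FLAG is a placeholder value, not the driver's CMD_XTD.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XTD_FLAG 0x01	/* placeholder for CMD_XTD */

struct decoded {
	uint32_t id;
	uint32_t stamp;
	uint8_t dlc;
	uint8_t data[8];
};

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

static void decode(const uint8_t *buf, struct decoded *out)
{
	const uint8_t *p = buf;
	uint8_t cmd = *p++;

	out->dlc = *p++;
	if (cmd & XTD_FLAG) {
		out->id = get_le32(p);	/* 29-bit extended id */
		p += 4;
	} else {
		out->id = get_le16(p);	/* 11-bit standard id */
		p += 2;
	}
	out->stamp = get_le32(p);
	p += 4;
	memcpy(out->data, p, 8);
}

int main(void)
{
	uint8_t rec[16] = { 0x00, 2, 0x23, 0x01, 0x10, 0x00, 0x00, 0x00, 0xde, 0xad };
	struct decoded d;

	decode(rec, &d);
	printf("id=0x%x dlc=%u stamp=%u\n", d.id, d.dlc, d.stamp);
	return 0;
}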
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ int j, offset, work_done;
+
+ work_done = 0;
+ spin_lock_bh(&card->spin);
+ while (softing_handle_1(card) > 0) {
+ ++card->irq.svc_count;
+ ++work_done;
+ }
+ spin_unlock_bh(&card->spin);
+ /* resume tx queues */
+ offset = card->tx.last_bus;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->tx.pending >= TXMAX)
+ break;
+ netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ if (!canif_is_active(netdev))
+ /* it makes no sense to wake dead buses */
+ continue;
+ if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+ continue;
+ ++work_done;
+ netif_wake_queue(netdev);
+ }
+ return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+ return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+ int ret;
+
+ /* check or determine and set bittime */
+ ret = open_candev(ndev);
+ if (!ret)
+ ret = softing_startstop(ndev, 1);
+ return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+ int ret;
+
+ netif_stop_queue(ndev);
+
+ /* softing cycle does close_candev() */
+ ret = softing_startstop(ndev, 0);
+ return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* softing_startstop does close_candev() */
+ ret = softing_startstop(ndev, 1);
+ return ret;
+ case CAN_MODE_STOP:
+ case CAN_MODE_SLEEP:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * Softing device management helpers
+ */
+int softing_enable_irq(struct softing *card, int enable)
+{
+ int ret;
+
+ if (!card->irq.nr) {
+ return 0;
+ } else if (card->irq.requested && !enable) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ } else if (!card->irq.requested && enable) {
+ ret = request_threaded_irq(card->irq.nr,
+ (card->pdat->generation >= 2) ?
+ softing_irq_v2 : softing_irq_v1,
+ softing_irq_thread, IRQF_SHARED,
+ dev_name(&card->pdev->dev), card);
+ if (ret) {
+ dev_alert(&card->pdev->dev,
+ "request_threaded_irq(%u) failed\n",
+ card->irq.nr);
+ return ret;
+ }
+ card->irq.requested = 1;
+ }
+ return 0;
+}
+
+static void softing_card_shutdown(struct softing *card)
+{
+ int fw_up = 0;
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ /* return -ERESTARTSYS */;
+ fw_up = card->fw.up;
+ card->fw.up = 0;
+
+ if (card->irq.requested && card->irq.nr) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ }
+ if (fw_up) {
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ }
+ mutex_unlock(&card->fw.lock);
+}
+
+static __devinit int softing_card_boot(struct softing *card)
+{
+ int ret, j;
+ static const uint8_t stream[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+ unsigned char back[sizeof(stream)];
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ return -ERESTARTSYS;
+ if (card->fw.up) {
+ mutex_unlock(&card->fw.lock);
+ return 0;
+ }
+ /* reset board */
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 1);
+ /* boot card */
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+ j += sizeof(stream)) {
+
+ memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+ /* flush IO cache */
+ mb();
+ memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+ if (!memcmp(back, stream, sizeof(stream)))
+ continue;
+ /* memory is not equal */
+ dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+ ret = -EIO;
+ goto failed;
+ }
+ wmb();
+ /* load boot firmware */
+ ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->boot.offs - card->pdat->boot.addr);
+ if (ret < 0)
+ goto failed;
+ /* load loader firmware */
+ ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->load.offs - card->pdat->load.addr);
+ if (ret < 0)
+ goto failed;
+
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 0);
+ softing_clr_reset_dpram(card);
+ ret = softing_bootloader_command(card, 0, "card boot");
+ if (ret < 0)
+ goto failed;
+ ret = softing_load_app_fw(card->pdat->app.fw, card);
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_chip_poweron(card);
+ if (ret < 0)
+ goto failed;
+
+ card->fw.up = 1;
+ mutex_unlock(&card->fw.lock);
+ return 0;
+failed:
+ card->fw.up = 0;
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ mutex_unlock(&card->fw.lock);
+ return ret;
+}
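
softing_card_boot() validates the DPRAM window by walking it in 16-byte steps, writing a known pattern and reading it back before any firmware is loaded. The same check reduced to ordinary memory, so it can be compiled and run standalone, looks like this; plain memcpy stands in for memcpy_toio()/memcpy_fromio() because there is no real DPRAM window here.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int ram_check(uint8_t *win, size_t size)
{
	static const uint8_t stream[] = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	};
	uint8_t back[sizeof(stream)];
	size_t j;

	for (j = 0; j + sizeof(stream) < size; j += sizeof(stream)) {
		memcpy(&win[j], stream, sizeof(stream));	/* memcpy_toio() */
		memcpy(back, &win[j], sizeof(stream));		/* memcpy_fromio() */
		if (memcmp(back, stream, sizeof(stream))) {
			fprintf(stderr, "ram check failed at 0x%04zx\n", j);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t *win = malloc(0x1000);

	if (!win)
		return 1;
	printf("ram check: %s\n", ram_check(win, 0x1000) ? "failed" : "ok");
	free(win);
	return 0;
}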
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+ struct softing *card = priv->card;
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ val &= 0xFF;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return -ERESTARTSYS;
+ if (netif_running(ndev)) {
+ mutex_unlock(&card->fw.lock);
+ return -EBUSY;
+ }
+ priv->output = val;
+ mutex_unlock(&card->fw.lock);
+ return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+ &dev_attr_channel.attr,
+ &dev_attr_chip.attr,
+ &dev_attr_output.attr,
+ NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+ .name = NULL,
+ .attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+ .ndo_open = softing_netdev_open,
+ .ndo_stop = softing_netdev_stop,
+ .ndo_start_xmit = softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+ .name = "softing",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4, /* overruled */
+ .brp_min = 1,
+ .brp_max = 32, /* overruled */
+ .brp_inc = 1,
+};
+
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+ uint16_t chip_id)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+
+ netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+ if (!netdev) {
+ dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+ return NULL;
+ }
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->card = card;
+ memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+ priv->btr_const.brp_max = card->pdat->max_brp;
+ priv->btr_const.sjw_max = card->pdat->max_sjw;
+ priv->can.bittiming_const = &priv->btr_const;
+ priv->can.clock.freq = 8000000;
+ priv->chip = chip_id;
+ priv->output = softing_default_output(netdev);
+ SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+ netdev->flags |= IFF_ECHO;
+ netdev->netdev_ops = &softing_netdev_ops;
+ priv->can.do_set_mode = softing_candev_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+ int ret;
+
+ netdev->sysfs_groups[0] = &netdev_sysfs_group;
+ ret = register_candev(netdev);
+ if (ret) {
+ dev_alert(&netdev->dev, "register failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+ unregister_candev(netdev);
+ free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_hardware.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_license.attr,
+ &dev_attr_frequency.attr,
+ &dev_attr_txpending.attr,
+ NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+ .name = NULL,
+ .attrs = softing_pdev_attrs,
+};
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+ struct softing *card = platform_get_drvdata(pdev);
+ int j;
+
+ /* first, disable card */
+ softing_card_shutdown(card);
+
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ card->net[j] = NULL;
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+ iounmap(card->dpram);
+ kfree(card);
+ return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+ const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ struct softing *card;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ struct resource *pres;
+ int ret;
+ int j;
+
+ if (!pdat) {
+ dev_warn(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ if (pdat->nbus > ARRAY_SIZE(card->net)) {
+ dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+ return -EINVAL;
+ }
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+ card->pdat = pdat;
+ card->pdev = pdev;
+ platform_set_drvdata(pdev, card);
+ mutex_init(&card->fw.lock);
+ spin_lock_init(&card->spin);
+
+ ret = -EINVAL;
+ pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres)
+ goto platform_resource_failed;
+ card->dpram_phys = pres->start;
+ card->dpram_size = pres->end - pres->start + 1;
+ card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ if (!card->dpram) {
+ dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+ goto ioremap_failed;
+ }
+
+ pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (pres)
+ card->irq.nr = pres->start;
+
+ /* reset card */
+ ret = softing_card_boot(card);
+ if (ret < 0) {
+ dev_alert(&pdev->dev, "failed to boot\n");
+ goto boot_failed;
+ }
+
+ /* only now the chips are known */
+ card->id.freq = card->pdat->freq;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+ if (ret < 0) {
+ dev_alert(&card->pdev->dev, "sysfs failed\n");
+ goto sysfs_failed;
+ }
+
+ ret = -ENOMEM;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ card->net[j] = netdev =
+ softing_netdev_create(card, card->id.chip[j]);
+ if (!netdev) {
+ dev_alert(&pdev->dev, "failed to make can[%i]\n", j);
+ goto netdev_failed;
+ }
+ priv = netdev_priv(card->net[j]);
+ priv->index = j;
+ ret = softing_netdev_register(netdev);
+ if (ret) {
+ free_candev(netdev);
+ card->net[j] = NULL;
+ dev_alert(&card->pdev->dev,
+ "failed to register can[%i]\n", j);
+ goto netdev_failed;
+ }
+ }
+ dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+ return 0;
+
+netdev_failed:
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+ softing_card_shutdown(card);
+boot_failed:
+ iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+ kfree(card);
+ return ret;
+}
+
+static struct platform_driver softing_driver = {
+ .driver = {
+ .name = "softing",
+ .owner = THIS_MODULE,
+ },
+ .probe = softing_pdev_probe,
+ .remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+ return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+ platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+ unsigned int manf;
+ unsigned int prod;
+ /*
+ * generation
+ * 1st with NEC or SJA1000
+ * 8bit, exclusive interrupt, ...
+ * 2nd only SJA1000
+ * 16bit, shared interrupt
+ */
+ int generation;
+ int nbus; /* # buses on device */
+ unsigned int freq; /* operating frequency in Hz */
+ unsigned int max_brp;
+ unsigned int max_sjw;
+ unsigned long dpram_size;
+ const char *name;
+ struct {
+ unsigned long offs;
+ unsigned long addr;
+ const char *fw;
+ } boot, load, app;
+ /*
+ * reset() function
+ * bring pdev in or out of reset, depending on value
+ */
+ int (*reset)(struct platform_device *pdev, int value);
+ int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
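
For illustration, this is roughly how a board file might fill the structure above before handing it to the platform device as platform data. Every numeric value and firmware file name below is a placeholder chosen for the example; only the field names and the fw_dir prefix come from this header.

/* hypothetical board-support data for one Softing card */
#include "softing_platform.h"

static int example_reset(struct platform_device *pdev, int value)
{
	/* toggle a board-specific reset line here; no-op in this sketch */
	return 0;
}

static int example_enable_irq(struct platform_device *pdev, int value)
{
	/* route or gate the card interrupt here; no-op in this sketch */
	return 0;
}

static const struct softing_platform_data example_softing_pdata = {
	.manf = 0x0168,			/* placeholder manufacturer id */
	.prod = 0x0001,			/* placeholder product id */
	.generation = 2,		/* 2nd generation: SJA1000, 16bit, shared irq */
	.nbus = 2,
	.freq = 16000000,		/* placeholder operating frequency */
	.max_brp = 64,
	.max_sjw = 4,
	.dpram_size = 0x1000,
	.name = "example softing card",
	.boot = { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "bcard.bin", },
	.load = { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "ldcard.bin", },
	.app  = { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "cancard.bin", },
	.reset = example_reset,
	.enable_irq = example_enable_irq,
};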
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2cbbf8..3437613f0454 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
int mac_off = 0;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
const unsigned char *addr;
#endif
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
if (found & VPD_FOUND_MAC)
goto done;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
addr = of_get_property(cp->of_node, "local-mac-address", NULL);
if (addr != NULL) {
memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
cp->of_node = pci_device_to_OF_node(pdev);
#endif
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 63ebf76d2390..8a43c7e19701 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -556,7 +556,7 @@ struct chelsio_vpd_t {
#define EEPROM_MAX_POLL 4
/*
- * Read SEEPROM. A zero is written to the flag register when the addres is
+ * Read SEEPROM. A zero is written to the flag register when the address is
* written to the Control register. The hardware device will set the flag to a
* one when 4B have been transferred to the Data register.
*/
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566f..302be4aa69d6 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in big endian format. */
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
- *page_table = (u32) dma->pg_map_arr[i];
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in little endian format. */
- *page_table = dma->pg_map_arr[i] & 0xffffffff;
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
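
Both hunks above replace raw u32 stores with cpu_to_le32(), so each half of the 64-bit DMA address lands in memory with a fixed byte order instead of whatever the host happens to use. A portable sketch of the same idea, with put_le32() standing in for the cpu_to_le32()-and-store step:

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint64_t dma_addr = 0x0000000123456789ULL;
	uint8_t entry[8];

	/* high half first, low half second -- same order as the driver */
	put_le32(&entry[0], (uint32_t)(dma_addr >> 32));
	put_le32(&entry[4], (uint32_t)(dma_addr & 0xffffffff));

	for (int i = 0; i < 8; i++)
		printf("%02x ", entry[i]);
	printf("\n");	/* 01 00 00 00 89 67 45 23 on any host */
	return 0;
}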
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading other fields */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading the KCQ */
+ rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
break;
last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
}
return last_status;
}
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx;
+ u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
- CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+ if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
- } else {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+
+ break;
}
}
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index a8766fb2f9ab..e13b7fe9d082 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -318,7 +318,7 @@ static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
/*
* Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+ * be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ec8579a0a808..d55db6b38e7b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -607,7 +607,7 @@ struct t3_vpd {
*
* Read a 32-bit word from a location in VPD EEPROM using the card's PCI
* VPD ROM capability. A zero is written to the flag bit when the
- * addres is written to the control register. The hardware device will
+ * address is written to the control register. The hardware device will
* set the flag to 1 when 4 bytes have been read into the data register.
*/
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3f..ec35d458102c 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2710,6 +2710,8 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ netif_carrier_off(dev);
+
if (!(adapter->flags & FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
@@ -3661,7 +3663,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
pi->port_id = i;
- netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f895750..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
netif_set_real_num_tx_queues(dev, pi->nqsets);
err = netif_set_real_num_rx_queues(dev, pi->nqsets);
if (err)
- return err;
- set_bit(pi->port_id, &adapter->open_device_map);
+ goto err_unwind;
err = link_start(dev);
if (err)
- return err;
+ goto err_unwind;
+
netif_tx_start_all_queues(dev);
+ set_bit(pi->port_id, &adapter->open_device_map);
return 0;
+
+err_unwind:
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return err;
}
/*
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
*/
static int cxgb4vf_stop(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
pi->link_cfg.link_ok = 0;
clear_bit(pi->port_id, &adapter->open_device_map);
@@ -2035,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
{
int i;
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Debugfs support is best effort.
@@ -2056,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
*/
static void cleanup_debugfs(struct adapter *adapter)
{
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2484,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
/*
- * Vet our module parameters.
- */
- if (msi != MSI_MSIX && msi != MSI_MSI) {
- dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
- MSI_MSI);
- err = -EINVAL;
- goto err_out;
- }
-
- /*
* Print our driver banner the first time we're called to initialize a
* device.
*/
@@ -2706,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
/*
* Set up our debugfs entries.
*/
- if (cxgb4vf_debugfs_root) {
+ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (adapter->debugfs_root == NULL)
+ if (IS_ERR_OR_NULL(adapter->debugfs_root))
dev_warn(&pdev->dev, "could not create debugfs"
" directory");
else
@@ -2765,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
err_free_debugfs:
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2797,7 +2791,6 @@ err_release_regions:
err_disable_device:
pci_disable_device(pdev);
-err_out:
return err;
}
@@ -2835,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
/*
* Tear down our debugfs entries.
*/
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2869,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
}
/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+ struct adapter *adapter;
+ int pidx;
+
+ adapter = pci_get_drvdata(pdev);
+ if (!adapter)
+ return;
+
+ /*
+ * Disable all Virtual Interfaces. This will shut down the
+ * delivery of all ingress packets into the chip for these
+ * Virtual Interfaces.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev;
+ struct port_info *pi;
+
+ if (!test_bit(pidx, &adapter->registered_device_map))
+ continue;
+
+ netdev = adapter->port[pidx];
+ if (!netdev)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ }
+
+ /*
+ * Free up all Queues, which will prevent further DMA and
+ * interrupts and allow the various internal pathways to drain.
+ */
+ t4vf_free_sge_resources(adapter);
+}
+
+/*
* PCI Device registration data structures.
*/
#define CH_DEVICE(devid, idx) \
@@ -2901,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
.remove = __devexit_p(cxgb4vf_pci_remove),
+ .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
};
/*
@@ -2910,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
{
int ret;
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ printk(KERN_WARNING KBUILD_MODNAME
+ ": bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
+ return -EINVAL;
+ }
+
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4vf_debugfs_root)
+ if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
printk(KERN_WARNING KBUILD_MODNAME ": could not create"
" debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0)
+ if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
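
The debugfs checks above move from bare NULL tests to IS_ERR_OR_NULL() because debugfs_create_dir() may hand back an ERR_PTR-encoded error rather than NULL, for instance when debugfs is not built in. The following user-space sketch shows the ERR_PTR convention that lets a single IS_ERR_OR_NULL() test cover both cases; the constants mirror the kernel's, but this is only an illustration, not the kernel headers.

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	/* errors live in the last page of the address space */
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int obj = 0;
	void *ok = &obj;
	void *err = ERR_PTR(-19);	/* -ENODEV */
	void *none = NULL;

	printf("ok:   %d\n", IS_ERR_OR_NULL(ok));	/* 0 */
	printf("err:  %d (%ld)\n", IS_ERR_OR_NULL(err), PTR_ERR(err));
	printf("none: %d\n", IS_ERR_OR_NULL(none));	/* 1 */
	return 0;
}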
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78c8e3f..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/*
* Write the command array into the Mailbox Data register array and
* transfer ownership of the mailbox to the firmware.
+ *
+ * For the VFs, the Mailbox Data "registers" are actually backed by
+ * T4's "MA" interface rather than PL Registers (as is the case for
+ * the PFs). Because these are in different coherency domains, the
+ * write to the VF's PL-register-backed Mailbox Control can race in
+ * front of the writes to the MA-backed VF Mailbox Data "registers".
+ * So we need to do a read-back on at least one byte of the VF Mailbox
+ * Data registers before doing the write to the VF Mailbox Control
+ * register.
*/
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+ t4_read_reg(adapter, mbox_data); /* flush write */
+
t4_write_reg(adapter, mbox_ctl,
MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
@@ -160,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < 500; i += ms) {
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx];
if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..7018bfe408a4 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev))) {
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
dev_kfree_skb_any(skb);
return;
}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4fd..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
}
}
/* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
+ for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
}
writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
}
return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
}
return 0;
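
The depca hunks above replace the pattern lp->rx_old = (++lp->rx_old) & mask, which modifies the same variable twice in one expression without a sequence point and is what gcc reports as "operation may be undefined". A minimal example of the problem and the fixed form:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x7;
	unsigned int x = 7;

	/* x = (++x) & mask;    two unsequenced modifications of x;
	 *                      gcc -Wsequence-point warns about this */
	x = (x + 1) & mask;	/* well defined: wraps 7 -> 0 */
	printf("x = %u\n", x);
	return 0;
}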
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d900..461dd6f905f7 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Checksum mode */
dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
- /* GPIO0 on pre-activate PHY */
- iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- iow(db, DM9000_GPR, 0); /* Enable PHY */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
unsigned long flags;
/* Save previous register address */
- reg_save = readb(db->io_addr);
spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+ mdelay(1); /* delay needed by DM9000B */
+
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
-
if (mdiobus_register(bp->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
bp = netdev_priv(dev);
bp->dev = dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e697b74..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,16 +124,22 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
case M88E1000_I_PHY_ID:
case M88E1011_I_PHY_ID:
case M88E1111_I_PHY_ID:
+ case M88E1118_E_PHY_ID:
hw->phy_type = e1000_phy_m88;
break;
case IGP01E1000_I_PHY_ID:
if (hw->mac_type == e1000_82541 ||
hw->mac_type == e1000_82541_rev_2 ||
hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82547_rev_2) {
+ hw->mac_type == e1000_82547_rev_2)
hw->phy_type = e1000_phy_igp;
- break;
- }
+ break;
+ case RTL8211B_PHY_ID:
+ hw->phy_type = e1000_phy_8211;
+ break;
+ case RTL8201N_PHY_ID:
+ hw->phy_type = e1000_phy_8201;
+ break;
default:
/* Should never have loaded on this device */
hw->phy_type = e1000_phy_undefined;
@@ -318,6 +324,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82547GI:
hw->mac_type = e1000_82547_rev_2;
break;
+ case E1000_DEV_ID_INTEL_CE4100_GBE:
+ hw->mac_type = e1000_ce4100;
+ break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
@@ -372,6 +381,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
case e1000_82542_rev2_1:
hw->media_type = e1000_media_type_fiber;
break;
+ case e1000_ce4100:
+ hw->media_type = e1000_media_type_copper;
+ break;
default:
status = er32(STATUS);
if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +472,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Reset is performed on a shadow of the control register */
ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
+ case e1000_ce4100:
default:
ew32(CTRL, (ctrl | E1000_CTRL_RST));
break;
@@ -952,6 +965,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
}
/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* SW reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_aux;
+
+ switch (hw->phy_type) {
+ case e1000_phy_8211:
+ ret_val = e1000_copper_link_rtl_setup(hw);
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ case e1000_phy_8201:
+ /* Set RMII mode */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= E1000_CTL_AUX_RMII;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+
+ /* Disable the J/K bits required for receive */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= 0x4;
+ ctrl_aux &= ~0x2;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+ ret_val = e1000_copper_link_rtl_setup(hw);
+
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ default:
+ e_dbg("Error Resetting the PHY\n");
+ return E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
* e1000_copper_link_preconfig - early configuration for copper
* @hw: Struct containing variables accessed by shared code
*
@@ -1286,6 +1360,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /* IFE/RTL8201N PHY only supports 10/100 */
+ if (hw->phy_type == e1000_phy_8201)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
@@ -1341,7 +1419,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
s32 ret_val;
e_dbg("e1000_copper_link_postconfig");
- if (hw->mac_type >= e1000_82544) {
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1473,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
ret_val = e1000_copper_link_mgp_setup(hw);
if (ret_val)
return ret_val;
+ } else {
+ ret_val = gbe_dhg_phy_setup(hw);
+ if (ret_val) {
+ e_dbg("gbe_dhg_phy_setup failed!\n");
+ return ret_val;
+ }
}
if (hw->autoneg) {
@@ -1461,10 +1545,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val =
- e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
+ else if (hw->phy_type == e1000_phy_8201)
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1577,9 +1662,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
+ if (hw->phy_type == e1000_phy_8201) {
+ mii_1000t_ctrl_reg = 0;
+ } else {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
return E1000_SUCCESS;
}
@@ -1860,7 +1950,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1960,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
- /* Set up duplex in the Device Control and Transmit Control
- * registers depending on negotiated values.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
+ switch (hw->phy_type) {
+ case e1000_phy_8201:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
- if (phy_data & M88E1000_PSSR_DPLX)
- ctrl |= E1000_CTRL_FD;
- else
- ctrl &= ~E1000_CTRL_FD;
+ if (phy_data & RTL_PHY_CTRL_FD)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
- e1000_config_collision_dist(hw);
+ if (phy_data & RTL_PHY_CTRL_SPD_100)
+ ctrl |= E1000_CTRL_SPD_100;
+ else
+ ctrl |= E1000_CTRL_SPD_10;
- /* Set up speed in the Device Control register depending on
- * negotiated values.
- */
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
- ctrl |= E1000_CTRL_SPD_1000;
- else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
- ctrl |= E1000_CTRL_SPD_100;
+ e1000_config_collision_dist(hw);
+ break;
+ default:
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) ==
+ M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
/* Write the configured values back to the Device Control Reg. */
ew32(CTRL, ctrl);
@@ -2401,7 +2513,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* speed/duplex on the MAC to the current PHY speed/duplex
* settings.
*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_ce4100))
e1000_config_collision_dist(hw);
else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2851,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_read_phy_reg_ex");
@@ -2752,28 +2865,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_READ) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 64; i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+
+ mdic = readl(E1000_MDIO_STS);
+ if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+ e_dbg("MDI Read Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
}
- *phy_data = (u16) mdic;
} else {
/* We must first send a preamble through the MDIO pin to signal the
* beginning of an MII instruction. This is done by sending 32
@@ -2840,7 +2986,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_write_phy_reg_ex");
@@ -2850,27 +2996,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
if (hw->mac_type > e1000_82543) {
- /* Set up Op-code, Phy Address, register address, and data intended
- * for the PHY register in the MDI Control register. The MAC will take
- * care of interfacing with the PHY to send the desired data.
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
*/
- mdic = (((u32) phy_data) |
- (reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 641; i++) {
- udelay(5);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
}
} else {
/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3221,12 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
if (hw->phy_id == M88E1011_I_PHY_ID)
match = true;
break;
+ case e1000_ce4100:
+ if ((hw->phy_id == RTL8211B_PHY_ID) ||
+ (hw->phy_id == RTL8201N_PHY_ID) ||
+ (hw->phy_id == M88E1118_E_PHY_ID))
+ match = true;
+ break;
case e1000_82541:
case e1000_82541_rev_2:
case e1000_82547:
@@ -3291,6 +3470,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
if (hw->phy_type == e1000_phy_igp)
return e1000_phy_igp_get_info(hw, phy_info);
+ else if ((hw->phy_type == e1000_phy_8211) ||
+ (hw->phy_type == e1000_phy_8201))
+ return E1000_SUCCESS;
else
return e1000_phy_m88_get_info(hw, phy_info);
}
@@ -3742,6 +3924,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_read_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
@@ -3904,6 +4092,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_write_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c6bcd5..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -41,7 +41,7 @@ struct e1000_hw;
struct e1000_hw_stats;
/* Enumerated types specific to the e1000 hardware */
-/* Media Access Controlers */
+/* Media Access Controllers */
typedef enum {
e1000_undefined = 0,
e1000_82542_rev2_0,
@@ -52,6 +52,7 @@ typedef enum {
e1000_82545,
e1000_82545_rev_3,
e1000_82546,
+ e1000_ce4100,
e1000_82546_rev_3,
e1000_82541,
e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
} e1000_1000t_rx_status;
typedef enum {
- e1000_phy_m88 = 0,
- e1000_phy_igp,
- e1000_phy_undefined = 0xFF
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_8211,
+ e1000_phy_8201,
+ e1000_phy_undefined = 0xFF
} e1000_phy_type;
typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82547EI_MOBILE 0x101A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific,
+ * RMII/RGMII function is switched by this register - RW
+ * The following are bit definitions of the Auxiliary Control Register
+ */
+#define E1000_CTL_AUX 0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT 10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
#define E1000_RCTL 0x00100 /* RX Control - RW */
#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
* in more current versions of the 8254x. Despite the difference in location,
* the registers function in the same manner.
*/
+#define E1000_82542_CTL_AUX E1000_CTL_AUX
#define E1000_82542_CTRL E1000_CTRL
#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
#define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
+#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
+#define INTEL_CE_GBE_MDIC_GO 0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
+
#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2869,8 +2917,14 @@ struct e1000_host_command_info {
#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1118_E_PHY_ID 0x01410E40
#define L1LXT971A_PHY_ID 0x001378E0
+#define RTL8211B_PHY_ID 0x001CC910
+#define RTL8201N_PHY_ID 0x8200
+#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
+
/* Bits...
* 15-5: page
* 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d2e4a9..bfab14092d2c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
#include "e1000.h"
#include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
/* required last entry */
{0,}
};
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
case e1000_82541:
case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
pba = E1000_PBA_48K;
break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac;
u16 eeprom_data = 0;
+ u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_sw_init;
err = -EIO;
+ if (hw->mac_type == e1000_ce4100) {
+ ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+ ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ pci_resource_len(pdev, BAR_1));
+
+ if (!ce4100_gbe_mdio_base_virt)
+ goto err_mdio_ioremap;
+ }
if (hw->mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* Auto detect PHY address */
+ if (hw->mac_type == e1000_ce4100) {
+ for (i = 0; i < 32; i++) {
+ hw->phy_addr = i;
+ e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+ if (tmp == 0 || tmp == 0xFF) {
+ if (i == 31)
+ goto err_eeprom;
+ continue;
+ } else
+ break;
+ }
+ }
+
/* reset the hardware with the new settings */
e1000_reset(adapter);
@@ -1171,6 +1203,8 @@ err_eeprom:
kfree(adapter->rx_ring);
err_dma:
err_sw_init:
+err_mdio_ioremap:
+ iounmap(ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
/* First rev 82545 and 82546 need to not allow any memory
* write location to cross 64k boundary due to errata 23 */
if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
}
@@ -2198,7 +2233,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
* addresses take precedence to avoid disabling unicast filtering
* when possible.
*
- * RAR 0 is used for the station MAC adddress
+ * RAR 0 is used for the station MAC address
* if there are not 14 addresses, go ahead and clear the filters
*/
i = 1;
@@ -3443,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
- if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+ if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
+ /*
+ * we might have caused the interrupt, but the above
+ * read cleared it, and just in case the driver is
+ * down, there is nothing to do, so return handled
+ */
+ if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+ return IRQ_HANDLED;
+
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* guard against interrupt when we're going down */
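
Reading ICR is what acknowledges and clears the interrupt cause on this hardware, so once the read has happened the handler must not report IRQ_NONE just because the interface is being torn down; the cause was genuinely ours and has already been consumed. A minimal standalone model of the new decision order (the icr value and the down flag are stand-ins for the real register and state bit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* Mirror of the hunk's ordering: an all-zero ICR means the interrupt was
 * not ours; a non-zero ICR is ours (and the read already cleared it), so
 * even when the adapter is marked down we report it handled and do no work.
 */
static enum irqreturn intr_decision(uint32_t icr, bool adapter_down)
{
	if (!icr)
		return IRQ_NONE;	/* not our interrupt */

	if (adapter_down)
		return IRQ_HANDLED;	/* ours, already acked, nothing to do */

	/* ... normal Rx/Tx/link processing would run here ... */
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d %d %d\n",
	       intr_decision(0x00, false),	/* IRQ_NONE */
	       intr_decision(0x80, true),	/* IRQ_HANDLED, no work */
	       intr_decision(0x80, false));	/* IRQ_HANDLED */
	return 0;
}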
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa895..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,22 @@
#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE 0x60000
+#define GBE_CONFIG_OFFSET 0x0
+
+#define GBE_CONFIG_RAM_BASE \
+ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+ (iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+ (ioread16_rep(base + (offset << 1), data, count))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
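
The two macros above wrap the kernel's ioread16_rep()/iowrite16_rep() string accessors over the CE4100 GbE configuration RAM; only the read side shifts the offset left by one because it takes the offset in 16-bit words rather than bytes. A kernel-context sketch of how a caller might pull a few words out of that window (not standalone; the helper name and its arguments are illustrative, only the macros come from this hunk):

/* Read "words" 16-bit values that boot firmware left in the CE4100 GbE
 * config RAM, starting at the given word offset.
 */
static void ce4100_read_config_words(u16 offset, u16 *buf, u32 words)
{
	void __iomem *base = GBE_CONFIG_BASE_VIRT;

	/* the macro converts the word offset into a byte offset itself */
	GBE_CONFIG_FLASH_READ(base, offset, words, buf);
}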
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e4097ef1b..89a69035e538 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->type = e1000_phy_bm;
phy->ops.acquire = e1000_get_hw_semaphore_82574;
phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
break;
default:
return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
/* This can only be done after all function pointers are setup. */
ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ e_dbg("Error getting PHY ID\n");
+ return ret_val;
+ }
/* Verify phy id */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
if (phy->id != IGP01E1000_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82573:
if (phy->id != M88E1111_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82574:
case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
default:
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
}
- return 0;
+ if (ret_val)
+ e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
}
/**
@@ -317,7 +328,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/*
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
- * first NVM or PHY acess. This should be done for single-port
+ * first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
* inter-port accesses to the PHY & NVM.
@@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
}
/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the device's autonegotiation
+ * advertisement is 10, 10/100, or 10/100/1000 Mb/s at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (LPLU) state is set for power management level D3
+ * when active is true; otherwise LPLU for D3 is cleared. LPLU is used
+ * during Dx states, where power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
* @hw: pointer to the HW structure
*
@@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext;
s32 ret_val;
/*
@@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */
@@ -1247,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
* apply workaround for hardware errata documented in errata
* docs Fixes issue where some error prone or unreliable PCIe
* completions are occurring, particularly with ASPM enabled.
- * Without fix, issue can cause tx timeouts.
+ * Without fix, issue can cause Tx timeouts.
*/
reg = er32(GCR2);
reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c91369f35..28519acacd2d 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2e0b7c..13149983d07e 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8e9116..e610e1369053 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
#include "hw.h"
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644e13d1..2fefa820302b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
**/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
ret_val = e1000_check_alt_mac_addr_generic(hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf6f5a9..fa08b6336cfb 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name, 32);
- strncpy(drvinfo->version, e1000e_driver_version, 32);
+ strncpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version) - 1);
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
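
The ethtool hunk above replaces hard-coded 32-byte copies with sizeof()-bounded ones. Note that strncpy() with sizeof(dst) - 1 never writes the final byte, so the string stays NUL-terminated only because the destination structure is zero-initialized before the driver fills it in (as the ethtool core does for struct ethtool_drvinfo). A tiny self-contained illustration of the pattern (the struct and version string are made up for the example):

#include <stdio.h>
#include <string.h>

struct drvinfo_like {
	char driver[32];
	char version[32];
};

int main(void)
{
	struct drvinfo_like info;
	const char *name = "e1000e";
	const char *ver  = "1.2.20-k2";	/* illustrative version string */

	/* Zero first, as the caller does, so the last byte is already NUL. */
	memset(&info, 0, sizeof(info));

	strncpy(info.driver, name, sizeof(info.driver) - 1);
	strncpy(info.version, ver, sizeof(info.version) - 1);

	printf("%s %s\n", info.driver, info.version);
	return 0;
}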
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- e1000e_reset(adapter);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000e_reset(adapter);
- /* make sure the phy is powered up */
- e1000e_power_up_phy(adapter);
if (e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
- clear_bit(__E1000_TESTING, &adapter->state);
- dev_open(netdev);
- set_bit(__E1000_TESTING, &adapter->state);
- }
+ /* Online tests */
e_info("online testing starting\n");
- /* Online tests */
- if (e1000_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* register, eeprom, intr and loopback tests not run online */
data[0] = 0;
data[1] = 0;
data[2] = 0;
data[3] = 0;
- if (!if_running && (adapter->flags & FLAG_HAS_AMT))
- dev_close(netdev);
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__E1000_TESTING, &adapter->state);
}
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
msleep_interruptible(4 * 1000);
}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c30..bc0860a598c9 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -83,6 +83,7 @@ enum e1e_registers {
E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
@@ -101,7 +102,7 @@ enum e1e_registers {
E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+ E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
/* Convenience macros
*
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc0832720..fb46974cfec1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -321,7 +321,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
}
/*
- * Reset the PHY before any acccess to it. Doing so, ensures that
+ * Reset the PHY before any access to it. Doing so, ensures that
* the PHY is in a known good state before we read/write PHY registers.
* The generic reset is sufficient here, because we haven't determined
* the PHY type yet.
@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
}
}
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
- u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
- u32 i, j, mask, crc;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- crc = crc ^ mac[i];
- for (j = 8; j > 0; j--) {
- mask = (crc & 1) * (-1);
- crc = (crc >> 1) ^ (poly & mask);
- }
- }
- return ~crc;
-}
-
/**
* e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
* with 82579 PHY
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
mac_addr[4] = (addr_high & 0xFF);
mac_addr[5] = ((addr_high >> 8) & 0xFF);
- ew32(PCH_RAICC(i),
- e1000_calc_rx_da_crc(mac_addr));
+ ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
}
/* Write Rx addresses to the PHY */
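
The open-coded bitwise CRC above is dropped in favour of the kernel's ether_crc_le(): both walk the 6-byte address LSB-first with the reflected 802.3 polynomial 0xEDB88320 from an initial value of 0xFFFFFFFF, the only difference being that the deleted helper complemented its result internally while the new call site applies the ~ itself. A small userspace check of that equivalence (mac[] is an arbitrary example address; crc_le_bitwise() re-implements the deleted loop):

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xEDB88320), init 0xFFFFFFFF, no final inversion;
 * this matches the value ether_crc_le() computes for the same bytes.
 */
static uint32_t crc_le_bitwise(const uint8_t *data, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };
	uint32_t crc = crc_le_bitwise(mac, 6);

	/* The deleted helper returned ~crc; the new call site writes
	 * ~ether_crc_le(ETH_ALEN, mac_addr) -- the same quantity.
	 */
	printf("crc = 0x%08x, value written to PCH_RAICC = 0x%08x\n",
	       (unsigned int)crc, (unsigned int)~crc);
	return 0;
}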
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u16 reg;
- u32 ctrl, icr, kab;
+ u32 ctrl, kab;
s32 ret_val;
/*
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CRC_OFFSET, 0x65656565);
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
kab = er32(KABGTXD);
kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
if (hw->phy.type == e1000_phy_82578) {
- hw->phy.ops.read_reg(hw, BM_WUC, &i);
+ e1e_rphy(hw, BM_WUC, &i);
ret_val = e1000_phy_hw_reset_ich8lan(hw);
if (ret_val)
return ret_val;
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_82577)) {
ew32(FCRTV_PCH, hw->fc.refresh_time);
- ret_val = hw->phy.ops.write_reg(hw,
- PHY_REG(BM_PORT_CTRL_PAGE, 27),
- hw->fc.pause_time);
+ ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
if (ret_val)
return ret_val;
}
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
break;
case e1000_phy_ife:
- ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
- &reg_data);
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
if (ret_val)
return ret_val;
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
reg_data |= IFE_PMC_AUTO_MDIX;
break;
}
- ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
- reg_data);
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
if (ret_val)
return ret_val;
break;
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
if (hw->phy.type == e1000_phy_ife)
return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ (IFE_PSCL_PROBE_MODE |
+ IFE_PSCL_PROBE_LEDS_OFF));
ew32(LEDCTL, hw->mac.ledctl_mode1);
return 0;
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_mode1);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}
/**
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
**/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_default);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}
/**
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
- hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+ e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
}
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170a601e..68aa1749bf66 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
+ * but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames but we
- * do not support receiving pause frames).
+ * do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
*/
switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause
- * frames but not send pause frames).
+ * frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * frames but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
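
e1000e_force_mac_fc() applies the negotiated mode by toggling the RFCE (Rx flow-control enable) and TFCE (Tx flow-control enable) bits in the device-control register, which is what the four "fc" values documented above correspond to. A compact standalone table of that mapping (the bit masks use the standard e1000 CTRL definitions; the helper name and the driving loop are illustrative):

#include <stdint.h>
#include <stdio.h>

#define E1000_CTRL_RFCE 0x08000000u	/* Rx flow control enable */
#define E1000_CTRL_TFCE 0x10000000u	/* Tx flow control enable */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

/* Apply one of the four "fc" values from the comment to a CTRL image. */
static uint32_t apply_fc_mode(uint32_t ctrl, enum fc_mode mode)
{
	ctrl &= ~(E1000_CTRL_RFCE | E1000_CTRL_TFCE);

	switch (mode) {
	case fc_none:
		break;
	case fc_rx_pause:
		ctrl |= E1000_CTRL_RFCE;
		break;
	case fc_tx_pause:
		ctrl |= E1000_CTRL_TFCE;
		break;
	case fc_full:
		ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
		break;
	}
	return ctrl;
}

int main(void)
{
	enum fc_mode m;

	for (m = fc_none; m <= fc_full; m++)
		printf("mode %d -> CTRL = 0x%08x\n", m,
		       (unsigned int)apply_fc_mode(0, m));
	return 0;
}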
@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val =
+ e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
@@ -1188,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ "Rx PAUSE frames only.\r\n");
}
}
/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242aa9e6..2e5022849f18 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
/* Interrupt Registers */
{E1000_ICR, "ICR"},
- /* RX Registers */
+ /* Rx Registers */
{E1000_RCTL, "RCTL"},
{E1000_RDLEN, "RDLEN"},
{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{E1000_RDFTS, "RDFTS"},
{E1000_RDFPC, "RDFPC"},
- /* TX Registers */
+ /* Tx Registers */
{E1000_TCTL, "TCTL"},
{E1000_TDBAL, "TDBAL"},
{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
break;
default:
printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
printk(KERN_CONT "\n");
}
-
/*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
static void e1000e_dump(struct e1000_adapter *adapter)
{
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
struct e1000_reg_info *reginfo;
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 {
+ u64 a;
+ u64 b;
+ } *u0;
struct e1000_buffer *buffer_info;
struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc_ps;
struct e1000_rx_desc *rx_desc;
- struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ struct my_u1 {
+ u64 a;
+ u64 b;
+ u64 c;
+ u64 d;
+ } *u1;
u32 staterr;
int i = 0;
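
e1000e_dump() prints raw descriptors by overlaying them with small structs of u64 fields (my_u0 for the 16-byte legacy/extended layouts, my_u1 for the 32-byte packet-split write-back layout) and byte-swapping each quadword with le64_to_cpu(). A standalone miniature of the same trick on a fabricated 16-byte descriptor image (the bytes are invented; memcpy is used instead of the driver's pointer cast to keep the example strictly portable):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-byte descriptor image viewed as two 64-bit words. */
struct my_u0 {
	uint64_t a;
	uint64_t b;
};

int main(void)
{
	uint8_t raw[16];
	struct my_u0 u0;
	int i;

	/* Fabricated descriptor contents, purely for illustration. */
	for (i = 0; i < 16; i++)
		raw[i] = (uint8_t)(0x10 + i);

	/* Overlay the raw bytes onto the two-quadword view. */
	memcpy(&u0, raw, sizeof(u0));

	/* On a little-endian host this prints what le64_to_cpu() yields. */
	printf("T[desc] %016llx %016llx\n",
	       (unsigned long long)u0.a, (unsigned long long)u0.b);
	return 0;
}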
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
+ "trans_start last_rx\n");
printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
e1000_regdump(hw, reginfo);
}
- /* Print TX Ring Summary */
+ /* Print Tx Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ " leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
- /* Print TX Rings */
+ /* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
*
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
(unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
}
- /* Print RX Rings Summary */
+ /* Print Rx Ring Summary */
rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC]\n");
printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ rx_ring->next_to_use, rx_ring->next_to_clean);
- /* Print RX Rings */
+ /* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
goto exit;
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
switch (adapter->rx_ps_pages) {
case 1:
case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
* +-----------------------------------------------------+
*/
printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
+ "[buffer 1 63:0 ] "
"[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
"[bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
* 63 48 47 32 31 20 19 0
*/
printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
+ "[vl l0 ee es] "
"[ l3 l2 l1 hs] [reserved ] ---------------- "
"[bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
- le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb);
} else {
printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %016llX %016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
* 63 48 47 40 39 32 31 16 15 0
*/
printk(KERN_INFO "Rl[desc] [address 63:0 ] "
- "[vl er S cks ln] [bi->dma ] [bi->skb] "
- "<-- Legacy format\n");
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
rx_desc = E1000_RX_DESC(*rx_ring, i);
buffer_info = &rx_ring->buffer_info[i];
u0 = (struct my_u0 *)rx_desc;
printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
- "%016llX %p", i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %p", i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (i == rx_ring->next_to_use)
printk(KERN_CONT " NTU\n");
else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- adapter->rx_buffer_len, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
}
}
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
- struct net_device *netdev,
- struct sk_buff *skb,
+ struct net_device *netdev, struct sk_buff *skb,
u8 status, __le16 vlan)
{
skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
}
/**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
break;
}
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page = &buffer_info->ps_pages[j];
if (j >= adapter->rx_ps_pages) {
/* all unused desc entries get hw null ptr */
- rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
continue;
}
if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
if (dma_mapping_error(&pdev->dev,
ps_page->dma)) {
dev_err(&adapter->pdev->dev,
- "RX DMA page map failed\n");
+ "Rx DMA page map failed\n");
adapter->rx_dma_failed++;
goto no_buffers;
}
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* didn't change because each write-back
* erases this info.
*/
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page->dma);
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
}
skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->rx_ps_bsize0,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@@ -931,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
u16 phy_status, phy_1000t_status, phy_ext_status;
u16 pci_status;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1106,11 +1115,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_bsize0,
- DMA_FROM_DEVICE);
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
buffer_info->dma = 0;
- /* see !EOP comment in other rx routine */
+ /* see !EOP comment in other Rx routine */
if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
@@ -1501,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, downshift_task);
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
@@ -1980,15 +1991,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
}
/**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
* @adapter: address of board private structure
*
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is open.
**/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2005,16 +2016,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
}
/**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is closed.
*
**/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2445,7 +2456,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
return;
}
@@ -2610,7 +2621,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
}
/**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
* @adapter: board private structure
*
* Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2674,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if
- * there are tx hangs or other tx related bugs
+ * there are Tx hangs or other Tx related bugs
*/
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
ew32(TXDCTL(0), txdctl);
@@ -2734,6 +2745,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
else
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable jumbo frame workaround mode\n");
}
/* Program MC offset vector base */
@@ -2874,7 +2888,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->rx_ps_pages) {
/* this is a 32 byte descriptor */
rdlen = rx_ring->count *
- sizeof(union e1000_rx_desc_packet_split);
+ sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2897,7 +2911,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/*
* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
- * enable prefetching of 0x20 rx descriptors
+ * enable prefetching of 0x20 Rx descriptors
* granularity = 01
* wthresh = 04,
* hthresh = 04,
@@ -2978,12 +2992,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req, 55);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -3149,7 +3161,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
/*
- * the Tx fifo also stores 16 bytes of information about the tx
+ * the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -3172,7 +3184,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
pba -= min_tx_space - tx_space;
/*
- * if short on Rx space, Rx wins and must trump tx
+ * if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available
*/
if ((pba < min_rx_space) &&
@@ -3184,7 +3196,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
}
-
/*
* flow control settings
*
@@ -3272,7 +3283,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* that the network interface is in control
*/
if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
ew32(WUC, 0);
@@ -3285,6 +3296,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(VET, ETH_P_8021Q);
e1000e_reset_adaptive(hw);
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state)) {
+ e1000_power_down_phy(adapter);
+ return;
+ }
+
e1000_get_phy_info(hw);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3326,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
return 0;
}
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3365,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
+
+ e1000e_flush_descriptors(adapter);
+
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
@@ -3570,7 +3606,7 @@ static int e1000_open(struct net_device *netdev)
* interface is now open and reset the part to a known state.
*/
if (adapter->flags & FLAG_HAS_AMT) {
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
e1000e_reset(adapter);
}
@@ -3634,7 +3670,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter);
err_setup_rx:
@@ -3689,8 +3725,9 @@ static int e1000_close(struct net_device *netdev)
* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_release_hw_control(adapter);
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
if ((adapter->flags & FLAG_HAS_ERT) ||
(adapter->hw.mac.type == e1000_pch2lan))
@@ -3752,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000_get_phy_info(&adapter->hw);
}
@@ -3762,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
schedule_work(&adapter->update_phy_task);
}
@@ -4029,11 +4074,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
adapter->netdev->name,
adapter->link_speed,
(adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
+ "Full Duplex" : "Half Duplex",
((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+ "Rx/Tx" :
+ ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4136,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
/* Cancel scheduled suspend requests. */
@@ -4296,7 +4344,6 @@ link_up:
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
- adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
@@ -4325,19 +4372,12 @@ link_up:
else
ew32(ICS, E1000_ICS_RXDMT0);
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
- /* flush partial descriptors to memory before detecting tx hang */
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
- ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
- /*
- * no need to flush the writes because the timeout code does
- * an er32 first thing
- */
- }
-
/*
* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4519,7 +4559,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->next_to_watch = i;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -4566,7 +4606,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
- segs = skb_shinfo(skb)->gso_segs ?: 1;
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
@@ -4578,13 +4618,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
buffer_info->dma = 0;
if (count)
count--;
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
@@ -4875,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
(adapter->flags & FLAG_RX_RESTART_NOW))) {
e1000e_dump(adapter);
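
Several work handlers and timer callbacks in this patch gain an early test_bit(__E1000_DOWN, &adapter->state) check, so work that was already queued when the interface starts going down exits without touching hardware. A self-contained sketch of that guard pattern (the flag word, bit value, and handler are stand-ins, not the driver's actual state machinery):

#include <stdio.h>

#define ADAPTER_DOWN_BIT (1u << 0)	/* stand-in for __E1000_DOWN */

struct fake_adapter {
	unsigned int state;
	int resets;
};

/* Deferred-work handler: bail out early if teardown already started. */
static void reset_task(struct fake_adapter *adapter)
{
	if (adapter->state & ADAPTER_DOWN_BIT)
		return;			/* don't touch hardware while down */

	adapter->resets++;		/* the real task would reset the NIC */
}

int main(void)
{
	struct fake_adapter a = { .state = 0, .resets = 0 };

	reset_task(&a);			/* runs: adapter is up */
	a.state |= ADAPTER_DOWN_BIT;	/* teardown sets the flag first */
	reset_task(&a);			/* skipped: adapter going down */

	printf("resets performed: %d\n", a.resets);
	return 0;
}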
@@ -5209,7 +5253,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5366,7 +5410,7 @@ static int __e1000_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
return 0;
}
@@ -5613,7 +5657,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
}
@@ -5636,7 +5680,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strcpy(pba_str, "Unknown");
+ strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -5923,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -5963,9 +6008,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
- strcpy(netdev->name, "eth%d");
+ strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -5982,12 +6027,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
err_eeprom:
if (!e1000_check_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
-
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
@@ -6053,7 +6097,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000e_reset_interrupt_capability(adapter);
kfree(adapter->tx_ring);
@@ -6184,7 +6228,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0e4bca..4dd9b63273f6 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak,
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
-
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
*/
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
+
/* IntMode (Interrupt Mode)
*
* Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efeb55e3..6bea051b134b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
- /* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
out:
return ret_val;
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
if (phy->type == e1000_phy_82578) {
- ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
/* 82578 PHY - set the downshift count to 1x. */
phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
- ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
if (ret_val)
return ret_val;
}
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
- ret_val = e1e_wphy(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
+ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
if (ret_val)
return ret_val;
ret_val = e1000e_phy_reset_dsp(hw);
@@ -2990,7 +2986,7 @@ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
}
/**
- * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
* @page: page to be accessed
**/
static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
goto out;
/* Do not apply workaround if in PHY loopback bit 14 set */
- hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ e1e_rphy(hw, PHY_CONTROL, &data);
if (data & PHY_CONTROL_LB)
goto out;
/* check if link is up and at 1Gbps */
- ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
if (ret_val)
goto out;
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
mdelay(200);
/* flush the packets in the fifo buffer */
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
out:
return ret_val;
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
if (ret_val)
goto out;
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (ret_val)
goto out;
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
if (ret_val)
goto out;
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, length;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
goto out;
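The phy.c hunks above replace open-coded phy->ops.read_reg()/write_reg() calls with the driver's e1e_rphy()/e1e_wphy() shorthands. Their exact definition is not part of this diff; they are assumed to be thin wrappers along these lines:

/* Assumed shape of the accessors used above (not copied from e1000e hw.h) */
#define e1e_rphy(hw, reg, val)  ((hw)->phy.ops.read_reg((hw), (reg), (val)))
#define e1e_wphy(hw, reg, val)  ((hw)->phy.ops.write_reg((hw), (reg), (val)))

With the wrappers in place the local struct e1000_phy_info *phy pointer in e1000_copper_link_setup_82577() becomes unused, which is why it is dropped above.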
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 4fa8d2a4aef3..eb35951a2442 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1761,7 +1761,7 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d14506..6c7257bd73fc 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0106"
+#define DRV_VERSION "EHEA_0107"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5bbe238..f75d3144b8a5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
}
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, i);
+ ehea_update_rq1a(pr->qp, i - 1);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
int ret;
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
- ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
- - init_attr->act_nr_rwqes_rq2
- - init_attr->act_nr_rwqes_rq3 - 1);
+ ehea_init_fill_rq1(pr, pr->rq1_skba.len);
ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
if (netif_msg_hw(priv))
printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
endptr + 1);
- enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
+ enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
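The enc28j60 change above swaps sizeof(tsv) for TSV_SIZE because an array used as a function parameter decays to a pointer, so sizeof() yields the pointer size (4 or 8 bytes) rather than the buffer length, truncating the TSV read. A self-contained illustration of the pitfall:

/* Why sizeof() on an array parameter is wrong: inside read_into(),
 * "buf" has type "unsigned char *", not "unsigned char [100]". */
#include <stdio.h>

#define TSV_SIZE 100

static void read_into(unsigned char buf[TSV_SIZE])
{
    printf("sizeof inside callee: %zu\n", sizeof(buf));    /* 4 or 8 */
}

int main(void)
{
    unsigned char tsv[TSV_SIZE];

    printf("sizeof at the caller: %zu\n", sizeof(tsv));    /* 100 */
    read_into(tsv);
    return 0;
}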
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175f..cd0282d5d40f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
*
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
@@ -45,29 +47,42 @@
#include <asm/cacheflush.h>
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif
#include "fec.h"
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define FEC_ALIGNMENT 0xf
#else
#define FEC_ALIGNMENT 0x3
#endif
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME "fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
-static unsigned char fec_mac_default[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+static struct platform_device_id fec_devtype[] = {
+ {
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ },
+ { }
};
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
/*
* Some hardware gets it MAC address out of local flash memory.
* if this is non-zero then assume it is the address to get MAC from.
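The hunk above introduces FEC_QUIRK_* flags carried in platform_device_id.driver_data so one driver can bind both the classic FEC ("fec") and the i.MX28 ENET-MAC ("imx28-fec") and branch on per-variant workarounds at run time. A minimal sketch of how such quirk bits are consumed (names here are illustrative, not fec symbols):

/* Sketch: testing quirk bits from the matched platform_device_id entry. */
#include <linux/platform_device.h>

#define DEMO_QUIRK_SWAP_FRAME   (1 << 0)    /* illustrative flag */

static int demo_probe(struct platform_device *pdev)
{
    const struct platform_device_id *id = platform_get_device_id(pdev);

    if (id->driver_data & DEMO_QUIRK_SWAP_FRAME) {
        /* apply the byte-swap workaround for this variant */
    }
    return 0;
}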
@@ -133,7 +148,8 @@ static unsigned char fec_mac_default[] = {
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -186,7 +202,6 @@ struct fec_enet_private {
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
- int index;
int link;
int full_duplex;
struct completion mdio_done;
@@ -213,10 +228,23 @@ static void fec_stop(struct net_device *dev);
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static void *swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < (len + 3) / 4; i++, buf++)
+ *buf = cpu_to_be32(*buf);
+
+ return bufaddr;
+}
+
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
@@ -261,6 +289,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = fep->tx_bounce[index];
}
+ /*
+ * Some designs made an incorrect assumption about the endian mode of
+ * the system they run on. As a result, the driver has to
+ * swap every frame going to and coming from the controller.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, skb->len);
+
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
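swap_buffer() above converts each 32-bit word of the frame to big-endian in place before it is handed to, or after it is received from, an ENET-MAC whose data path was wired for the wrong endianness. A user-space rendering of the same helper, showing its effect on a few bytes (htonl() stands in for cpu_to_be32()):

/* User-space demo of the swap_buffer() word swap added above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void swap_buffer(void *bufaddr, int len)
{
    uint32_t *buf = bufaddr;
    int i;

    for (i = 0; i < (len + 3) / 4; i++, buf++)
        *buf = htonl(*buf);    /* cpu_to_be32() in the kernel */
}

int main(void)
{
    uint32_t frame[2];
    uint8_t *p = (uint8_t *)frame;
    int i;

    memcpy(frame, "\x00\x04\x9f\x01\x30\xe0\x08\x00", 8);
    swap_buffer(frame, 8);
    for (i = 0; i < 8; i++)
        printf("%02x ", p[i]);
    printf("\n");    /* on a little-endian host: 01 9f 04 00 00 08 e0 30 */
    return 0;
}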
@@ -429,6 +465,8 @@ static void
fec_enet_rx(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -492,6 +530,9 @@ fec_enet_rx(struct net_device *dev)
dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
DMA_FROM_DEVICE);
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
+
/* This does 16 byte alignment, exactly what we need.
* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
@@ -538,37 +579,50 @@ rx_processing_done:
}
/* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
unsigned char *iap, tmpaddr[ETH_ALEN];
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ /*
+ * try to get mac address in following order:
+ *
+ * 1) module parameter via kernel command line in form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+ /*
+ * 2) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ if (pdata)
+ memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+ }
+
+ /*
+ * 3) FEC mac registers set by bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+ *((unsigned short *) &tmpaddr[4]) =
+ be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
memcpy(dev->dev_addr, iap, ETH_ALEN);
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+ /* Adjust MAC if using macaddr */
+ if (iap == macaddr)
+ dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}
-#endif
/* ------------------------------------------------------------------------- */
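fec_get_mac() above now tries MAC address sources in a fixed order: the macaddr module parameter (e.g. booting with fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0), then flash or fuse data via platform data (or FEC_FLASHMAC on M5272), then whatever the bootloader left in the address registers. The fall-through works because is_valid_ether_addr() rejects the all-zero and multicast addresses, so an unset source simply yields to the next one. A tiny sketch of that check (zero_mac/fuse_mac are placeholders):

/* Sketch of the is_valid_ether_addr() fall-through used by fec_get_mac(). */
#include <linux/etherdevice.h>

static const u8 zero_mac[ETH_ALEN];    /* unset module parameter */
static const u8 fuse_mac[ETH_ALEN] = { 0x00, 0x04, 0x9f, 0x01, 0x30, 0xe0 };

static const u8 *pick_mac_demo(void)
{
    const u8 *iap = zero_mac;          /* 1) module parameter */

    if (!is_valid_ether_addr(iap))     /* all zeroes: not usable */
        iap = fuse_mac;                /* 2) next source wins */
    return iap;
}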
@@ -651,8 +705,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
- /* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -681,6 +735,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
+ int dev_id = fep->pdev->id;
fep->phy_dev = NULL;
@@ -692,6 +747,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
continue;
if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
continue;
+ if (dev_id--)
+ continue;
strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
@@ -729,10 +786,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
static int fec_enet_mii_init(struct platform_device *pdev)
{
+ static struct mii_bus *fec0_mii_bus;
struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
int err = -ENXIO, i;
+ /*
+ * The dual fec interfaces are not equivalent with enet-mac.
+ * Here are the differences:
+ *
+ * - fec0 supports MII & RMII modes while fec1 only supports RMII
+ * - fec0 acts as the 1588 time master while fec1 is slave
+ * - external phys can only be configured by fec0
+ *
+ * That is to say fec1 can not work independently. It only works
+ * when fec0 is working. The reason behind this design is that the
+ * second interface is added primarily for Switch mode.
+ *
+ * Because of the last point above, both phys are attached on fec0
+ * mdio interface in board design, and need to be configured by
+ * fec0 mii_bus.
+ */
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+ /* fec1 uses fec0 mii_bus */
+ fep->mii_bus = fec0_mii_bus;
+ return 0;
+ }
+
fep->mii_timeout = 0;
/*
@@ -769,6 +851,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ /* save fec0 mii_bus */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fec0_mii_bus = fep->mii_bus;
+
return 0;
err_out_free_mdio_irq:
@@ -1067,9 +1153,8 @@ static const struct net_device_ops fec_netdev_ops = {
/*
* XXX: We need to clean up on failure exits here.
*
- * index is only used in legacy code
*/
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
@@ -1086,26 +1171,11 @@ static int fec_enet_init(struct net_device *dev, int index)
spin_lock_init(&fep->hw_lock);
- fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
fep->netdev = dev;
- /* Set the Ethernet address */
-#ifdef CONFIG_M5272
+ /* Get the Ethernet address */
fec_get_mac(dev);
-#else
- {
- unsigned long l;
- l = readl(fep->hwp + FEC_ADDR_LOW);
- dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
- dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
- dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
- l = readl(fep->hwp + FEC_ADDR_HIGH);
- dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
- }
-#endif
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
@@ -1156,12 +1226,25 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
int i;
+ u32 val, temp_mac[2];
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
+ /*
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
+
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
@@ -1208,20 +1291,45 @@ fec_restart(struct net_device *dev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
- /* disable the gasket and wait */
- writel(0, fep->hwp + FEC_MIIGSK_ENR);
- while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
- udelay(1);
+ /*
+ * The phy interface and speed need to get configured
+ * differently on enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ val = readl(fep->hwp + FEC_R_CNTRL);
- /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+ /* MII or RMII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= (1 << 8);
+ else
+ val &= ~(1 << 8);
- /* re-enable the gasket */
- writel(2, fep->hwp + FEC_MIIGSK_ENR);
- }
+ /* 10M or 100M */
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+ val &= ~(1 << 9);
+ else
+ val |= (1 << 9);
+
+ writel(val, fep->hwp + FEC_R_CNTRL);
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
#endif
+ }
/* And last, enable the transmit and receive processing */
writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1424,7 @@ fec_probe(struct platform_device *pdev)
}
clk_enable(fep->clk);
- ret = fec_enet_init(ndev, 0);
+ ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1380,8 +1488,10 @@ fec_suspend(struct device *dev)
if (ndev) {
fep = netdev_priv(ndev);
- if (netif_running(ndev))
- fec_enet_close(ndev);
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
+ }
clk_disable(fep->clk);
}
return 0;
@@ -1396,8 +1506,10 @@ fec_resume(struct device *dev)
if (ndev) {
fep = netdev_priv(ndev);
clk_enable(fep->clk);
- if (netif_running(ndev))
- fec_enet_open(ndev);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
+ }
}
return 0;
}
@@ -1414,12 +1526,13 @@ static const struct dev_pm_ops fec_pm_ops = {
static struct platform_driver fec_driver = {
.driver = {
- .name = "fec",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &fec_pm_ops,
#endif
},
+ .id_table = fec_devtype,
.probe = fec_probe,
.remove = __devexit_p(fec_drv_remove),
};
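The fec suspend/resume hunks above stop using full close/open cycles: suspend now quiesces the controller and calls netif_device_detach(), which marks the device absent and stops the TX queue, while resume restarts the MAC and calls netif_device_attach(), presumably so the rings, MAC state and PHY binding survive the power transition. The pattern, reduced to its shape (demo_hw_* are placeholders):

/* Shape of the detach/attach suspend pattern used above. */
#include <linux/netdevice.h>

static void demo_hw_stop(struct net_device *ndev) { }       /* fec_stop() stand-in */
static void demo_hw_restart(struct net_device *ndev) { }    /* fec_restart() stand-in */

static int demo_suspend(struct net_device *ndev)
{
    if (netif_running(ndev)) {
        demo_hw_stop(ndev);
        netif_device_detach(ndev);    /* mark absent, stop queue */
    }
    return 0;
}

static int demo_resume(struct net_device *ndev)
{
    if (netif_running(ndev)) {
        demo_hw_restart(ndev);
        netif_device_attach(ndev);    /* mark present, wake queue */
    }
    return 0;
}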
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d5..ace318df4c8d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
/****************************************************************************/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
/*
* Define the buffer descriptor structure.
*/
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d825df..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
writel(flags, base + NvRegWakeUpFlags);
spin_unlock_irq(&np->lock);
}
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
return 0;
}
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* set mac address */
nv_copy_mac_to_hw(dev);
- /* Workaround current PCI init glitch: wakeup bits aren't
- * being set from PCI PM capability.
- */
- device_init_wakeup(&pci_dev->dev, 1);
-
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
+ device_set_wakeup_enable(&pci_dev->dev, false);
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
@@ -5648,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ netif_carrier_off(dev);
+
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
@@ -5746,8 +5745,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5763,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i <= np->register_size/sizeof(u32); i++)
np->saved_config_space[i] = readl(base + i*sizeof(u32));
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i, rc = 0;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* restore non-pci configuration space */
for (i = 0; i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5792,9 @@ static int nv_resume(struct pci_dev *pdev)
return rc;
}
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
static void nv_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5817,13 @@ static void nv_shutdown(struct pci_dev *pdev)
* only put the device into D3 if we really go for poweroff.
*/
if (system_state == SYSTEM_POWER_OFF) {
- if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
- pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+ pci_wake_from_d3(pdev, np->wolenabled);
pci_set_power_state(pdev, PCI_D3hot);
}
}
#else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
#define nv_shutdown NULL
-#define nv_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5995,8 @@ static struct pci_driver driver = {
.id_table = pci_tbl,
.probe = nv_probe,
.remove = __devexit_p(nv_remove),
- .suspend = nv_suspend,
- .resume = nv_resume,
.shutdown = nv_shutdown,
+ .driver.pm = NV_PM_OPS,
};
static int __init init_nic(void)
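The forcedeth changes above move from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops built with SIMPLE_DEV_PM_OPS(); with dev_pm_ops in place the PCI core is expected to handle pci_save_state(), the D-state transition and wake enabling, which is why those calls disappear from nv_suspend()/nv_resume(). A minimal sketch of the same conversion for a hypothetical PCI driver:

/* Sketch of the dev_pm_ops conversion; "demo_*" names are hypothetical. */
#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    dev_dbg(&pdev->dev, "suspending\n");    /* device-specific quiesce only */
    return 0;
}

static int demo_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    dev_dbg(&pdev->dev, "resuming\n");      /* device-specific re-init only */
    return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
    .name       = "demo",
    /* .id_table, .probe, .remove omitted from this sketch */
    .driver.pm  = &demo_pm_ops,
};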
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d684f187de57..7a1f3d0ffa78 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -40,6 +40,7 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 45c4b7bfcf39..5ed8f9f9419f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -95,6 +95,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include "gianfar.h"
#include "fsl_pq_mdio.h"
@@ -433,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev)
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct netdev_queue *txq;
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
int i = 0;
@@ -449,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
dev->stats.tx_bytes = tx_bytes;
@@ -1921,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ goto irq_fail;
}
}
@@ -2108,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- txq->tx_bytes += skb->len;
- txq->tx_packets ++;
+ tx_queue->stats.tx_bytes += skb->len;
+ tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
MQ_MG_MODE
};
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
/**
* struct gfar_priv_tx_q - per tx queue structure
* @txlock: per queue tx spin lock
* @tx_skbuff:skb pointers
* @skb_curtx: to be used skb pointer
* @skb_dirtytx:the last used skb pointer
+ * @stats: bytes/packets stats
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
struct txbd8 *dirty_tx;
+ struct tx_q_stats stats;
struct net_device *dev;
struct gfar_priv_grp *grp;
u16 skb_curtx;
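With the new struct tx_q_stats each gianfar TX ring counts its own packets and bytes, and gfar_get_stats() above sums them into the netdev totals instead of reading the core netdev_queue counters. The aggregation, distilled (names illustrative):

/* Distilled per-queue counter aggregation as done in gfar_get_stats(). */
#include <linux/netdevice.h>

struct demo_txq_stats {
    unsigned long tx_packets;
    unsigned long tx_bytes;
};

static void demo_sum_tx_stats(struct net_device *dev,
                              struct demo_txq_stats *q, int num_tx_queues)
{
    unsigned long tx_packets = 0, tx_bytes = 0;
    int i;

    for (i = 0; i < num_tx_queues; i++) {
        tx_packets += q[i].tx_packets;
        tx_bytes   += q[i].tx_bytes;
    }
    dev->stats.tx_packets = tx_packets;
    dev->stats.tx_bytes   = tx_bytes;
}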
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
/*
* Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
*
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
*
* This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
* available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
dev_dbg(&dev->dev, " starting queue\n");
netif_start_queue(dev);
+ GRETH_REGSAVE(greth->regs->status, 0xFF);
+
napi_enable(&greth->napi);
greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
napi_disable(&greth->napi);
+ greth_disable_irqs(greth);
greth_disable_tx(greth);
+ greth_disable_rx(greth);
netif_stop_queue(dev);
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
int err = NETDEV_TX_OK;
- u32 status, dma_addr;
+ u32 status, dma_addr, ctrl;
+ unsigned long flags;
- bdp = greth->tx_bd_base + greth->tx_next;
+ /* Clean TX Ring */
+ greth_clean_tx(greth->netdev);
if (unlikely(greth->tx_free <= 0)) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_BUSY;
}
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ bdp = greth->tx_bd_base + greth->tx_next;
dma_addr = greth_read_bd(&bdp->addr);
memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
- status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+ status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
greth->tx_next = NEXT_TX(greth->tx_next);
greth->tx_free--;
- /* No more descriptors */
- if (unlikely(greth->tx_free == 0)) {
-
- /* Free transmitted descriptors */
- greth_clean_tx(dev);
-
- /* If nothing was cleaned, stop queue & wait for irq */
- if (unlikely(greth->tx_free == 0)) {
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
- }
-
/* Write descriptor control word and enable transmission */
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
out:
dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr;
+ u32 status = 0, dma_addr, ctrl;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+ unsigned long flags;
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* Clean TX Ring */
+ greth_clean_tx_gbit(dev);
+
if (greth->tx_free < nr_frags + 1) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
- status = GRETH_TXBD_CSALL;
+ status = GRETH_TXBD_CSALL | GRETH_BD_EN;
status |= frag->size & GRETH_BD_LEN;
/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* More fragments left */
if (i < nr_frags - 1)
status |= GRETH_TXBD_MORE;
-
- /* ... last fragment, check if out of descriptors */
- else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
- /* Enable interrupts and stop queue */
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
+ else
+ status |= GRETH_BD_IE; /* enable IRQ on last fragment */
greth_write_bd(&bdp->stat, status);
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
wmb();
- /* Enable the descriptors that we configured ... */
- for (i = 0; i < nr_frags + 1; i++) {
- bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = NEXT_TX(greth->tx_next);
- greth->tx_free--;
- }
+ /* Enable the descriptor chain by enabling the first descriptor */
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = curr_tx;
+ greth->tx_free -= nr_frags + 1;
+ wmb();
+
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
frag_map_error:
- /* Unmap SKB mappings that succeeded */
+ /* Unmap SKB mappings that succeeded and disable descriptor */
for (i = 0; greth->tx_next + i != curr_tx; i++) {
bdp = greth->tx_bd_base + greth->tx_next + i;
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
DMA_TO_DEVICE);
+ greth_write_bd(&bdp->stat, 0);
}
map_error:
if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
return err;
}
-
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct greth_private *greth;
- u32 status;
+ u32 status, ctrl;
irqreturn_t retval = IRQ_NONE;
greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
/* Get the interrupt events that caused us to be here. */
status = GRETH_REGLOAD(greth->regs->status);
- /* Handle rx and tx interrupts through poll */
- if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
- /* Clear interrupt status */
- GRETH_REGORIN(greth->regs->status,
- status & (GRETH_INT_RX | GRETH_INT_TX));
+ /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+ * set regardless of whether IRQ is enabled or not. Especially
+ * important when shared IRQ.
+ */
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Handle rx and tx interrupts through poll */
+ if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+ ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
retval = IRQ_HANDLED;
/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
while (1) {
bdp = greth->tx_bd_base + greth->tx_last;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
stat = greth_read_bd(&bdp->stat);
if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
/* We only clean fully completed SKBs */
bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
- stat = bdp_last_frag->stat;
+
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp_last_frag->stat);
if (stat & GRETH_BD_EN)
break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
- if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
- netif_wake_queue(dev);
- }
-}
-static int greth_pending_packets(struct greth_private *greth)
-{
- struct greth_bd *bdp;
- u32 status;
- bdp = greth->rx_bd_base + greth->rx_cur;
- status = greth_read_bd(&bdp->stat);
- if (status & GRETH_BD_EN)
- return 0;
- else
- return 1;
+ if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
}
static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
int pkt_len;
int bad, count;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
- dma_addr = greth_read_bd(&bdp->addr);
- bad = 0;
if (unlikely(status & GRETH_BD_EN)) {
break;
}
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
int pkt_len;
int bad, count = 0;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
bdp = greth->rx_bd_base + greth->rx_cur;
skb = greth->rx_skbuff[greth->rx_cur];
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
bad = 0;
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
}
}
- /* Allocate new skb to replace current */
- newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
- if (!bad && newskb) {
+ /* Allocate new skb to replace current, not needed if the
+ * current skb can be reused */
+ if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
skb_reserve(newskb, NET_IP_ALIGN);
dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
if (net_ratelimit())
dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
dev_kfree_skb(newskb);
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
+ } else if (bad) {
+ /* Bad Frame transfer, the skb is reused */
+ dev->stats.rx_dropped++;
} else {
+ /* Failed Allocating a new skb. This is rather stupid
+ * but the current "filled" skb is reused, as if
+ * transfer failure. One could argue that RX descriptor
+ * table handling should be divided into cleaning and
+ * filling as the TX part of the driver
+ */
if (net_ratelimit())
dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
wmb();
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags);
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
{
struct greth_private *greth;
int work_done = 0;
+ unsigned long flags;
+ u32 mask, ctrl;
greth = container_of(napi, struct greth_private, napi);
- if (greth->gbit_mac) {
- greth_clean_tx_gbit(greth->netdev);
- } else {
- greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+ if (netif_queue_stopped(greth->netdev)) {
+ if (greth->gbit_mac)
+ greth_clean_tx_gbit(greth->netdev);
+ else
+ greth_clean_tx(greth->netdev);
}
-restart_poll:
if (greth->gbit_mac) {
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
@@ -949,15 +976,29 @@ restart_poll:
if (work_done < budget) {
- napi_complete(napi);
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ if (netif_queue_stopped(greth->netdev)) {
+ GRETH_REGSAVE(greth->regs->control,
+ ctrl | GRETH_TXI | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE |
+ GRETH_INT_TX | GRETH_INT_TE;
+ } else {
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE;
+ }
- if (greth_pending_packets(greth)) {
- napi_reschedule(napi);
- goto restart_poll;
+ if (GRETH_REGLOAD(greth->regs->status) & mask) {
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ goto restart_txrx_poll;
+ } else {
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&greth->devlock, flags);
}
}
- greth_enable_irqs(greth);
return work_done;
}
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
};
static struct net_device_ops greth_netdev_ops = {
- .ndo_open = greth_open,
- .ndo_stop = greth_close,
- .ndo_start_xmit = greth_start_xmit,
- .ndo_set_mac_address = greth_set_mac_add,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
};
static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phydev = greth->phy;
unsigned long flags;
-
int status_change = 0;
+ u32 ctrl;
spin_lock_irqsave(&greth->devlock, flags);
if (phydev->link) {
if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
- GRETH_REGANDIN(greth->regs->control,
- ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+ ctrl = GRETH_REGLOAD(greth->regs->control) &
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
if (phydev->duplex)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
-
- if (phydev->speed == SPEED_100) {
-
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
- }
+ ctrl |= GRETH_CTRL_FD;
+ if (phydev->speed == SPEED_100)
+ ctrl |= GRETH_CTRL_SP;
else if (phydev->speed == SPEED_1000)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+ ctrl |= GRETH_CTRL_GB;
+ GRETH_REGSAVE(greth->regs->control, ctrl);
greth->speed = phydev->speed;
greth->duplex = phydev->duplex;
status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
{
.name = "GAISLER_ETHMAC",
},
+ {
+ .name = "01_01d",
+ },
{},
};
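The reworked greth_poll() above no longer trusts a single completion call: it re-enables the RX (and, if the queue is stopped, TX) interrupt sources under the device lock, re-reads the status register, and only calls __napi_complete() if nothing new arrived; otherwise it restores the control register and loops back to restart_txrx_poll. That closes the classic race where an event lands between the last ring scan and interrupt re-enable. The pattern, reduced to a sketch (demo_* helpers and fields are placeholders, not greth symbols):

/* Sketch of the race-free NAPI completion pattern used above. */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
    struct napi_struct napi;
    spinlock_t lock;
};

/* placeholders for the driver's ring scan and IRQ mask/unmask helpers */
static int demo_process_ring(struct demo_priv *priv, int budget);
static void demo_unmask_irqs(struct demo_priv *priv);
static void demo_mask_irqs(struct demo_priv *priv);
static bool demo_pending_events(struct demo_priv *priv);

static int demo_poll(struct napi_struct *napi, int budget)
{
    struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
    unsigned long flags;
    int work_done = 0;

restart_poll:
    work_done += demo_process_ring(priv, budget - work_done);
    if (work_done >= budget)
        return work_done;               /* stay in polling mode */

    spin_lock_irqsave(&priv->lock, flags);
    demo_unmask_irqs(priv);
    if (demo_pending_events(priv)) {    /* an event raced in: poll again */
        demo_mask_irqs(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
        goto restart_poll;
    }
    __napi_complete(napi);              /* as in greth_poll() above */
    spin_unlock_irqrestore(&priv->lock, flags);

    return work_done;
}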
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903cd676..be0f2062bd14 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
#define GRETH_BD_LEN 0x7FF
#define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
#define GRETH_INT_TX 0x8
#define GRETH_TXI 0x4
#define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
#define GRETH_TXBD_ERR_UE 0x4000
#define GRETH_TXBD_ERR_AL 0x8000
+#define GRETH_INT_RE 0x1
#define GRETH_INT_RX 0x4
#define GRETH_RXEN 0x2
#define GRETH_RXI 0x8
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a2340..7d9ced0738c5 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
while (p) {
if (p->bitrate == bitrate) {
memcpy(p->bits, bits, YAM_FPGA_SIZE);
- return p->bits;
+ goto out;
}
p = p->next;
}
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
p->bitrate = bitrate;
p->next = yam_data;
yam_data = p;
-
+ out:
release_firmware(fw);
return p->bits;
}
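The yam change above turns an early return in add_mcs() into a goto so the single exit path always runs release_firmware(); the old early return on the "bitrate already cached" branch leaked the firmware blob. Reduced to its shape (demo_* helpers are placeholders):

/* Shape of the single-exit cleanup introduced above. */
#include <linux/firmware.h>

static void *demo_lookup(int bitrate);                          /* placeholder */
static void *demo_add(const struct firmware *fw, int bitrate);  /* placeholder */

static void *demo_add_mcs(const struct firmware *fw, int bitrate)
{
    void *entry = demo_lookup(bitrate);

    if (entry)
        goto out;    /* old code returned here and leaked fw */

    entry = demo_add(fw, bitrate);
out:
    release_firmware(fw);
    return entry;
}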
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009a..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f08db45..e3b285a67734 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
#ifdef CONFIG_SIR_BFIN_DMA
struct dma_rx_buf {
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 4dc39e5f0156..77fcf4459161 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -30,7 +30,7 @@
* or the type-DO IR port.
*
* IrDA chip set list from Toshiba Computer Engineering Corp.
- * model method maker controler Version
+ * model method maker controller Version
* Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
* Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
* Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
ret = sh_irda_set_baudrate(self, speed);
if (ret < 0)
- return ret;
+ goto sh_irda_hard_xmit_end;
self->tx_buff.len = 0;
if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
sh_irda_write(self, IRTFLR, self->tx_buff.len);
sh_irda_write(self, IRTCTR, ARMOD | TE);
- }
+ } else
+ goto sh_irda_hard_xmit_end;
dev_kfree_skb(skb);
return 0;
+
+sh_irda_hard_xmit_end:
+ sh_irda_set_baudrate(self, 9600);
+ netif_wake_queue(self->ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ dev_kfree_skb(skb);
+
+ return ret;
+
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d6..3b8c92463617 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue);
extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
- u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
- u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
- u8 l4type);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c227cd4a..a21f5817685b 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw ,"Flow Director previous command isn't complete, "
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
"aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1079,7 +1079,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Move the flexible bytes to use the ethertype - shift 6 words */
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1170,7 +1167,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
- u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ u32 key)
{
/*
* The algorithm is as follows:
@@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
* To simplify for programming, the algorithm is implemented
* in software this way:
*
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * hi_hash_dword[31:0] ^= Stream[351:320];
*
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- * if (int_key[i])
- * hash ^= int_stream[(i + 15):i];
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
+ *
*/
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
- union {
- u64 fill[6];
- u32 key[11];
- u8 key_stream[44];
- } tmp_key;
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
- u8 *stream = (u8 *)atr_input;
- u8 int_key[44]; /* upper-most bit unused */
- u8 hash_str[46]; /* upper-most 2 bits unused */
- u16 hash_result = 0;
- int i, j, k, h;
+ /* generate common hash dword */
+ for (i = 10; i; i -= 2)
+ common_hash_dword ^= atr_input->dword_stream[i] ^
+ atr_input->dword_stream[i - 1];
- /*
- * Initialize the fill member to prevent warnings
- * on some compilers
- */
- tmp_key.fill[0] = 0;
+ hi_hash_dword = ntohl(common_hash_dword);
- /* First load the temporary key stream */
- for (i = 0; i < 6; i++) {
- u64 fillkey = ((u64)key << 32) | key;
- tmp_key.fill[i] = fillkey;
- }
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- /*
- * Set the interim key for the hashing. Bit 352 is unused, so we must
- * shift and compensate when building the key.
- */
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- int_key[0] = tmp_key.key_stream[0] >> 1;
- for (i = 1, j = 0; i < 44; i++) {
- unsigned int this_key = tmp_key.key_stream[j] << 7;
- j++;
- int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
- }
-
- /*
- * Set the interim bit string for the hashing. Bits 368 and 367 are
- * unused, so shift and compensate when building the string.
- */
- hash_str[0] = (stream[40] & 0x7f) >> 1;
- for (i = 1, j = 40; i < 46; i++) {
- unsigned int this_str = stream[j] << 7;
- j++;
- if (j > 41)
- j = 0;
- hash_str[i] = (u8)(this_str | (stream[j] >> 1));
- }
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
/*
- * Now compute the hash. i is the index into hash_str, j is into our
- * key stream, k is counting the number of bits, and h interates within
- * each byte.
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
*/
- for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
- for (h = 0; h < 8 && k < 351; h++, k++) {
- if (int_key[j] & (1 << h)) {
- /*
- * Key bit is set, XOR in the current 16-bit
- * string. Example of processing:
- * h = 0,
- * tmp = (hash_str[i - 2] & 0 << 16) |
- * (hash_str[i - 1] & 0xff << 8) |
- * (hash_str[i] & 0xff >> 0)
- * So tmp = hash_str[15 + k:k], since the
- * i + 2 clause rolls off the 16-bit value
- * h = 7,
- * tmp = (hash_str[i - 2] & 0x7f << 9) |
- * (hash_str[i - 1] & 0xff << 1) |
- * (hash_str[i] & 0x80 >> 7)
- */
- int tmp = (hash_str[i] >> h);
- tmp |= (hash_str[i - 1] << (8 - h));
- tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
- << (16 - h);
- hash_result ^= (u16)tmp;
- }
- }
- }
-
- return hash_result;
-}
-
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
- (src_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
- (src_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
- (dst_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
- (dst_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
- return 0;
-}
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
- input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
- *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
- *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *src_addr)
-{
- *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
- return 0;
-}
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *dst_addr)
-{
- *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i-- ) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
- return 0;
+ return hash_result & IXGBE_ATR_HASH_MASK;
}
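The comment block above describes the reduced hash: XOR the input dwords into hi_hash_dword, derive lo_hash_dword as its word-swapped copy, fold in the flow/VM-pool/VLAN dword (delaying the fold into lo until after key bit 0 has been applied), then XOR shifted copies into the result for every set key bit. Below is a small stand-alone user-space rendering of that algorithm for experimentation; the key, the 11-dword sample stream and the 15-bit mask value are assumptions, and the driver's big-endian input handling (ntohl on the stored stream) is omitted:

/* User-space rendering of the simplified ATR hash described above. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HASH_MASK  0x7fff    /* assumed 15-bit result mask */

static uint32_t demo_atr_hash(const uint32_t stream[11], uint32_t key)
{
    uint32_t flow_vm_vlan = stream[0];
    uint32_t hi = 0, lo, hash = 0;
    int i;

    for (i = 10; i; i -= 2)          /* XOR dwords 1..10 together */
        hi ^= stream[i] ^ stream[i - 1];

    lo = (hi >> 16) | (hi << 16);    /* word-swapped copy */
    hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

    if (key & 0x00000001)            /* key bits 0 and 16 first ... */
        hash ^= lo;
    if (key & 0x00010000)
        hash ^= hi;

    /* ... then fold the VLAN bits into lo, as the comment requires */
    lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

    for (i = 1; i < 16; i++) {       /* remaining 30 key bits */
        if (key & (0x00000001u << i))
            hash ^= lo >> i;
        if (key & (0x00010000u << i))
            hash ^= hi >> i;
    }
    return hash & DEMO_HASH_MASK;
}

int main(void)
{
    const uint32_t stream[11] = { 0x00000064, 0xc0a80001, 0xc0a80002,
                                  0x00500d3d, 0, 0, 0, 0, 0, 0, 0 };

    printf("hash = 0x%04x\n", demo_atr_hash(stream, 0x93a1b2c4));
    return 0;
}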
-/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *src_addr_1, u32 *src_addr_2,
- u32 *src_addr_3, u32 *src_addr_4)
-{
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
- return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
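/*
 * For reference, a reading of one expansion (not part of the patch):
 * IXGBE_COMPUTE_SIG_HASH_ITERATION(3) tests bit 3 of each key against
 * lo_hash_dword and bit 19 (3 + 16) against hi_hash_dword, folding the
 * contribution into common_hash or bucket_hash as (dword >> 3), or into
 * sig_hash as (dword << 13) so the signature half accumulates in the
 * upper 16 bits.
 */
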
/**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
*
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
+ * This function is almost identical to the function above but contains
+ * several optimizations, such as unrolling all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
**/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
- u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
{
- *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
- *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
- return 0;
-}
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input.dword);
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
- u16 *dst_port)
-{
- *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
- *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
- return 0;
-}
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 *flex_byte)
-{
- *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
- *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- return 0;
-}
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
- u8 *l4type)
-{
- *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword; this is
+ * delayed because bit 0 of the stream should not be processed,
+ * so the VLAN is not added until after bit 0 has been handled
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
- return 0;
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
}
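
/*
 * A standalone sketch (illustrative only; the function name and types are
 * not from the driver) of the hash the unrolled iterations above compute:
 * key bit n in 0..15 folds in lo_hash_dword >> n, key bit n + 16 folds in
 * hi_hash_dword >> n, and the result is truncated to 15 bits.  Since XOR
 * distributes over the shared key bits, bucket_hash ^ common_hash in the
 * function above equals this hash taken with the full bucket key, while
 * the signature half is accumulated pre-shifted so the returned dword
 * packs the signature hash in bits 16-30 and the bucket hash in bits 0-14,
 * ready for FDIRHASH.
 */
#include <stdint.h>

static uint32_t atr_hash_sketch(uint32_t key, uint32_t hi_hash_dword,
				uint32_t lo_hash_dword)
{
	uint32_t result = 0;
	int n;

	for (n = 0; n < 16; n++) {
		if (key & (1u << n))
			result ^= lo_hash_dword >> n;
		if (key & (1u << (n + 16)))
			result ^= hi_hash_dword >> n;
	}

	return result & 0x7fff;	/* IXGBE_ATR_HASH_MASK */
}
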
/**
* ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue)
{
u64 fdirhashcmd;
- u64 fdircmd;
- u32 fdirhash;
- u16 bucket_hash, sig_hash;
- u8 l4type;
-
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /* Get the l4type in order to program FDIRCMD properly */
- /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
/*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
- fdircmd |= IXGBE_FDIRCMD_IPV6;
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- fdirhashcmd = ((fdircmd << 32) | fdirhash);
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
return 0;
}
/**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
+ * and so on. In order to generate a correctly swapped value we need to bit
+ * swap the mask, which is what this function accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+ u32 mask = ntohs(input_masks->dst_port_mask);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= ntohs(input_masks->src_port_mask);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
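/*
 * A standalone illustration (hypothetical helper, not part of the patch)
 * of the bit reversal above.  The driver reverses the combined 32-bit
 * dst/src mask in one go; the 16-bit version below shows the same idea:
 * mask bit 15 lands in bit 0, bit 14 in bit 1, and so on.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t bit_swap16(uint16_t m)
{
	m = ((m & 0x5555) << 1) | ((m & 0xAAAA) >> 1);	/* swap adjacent bits */
	m = ((m & 0x3333) << 2) | ((m & 0xCCCC) >> 2);	/* swap bit pairs */
	m = ((m & 0x0F0F) << 4) | ((m & 0xF0F0) >> 4);	/* swap nibbles */
	return (m << 8) | (m >> 8);			/* swap bytes */
}

int main(void)
{
	/* 0xFF00 reverses to 0x00FF; 0x8001 is its own reversal */
	printf("%04x %04x\n", bit_swap16(0xFF00), bit_swap16(0x8001));
	return 0;
}
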
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
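/*
 * Worked example (hypothetical address, not from the patch): for the
 * network-order address 192.168.0.1, ntohl() gives 0xc0a80001 on a
 * little-endian host and leaves the value as 0xc0a80001 on a big-endian
 * one; the swap below then produces 0x0100a8c0 in both cases, so the same
 * 32-bit value reaches IXGBE_WRITE_REG() regardless of host byte order.
 */
#include <stdint.h>

static uint32_t store_as_be32_sketch(uint32_t v)
{
	return (v >> 24) | ((v & 0x00FF0000) >> 8) |
	       ((v & 0x0000FF00) << 8) | (v << 24);
}

/* store_as_be32_sketch(0xc0a80001) == 0x0100a8c0 */
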
+/**
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
- u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4 = 0, dst_ipv4 = 0;
- u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
- u16 src_port, dst_port, vlan_id, flex_bytes;
- u16 bucket_hash;
- u8 l4type;
- u8 fdirm = 0;
-
- /* Get our input values */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
+ u32 fdirport, fdirtcpm;
+ u32 fdirvlan;
+ /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+ u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+ IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
/*
- * Check l4type formatting, and bail out before we touch the hardware
+ * Check flow_type formatting, and bail out before we touch the hardware
* if there's a configuration issue
*/
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
- ixgbe_atr_get_src_port_82599(input, &src_port);
- ixgbe_atr_get_dst_port_82599(input, &dst_port);
- ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
- fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- /* Now figure out if we're IPv4 or IPv6 */
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- /* IPv6 */
- ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
- &src_ipv6_3, &src_ipv6_4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
- /* The last 4 bytes is the same register as IPv4 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
- fdircmd |= IXGBE_FDIRCMD_IPV6;
- fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
- } else {
- /* IPv4 */
- ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
- }
-
- ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
- (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
/*
- * Program the relevant mask registers. L4type cannot be
- * masked out in this implementation.
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+
+ /* Program FDIRM */
+ switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+ case 0xEFFF:
+ /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ case 0xE000:
+ /* Unmask VLAN prio - bit 1 */
+ fdirm &= ~IXGBE_FDIRM_VLANP;
break;
- case IXGBE_ATR_L4TYPE_UDP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ case 0x0FFF:
+ /* Unmask VLAN ID - bit 0 */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
break;
- default:
- /* this already would have failed above */
+ case 0x0000:
+ /* do nothing, vlans already masked */
break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
}
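	/*
	 * Worked example (hypothetical mask): vlan_id_mask = htons(0x0FFF)
	 * hits case 0x0FFF above and clears only IXGBE_FDIRM_VLANID, so the
	 * VLAN ID is compared while the priority bits stay masked; a mask of
	 * 0xEFFF clears both bits via the fall-through.
	 */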
- /* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask)
- /* Mask both VLAN and VLANP - bits 0 and 1 */
- fdirm |= 0x3;
-
- if (input_masks->data_mask)
- /* Flex bytes need masking, so mask the whole thing - bit 4 */
- fdirm |= 0x10;
+ if (input_masks->flex_mask & 0xFFFF) {
+ if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* Unmask Flex Bytes - bit 4 */
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ }
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- fdirm |= 0x24;
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
- fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
- fdircmd |= IXGBE_FDIRCMD_LAST;
- fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_masks->src_ip_mask[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_masks->dst_ip_mask[0]);
+
+ /* Apply masks to input data */
+ input->formatted.vlan_id &= input_masks->vlan_id_mask;
+ input->formatted.flex_bytes &= input_masks->flex_mask;
+ input->formatted.src_port &= input_masks->src_port_mask;
+ input->formatted.dst_port &= input_masks->dst_port_mask;
+ input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+ input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+ /* record vlan (little-endian) and flex_bytes (big-endian) */
+ fdirvlan =
+ IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /* we only want the bucket hash so drop the upper 16 bits */
+ fdirhash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return 0;
}
+
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
hw->addr_ctrl.overflow_promisc = 0;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b393..2002ea88ca2a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
- reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+ ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
struct ethtool_rx_ntuple *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
- struct ixgbe_atr_input input_struct;
+ struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+ union ixgbe_atr_input input_struct;
struct ixgbe_atr_input_masks input_masks;
int target_queue;
+ int err;
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
* Don't allow programming if the action is a queue greater than
* the number of online Tx queues.
*/
- if ((fs.action >= adapter->num_tx_queues) ||
- (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ if ((fs->action >= adapter->num_tx_queues) ||
+ (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
return -EINVAL;
- memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
- input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
- input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
- input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
- input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
- input_masks.vlan_id_mask = fs.vlan_tag_mask;
- /* only use the lowest 2 bytes for flex bytes */
- input_masks.data_mask = (fs.data_mask & 0xffff);
-
- switch (fs.flow_type) {
+ /* record flow type */
+ switch (fs->flow_type) {
+ case IPV4_FLOW:
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
case TCP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
default:
return -1;
}
- /* Mask bits from the inputs based on user-supplied mask */
- ixgbe_atr_set_src_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
- ixgbe_atr_set_dst_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
- /* 82599 expects these to be byte-swapped for perfect filtering */
- ixgbe_atr_set_src_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
- ixgbe_atr_set_dst_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
-
- /* VLAN and Flex bytes are either completely masked or not */
- if (!fs.vlan_tag_mask)
- ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
-
- if (!input_masks.data_mask)
- /* make sure we only use the first 2 bytes of user data */
- ixgbe_atr_set_flex_byte_82599(&input_struct,
- (fs.data & 0xffff));
+ /* copy vlan tag minus the CFI bit */
+ if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+ input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+ if (!fs->vlan_tag_mask) {
+ input_masks.vlan_id_mask = htons(0xEFFF);
+ } else {
+ switch (~fs->vlan_tag_mask & 0xEFFF) {
+ /* all of these are valid vlan-mask values */
+ case 0xEFFF:
+ case 0xE000:
+ case 0x0FFF:
+ case 0x0000:
+ input_masks.vlan_id_mask =
+ htons(~fs->vlan_tag_mask);
+ break;
+ /* exit with error if vlan-mask is invalid */
+ default:
+ e_err(drv, "Partial VLAN ID or "
+ "priority mask in vlan-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+ }
+
+ /* make sure we only use the first 2 bytes of user data */
+ if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+ input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+ if (!(fs->data_mask & 0xFFFF)) {
+ input_masks.flex_mask = 0xFFFF;
+ } else if (~fs->data_mask & 0xFFFF) {
+ e_err(drv, "Partial user-def-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+
+ /*
+ * Copy input into formatted structures
+ *
+ * These assignments are based on the following logic:
+ * If neither input nor mask is set, assume the value is masked out.
+ * If input is set but mask is not, the mask should default to accept all.
+ * If input is not set but mask is set, the mask likely results in 0.
+ * If both input and mask are set, assign both.
+ */
+ if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+ input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+ if (!fs->m_u.tcp_ip4_spec.ip4src)
+ input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.src_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4src;
+ }
+ if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+ input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+ if (!fs->m_u.tcp_ip4_spec.ip4dst)
+ input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.dst_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4dst;
+ }
+ if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+ input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+ if (!fs->m_u.tcp_ip4_spec.psrc)
+ input_masks.src_port_mask = 0xFFFF;
+ else
+ input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+ }
+ if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+ input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+ if (!fs->m_u.tcp_ip4_spec.pdst)
+ input_masks.dst_port_mask = 0xFFFF;
+ else
+ input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+ }
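	/*
	 * Worked example (hypothetical rule): h_u.tcp_ip4_spec.psrc = htons(80)
	 * with m_u.tcp_ip4_spec.psrc = 0 takes the "mask is not set" branch
	 * above and yields src_port_mask = 0xFFFF, i.e. an exact match on
	 * source port 80; a non-zero ethtool mask is inverted into the
	 * driver's compare mask instead.
	 */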
/* determine if we need to drop or route the packet */
- if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
target_queue = MAX_RX_QUEUES - 1;
else
- target_queue = fs.action;
+ target_queue = fs->action;
spin_lock(&adapter->fdir_perfect_lock);
- ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
- &input_masks, 0, target_queue);
+ err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+ &input_struct,
+ &input_masks, 0,
+ target_queue);
spin_unlock(&adapter->fdir_perfect_lock);
- return 0;
+ return err ? -1 : 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
- static const unsigned int bufflen = 4096;
+ static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
if (!netdev || !sgl)
return 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
+ /*
+ * lastsize can not be equal to bufflen.
+ * If it is, add one more buffer with lastsize = 1.
+ */
+ if (lastsize == bufflen) {
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough user buffers. We need an extra "
+ "buffer because lastsize is bufflen.\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
+ ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+ j++;
+ lastsize = 1;
+ }
+
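	/*
	 * Worked example (hypothetical transfer): an 8 KB DDP built from two
	 * full 4 KB buffers ends with lastsize == bufflen, so the shared
	 * extra buffer is appended as a third entry and lastsize is reported
	 * as 1 instead.
	 */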
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
e_err(drv, "failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (fcoe->extra_ddp_buffer == NULL) {
+ e_err(drv, "failed to allocated extra DDP buffer\n");
+ goto out_extra_ddp_buffer_alloc;
+ }
+
+ fcoe->extra_ddp_buffer_dma =
+ dma_map_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ goto out_extra_ddp_buffer_dma;
+ }
}
/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
}
#endif
+
+ return;
+
+out_extra_ddp_buffer_dma:
+ kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+ pci_pool_destroy(fcoe->pool);
+ fcoe->pool = NULL;
}
/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
if (fcoe->pool) {
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f8197..30f9ccfb4f87 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "3.0.12-k2"
+#define DRV_VERSION "3.2.9-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
+
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
- rxdctl & ~IXGBE_RXDCTL_ENABLE);
- IXGBE_WRITE_FLUSH(hw);
+ ixgbe_disable_rx_queue(adapter, ring);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -3148,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
u32 mhadd, hlreg0;
/* Decide whether to use packet split mode or not */
+ /* On by default */
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
/* Do not use packet split if we're in SR-IOV Mode */
- if (!adapter->num_vfs)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ if (adapter->num_vfs)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Disable packet split due to 82599 erratum #45 */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3693,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* We need to try and force an autonegotiation
* session, then bring up link.
*/
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
schedule_work(&adapter->multispeed_fiber_task);
} else {
@@ -4064,7 +4100,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_FLUSH(hw);
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
msleep(10);
netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4829,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+ IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ e_err(probe,
+ "Flow Director is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
@@ -4825,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbe_q_vector *q_vector;
- int napi_vectors;
int (*poll)(struct napi_struct *, int);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
poll = &ixgbe_clean_rxtx_many;
} else {
num_q_vectors = 1;
- napi_vectors = 1;
poll = &ixgbe_poll;
}
@@ -5094,16 +5137,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- if (dev->features & NETIF_F_NTUPLE) {
- /* Flow Director perfect filter enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->atr_sample_rate = 0;
- spin_lock_init(&adapter->fdir_perfect_lock);
- } else {
- /* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 20;
- }
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
@@ -5931,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
unregister_netdev(adapter->netdev);
return;
}
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
/* This will also work for DA Twinax connections */
@@ -6474,38 +6513,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
writel(i, tx_ring->tail);
}
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
{
- struct ixgbe_atr_input atr_input;
- struct iphdr *iph = ip_hdr(skb);
- struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+ union ixgbe_atr_hash_dword input = { .dword = 0 };
+ union ixgbe_atr_hash_dword common = { .dword = 0 };
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
struct tcphdr *th;
- u16 vlan_id;
+ __be16 vlan_id;
+
+ /* if ring doesn't have an interrupt vector, cannot perform ATR */
+ if (!q_vector)
+ return;
- /* Right now, we support IPv4 w/ TCP only */
- if (protocol != htons(ETH_P_IP) ||
- iph->protocol != IPPROTO_TCP)
+ /* do nothing if sampling is disabled */
+ if (!ring->atr_sample_rate)
return;
- memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+ ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
- vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
- IXGBE_TX_FLAGS_VLAN_SHIFT;
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (protocol != __constant_htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
th = tcp_hdr(skb);
- ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
- ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
- ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
- ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
- /* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+ /* skip this packet since the socket is closing */
+ if (th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+ /*
+ * src and dst are inverted, think how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
+ input.formatted.vlan_id = vlan_id;
+
+ /*
+ * since src port and flex bytes occupy the same word, XOR them together
+ * and write the value to the source port portion of the compressed dword
+ */
+ if (vlan_id)
+ common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ else
+ common.port.src ^= th->dest ^ protocol;
+ common.port.dst ^= th->source;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ } else {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ }
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
- ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
}
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6580,8 +6673,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- struct net_device *netdev = tx_ring->netdev;
- struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -6676,19 +6767,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
- if (tx_ring->atr_sample_rate) {
- ++tx_ring->atr_count;
- if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &tx_ring->state)) {
- ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags, protocol);
- tx_ring->atr_count = 0;
- }
- }
- txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
- txq->tx_bytes += skb->len;
- txq->tx_packets++;
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
@@ -6846,8 +6926,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
- /* accurate rx/tx bytes/packets stats */
- dev_txq_stats_fold(netdev, stats);
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6864,6 +6942,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
stats->rx_bytes += bytes;
}
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467d3c7..fd3358f54139 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L3P 0x00000008
-#define IXGBE_FDIRM_L4P 0x00000010
-#define IXGBE_FDIRM_FLEX 0x00000020
-#define IXGBE_FDIRM_DIPv6 0x00000040
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer;
#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET 0
-#define IXGBE_ATR_SRC_IPV6_OFFSET 2
-#define IXGBE_ATR_SRC_IPV4_OFFSET 14
-#define IXGBE_ATR_DST_IPV6_OFFSET 18
-#define IXGBE_ATR_DST_IPV4_OFFSET 30
-#define IXGBE_ATR_SRC_PORT_OFFSET 34
-#define IXGBE_ATR_DST_PORT_OFFSET 36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET 40
-#define IXGBE_ATR_L4TYPE_OFFSET 41
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
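/*
 * Reading of the encoding above (informal): the low two bits of each flow
 * type match the legacy IXGBE_ATR_L4TYPE_* values and bit 2 corresponds to
 * IXGBE_ATR_L4TYPE_IPV6_MASK, so e.g. TCPV6 = 0x4 | 0x2 = 0x6.
 */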
/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
- /* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
*
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
* vlan_id - 2 bytes
* src_ip - 16 bytes
* dst_ip - 16 bytes
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * vm_pool - 1 byte
- * l4type - 1 byte
+ * rsvd0 - 2 bytes - reserved space, must be 0.
*/
- u8 byte_stream[42];
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 rsvd0;
+ } formatted;
+ __be32 dword_stream[11];
+};
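/*
 * Informal size check: vm_pool(1) + flow_type(1) + vlan_id(2) + dst_ip(16) +
 * src_ip(16) + src_port(2) + dst_port(2) + flex_bytes(2) + rsvd0(2) =
 * 44 bytes, matching the 11 32-bit entries of dword_stream above.
 */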
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
};
struct ixgbe_atr_input_masks {
- u32 src_ip_mask;
- u32 dst_ip_mask;
- u16 src_port_mask;
- u16 dst_port_mask;
- u16 vlan_id_mask;
- u16 data_mask;
+ __be16 rsvd0;
+ __be16 vlan_id_mask;
+ __be32 dst_ip_mask[4];
+ __be32 src_ip_mask[4];
+ __be16 src_port_mask;
+ __be16 dst_port_mask;
+ __be16 flex_mask;
};
enum ixgbe_eeprom_type {
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
}
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
udelay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- if (!(ctrl & IXGBE_CTRL_RST))
+ if (!(ctrl & reset_bit))
break;
}
- if (ctrl & IXGBE_CTRL_RST) {
+ if (ctrl & reset_bit) {
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 183765cb7f25..f35554d11441 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -238,7 +238,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
goto out;
}
/* allocate the tx and rx ring buffer descriptors. */
- /* returns a virtual addres and a physical address. */
+ /* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
if (mdiobus_register(bp->mii_bus))
goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845affea13..5933621ac3ff 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
rcu_read_lock_bh();
vlan = rcu_dereference(q->vlan);
if (vlan)
- netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
return err;
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
} else {
int i;
+ buf->direct.buf = NULL;
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
buf->direct.map);
else {
- if (BITS_PER_LONG == 64)
+ if (BITS_PER_LONG == 64 && buf->direct.buf)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- unsigned long addr;
+ phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
if (!priv->catas_err.map) {
- mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
- addr);
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+ (unsigned long long) addr);
return;
}
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40cd876..1ff6ca6466ed 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mlx4_uar_alloc(dev, &mdev->priv_uar))
goto err_pd;
- mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+ PAGE_SIZE);
if (!mdev->uar_map)
goto err_uar;
spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e3..897f576b8b17 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int i;
int err;
- dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+ dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+ prof->tx_ring_num, prof->rx_ring_num);
if (dev == NULL) {
mlx4_err(mdev, "Net device allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
- if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
- mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
- }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d8fa71..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_uar_table_free;
}
- priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
mlx4_err(dev, "Couldn't map kernel access region, "
"aborting.\n");
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
{ 0, }
};
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7ef7b6..79cf42db2ea9 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
* entry in hash chain and *mgm holds end of hash chain.
*/
static int find_mgm(struct mlx4_dev *dev,
- u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+ u8 *gid, enum mlx4_protocol protocol,
+ struct mlx4_cmd_mailbox *mgm_mailbox,
u16 *hash, int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev,
return err;
}
- if (!memcmp(mgm->gid, gid, 16))
+ if (!memcmp(mgm->gid, gid, 16) &&
+ be32_to_cpu(mgm->members_count) >> 30 == protocol)
return err;
*prev = *index;
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback)
+ int block_mcast_loopback, enum mlx4_protocol protocol)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mutex_lock(&priv->mcg_table.mutex);
- err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
memcpy(mgm->gid, gid, 16);
}
- members_count = be32_to_cpu(mgm->members_count);
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == MLX4_QP_PER_MGM) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
- mgm->members_count = cpu_to_be32(members_count);
+ mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
err = mlx4_WRITE_MCG(dev, index, mailbox);
if (err)
@@ -242,7 +244,8 @@ out:
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol protocol)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
mutex_lock(&priv->mcg_table.mutex);
- err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
goto out;
}
- members_count = be32_to_cpu(mgm->members_count);
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (loc = -1, i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
loc = i;
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
}
- mgm->members_count = cpu_to_be32(--members_count);
+ mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a37fcf11ab36..ea5cfe2c3a04 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
return -EIO;
}
- status = pci_restore_state(pdev);
- if (status)
- return status;
+ pci_restore_state(pdev);
status = pci_enable_device(pdev);
if (status) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad82..9fb59d3f9c92 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
{
struct niu_parent *parent = np->parent;
int first_rx_channel, first_tx_channel;
+ int num_rx_rings, num_tx_rings;
+ struct rx_ring_info *rx_rings;
+ struct tx_ring_info *tx_rings;
int i, port, err;
port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
first_tx_channel += parent->txchan_per_port[i];
}
- np->num_rx_rings = parent->rxchan_per_port[port];
- np->num_tx_rings = parent->txchan_per_port[port];
+ num_rx_rings = parent->rxchan_per_port[port];
+ num_tx_rings = parent->txchan_per_port[port];
- netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
- netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
- np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
- GFP_KERNEL);
+ rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+ GFP_KERNEL);
err = -ENOMEM;
- if (!np->rx_rings)
+ if (!rx_rings)
goto out_err;
+ np->num_rx_rings = num_rx_rings;
+ smp_wmb();
+ np->rx_rings = rx_rings;
+
+ netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
return err;
}
- np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
- GFP_KERNEL);
+ tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+ GFP_KERNEL);
err = -ENOMEM;
- if (!np->tx_rings)
+ if (!tx_rings)
goto out_err;
+ np->num_tx_rings = num_tx_rings;
+ smp_wmb();
+ np->tx_rings = tx_rings;
+
+ netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
static void niu_get_rx_stats(struct niu *np)
{
unsigned long pkts, dropped, errors, bytes;
+ struct rx_ring_info *rx_rings;
int i;
pkts = dropped = errors = bytes = 0;
+
+ rx_rings = ACCESS_ONCE(np->rx_rings);
+ if (!rx_rings)
+ goto no_rings;
+
for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ struct rx_ring_info *rp = &rx_rings[i];
niu_sync_rx_discard_stats(np, rp, 0);
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
dropped += rp->rx_dropped;
errors += rp->rx_errors;
}
+
+no_rings:
np->dev->stats.rx_packets = pkts;
np->dev->stats.rx_bytes = bytes;
np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
static void niu_get_tx_stats(struct niu *np)
{
unsigned long pkts, errors, bytes;
+ struct tx_ring_info *tx_rings;
int i;
pkts = errors = bytes = 0;
+
+ tx_rings = ACCESS_ONCE(np->tx_rings);
+ if (!tx_rings)
+ goto no_rings;
+
for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ struct tx_ring_info *rp = &tx_rings[i];
pkts += rp->tx_packets;
bytes += rp->tx_bytes;
errors += rp->tx_errors;
}
+
+no_rings:
np->dev->stats.tx_packets = pkts;
np->dev->stats.tx_bytes = bytes;
np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- niu_get_rx_stats(np);
- niu_get_tx_stats(np);
-
+ if (netif_running(dev)) {
+ niu_get_rx_stats(np);
+ niu_get_tx_stats(np);
+ }
return &dev->stats;
}
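The niu changes above publish the ring arrays so the stats path can run against a half-initialised device: the count is written first, a write barrier (smp_wmb) orders it before the pointer store, and the reader samples the pointer once (ACCESS_ONCE) and bails out if it is still NULL. A rough userspace analogue using C11 atomics, with illustrative names only, might look like this:

#include <stdatomic.h>
#include <stdlib.h>

struct ring { long packets; };

static _Atomic(struct ring *) rings;
static atomic_int num_rings;

static int publish_rings(int n)
{
        struct ring *r = calloc(n, sizeof(*r));

        if (!r)
                return -1;
        atomic_store_explicit(&num_rings, n, memory_order_relaxed); /* count first   */
        atomic_store_explicit(&rings, r, memory_order_release);     /* then pointer  */
        return 0;
}

static long sum_packets(void)
{
        struct ring *r = atomic_load_explicit(&rings, memory_order_acquire);
        long total = 0;
        int i;

        if (!r)                 /* stats path may run before rings exist */
                return 0;
        for (i = 0; i < atomic_load_explicit(&num_rings, memory_order_relaxed); i++)
                total += r[i].packets;
        return total;
}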
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
}
ndev = alloc_etherdev(sizeof(struct ns83820));
- dev = PRIV(ndev);
-
err = -ENOMEM;
- if (!dev)
+ if (!ndev)
goto out;
+ dev = PRIV(ndev);
dev->ndev = ndev;
spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
struct pch_gbe_regs_mac_adr mac_adr[16];
u32 ADDR_MASK;
u32 MIIM;
- u32 reserve2;
+ u32 MAC_ADDR_LOAD;
u32 RGMII_ST;
u32 RGMII_CTRL;
u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a738..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
struct pch_gbe_adapter *adapter;
adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+ rtnl_lock();
pch_gbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
*/
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
-
- rtnl_lock();
- if (netif_running(netdev)) {
- pch_gbe_down(adapter);
- pch_gbe_up(adapter);
- }
- rtnl_unlock();
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
}
/**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
struct pch_gbe_buffer *buffer_info;
struct pch_gbe_rx_desc *rx_desc;
u32 length;
- unsigned char tmp_packet[ETH_HLEN];
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
- u8 skb_copy_flag = 0;
- u8 skb_padding_flag = 0;
i = rx_ring->next_to_clean;
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3], padding[-2] */
- length = (rx_desc->rx_words_eob) - 3 - 2;
+ /* length convert[-3] */
+ length = (rx_desc->rx_words_eob) - 3;
/* Decide the data conversion method */
if (!adapter->rx_csum) {
/* [Header:14][payload] */
- skb_padding_flag = 0;
- skb_copy_flag = 1;
+ if (NET_IP_ALIGN) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* drop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(new_skb->data, skb->data,
+ length);
+ skb = new_skb;
+ } else {
+ /* DMA buffer is used as SKB as it is.*/
+ buffer_info->skb = NULL;
+ }
} else {
/* [Header:14][padding:2][payload] */
- skb_padding_flag = 1;
- if (length < copybreak)
- skb_copy_flag = 1;
- else
- skb_copy_flag = 0;
- }
-
- /* Data conversion */
- if (skb_copy_flag) { /* recycle skb */
- struct sk_buff *new_skb;
- new_skb =
- netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (new_skb) {
- if (!skb_padding_flag) {
- skb_reserve(new_skb,
- NET_IP_ALIGN);
+ /* The length includes padding length */
+ length = length - PCH_GBE_DMA_PADDING;
+ if ((length < copybreak) ||
+ (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.
+ * Padding data is deleted
+ * at the time of a copy.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* drop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
}
+ skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
- length);
- /* save the skb
- * in buffer_info as good */
+ ETH_HLEN);
+ memcpy(&new_skb->data[ETH_HLEN],
+ &skb->data[ETH_HLEN +
+ PCH_GBE_DMA_PADDING],
+ length - ETH_HLEN);
skb = new_skb;
- } else if (!skb_padding_flag) {
- /* dorrop error */
- pr_err("New skb allocation Error\n");
- goto dorrop;
+ } else {
+ /* Padding data is deleted
+ * by moving header data.*/
+ memmove(&skb->data[PCH_GBE_DMA_PADDING],
+ &skb->data[0], ETH_HLEN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = NULL;
}
- } else {
- buffer_info->skb = NULL;
}
- if (skb_padding_flag) {
- memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
- memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
- ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
-
- }
-
+ /* The length includes FCS length */
+ length = length - ETH_FCS_LEN;
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -2247,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
pch_gbe_set_ethtool_ops(netdev);
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);
/* setup the private structure */
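The reworked pch_gbe receive path strips the controller's 2-byte DMA pad that sits between the Ethernet header and the payload, either by copying into a freshly allocated skb (dropping the pad during the copy) or, on the fast path, by sliding the 14-byte header forward with memmove(). A minimal userspace sketch of the in-place variant, with sizes assumed to mirror ETH_HLEN and PCH_GBE_DMA_PADDING:

#include <string.h>

#define HDR_LEN 14      /* mirrors ETH_HLEN */
#define PAD_LEN 2       /* mirrors PCH_GBE_DMA_PADDING */

/* buf holds [header:14][pad:2][payload]; slide the header onto the pad
 * so header and payload become contiguous, and return the new start. */
static unsigned char *strip_rx_pad(unsigned char *buf)
{
        memmove(buf + PAD_LEN, buf, HDR_LEN);
        return buf + PAD_LEN;
}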
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac8551..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
/*
* Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
- * Early datasheets said to poll the reset bit, but now they say that
- * it "is not a reliable indicator and subsequently should be ignored."
- * We wait at least 10ms.
+ * We wait at least 2ms.
*/
- mdelay(10);
+ mdelay(2);
/*
* Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c158910f7ea..e953793a33ff 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481bf..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
data = ap->tpkt->data;
count = ap->tpkt->len;
fcs = ap->tfcs;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/*
* LCP packets with code values between 1 (configure-reqest)
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
code = data[0];
if (code != CONFACK && code != CONFREQ)
return;
- dlen = (data[2] << 8) + data[3];
+ dlen = get_unaligned_be16(data + 2);
if (len < dlen)
return; /* packet got truncated or length is bogus */
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) {
case LCP_MRU:
- val = (data[2] << 8) + data[3];
+ val = get_unaligned_be16(data + 2);
if (inbound)
ap->mru = val;
else
ap->chan.mtu = val;
break;
case LCP_ASYNCMAP:
- val = (data[2] << 24) + (data[3] << 16)
- + (data[4] << 8) + data[5];
+ val = get_unaligned_be32(data + 2);
if (inbound)
ap->raccm = val;
else
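The ppp_* conversions that follow replace open-coded shift arithmetic with get_unaligned_be16()/get_unaligned_be32(), which read big-endian fields from byte streams that may not be naturally aligned. Open-coded userspace equivalents of what those helpers compute, shown only as a sketch:

#include <stdint.h>

static uint16_t rd_be16(const uint8_t *p)
{
        /* big-endian 16-bit read, no alignment assumption */
        return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t rd_be32(const uint8_t *p)
{
        /* big-endian 32-bit read, no alignment assumption */
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}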
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cfd..43583309a65d 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
#include <linux/ppp-comp.h>
#include <linux/zlib.h>
+#include <asm/unaligned.h>
/*
* State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
*/
wptr[0] = PPP_ADDRESS(rptr);
wptr[1] = PPP_CONTROL(rptr);
- wptr[2] = PPP_COMP >> 8;
- wptr[3] = PPP_COMP;
+ put_unaligned_be16(PPP_COMP, wptr + 2);
wptr += PPP_HDRLEN;
- wptr[0] = state->seqno >> 8;
- wptr[1] = state->seqno;
+ put_unaligned_be16(state->seqno, wptr);
wptr += DEFLATE_OVHD;
olen = PPP_HDRLEN + DEFLATE_OVHD;
state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
}
/* Check the sequence number. */
- seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+ seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
if (seq != (state->seqno & 0xffff)) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484c0299..c7a6c4466978 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
@@ -210,7 +211,7 @@ struct ppp_net {
};
/* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
pp = skb_push(skb, 2);
proto = npindex_to_proto[npi];
- pp[0] = proto >> 8;
- pp[1] = proto;
+ put_unaligned_be16(proto, pp);
netif_stop_queue(dev);
skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
q = skb_put(frag, flen + hdrlen);
/* make the MP header */
- q[0] = PPP_MP >> 8;
- q[1] = PPP_MP;
+ put_unaligned_be16(PPP_MP, q);
if (ppp->flags & SC_MP_XSHORTSEQ) {
q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3e..9a1849a83e2a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
#include "ppp_mppe.h"
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
*/
obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf);
- obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */
- obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
+ put_unaligned_be16(PPP_COMP, obuf + 2);
obuf += PPP_HDRLEN;
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (state->debug >= 7)
printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
state->ccount);
- obuf[0] = state->ccount >> 8;
- obuf[1] = state->ccount & 0xff;
+ put_unaligned_be16(state->ccount, obuf);
if (!state->stateful || /* stateless mode */
((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d4..4e6b72f57de8 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
int islcp;
data = skb->data;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/* LCP packets with codes between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d204dc..44e316fd67b8 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 14
-#define QLCNIC_LINUX_VERSIONID "5.0.14"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID "5.0.15"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@ struct uni_data_desc{
u32 reserved[5];
};
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FW_IMAGE_REGION 0x74
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_add;
+};
+
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af709d395..4c14510e2a87 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[1])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
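The one-character fix above matters because eth_test->flags is a bitmask: testing with == would miss ETH_TEST_FL_OFFLINE whenever another bit (such as an earlier failure flag) is already set. A tiny standalone illustration with made-up flag names:

#include <stdio.h>

#define FL_OFFLINE 0x1          /* made-up stand-in for ETH_TEST_FL_OFFLINE */
#define FL_FAILED  0x2          /* made-up stand-in for ETH_TEST_FL_FAILED  */

int main(void)
{
        unsigned int flags = FL_OFFLINE | FL_FAILED;

        printf("equality test: %d, bitmask test: %d\n",
               flags == FL_OFFLINE,     /* 0: misses the offline request */
               !!(flags & FL_OFFLINE)); /* 1: detects it                 */
        return 0;
}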
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c39d3ee..a7f1d5b7e811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0;
}
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+ if (flt_entry == NULL) {
+ dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ return -EIO;
+ }
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
+
int
qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_flt_entry fw_entry;
u32 ver = -1, min_ver;
+ int ret;
- qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+ if (!ret)
+ /* bytes 0-4: signature, bytes 4-8: fw version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
ver = QLCNIC_DECODE_VERSION(ver);
min_ver = QLCNIC_MIN_FW_VERSION;
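The new qlcnic_get_flt_entry() reads the flash layout header, loads the entry table that follows it, and linearly scans for the requested region id; the caller then reads the firmware version four bytes past that region's start address, falling back to the fixed legacy offset if the lookup fails. A userspace sketch of the scan, with a struct that only loosely mirrors qlcnic_flt_entry:

#include <stdint.h>
#include <stddef.h>

struct flt_entry_ex {                   /* loose stand-in, not the real layout */
        uint8_t  region;
        uint8_t  attrib;
        uint32_t size;
        uint32_t start_addr;
};

static const struct flt_entry_ex *
find_region(const struct flt_entry_ex *tbl, size_t n, uint8_t region)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].region == region)
                        return &tbl[i];
        return NULL;    /* caller falls back to the fixed legacy offset */
}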
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46c0911..37c04b4fade3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20f64cd..7ffdb80adf40 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -554,6 +555,8 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
u32 saved_wolopts;
+
+ const struct firmware *fw;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -615,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
}
}
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int i;
RTL_W8(ERIDR, cmd);
@@ -628,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
break;
}
- ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -971,7 +975,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
if (pm)
pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
- netif_info(tp, ifup, dev, "link up\n");
+ if (net_ratelimit())
+ netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
@@ -1632,42 +1637,163 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
{
__le32 *phytable = (__le32 *)fw->data;
struct net_device *dev = tp->dev;
- size_t i;
+ size_t index, fw_size = fw->size / sizeof(*phytable);
+ u32 predata, count;
if (fw->size % sizeof(*phytable)) {
netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
return;
}
- for (i = 0; i < fw->size / sizeof(*phytable); i++) {
- u32 action = le32_to_cpu(phytable[i]);
+ for (index = 0; index < fw_size; index++) {
+ u32 action = le32_to_cpu(phytable[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
- if (!action)
+ switch(action & 0xf0000000) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_READ_EFUSE:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
break;
- if ((action & 0xf0000000) != PHY_WRITE) {
- netif_err(tp, probe, dev,
- "unknown action 0x%08x\n", action);
+ case PHY_BJMPN:
+ if (regno > index) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ netif_err(tp, probe, tp->dev,
+ "Invalid action 0x%08x\n", action);
return;
}
}
- while (i-- != 0) {
- u32 action = le32_to_cpu(*phytable);
+ predata = 0;
+ count = 0;
+
+ for (index = 0; index < fw_size; ) {
+ u32 action = le32_to_cpu(phytable[index]);
u32 data = action & 0x0000ffff;
- u32 reg = (action & 0x0fff0000) >> 16;
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ if (!action)
+ break;
switch(action & 0xf0000000) {
+ case PHY_READ:
+ predata = rtl_readphy(tp, regno);
+ count++;
+ index++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ index++;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ index++;
+ break;
+ case PHY_BJMPN:
+ index -= regno;
+ break;
+ case PHY_READ_EFUSE:
+ predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ index++;
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ index++;
+ break;
case PHY_WRITE:
- rtl_writephy(tp, reg, data);
- phytable++;
+ rtl_writephy(tp, regno, data);
+ index++;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (count == data)
+ index += 2;
+ else
+ index += 1;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ index++;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ index++;
break;
+ case PHY_WRITE_PREVIOUS:
+ rtl_writephy(tp, regno, predata);
+ index++;
+ break;
+ case PHY_SKIPN:
+ index += regno + 1;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ index++;
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
default:
BUG();
}
}
}
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+ const struct firmware **fw = &tp->fw;
+ int rc = !*fw;
+
+ if (rc) {
+ rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto out;
+ }
+
+ /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ rtl_phy_write_fw(tp, *fw);
+out:
+ return rc;
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -2041,7 +2167,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
void __iomem *ioaddr = tp->mmio_addr;
- const struct firmware *fw;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2105,11 +2230,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if (rtl_readphy(tp, 0x06) == 0xbf00 &&
- request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
- rtl_phy_write_fw(tp, fw);
- release_firmware(fw);
- } else {
+ if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
}
@@ -2159,7 +2281,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
void __iomem *ioaddr = tp->mmio_addr;
- const struct firmware *fw;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2214,11 +2335,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if (rtl_readphy(tp, 0x06) == 0xb300 &&
- request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
- rtl_phy_write_fw(tp, fw);
- release_firmware(fw);
- } else {
+ if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
}
@@ -2752,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
(tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2775,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
}
@@ -2784,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
}
@@ -2893,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ /* Disable ASPM completely as it can cause random devices to stop
+ * working, as well as full system hangs, for some PCIe device users. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
@@ -2926,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->cp_cmd = RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3069,20 +3202,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- rtl8169_init_phy(dev, tp);
-
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
-
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
+ netif_carrier_off(dev);
+
out:
return rc;
@@ -3111,6 +3237,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&tp->task);
+ rtl_release_firmware(tp);
+
unregister_netdev(dev);
if (pci_dev_run_wake(pdev))
@@ -3127,6 +3255,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
static int rtl8169_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
@@ -3162,6 +3291,15 @@ static int rtl8169_open(struct net_device *dev)
napi_enable(&tp->napi);
+ rtl8169_init_phy(dev, tp);
+
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
rtl_pll_power_up(tp);
rtl_hw_start(dev);
@@ -3171,7 +3309,7 @@ static int rtl8169_open(struct net_device *dev)
tp->saved_wolopts = 0;
pm_runtime_put_noidle(&pdev->dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ rtl8169_check_link_status(dev, tp, ioaddr);
out:
return retval;
@@ -3197,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
@@ -3639,7 +3778,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -3725,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
Cxpl_dbg_sel | \
ASF | \
PktCntrDisable | \
- PCIDAC | \
- PCIMulRW)
+ Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
@@ -3756,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
@@ -3769,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3796,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
}
}
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3810,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_W16(IntrMitigate, 0x0000);
@@ -3827,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
RTL_W16(IntrMask, tp->intr_event);
@@ -4521,12 +4653,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
break;
}
- /* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- break;
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Work around for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_26:
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ goto done;
+ /* Testers needed. */
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ /* Experimental science. Pktgen proof. */
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_25:
+ if (status == RxFIFOOver)
+ goto done;
+ break;
+ default:
+ break;
+ }
}
if (unlikely(status & SYSErr)) {
@@ -4562,7 +4715,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
(status & RxFIFOOver) ? (status | RxOverflow) : status);
status = RTL_R16(IntrStatus);
}
-
+done:
return IRQ_RETVAL(handled);
}
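The expanded rtl_phy_write_fw() above treats the firmware blob as a sequence of 32-bit action words: the top nibble selects the operation, bits 16-27 carry a register number or jump offset, and the low 16 bits carry immediate data; a first pass validates every word before a second pass executes them. A small userspace sketch of the decoding step, with illustrative names:

#include <stdint.h>

struct phy_action {
        uint32_t opcode;        /* action & 0xf0000000 */
        uint32_t regno;         /* (action & 0x0fff0000) >> 16 */
        uint32_t data;          /* action & 0x0000ffff */
};

static struct phy_action decode_action(uint32_t action)
{
        struct phy_action a = {
                .opcode = action & 0xf0000000,
                .regno  = (action & 0x0fff0000) >> 16,
                .data   = action & 0x0000ffff,
        };

        return a;
}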
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c6e675..002bac743843 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void)
int count;
int cpu;
+ if (rss_cpus)
+ return rss_cpus;
+
if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
- unsigned tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
- type >= EFX_TXQ_TYPES);
- return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- unsigned tx_channel_offset =
+
+ efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
/* Channel pointers were set in efx_init_struct() but we now
* need to clear them for TX queues in any RX-only channels. */
efx_for_each_channel(channel, efx) {
- if (channel->channel - tx_channel_offset >=
+ if (channel->channel - efx->tx_channel_offset >=
efx->n_tx_channels) {
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..ca886d98bdc7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests efx_tests;
+ struct efx_self_tests *efx_tests;
int already_up;
- int rc;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail2;
+ goto fail1;
}
}
- memset(&efx_tests, 0, sizeof(efx_tests));
-
- rc = efx_selftest(efx, &efx_tests, test->flags);
+ rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
- fail2:
- fail1:
+fail1:
/* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
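The sfc ethtool change above swaps a large on-stack struct efx_self_tests for a kzalloc()'d one, keeping the self-test results off the limited kernel stack. A userspace sketch of the same shape, with calloc() standing in for kzalloc() and an arbitrary size:

#include <stdlib.h>

struct big_results { char data[4096]; };        /* size is arbitrary */

static int run_selftest(void)
{
        struct big_results *res = calloc(1, sizeof(*res));   /* zeroed, off-stack */
        int rc = -1;

        if (!res)
                return rc;
        /* ... run the tests and fill in results ... */
        rc = 0;
        free(res);
        return rc;
}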
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 70e4f7dcce81..61ddd2c6e750 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1107,22 +1107,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
/* Restore PCI configuration if needed */
if (method == RESET_TYPE_WORLD) {
- if (efx_nic_is_dual_func(efx)) {
- rc = pci_restore_state(nic_data->pci_dev2);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to restore PCI config for "
- "the secondary function\n");
- goto fail3;
- }
- }
- rc = pci_restore_state(efx->pci_dev);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to restore PCI config for the "
- "primary function\n");
- goto fail4;
- }
+ if (efx_nic_is_dual_func(efx))
+ pci_restore_state(nic_data->pci_dev2);
+ pci_restore_state(efx->pci_dev);
netif_dbg(efx, drv, efx->net_dev,
"successfully restored PCI config\n");
}
@@ -1133,7 +1120,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
rc = -ETIMEDOUT;
netif_err(efx, hw, efx->net_dev,
"timed out waiting for hardware reset\n");
- goto fail5;
+ goto fail3;
}
netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
@@ -1141,11 +1128,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
-fail3:
pci_restore_state(efx->pci_dev);
fail1:
-fail4:
-fail5:
+fail3:
return rc;
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66ddf93a..28df8665256a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@ struct efx_nic {
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
+ unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
(_efx)->channel[_channel->channel + 1] : NULL)
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 581836867098..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -36,7 +36,7 @@
Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
- Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule
+ Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
"cur_rx:%4.4d, dirty_rx:%4.4d\n",
net_dev->name, sis_priv->cur_rx,
sis_priv->dirty_rx);
+ dev_kfree_skb(skb);
break;
}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba736..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- /* device is off until link detection */
- netif_carrier_off(dev);
-
return dev;
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf3b247..7d85a38377a1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
#include <asm/irq.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
#include "sky2.h"
#define DRV_NAME "sky2"
@@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
+
+static void sky2_vlan_mode(struct net_device *dev)
{
- if (onoff) {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+
+ if (dev->features & NETIF_F_HW_VLAN_RX)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_VLAN_TAG_ON);
- } else {
+ else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
+
+ dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
+ if (dev->features & NETIF_F_HW_VLAN_TX)
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_ON);
+ else {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
+
+ /* Can't do transmit offload of vlan without hw vlan */
+ dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+ | NETIF_F_ALL_CSUM);
}
}
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
- u16 port = sky2->port;
-
- netif_tx_lock_bh(dev);
- napi_disable(&hw->napi);
-
- sky2->vlgrp = grp;
- sky2_set_vlan_mode(hw, port, grp != NULL);
-
- sky2_read32(hw, B0_Y2_SP_LISR);
- napi_enable(&hw->napi);
- netif_tx_unlock_bh(dev);
-}
-#endif
-
/* Amount of required worst case padding in rx buffer */
static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
{
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
sky2->tx_ring_size - 1);
-#ifdef SKY2_VLAN_TAG_USED
- sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+ sky2_vlan_mode(sky2->netdev);
sky2_rx_start(sky2);
}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
}
ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
if (vlan_tx_tag_present(skb)) {
if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->length = cpu_to_be16(vlan_tx_tag_get(skb));
ctrl |= INS_VLAN;
}
-#endif
/* Handle TCP checksum offload */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16;
-#ifdef SKY2_VLAN_TAG_USED
- /* Account for vlan tag */
- if (sky2->vlgrp && (status & GMR_FS_VLAN))
- count -= VLAN_HLEN;
-#endif
+ if (status & GMR_FS_VLAN)
+ count -= VLAN_HLEN; /* Account for vlan tag */
netif_printk(sky2, rx_status, KERN_DEBUG, dev,
"rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static inline void sky2_skb_rx(const struct sky2_port *sky2,
u32 status, struct sk_buff *skb)
{
-#ifdef SKY2_VLAN_TAG_USED
- u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
- if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
- if (skb->ip_summed == CHECKSUM_NONE)
- vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
- else
- vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
- vlan_tag, skb);
- return;
- }
-#endif
+ if (status & GMR_FS_VLAN)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
if (skb->ip_summed == CHECKSUM_NONE)
netif_receive_skb(skb);
else
@@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
goto exit_loop;
break;
-#ifdef SKY2_VLAN_TAG_USED
case OP_RXVLAN:
sky2->rx_tag = length;
break;
@@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXCHKSVLAN:
sky2->rx_tag = length;
/* fall through */
-#endif
case OP_RXCHKS:
if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
| SKY2_HW_NEW_LE
| SKY2_HW_AUTO_TX_SUM
| SKY2_HW_ADV_POWER_CTL;
+
+ /* The workaround for status conflicts VLAN tag detection. */
+ if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ hw->flags |= SKY2_HW_VLAN_BROKEN;
break;
case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
u32 modes = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_TP;
+ | SUPPORTED_100baseT_Full;
if (hw->flags & SKY2_HW_GIGABIT)
modes |= SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full;
return modes;
} else
- return SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_FIBRE;
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
}
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (sky2_is_copper(hw)) {
ecmd->port = PORT_TP;
ecmd->speed = sky2->speed;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
u32 supported = sky2_supported_modes(hw);
if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ~supported)
+ return -EINVAL;
+
+ if (sky2_is_copper(hw))
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ else
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+
sky2->flags |= SKY2_FLAG_AUTO_SPEED;
- ecmd->advertising = supported;
sky2->duplex = -1;
sky2->speed = -1;
} else {
@@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
- sky2->advertising = ecmd->advertising;
-
if (netif_running(dev)) {
sky2_phy_reinit(sky2);
sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
static int sky2_set_flags(struct net_device *dev, u32 data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 supported =
- (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+ unsigned long old_feat = dev->features;
+ u32 supported = 0;
int rc;
+ if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+ supported |= ETH_FLAG_RXHASH;
+
+ if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+ supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+ printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+ supported, data);
+
rc = ethtool_op_set_flags(dev, data, supported);
if (rc)
return rc;
- rx_set_rss(dev);
+ if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+ rx_set_rss(dev);
+
+ if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+ sky2_vlan_mode(dev);
return 0;
}
@@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
.set_flags = sky2_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sky2_netpoll,
#endif
@@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
},
};
@@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
- | NETIF_F_TSO | NETIF_F_GRO;
+ | NETIF_F_TSO | NETIF_F_GRO;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
@@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (!(hw->flags & SKY2_HW_RSS_BROKEN))
dev->features |= NETIF_F_RXHASH;
-#ifdef SKY2_VLAN_TAG_USED
- /* The workaround for FE+ status conflicts with VLAN tag detection. */
- if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
- sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- }
-#endif
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
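The new sky2_vlan_mode() above derives the hardware VLAN strip/insert settings and dev->vlan_features from the dev->features bitmap instead of tracking a vlan_group. A userspace sketch of that feature-flag pattern; the flag values and names below are illustrative, not the kernel's NETIF_F_* constants:

#include <stdint.h>
#include <stdbool.h>

#define F_HW_VLAN_TX  0x01
#define F_HW_VLAN_RX  0x02
#define F_TSO         0x04
#define F_SG          0x08
#define F_ALL_VLAN    (F_HW_VLAN_TX | F_HW_VLAN_RX)

struct fake_dev { uint32_t features, vlan_features; };

static void apply_vlan_mode(struct fake_dev *dev, bool *rx_strip, bool *tx_tag)
{
        *rx_strip = dev->features & F_HW_VLAN_RX;
        *tx_tag   = dev->features & F_HW_VLAN_TX;

        /* vlan_features mirrors features minus the VLAN bits themselves */
        dev->vlan_features = dev->features & ~F_ALL_VLAN;
        if (!*tx_tag)   /* no hw tag insertion: no offloads over vlan */
                dev->vlan_features &= ~(F_TSO | F_SG);
}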
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc404f1ea..6861b0e8db9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@ struct sky2_port {
u16 rx_pending;
u16 rx_data_size;
u16 rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
- struct vlan_group *vlgrp;
-#endif
+
struct {
unsigned long last;
u32 mac_rp;
@@ -2284,6 +2281,7 @@ struct sky2_hw {
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
#define SKY2_HW_RSS_BROKEN 0x00000100
+#define SKY2_HW_VLAN_BROKEN 0x00000200
u8 chip_id;
u8 chip_rev;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
priv->hw = device;
- if (device_can_wakeup(priv->device))
+ if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(dev->irq);
+ }
return 0;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 296000bf5a25..3397618d4d96 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -12,7 +12,7 @@
/*
* RX HW/SW interaction overview
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of RX communication channels betwean driver and NIC.
+ * There are 2 types of RX communication channels between driver and NIC.
* 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
* traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds
* info about buffer's location, size and ID. An ID field is used to identify a
@@ -821,7 +821,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
/* use PMF to accept first MAC_MCST_NUM (15) addresses */
- /* TBD: sort addreses and write them in ascending order
+ /* TBD: sort addresses and write them in ascending order
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames throu IMF */
/* accept the rest of addresses throu IMF */
@@ -1346,7 +1346,7 @@ static void print_rxfd(struct rxf_desc *rxfd)
/*
* TX HW/SW interaction overview
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of TX communication channels betwean driver and NIC.
+ * There are 2 types of TX communication channels between driver and NIC.
* 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
* 2) TX Data Fifo - TXD - holds descriptors of full buffers.
*
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f69998..06c0e5033656 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -60,12 +60,6 @@
#define BAR_0 0
#define BAR_2 2
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
@@ -134,9 +128,6 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define TG3_RX_DMA_ALIGN 16
-#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -4722,8 +4713,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
- bool hw_vlan __maybe_unused = false;
- u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4771,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ copy_skb = netdev_alloc_skb(tp->dev, len +
TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4803,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
}
if (desc->type_flags & RXD_FLAG_VLAN &&
- !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
- vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
- if (tp->vlgrp)
- hw_vlan = true;
- else
-#endif
- {
- struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
- __skb_push(skb, VLAN_HLEN);
-
- memmove(ve, skb->data + VLAN_HLEN,
- ETH_ALEN * 2);
- ve->h_vlan_proto = htons(ETH_P_8021Q);
- ve->h_vlan_TCI = htons(vtag);
- }
- }
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb,
+ desc->err_vlan & RXD_VLAN_MASK);
-#if TG3_VLAN_TAG_USED
- if (hw_vlan)
- vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
- else
-#endif
- napi_gro_receive(&tnapi->napi, skb);
+ napi_gro_receive(&tnapi->napi, skb);
received++;
budget--;
@@ -5740,11 +5710,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
base_flags |= TXD_FLAG_TCPUDP_CSUM;
}
-#if TG3_VLAN_TAG_USED
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
len = skb_headlen(skb);
@@ -5986,11 +5954,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
}
}
-#if TG3_VLAN_TAG_USED
+
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -9532,17 +9499,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
RX_MODE_KEEP_VLAN_TAG);
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
* flag clear.
*/
-#if TG3_VLAN_TAG_USED
- if (!tp->vlgrp &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
- rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
- /* By definition, VLAN is disabled always in this
- * case.
- */
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
@@ -11198,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11214,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11230,31 +11194,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!netif_running(dev)) {
- tp->vlgrp = grp;
- return;
- }
-
- tg3_netif_stop(tp);
-
- tg3_full_lock(tp, 0);
-
- tp->vlgrp = grp;
-
- /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
- __tg3_set_rx_mode(dev);
-
- tg3_netif_start(tp);
-
- tg3_full_unlock(tp);
-}
-#endif
-
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct tg3 *tp = netdev_priv(dev);
@@ -13066,9 +13005,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
{
-#if TG3_VLAN_TAG_USED
dev->vlan_features |= flags;
-#endif
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13861,11 +13798,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_offset = NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
- tp->rx_offset -= NET_IP_ALIGN;
+ tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -14629,9 +14566,6 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14648,9 +14582,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14700,9 +14631,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
-#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
tp = netdev_priv(dev);
tp->pdev = pdev;
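The RX-path hunks above replace open-coded VLAN header reinsertion and the vlan_group lookup with the hardware-accelerated tag API. A minimal sketch of that pattern, using the two-argument __vlan_hwaccel_put_tag() of this kernel generation (rx_deliver() is a hypothetical helper, not tg3 code):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hand a hardware-stripped VLAN tag to the stack; illustrative only. */
static void rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
		       bool tag_stripped, u16 vtag)
{
	if (tag_stripped)
		__vlan_hwaccel_put_tag(skb, vtag);	/* record tag in the skb */
	napi_gro_receive(napi, skb);			/* stack handles demux */
}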
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c82..f528243e1a4f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2808,9 +2808,6 @@ struct tg3 {
u32 rx_std_max_post;
u32 rx_offset;
u32 rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
/* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5ec65b..7cb301da7474 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");
-
-#define IS_MULTICAST(mac_addr) \
- (((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
- (((u16 *)(mac_addr))[0] == 0xffff)
-
-
/*
* Queue of incoming packets for a specific cpu and device.
*
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/*
* FIXME: Implement HW multicast filter.
*/
- if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+ if (is_unicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
filter = compare_ether_addr(mine, buf);
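The helper used in the hunk above folds the removed IS_MULTICAST/IS_BROADCAST macros into one test: the broadcast address is itself a multicast address (bit 0 of the first octet set), so a single unicast check suffices. A self-contained userspace illustration of that bit test (approximating the etherdevice.h helpers, not quoting them):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool eth_is_multicast(const uint8_t *addr) { return addr[0] & 0x01; }
static bool eth_is_unicast(const uint8_t *addr)   { return !eth_is_multicast(addr); }

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };

	printf("broadcast is unicast? %d\n", eth_is_unicast(bcast));	/* 0 */
	printf("station addr unicast? %d\n", eth_is_unicast(ucast));	/* 1 */
	return 0;
}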
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7599c457abd1..b100bd50a0d7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1309,7 +1309,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case SIOCGIFHWADDR:
- /* Get hw addres */
+ /* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, ifreq_len))
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index acbdab3d66ca..715e7b47e7e9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <asm/uaccess.h>
@@ -2031,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(ha->addr[0] & 1))
+ if (!is_multicast_ether_addr(ha->addr))
continue;
/* Ask CPM to run CRC and set bit in
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104ab199..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
/*
* cdc_ncm.c
*
- * Copyright (C) ST-Ericsson 2010
+ * Copyright (C) ST-Ericsson 2010-2011
* Contact: Alexey Orishko <alexey.orishko@stericsson.com>
* Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
*
@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "30-Nov-2010"
+#define DRIVER_VERSION "7-Feb-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
*/
#define CDC_NCM_DPT_DATAGRAMS_MAX 32
+/* Maximum amount of IN datagrams in NTB */
+#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
+
/* Restart the timer, if amount of datagrams is less than given value */
#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
@@ -85,11 +88,6 @@
(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-struct connection_speed_change {
- __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
- __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
-} __attribute__ ((packed));
-
struct cdc_ncm_data {
struct usb_cdc_ncm_nth16 nth16;
struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
{
struct usb_cdc_notification req;
u32 val;
- __le16 max_datagram_size;
u8 flags;
u8 iface_no;
int err;
+ u16 ntb_fmt_supported;
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ /* devices prior to NCM Errata shall set this field to zero */
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
if (ctx->func_desc != NULL)
flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
"wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
- "wNdpOutAlignment=%u flags=0x%x\n",
+ "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, flags);
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
- /* max count of tx datagrams without terminating NULL entry */
- ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+ /* max count of tx datagrams */
+ if ((ctx->tx_max_datagrams == 0) ||
+ (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+ ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* verify maximum size of received NTB in bytes */
- if ((ctx->rx_max <
- (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
- (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+ if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+ pr_debug("Using min receive length=%d\n",
+ USB_CDC_NCM_NTB_MIN_IN_SIZE);
+ ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+ }
+
+ if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
pr_debug("Using default maximum receive length=%d\n",
CDC_NCM_NTB_MAX_SIZE_RX);
ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
}
+ /* inform device about NTB input size changes */
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+
+ if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+ struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+ req.wLength = 8;
+ ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ ndp_in_sz.wNtbInMaxDatagrams =
+ cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+ ndp_in_sz.wReserved = 0;
+ err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+ 1000);
+ } else {
+ __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+ req.wLength = 4;
+ err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+ NULL, 1000);
+ }
+
+ if (err)
+ pr_debug("Setting NTB Input Size failed\n");
+ }
+
/* verify maximum size of transmitted NTB in bytes */
if ((ctx->tx_max <
(CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* additional configuration */
/* set CRC Mode */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_CRC_MODE;
- req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
- pr_debug("Setting CRC mode off failed\n");
+ if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_CRC_MODE;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting CRC mode off failed\n");
+ }
- /* set NTB format */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
- req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
+ /* set NTB format, if both formats are supported */
+ if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting NTB format to 16-bit failed\n");
+ }
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
- pr_debug("Setting NTB format to 16-bit failed\n");
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
/* set Max Datagram Size (MTU) */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(2);
+ if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+ __le16 max_datagram_size;
+ u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = cpu_to_le16(2);
+
+ err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+ 1000);
+ if (err) {
+ pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+ CDC_NCM_MIN_DATAGRAM_SIZE);
+ } else {
+ ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+ /* Check Eth descriptor value */
+ if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+ if (ctx->max_datagram_size > eth_max_sz)
+ ctx->max_datagram_size = eth_max_sz;
+ } else {
+ if (ctx->max_datagram_size >
+ CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size =
+ CDC_NCM_MAX_DATAGRAM_SIZE;
+ }
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
- if (err) {
- pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
- CDC_NCM_MIN_DATAGRAM_SIZE);
- /* use default */
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- } else {
- ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size =
+ CDC_NCM_MIN_DATAGRAM_SIZE;
+
+ /* if value changed, update device */
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 2;
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+ err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+ 0, NULL, 1000);
+ if (err)
+ pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+ }
- if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
}
if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
ctx->ether_desc =
(const struct usb_cdc_ether_desc *)buf;
-
dev->hard_mtu =
le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- if (dev->hard_mtu <
- (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
- dev->hard_mtu =
- CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
-
- else if (dev->hard_mtu >
- (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
- dev->hard_mtu =
- CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+ if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+ dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
+ else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+ dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
break;
case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
u32 offset;
u32 last_offset;
u16 n = 0;
- u8 timeout = 0;
+ u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
if (skb != NULL)
swap(skb, ctx->tx_rem_skb);
else
- timeout = 1;
+ ready2send = 1;
/*
* +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
for (; n < ctx->tx_max_datagrams; n++) {
/* check if end of transmit buffer is reached */
- if (offset >= ctx->tx_max)
+ if (offset >= ctx->tx_max) {
+ ready2send = 1;
break;
-
+ }
/* compute maximum buffer size */
rem = ctx->tx_max - offset;
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
}
ctx->tx_rem_skb = skb;
skb = NULL;
-
- /* loop one more time */
- timeout = 1;
+ ready2send = 1;
}
break;
}
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->tx_curr_last_offset = last_offset;
goto exit_no_skb;
- } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+ } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+ ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
ctx->tx_ndp_modulus);
memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
sizeof(struct usb_cdc_ncm_dpe16));
ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
- ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+ ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+ memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
&(ctx->tx_ncm.ndp16),
sizeof(ctx->tx_ncm.ndp16));
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+ memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
sizeof(ctx->tx_ncm.ndp16),
&(ctx->tx_ncm.dpe16),
(ctx->tx_curr_frame_num + 1) *
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
restart = 1;
- } else
+ } else {
restart = 0;
+ }
spin_unlock(&ctx->mtx);
- if (restart)
+ if (restart) {
+ spin_lock(&ctx->mtx);
cdc_ncm_tx_timeout_start(ctx);
- else if (ctx->netdev != NULL)
+ spin_unlock(&ctx->mtx);
+ } else if (ctx->netdev != NULL) {
usbnet_start_xmit(NULL, ctx->netdev);
+ }
}
static struct sk_buff *
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
if (ctx->tx_curr_skb != NULL)
need_timer = 1;
- spin_unlock(&ctx->mtx);
/* Start timer, if there is a remaining skb */
if (need_timer)
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
if (skb_out)
dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+ spin_unlock(&ctx->mtx);
return skb_out;
error:
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
goto error;
}
- temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+ temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
pr_debug("invalid DPT16 index\n");
goto error;
@@ -1020,14 +1092,16 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
if (((offset + temp) > actlen) ||
(temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
pr_debug("invalid frame detected (ignored)"
- "offset[%u]=%u, length=%u, skb=%p\n",
- x, offset, temp, skb);
+ "offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, temp, skb_in);
if (!x)
goto error;
break;
} else {
skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!skb)
+ goto error;
skb->len = temp;
skb->data = ((u8 *)skb_in->data) + offset;
skb_set_tail_pointer(skb, temp);
@@ -1041,10 +1115,10 @@ error:
static void
cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
- struct connection_speed_change *data)
+ struct usb_cdc_speed_change *data)
{
- uint32_t rx_speed = le32_to_cpu(data->USBitRate);
- uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+ uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+ uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
/*
* Currently the USB-NET API does not support reporting the actual
@@ -1085,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
/* test for split data in 8-byte chunks */
if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
cdc_ncm_speed_change(ctx,
- (struct connection_speed_change *)urb->transfer_buffer);
+ (struct usb_cdc_speed_change *)urb->transfer_buffer);
return;
}
@@ -1113,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
- if (urb->actual_length <
- (sizeof(*event) + sizeof(struct connection_speed_change)))
+ if (urb->actual_length < (sizeof(*event) +
+ sizeof(struct usb_cdc_speed_change)))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
cdc_ncm_speed_change(ctx,
- (struct connection_speed_change *) &event[1]);
+ (struct usb_cdc_speed_change *) &event[1]);
break;
default:
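The max-datagram negotiation added above reduces to a small clamp: the device-reported size is capped by the Ethernet descriptor's wMaxSegmentSize when that value is itself below the driver ceiling, otherwise by the driver ceiling, and finally raised to the driver floor. A standalone restatement of that ordering (parameter names are illustrative; the actual constants are the driver's):

#include <stdint.h>

static uint16_t clamp_datagram_size(uint16_t reported, uint16_t eth_max,
				    uint16_t drv_min, uint16_t drv_max)
{
	uint16_t size = reported;

	if (eth_max < drv_max) {
		if (size > eth_max)		/* trust the Ethernet descriptor */
			size = eth_max;
	} else if (size > drv_max) {
		size = drv_max;			/* fall back to driver ceiling */
	}
	if (size < drv_min)
		size = drv_min;			/* never below the minimum */
	return size;
}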
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
+ USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
static void hso_free_tiomget(struct hso_serial *serial)
{
- struct hso_tiocmget *tiocmget = serial->tiocmget;
+ struct hso_tiocmget *tiocmget;
+ if (!serial)
+ return;
+ tiocmget = serial->tiocmget;
if (tiocmget) {
- if (tiocmget->urb) {
- usb_free_urb(tiocmget->urb);
- tiocmget->urb = NULL;
- }
+ usb_free_urb(tiocmget->urb);
+ tiocmget->urb = NULL;
serial->tiocmget = NULL;
kfree(tiocmget);
-
}
}
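Two properties make the shortened cleanup above safe: usb_free_urb() and kfree() both accept NULL and do nothing, so the only check that still matters is the new guard on serial itself. Restated without the diff markers (a sketch of the patched function's shape):

/* NULL-tolerant teardown; relies on usb_free_urb(NULL)/kfree(NULL) being no-ops. */
static void free_tiocmget_sketch(struct hso_serial *serial)
{
	struct hso_tiocmget *tiocmget;

	if (!serial)			/* the guard this patch adds */
		return;

	tiocmget = serial->tiocmget;
	if (tiocmget) {
		usb_free_urb(tiocmget->urb);
		tiocmget->urb = NULL;
		serial->tiocmget = NULL;
		kfree(tiocmget);
	}
}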
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
err("Firmware too big: %zu", fw->size);
+ release_firmware(fw);
return -ENOSPC;
}
data_len = fw->size;
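Both this kaweth fix and the vxge fix further down add the same missing call: once request_firmware() has succeeded, every exit path, including early validation failures, must call release_firmware(). A hedged sketch of the pattern (load_blob() and the blob name are illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>

static int load_blob(struct device *dev, size_t max_len)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example.bin", dev);
	if (err)
		return err;

	if (fw->size > max_len) {
		release_firmware(fw);	/* the call the fix adds */
		return -ENOSPC;
	}

	/* ... consume fw->data ... */
	release_firmware(fw);
	return 0;
}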
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
if (urb != NULL) {
clear_bit (EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
+ if (status < 0) {
+ usb_free_urb(urb);
goto fail_lowmem;
+ }
if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cab96ad49e60..09cac704fdd7 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -898,7 +898,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
set_mii_flow_control(vptr);
/*
- Check if new status is consisent with current status
+ Check if new status is consistent with current status
if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
(mii_status==curr_status)) {
vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
}
}
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (napi_schedule_prep(&vi->napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- napi_enable(&vi->napi);
+ virtnet_napi_enable(vi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- virtqueue_disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
+ virtnet_napi_enable(vi);
return 0;
}
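The helper introduced above closes a small race: if the device filled every receive buffer before napi_enable() ran, no further interrupt will arrive, so the driver must schedule NAPI once itself; NAPI_STATE_SCHED, taken by napi_schedule_prep(), serializes this against a concurrent interrupt. In generic form (a sketch, not the virtio_net function):

#include <linux/netdevice.h>
#include <linux/virtio.h>

static void napi_enable_and_kick(struct napi_struct *napi, struct virtqueue *vq)
{
	napi_enable(napi);
	if (napi_schedule_prep(napi)) {		/* claim NAPI_STATE_SCHED */
		virtqueue_disable_cb(vq);	/* poll routine re-enables it */
		__napi_schedule(napi);
	}
}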
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5b..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
static int enable_mq = 1;
static int irq_share_mode;
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
/*
* Enable/Disable the given intr
*/
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
int i;
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
+ spin_unlock(&adapter->cmd_lock);
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb_transport_header(skb))->doff * 4;
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
- unsigned int pull_size;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)
skb_network_header(skb);
- if (iph->protocol == IPPROTO_TCP) {
- pull_size = ctx->eth_ip_hdr_size +
- sizeof(struct tcphdr);
-
- if (unlikely(!pskb_may_pull(skb,
- pull_size))) {
- goto err;
- }
+ if (iph->protocol == IPPROTO_TCP)
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
- } else if (iph->protocol == IPPROTO_UDP) {
+ else if (iph->protocol == IPPROTO_UDP)
+ /*
+ * Use tcp header size so that bytes to
+ * be copied are more than required by
+ * the device.
+ */
ctx->l4_hdr_size =
- sizeof(struct udphdr);
- } else {
+ sizeof(struct tcphdr);
+ else
ctx->l4_hdr_size = 0;
- }
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_DriverShared *shared = adapter->shared;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
if (grp) {
/* add vlan rx stripping. */
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
int i;
- struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = grp;
- /* update FEATURES to device */
- devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
/*
* Clear entire vfTable; then enable untagged pkts.
* Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
vfTable[i] = 0;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else {
printk(KERN_ERR "%s: vlan_rx_register when device has "
"no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
*/
vfTable[i] = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
- /* update FEATURES to device */
- devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
}
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1985,6 +1990,7 @@ static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->mfTablePA = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
kfree(new_table);
}
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
- if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
- adapter->vlan_grp) {
+ if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- }
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
+ vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
/* the rest are already zeroed */
}
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
int err, i;
u32 ret;
+ unsigned long flags;
dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
adapter->shared_pa));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -2263,12 +2277,15 @@ int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
int i;
+ unsigned long flags;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
ring0_size = adapter->rx_queue[0].rx_ring[0].size;
ring0_size = (ring0_size + sz - 1) / sz * sz;
- ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+ ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
break;
} else {
/* If fails to enable required number of MSI-x vectors
- * try enabling 3 of them. One each for rx, tx and event
+ * try enabling the minimum number of vectors required.
*/
vectors = vector_threshold;
printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
u32 cfg;
/* intr settings */
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock(&adapter->cmd_lock);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
*/
if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
- || adapter->num_rx_queues != 2) {
+ || adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
printk(KERN_ERR "Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
+ spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
+ unsigned long flags;
int i = 0;
if (!netif_running(netdev))
return 0;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
+
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
static int
vmxnet3_resume(struct device *device)
{
- int err;
+ int err, i = 0;
+ unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
pci_enable_wake(pdev, PCI_D0, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_request_irqs(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
return 0;
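Every hunk in this file wraps the same sequence, a write to the CMD register optionally followed by a read of the result, in the new cmd_lock with interrupts disabled, since the register is a shared doorbell and two contexts must not interleave their commands. A sketch of a wrapper capturing that rule (issue_cmd() is hypothetical; the macros and cmd_lock are the driver's, so this assumes it lives inside the vmxnet3 driver with vmxnet3_int.h visible):

static u32 issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return ret;
}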
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
if (adapter->rxcsum != val) {
adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
/* description, offset */
- { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
- { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
- { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
- { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
- { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
- { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
- { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
- { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
- { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
- { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+ { "Tx Queue#", 0 },
+ { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+ { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+ { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+ { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+ { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+ { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+ { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+ { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+ { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+ { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
/* description, offset */
- {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
- drop_total) },
- { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
- drop_too_many_frags) },
- { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- drop_oversized_hdr) },
- { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
- drop_hdr_inspect_err) },
- { " tso", offsetof(struct vmxnet3_tq_driver_stats,
- drop_tso) },
- { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
- tx_ring_full) },
- { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
- linearized) },
- { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
- copy_skb_header) },
- { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- oversized_hdr) },
+ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_total) },
+ { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_too_many_frags) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_oversized_hdr) },
+ { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_hdr_inspect_err) },
+ { " tso", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_tso) },
+ { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
+ tx_ring_full) },
+ { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+ linearized) },
+ { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+ copy_skb_header) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ oversized_hdr) },
};
/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
- { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
- { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
- { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
- { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
- { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
- { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
- { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
- { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
- { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
- { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+ { "Rx Queue#", 0 },
+ { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+ { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+ { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+ { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+ { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+ { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+ { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+ { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+ { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+ { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
};
/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
/* description, offset */
- { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
- drop_total) },
- { " err", offsetof(struct vmxnet3_rq_driver_stats,
- drop_err) },
- { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
- drop_fcs) },
- { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
- rx_buf_alloc_failure) },
+ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_total) },
+ { " err", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_err) },
+ { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_fcs) },
+ { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+ rx_buf_alloc_failure) },
};
/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
- { "tx timeout count", offsetof(struct vmxnet3_adapter,
+ { "tx timeout count", offsetof(struct vmxnet3_adapter,
tx_timeout_count) }
};
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
+ unsigned long flags;
int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
memset(net_stats, 0, sizeof(*net_stats));
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
- ARRAY_SIZE(vmxnet3_tq_driver_stats) +
- ARRAY_SIZE(vmxnet3_rq_dev_stats) +
- ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+ return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+ adapter->num_tx_queues +
+ (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+ adapter->num_rx_queues +
ARRAY_SIZE(vmxnet3_global_stats);
default:
return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
}
+/* Should be multiple of 4 */
+#define NUM_TX_REGS 8
+#define NUM_RX_REGS 12
+
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
- return 20 * sizeof(u32);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+ adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
}
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (stringset == ETH_SS_STATS) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
- memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
- memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
- memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+ int i, j;
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
- memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
+
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
memcpy(buf, vmxnet3_global_stats[i].desc,
ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+ unsigned long flags;
if (data & ~ETH_FLAG_LRO)
return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
return 0;
}
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
u8 *base;
int i;
int j = 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
/* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
-
- base = (u8 *)&adapter->tqd_start[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
- base = (u8 *)&adapter->tx_queue[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
- base = (u8 *)&adapter->rqd_start[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->tqd_start[j].stats;
+ *buf++ = (u64)j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->tx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_driver_stats[i].offset);
+ }
- base = (u8 *)&adapter->rx_queue[j].stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->rqd_start[j].stats;
+ *buf++ = (u64) j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->rx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_driver_stats[i].offset);
+ }
base = (u8 *)adapter;
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
- int i = 0;
+ int i = 0, j = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
- buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
- buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
- buf[2] = adapter->tx_queue[i].tx_ring.gen;
- buf[3] = 0;
-
- buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
- buf[5] = adapter->tx_queue[i].comp_ring.gen;
- buf[6] = adapter->tx_queue[i].stopped;
- buf[7] = 0;
-
- buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
- buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
- buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
- buf[11] = 0;
-
- buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
- buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
- buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
- buf[15] = 0;
-
- buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
- buf[17] = adapter->rx_queue[i].comp_ring.gen;
- buf[18] = 0;
- buf[19] = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+ buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+ buf[j++] = adapter->tx_queue[i].stopped;
+ buf[j++] = 0;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+ buf[j++] = 0;
+ buf[j++] = 0;
+ }
+
}
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
const struct ethtool_rxfh_indir *p)
{
unsigned int i;
+ unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p->ring_index[i];
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RSSIDT);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f03..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
struct vlan_group *vlan_grp;
struct vmxnet3_intr intr;
+ spinlock_t cmd_lock;
struct Vmxnet3_DriverShared *shared;
struct Vmxnet3_PMConf *pm_conf;
struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..228d4f7a58af 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
if (status != VXGE_HW_OK)
goto exit;
- if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+ if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
(rts_table !=
VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
*data1 = 0;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b568f1b0..c81a6512c683 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
"hotplug event.\n");
out:
+ release_firmware(fw);
return ret;
}
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 8c3103fb6442..d48486d6afa1 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1695,7 +1695,7 @@ struct vxge_hw_device_stats_sw_err {
* struct vxge_hw_device_stats - Contains HW per-device statistics,
* including hw.
* @devh: HW device handle.
- * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
* @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
* space.
* @hw_info_dma_acch: One more DMA handle used subsequently to free the
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 34cff6ce6d27..4578e5b4b411 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -125,7 +125,7 @@ static u32 dscc4_pci_config_store[16];
/* Module parameters */
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug,"Enable/disable extra messages");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f0603327aafa..65bc334ed57b 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -232,7 +232,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
result);
goto error;
}
- /* Extract MAC addresss */
+ /* Extract MAC address */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 17ecaa41a807..030cbfd31704 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -186,7 +186,7 @@ enum {
* struct i2400m_poke_table - Hardware poke table for the Intel 2400m
*
* This structure will be used to create a device specific poke table
- * to put the device in a consistant state at boot time.
+ * to put the device in a consistent state at boot time.
*
* @address: The device address to poke
*
@@ -703,7 +703,7 @@ enum i2400m_bm_cmd_flags {
* @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
* rom after reading the MAC address. This is quite a dirty hack,
* if you ask me -- the device requires the bootrom to be
- * intialized after reading the MAC address.
+ * initialized after reading the MAC address.
*/
enum i2400m_bri {
I2400M_BRI_SOFT = 1 << 1,
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 019a74d533a6..09ae4ef0fd51 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2294,6 +2294,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
int i;
bool needreset = false;
+ mutex_lock(&sc->lock);
+
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
if (sc->txqs[i].setup) {
txq = &sc->txqs[i];
@@ -2321,6 +2323,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
ath5k_reset(sc, NULL, true);
}
+ mutex_unlock(&sc->lock);
+
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
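
The base.c hunk above serializes the tx-complete poll against the reset and configuration paths by holding sc->lock for the whole scan. A minimal userspace sketch of the pattern, assuming a pthread mutex and made-up queue fields in place of the driver's state:

#include <pthread.h>
#include <stdbool.h>

#define NQUEUES 8

struct dev_state {
	pthread_mutex_t lock;		/* also taken by the reset/config paths */
	bool queue_setup[NQUEUES];
	int queue_len[NQUEUES];
};

void tx_complete_poll(struct dev_state *d)
{
	bool needreset = false;
	int i;

	pthread_mutex_lock(&d->lock);
	for (i = 0; i < NQUEUES; i++) {
		/* queues cannot be torn down while the lock is held */
		if (d->queue_setup[i] && d->queue_len[i] > 0)
			needreset = true;
	}
	if (needreset) {
		/* the reset would run here, still under the lock */
	}
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev_state d = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tx_complete_poll(&d);
	return 0;
}
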
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0064be7ce5c9..21091c26a9a5 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
for (i = 0; i < qmax; i++) {
err = ath5k_hw_stop_tx_dma(ah, i);
/* -EINVAL -> queue inactive */
- if (err != -EINVAL)
+ if (err && err != -EINVAL)
return err;
}
- return err;
+ return 0;
}
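
The dma.c hunk above changes the stop loop so that -EINVAL (queue never set up) is skipped, any other error aborts immediately, and a clean pass returns 0 instead of whatever the last queue happened to report. A small sketch of that pattern, with a stand-in stop_queue() helper rather than the ath5k API:

#include <errno.h>
#include <stdio.h>

#define NQUEUES 10

static int stop_queue(int i)
{
	return (i % 3) ? -EINVAL : 0;	/* pretend only every third queue is active */
}

static int stop_all_queues(void)
{
	int i, err;

	for (i = 0; i < NQUEUES; i++) {
		err = stop_queue(i);
		/* -EINVAL means the queue was never set up: skip it */
		if (err && err != -EINVAL)
			return err;	/* real failure: abort with that code */
	}
	return 0;			/* every active queue stopped cleanly */
}

int main(void)
{
	printf("stop_all_queues: %d\n", stop_all_queues());
	return 0;
}
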
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index e5f2b96a4c63..a702817daf72 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
if (!ah->ah_bwmode) {
dur = ieee80211_generic_frame_duration(sc->hw,
NULL, len, rate);
- return dur;
+ return le16_to_cpu(dur);
}
bitrate = rate->bitrate;
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
* what rate we should choose to TX ACKs. */
tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
- tx_time = le16_to_cpu(tx_time);
-
ath5k_hw_reg_write(ah, tx_time, reg);
if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fdccad1..62ce2f4e8605 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
/**********************\
* RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Failed */
if (i >= 100)
return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
/*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
- if ((ah->ah_version != AR5K_AR5210) && !fast) {
+ if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
/*Enable/disable 802.11b mode on 5111
(enable 2111 frequency converter + CCK)*/
if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ ath5k_hw_wait_for_synth(ah, channel);
+
/*
- * On 5211+ read activation -> rx delay
- * and use it.
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
*/
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
- if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
- delay = delay << 1;
- if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
- delay = delay << 2;
- /* XXX: /2 on turbo ? Let's be safe
- * for now */
- udelay(100 + delay);
- } else {
- mdelay(1);
- }
-
- if (fast)
- /*
- * Release RF Bus grant
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
- AR5K_PHY_RFBUS_REQ_REQUEST);
- else {
- /*
- * Perform ADC test to see if baseband is ready
- * Set tx hold and check adc test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
}
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 7ad05d401ab5..fd14b9103951 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1064,7 +1064,7 @@
/*
* EEPROM command register
*/
-#define AR5K_EEPROM_CMD 0x6008 /* Register Addres */
+#define AR5K_EEPROM_CMD 0x6008 /* Register Address */
#define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */
#define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */
#define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */
@@ -1084,7 +1084,7 @@
/*
* EEPROM config register
*/
-#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */
+#define AR5K_EEPROM_CFG 0x6010 /* Register Address */
#define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */
#define AR5K_EEPROM_CFG_SIZE_AUTO 0
#define AR5K_EEPROM_CFG_SIZE_4KBIT 1
@@ -1126,7 +1126,7 @@
* Second station id register (Upper 16 bits of MAC address + PCU settings)
*/
#define AR5K_STA_ID1 0x8004 /* Register Address */
-#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */
+#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC address */
#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa13e36..5e300bd3d264 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -679,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
/* Do NF cal only at longer intervals */
if (longcal || nfcal_pending) {
- /* Do periodic PAOffset Cal */
- ar9002_hw_pa_cal(ah, false);
- ar9002_hw_olc_temp_compensation(ah);
-
/*
* Get the value from the previous NF cal and update
* history buffer.
@@ -697,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
ath9k_hw_loadnf(ah, ah->curchan);
}
- if (longcal)
+ if (longcal) {
ath9k_hw_start_nfcal(ah, false);
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+ }
}
return iscaldone;
@@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
&adc_dc_cal_multi_sample;
}
ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+ if (AR_SREV_9287(ah))
+ ah->supp_cals &= ~ADC_GAIN_CAL;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f8a7771faee2..f44c84ab5dce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
}
/* WAR for ASPM system hang */
- if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
val |= (AR_WA_BIT6 | AR_WA_BIT7);
- }
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 81f9cf294dec..9ecca93392e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = {
static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0821265e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6137634e46ca..06fb2c850535 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -146,8 +146,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9300PciePhy_clkreq_enable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
2);
/* Fast clock modal settings */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf54282..1a7fa6ea4cf5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
-#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-#define ATH9K_PM_QOS_DEFAULT_VALUE 55
-
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -218,6 +215,7 @@ struct ath_frame_info {
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
+ unsigned long bfs_paprd_timestamp;
enum ath9k_internal_frame_type bfs_ftype;
};
@@ -593,7 +591,6 @@ struct ath_softc {
struct work_struct paprd_work;
struct work_struct hw_check_work;
struct completion paprd_complete;
- bool paprd_pending;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
@@ -633,8 +630,6 @@ struct ath_softc {
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
-
- struct pm_qos_request_list pm_qos_req;
};
struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
-extern int ath9k_pm_qos_value;
extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141f2006..749a93608664 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
eep->baseEepHeader.pwdclkind == 0)
ah->need_an_top2_fixup = 1;
+ if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+ (AR_SREV_9280(ah)))
+ eep->modalHeader[0].xpaBiasLvl = 0;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084eb9cb..07b1633b7f3f 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
- u16 *hdr, tx_skb_cnt = 0;
+ u16 tx_skb_cnt = 0;
u8 *buf;
+ __le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
buf = tx_buf->buf;
buf += tx_buf->offset;
- hdr = (u16 *)buf;
- *hdr++ = nskb->len;
- *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
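
The hif_usb.c hunk above switches the stream-mode header to __le16 stores so the length and tag go out in little-endian order on any host. A portable userspace sketch of the same serialization; the 0xbeef tag is an arbitrary value for the demo, not the driver's constant:

#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *buf, uint16_t v)
{
	buf[0] = (uint8_t)(v & 0xff);	/* least significant byte first */
	buf[1] = (uint8_t)(v >> 8);
}

static size_t write_stream_hdr(uint8_t *buf, uint16_t len, uint16_t tag)
{
	put_le16(buf + 0, len);
	put_le16(buf + 2, tag);
	return 4;			/* header size in bytes */
}

int main(void)
{
	uint8_t hdr[4];

	write_stream_hdr(hdr, 1536, 0xbeef);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}
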
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e87ed3..780ac5eac501 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -78,7 +78,7 @@ struct tx_frame_hdr {
u8 node_idx;
u8 vif_idx;
u8 tidno;
- u32 flags; /* ATH9K_HTC_TX_* */
+ __be32 flags; /* ATH9K_HTC_TX_* */
u8 key_type;
u8 keyix;
u8 reserved[26];
@@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
enum htc_endpoint_id ep_id, bool txok);
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
void ath9k_htc_station_work(struct work_struct *work);
void ath9k_htc_aggr_work(struct work_struct *work);
void ath9k_ani_work(struct work_struct *work);;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 38433f9bfe59..0352f0994caa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
{
ath9k_htc_exit_debug(priv->ah);
ath9k_hw_deinit(priv->ah);
- tasklet_kill(&priv->swba_tasklet);
- tasklet_kill(&priv->rx_tasklet);
- tasklet_kill(&priv->tx_tasklet);
kfree(priv->ah);
priv->ah = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c938d16..6bb59958f71e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
priv->nstations++;
+ /*
+ * Set chainmask etc. on the target.
+ */
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Failed to update capability in target\n");
+
+ priv->ah->is_monitoring = true;
+
return 0;
err_vif:
@@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
}
priv->nstations--;
+ priv->ah->is_monitoring = false;
return 0;
}
@@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
return 0;
}
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
{
struct ath9k_htc_cap_target tcap;
int ret;
@@ -1014,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
int ret = 0;
u8 cmd_rsp;
- /* Cancel all the running timers/work .. */
- cancel_work_sync(&priv->fatal_work);
- cancel_work_sync(&priv->ps_work);
- cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
- ath9k_led_stop_brightness(priv);
-
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
@@ -1033,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ tasklet_kill(&priv->swba_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+
skb_queue_purge(&priv->tx_queue);
+ mutex_unlock(&priv->mutex);
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->fatal_work);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_led_stop_brightness(priv);
+
+ mutex_lock(&priv->mutex);
+
/* Remove monitor interface here */
if (ah->opmode == NL80211_IFTYPE_MONITOR) {
if (ath9k_htc_remove_monitor_interface(priv))
@@ -1186,6 +1206,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ /*
+ * Monitor interface should be added before
+ * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_err(common, "Failed to set monitor mode\n");
+ else
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -1221,16 +1255,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath_update_txpow(priv);
}
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR) {
- if (ath9k_htc_add_monitor_interface(priv))
- ath_err(common, "Failed to set monitor mode\n");
- else
- ath_dbg(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
mutex_lock(&priv->htc_pm_lock);
if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 33f36029fa4f..7a5ffca21958 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -113,6 +113,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
if (ieee80211_is_data(fc)) {
struct tx_frame_hdr tx_hdr;
+ u32 flags = 0;
u8 *qc;
memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -136,13 +137,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
/* Check for RTS protection */
if (priv->hw->wiphy->rts_threshold != (u32) -1)
if (skb->len > priv->hw->wiphy->rts_threshold)
- tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+ flags |= ATH9K_HTC_TX_RTSCTS;
/* CTS-to-self */
- if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+ if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
(priv->op_flags & OP_PROTECT_ENABLE))
- tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+ flags |= ATH9K_HTC_TX_CTSONLY;
+ tx_hdr.flags = cpu_to_be32(flags);
tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde978665e07..9f01e50d5cda 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -369,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
else
ah->config.ht_enable = 0;
+ /* PAPRD needs some more work to be enabled */
+ ah->config.paprd_disable = 1;
+
ah->config.rx_intr_mitigation = true;
ah->config.pcieSerDesWrite = true;
@@ -436,9 +439,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
static int ath9k_hw_post_init(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
int ecode;
- if (!AR_SREV_9271(ah)) {
+ if (common->bus_ops->ath_bus_type != ATH_USB) {
if (!ath9k_hw_chip_test(ah))
return -ENODEV;
}
@@ -1213,7 +1217,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if (!ah->chip_fullsleep) {
+ if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
ath9k_hw_abortpcurecv(ah);
if (!ath9k_hw_stopdmarecv(ah)) {
ath_dbg(common, ATH_DBG_XMIT,
@@ -1932,7 +1936,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->rx_status_len = sizeof(struct ar9003_rxs);
pCap->tx_desc_len = sizeof(struct ar9003_txc);
pCap->txs_len = sizeof(struct ar9003_txs);
- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ if (!ah->config.paprd_disable &&
+ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 5a3dfec45e96..ea9fde670646 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -225,6 +225,7 @@ struct ath9k_ops_config {
u32 pcie_waen;
u8 analog_shiftreg;
u8 ht_enable;
+ u8 paprd_disable;
u32 ofdm_trig_low;
u32 ofdm_trig_high;
u32 cck_trig_high;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 767d8b86f1e1..a033d01bf8a0 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -598,8 +594,6 @@ err_btcoex:
err_queues:
ath9k_hw_deinit(ah);
err_hw:
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
kfree(ah);
sc->sc_ah = NULL;
@@ -764,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
error_world:
@@ -807,9 +798,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
ath9k_hw_deinit(sc->sc_ah);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
kfree(sc->sc_ah);
sc->sc_ah = NULL;
}
@@ -824,6 +812,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
wiphy_rfkill_stop_polling(sc->hw->wiphy);
ath_deinit_leds(sc);
+ ath9k_ps_restore(sc);
+
for (i = 0; i < sc->num_sec_wiphy; i++) {
struct ath_wiphy *aphy = sc->sec_wiphy[i];
if (aphy == NULL)
@@ -834,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
}
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d3ce25..2915b11edefb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
struct ath_common *common = ath9k_hw_common(ah);
if (!(ints & ATH9K_INT_GLOBAL))
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_disable_interrupts(ah);
ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- ath9k_hw_enable_interrupts(ah);
+ if (ints & ATH9K_INT_GLOBAL)
+ ath9k_hw_enable_interrupts(ah);
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f90a6ca94a76..a09d15f7aa6e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_tx_control txctl;
int time_left;
@@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
tx_info->control.rates[1].idx = -1;
init_completion(&sc->paprd_complete);
- sc->paprd_pending = true;
txctl.paprd = BIT(chain);
- if (ath_tx_start(hw, skb, &txctl) != 0)
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+ dev_kfree_skb_any(skb);
return false;
+ }
time_left = wait_for_completion_timeout(&sc->paprd_complete,
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
- sc->paprd_pending = false;
if (!time_left)
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -592,14 +596,12 @@ void ath9k_tasklet(unsigned long data)
u32 status = sc->intrstatus;
u32 rxmask;
- ath9k_ps_wakeup(sc);
-
if (status & ATH9K_INT_FATAL) {
ath_reset(sc, true);
- ath9k_ps_restore(sc);
return;
}
+ ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
if (!ath9k_hw_check_alive(ah))
@@ -955,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
-
- ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -969,6 +969,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
ieee80211_stop_queues(hw);
@@ -1015,6 +1016,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Start ANI */
ath_start_ani(common);
+ ath9k_ps_restore(sc);
return r;
}
@@ -1171,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
- /* User has the option to provide pm-qos value as a module
- * parameter rather than using the default value of
- * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
- */
- pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1309,6 +1305,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
+ /* prevent tasklets to enable interrupts once we disable them */
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+
/* make sure h/w will not generate any interrupt
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
@@ -1326,6 +1325,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
+ /* we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
ath9k_ps_restore(sc);
sc->ps_idle = true;
@@ -1334,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
mutex_unlock(&sc->mutex);
ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
@@ -1701,7 +1704,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
skip_chan_change:
if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
+ ath9k_ps_wakeup(sc);
ath_update_txpow(sc);
+ ath9k_ps_restore(sc);
}
spin_lock_bh(&sc->wiphy_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 332d1feb5c18..07b7804aec5b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
bf->bf_state.bfs_paprd);
+ if (txctl->paprd)
+ bf->bf_state.bfs_paprd_timestamp = jiffies;
+
ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
bf->bf_buf_addr = 0;
if (bf->bf_state.bfs_paprd) {
- if (!sc->paprd_pending)
+ if (time_after(jiffies,
+ bf->bf_state.bfs_paprd_timestamp +
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
dev_kfree_skb_any(skb);
else
complete(&sc->paprd_complete);
@@ -2113,9 +2118,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
if (needreset) {
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
- ath9k_ps_wakeup(sc);
ath_reset(sc, true);
- ath9k_ps_restore(sc);
}
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
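
The xmit.c changes above replace the shared paprd_pending flag with a per-buffer timestamp, so a late completion is recognized as stale by its age rather than by a flag that may already have been cleared. A minimal userspace analogue using a monotonic clock; the names and the 100 ms timeout are invented for the example:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CAL_TIMEOUT_MS 100

struct cal_buf {
	struct timespec sent;		/* stamped when the frame is queued */
};

static long elapsed_ms(const struct timespec *since)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - since->tv_sec) * 1000L +
	       (now.tv_nsec - since->tv_nsec) / 1000000L;
}

static bool buf_is_stale(const struct cal_buf *bf)
{
	return elapsed_ms(&bf->sent) > CAL_TIMEOUT_MS;
}

int main(void)
{
	struct cal_buf bf;

	clock_gettime(CLOCK_MONOTONIC, &bf.sent);
	printf("stale: %d\n", buf_is_stale(&bf));	/* 0: just stamped */
	return 0;
}
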
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
- cam = !!(tim_ie->bitmap_ctrl & 0x01);
+ cam |= !!(tim_ie->bitmap_ctrl & 0x01);
if (!cam) {
/* back to low-power land. */
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964f..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 0dc33b65e86b..be4828167012 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1919,7 +1919,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
}
-/* Intialize B/G PHY power control */
+/* Initialize B/G PHY power control */
static void b43_phy_init_pctl(struct b43_wldev *dev)
{
struct ssb_bus *bus = dev->dev->bus;
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 35033dd342ce..28e477d01587 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -153,7 +153,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev)
phy->calibrated = 1;
}
-/* intialize B PHY power control
+/* initialize B PHY power control
* as described in http://bcm-specs.sipsolutions.net/InitPowerControl
*/
static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a4134edeb..2176edede39b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
hw_priv->link = link;
/*
- * Make sure the IRQ handler cannot proceed until at least
- * dev->base_addr is initialized.
+ * We enable IRQ here, but IRQ handler will not proceed
+ * until dev->base_addr is set below. This protect us from
+ * receive interrupts when driver is not initialized.
*/
- spin_lock_irqsave(&local->irq_init_lock, flags);
-
ret = pcmcia_request_irq(link, prism2_interrupt);
if (ret)
- goto failed_unlock;
+ goto failed;
ret = pcmcia_enable_device(link);
if (ret)
- goto failed_unlock;
+ goto failed;
+ spin_lock_irqsave(&local->irq_init_lock, flags);
dev->irq = link->irq;
dev->base_addr = link->resource[0]->start;
-
spin_unlock_irqrestore(&local->irq_init_lock, flags);
local->shutdown = 0;
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
return ret;
- failed_unlock:
- spin_unlock_irqrestore(&local->irq_init_lock, flags);
failed:
kfree(hw_priv);
prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f6f46f..ae438ed80c2f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
inta = ipw_read32(priv, IPW_INTA_RW);
inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+ if (inta == 0xFFFFFFFF) {
+ /* Hardware disappeared */
+ IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+ /* Only handle the cached INTA values */
+ inta = 0;
+ }
inta &= (IPW_INTA_MASK_ALL & inta_mask);
/* Add any cached INTA values that need to be handled */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852be4509..39b6f16c87fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- struct iwl3945_notif_statistics current_stat;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- if (priv->cfg->base_params->plcp_delta_threshold ==
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return rc;
- }
- memcpy(&current_stat, pkt->u.raw, sizeof(struct
- iwl3945_notif_statistics));
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- combined_plcp_delta =
- (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
- le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->base_params->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %d, %u mSecs\n",
- priv->cfg->base_params->plcp_delta_threshold,
- le32_to_cpu(current_stat.rx.ofdm.plcp_err),
- combined_plcp_delta, plcp_msec);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- rc = false;
- }
- }
- return rc;
-}
-
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
.isr_ops = {
.isr = iwl_isr_legacy,
},
- .check_plcp_health = iwl3945_good_plcp_health,
.debugfs_ops = {
.rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3f1e5f1bf847..91a9f5253469 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2624,6 +2624,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
.ucode_api_min = IWL4965_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6b1386..537fb8c84e3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bcd7ae0..ef36aff1bb43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
.ops = &iwl6050_ops, \
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 97906dd442e6..27b5a3eec9dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -152,11 +152,14 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
+ if (!priv->cfg->sku) {
+ /* not using sku overwrite */
+ priv->cfg->sku =
+ ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
EEPROM_SKU_CAP_BAND_POS);
- if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
- priv->cfg->sku |= IWL_SKU_N;
-
+ if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
+ priv->cfg->sku |= IWL_SKU_N;
+ }
if (!priv->cfg->sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
@@ -168,7 +171,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
/* not using .cfg overwrite */
radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
priv->cfg->valid_tx_ant,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a5dbfea1bfad..b5cb3be0eb4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -197,7 +197,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
none:
/* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq and no schedules tasklet. */
+ /* only Re-enable if disabled by irq and no schedules tasklet. */
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
iwl_enable_interrupts(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f13a83a7e62b..c1cfd9952e52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1154,9 +1154,12 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
}
/* Re-enable all interrupts */
- /* only Re-enable if diabled by irq */
+ /* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1368,9 +1371,12 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
}
/* Re-enable all interrupts */
- /* only Re-enable if diabled by irq */
+ /* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
}
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index a08b4e56e6b1..bb1a742a98a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -619,7 +619,7 @@ unplugged:
none:
/* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq */
+ /* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4776323b1eba..49493d176515 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -107,7 +107,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
/*
* XXX: The MAC address in the command buffer is often changed from
* the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC adress
+ * written to the command buffer often is not the same MAC address
* read from the command buffer when the command returns. This
* issue has not yet been resolved and this debugging is left to
* observe the problem.
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69ebf2a94..5091d77e02ce 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
+ ret = -ENOMEM;
goto out_priv;
}
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
GFP_KERNEL);
if (!iwm->umac_profile) {
dev_err(dev, "Couldn't alloc memory for profile\n");
+ ret = -ENOMEM;
goto out_profile;
}
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c4..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a7dc7..f618b9623e5a 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
else
*burst_possible = false;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2c8cc954d1b6..ec2c75d77cea 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -630,7 +630,7 @@ islpci_alloc_memory(islpci_private *priv)
printk(KERN_DEBUG "islpci_alloc_memory\n");
#endif
- /* remap the PCI device base address to accessable */
+ /* remap the PCI device base address to accessible */
if (!(priv->device_base =
ioremap(pci_resource_start(priv->pdev, 0),
ISL38XX_PCI_MEM_SIZE))) {
@@ -709,7 +709,7 @@ islpci_alloc_memory(islpci_private *priv)
PCI_DMA_FROMDEVICE);
if (!priv->pci_map_rx_address[counter]) {
/* error mapping the buffer to device
- accessable memory address */
+ accessible memory address */
printk(KERN_ERR "failed to map skb DMA'able\n");
goto out_free;
}
@@ -773,7 +773,7 @@ islpci_free_memory(islpci_private *priv)
priv->data_low_rx[counter] = NULL;
}
- /* Free the acces control list and the WPA list */
+ /* Free the access control list and the WPA list */
prism54_acl_clean(&priv->acl);
prism54_wpa_bss_ie_clean(priv);
mgt_clean(priv);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 2fc52bc2d7dd..d44f8e20cce0 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -450,7 +450,7 @@ islpci_eth_receive(islpci_private *priv)
MAX_FRAGMENT_SIZE_RX + 2,
PCI_DMA_FROMDEVICE);
if (unlikely(!priv->pci_map_rx_address[index])) {
- /* error mapping the buffer to device accessable memory address */
+ /* error mapping the buffer to device accessible memory address */
DEBUG(SHOW_ERROR_MESSAGES,
"Error mapping DMA address\n");
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce247..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
__le32 mode;
int ret;
+ if (priv->device_type != RNDIS_BCM4320B)
+ return -ENOTSUPP;
+
netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
enabled ? "enabled" : "disabled",
timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971a38af..3b3f1e45ab3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a54ff4c..197a36c05fda 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb72befc..be0ff78c1b16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
if (!fw || !fw->size || !fw->data) {
ERROR(rt2x00dev, "Failed to read Firmware.\n");
+ release_firmware(fw);
return -ENOENT;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 658542d2efe1..f3da051df39e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -273,7 +273,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
intf->beacon = entry;
/*
- * The MAC adddress must be configured after the device
+ * The MAC address must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
* The BSSID address must only be configured in AP mode,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 73631c6fbb30..ace0b668c04e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -363,12 +363,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
struct rt2x00_dev *rt2x00dev = hw->priv;
if (pci_set_power_state(pci_dev, PCI_D0) ||
- pci_enable_device(pci_dev) ||
- pci_restore_state(pci_dev)) {
+ pci_enable_device(pci_dev)) {
ERROR(rt2x00dev, "Failed to resume device.\n");
return -EIO;
}
+ pci_restore_state(pci_dev);
return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0b4e8590cbb7..029be3c6c030 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2446,6 +2446,7 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index b8433f3a9bc2..62876cd5c41a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
}
static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
- u8 efuse_data, u8 offset, int *bcontinual,
- u8 *write_state, struct pgpkt_struct target_pkt,
- int *repeat_times, int *bresult, u8 word_en)
+ u8 efuse_data, u8 offset, int *bcontinual,
+ u8 *write_state, struct pgpkt_struct *target_pkt,
+ int *repeat_times, int *bresult, u8 word_en)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
tmp_pkt.word_en = tmp_header & 0x0F;
tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
- if (tmp_pkt.offset != target_pkt.offset) {
- efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+ if (tmp_pkt.offset != target_pkt->offset) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
*write_state = PG_STATE_HEADER;
} else {
for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
if (bdataempty == false) {
- efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1;
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
*write_state = PG_STATE_HEADER;
} else {
match_word_en = 0x0F;
- if (!((target_pkt.word_en & BIT(0)) |
+ if (!((target_pkt->word_en & BIT(0)) |
(tmp_pkt.word_en & BIT(0))))
match_word_en &= (~BIT(0));
- if (!((target_pkt.word_en & BIT(1)) |
+ if (!((target_pkt->word_en & BIT(1)) |
(tmp_pkt.word_en & BIT(1))))
match_word_en &= (~BIT(1));
- if (!((target_pkt.word_en & BIT(2)) |
+ if (!((target_pkt->word_en & BIT(2)) |
(tmp_pkt.word_en & BIT(2))))
match_word_en &= (~BIT(2));
- if (!((target_pkt.word_en & BIT(3)) |
+ if (!((target_pkt->word_en & BIT(3)) |
(tmp_pkt.word_en & BIT(3))))
match_word_en &= (~BIT(3));
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
badworden = efuse_word_enable_data_write(
hw, *efuse_addr + 1,
tmp_pkt.word_en,
- target_pkt.data);
+ target_pkt->data);
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = offset;
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
tmp_word_en = 0x0F;
- if ((target_pkt.word_en & BIT(0)) ^
+ if ((target_pkt->word_en & BIT(0)) ^
(match_word_en & BIT(0)))
tmp_word_en &= (~BIT(0));
- if ((target_pkt.word_en & BIT(1)) ^
+ if ((target_pkt->word_en & BIT(1)) ^
(match_word_en & BIT(1)))
tmp_word_en &= (~BIT(1));
- if ((target_pkt.word_en & BIT(2)) ^
+ if ((target_pkt->word_en & BIT(2)) ^
(match_word_en & BIT(2)))
tmp_word_en &= (~BIT(2));
- if ((target_pkt.word_en & BIT(3)) ^
+ if ((target_pkt->word_en & BIT(3)) ^
(match_word_en & BIT(3)))
tmp_word_en &= (~BIT(3));
if ((tmp_word_en & 0x0F) != 0x0F) {
*efuse_addr = efuse_get_current_size(hw);
- target_pkt.offset = offset;
- target_pkt.word_en = tmp_word_en;
+ target_pkt->offset = offset;
+ target_pkt->word_en = tmp_word_en;
} else
*bcontinual = false;
*write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
} else {
*efuse_addr += (2 * tmp_word_cnts) + 1;
- target_pkt.offset = offset;
- target_pkt.word_en = word_en;
+ target_pkt->offset = offset;
+ target_pkt->word_en = word_en;
*write_state = PG_STATE_HEADER;
}
}
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
efuse_write_data_case1(hw, &efuse_addr,
efuse_data, offset,
&bcontinual,
- &write_state, target_pkt,
+ &write_state, &target_pkt,
&repeat_times, &bresult,
word_en);
else
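
The efuse.c conversion above passes the target packet by pointer so the helper's updates to offset and word_en actually reach the caller; with the old by-value parameter those writes landed on a local copy and were lost. A tiny sketch of the difference; the struct and helper names are illustrative only:

#include <stdio.h>

struct pkt {
	int offset;
	int word_en;
};

static void advance(struct pkt *p, int *addr)
{
	p->offset += 1;		/* visible to the caller */
	p->word_en = 0x0f;
	*addr += 2;		/* the address cursor also advances for the caller */
}

int main(void)
{
	struct pkt p = { .offset = 3, .word_en = 0 };
	int addr = 0x100;

	advance(&p, &addr);
	printf("offset=%d word_en=0x%x addr=0x%x\n", p.offset, p.word_en, addr);
	return 0;
}
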
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0fa36aa6701a..1758d4463247 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -619,6 +619,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct sk_buff *uskb = NULL;
u8 *pdata;
uskb = dev_alloc_skb(skb->len + 128);
+ if (!uskb) {
+ RT_TRACE(rtlpriv,
+ (COMP_INTR | COMP_RECV),
+ DBG_EMERG,
+ ("can't alloc rx skb\n"));
+ goto done;
+ }
memcpy(IEEE80211_SKB_RXCB(uskb),
&rx_status,
sizeof(rx_status));
@@ -641,7 +648,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
+ DBG_EMERG,
("can't alloc skb for rx\n"));
goto done;
}
@@ -1066,9 +1073,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
struct sk_buff *skb =
dev_alloc_skb(rtlpci->rxbuffersize);
u32 bufferaddress;
- entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
if (!skb)
return 0;
+ entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
/*skb->dev = dev; */
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index e54b21a4f8b1..efcc3aaca14f 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -1272,10 +1272,10 @@ struct wl1251_acx_tid_cfg {
/* OBSOLETE */
#define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
#define WL1251_ACX_INTR_TRACE_A BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
#define WL1251_ACX_INTR_TRACE_B BIT(8)
/* Command processing completion */
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4016fe..40372bac9482 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif);
+ if (!beacon)
+ goto out_sleep;
+
ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
beacon->len);
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index 13fbeeccf609..c0ce2c8b43b8 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -419,7 +419,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_FW_NAME "wl1251-fw.bin"
#define WL1251_NVS_NAME "wl1251-nvs.bin"
-#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
#define WL1251_PART_DOWN_MEM_START 0x0
#define WL1251_PART_DOWN_MEM_SIZE 0x16800
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 9cbc3f40c8dd..7bd8e4db4a71 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -47,9 +47,9 @@
#define WL1271_ACX_INTR_HW_AVAILABLE BIT(5)
/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
#define WL1271_ACX_INTR_DATA BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
#define WL1271_ACX_INTR_TRACE_A BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
#define WL1271_ACX_INTR_TRACE_B BIT(8)
#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 46714910f98c..7145ea543783 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
- kfree(cmd);
-
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+ kfree(cmd);
}
static void wl1271_spi_init(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index ce3d31f98c55..9050dd9b62d2 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -416,8 +416,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case it has been shut down shortly before */
-#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
-#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
/* Macros to handle wl1271.sta_rate_set */
#define HW_BG_RATES_MASK 0xffff
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ee82df62e646..3e5befe4d03b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -192,7 +192,7 @@ static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
}
/*
- * Get Ethernet MAC addresss.
+ * Get Ethernet MAC address.
*
 * WARNING: We switch to FPAGE0 and switch back again.
 * Making sure there is no other WL function being called by the ISR.
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9f83ea..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+ /* Statistics */
+ int rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@@ -488,7 +491,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) ||
- netif_needs_gso(dev, skb))) {
+ netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irq(&np->tx_lock);
goto drop;
}
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
return cons;
}
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /*
+ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ struct netfront_info *np = netdev_priv(dev);
+ np->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = (struct tcphdr *)th;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_TCP, 0);
+ }
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = (struct udphdr *)th;
+ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_UDP, 0);
+ }
break;
default:
if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_setup(skb)) {
- kfree_skb(skb);
- packets_dropped++;
- dev->stats.rx_errors++;
- continue;
- }
+ if (checksum_setup(dev, skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ dev->stats.rx_errors++;
+ continue;
}
dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
}
}
+static const struct xennet_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} xennet_stats[] = {
+ {
+ "rx_gso_checksum_fixup",
+ offsetof(struct netfront_info, rx_gso_checksum_fixup)
+ },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xennet_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ void *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ xennet_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
static const struct ethtool_ops xennet_ethtool_ops =
{
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = xennet_set_sg,
.set_tso = xennet_set_tso,
.get_link = ethtool_op_get_link,
+
+ .get_sset_count = xennet_get_sset_count,
+ .get_ethtool_stats = xennet_get_ethtool_stats,
+ .get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
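
The checksum_setup() change above forces buggy GSO frames to CHECKSUM_PARTIAL and reseeds the transport checksum with the pseudo-header sum. A minimal sketch of the same idea as a standalone, TCP-only helper, assuming a linear skb whose IP header has already been validated; the helper name and the csum_start assignment are illustrative and not part of the patch:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Hypothetical helper mirroring the TCP branch of checksum_setup() above. */
static void force_partial_tcp_csum(struct sk_buff *skb, struct iphdr *iph)
{
	struct tcphdr *tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);

	/* Tell the stack the checksum still has to be completed. */
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (unsigned char *)tcph - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	/* Seed check with the pseudo-header sum over the TCP segment length. */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - iph->ihl * 4,
					 IPPROTO_TCP, 0);
}

The UDP branch in the patch is identical except for the header type and IPPROTO_UDP.
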
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index de6c3086d232..cad66ce1640b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
new file mode 100644
index 000000000000..ea1580085347
--- /dev/null
+++ b/drivers/nfc/Kconfig
@@ -0,0 +1,30 @@
+#
+# Near Field Communication (NFC) devices
+#
+
+menuconfig NFC_DEVICES
+ bool "Near Field Communication (NFC) devices"
+ default n
+ ---help---
+ You'll have to say Y if your computer contains an NFC device that
+ you want to use under Linux.
+
+ You can say N here if you don't have any Near Field Communication
+ devices connected to your computer.
+
+if NFC_DEVICES
+
+config PN544_NFC
+ tristate "PN544 NFC driver"
+ depends on I2C
+ select CRC_CCITT
+ default n
+ ---help---
+	  Say yes if you want the PN544 Near Field Communication driver.
+	  This is for the i2c-connected version. If unsure, say N here.
+
+ To compile this driver as a module, choose m here. The module will
+ be called pn544.
+
+
+endif # NFC_DEVICES
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
new file mode 100644
index 000000000000..a4efb164ec49
--- /dev/null
+++ b/drivers/nfc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for nfc devices
+#
+
+obj-$(CONFIG_PN544_NFC) += pn544.o
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
new file mode 100644
index 000000000000..724f65d8f9e4
--- /dev/null
+++ b/drivers/nfc/pn544.c
@@ -0,0 +1,893 @@
+/*
+ * Driver for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/completion.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nfc/pn544.h>
+#include <linux/poll.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serial_core.h> /* for TCGETS */
+#include <linux/slab.h>
+
+#define DRIVER_CARD "PN544 NFC"
+#define DRIVER_DESC "NFC driver for PN544"
+
+static struct i2c_device_id pn544_id_table[] = {
+ { PN544_DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pn544_id_table);
+
+#define HCI_MODE 0
+#define FW_MODE 1
+
+enum pn544_state {
+ PN544_ST_COLD,
+ PN544_ST_FW_READY,
+ PN544_ST_READY,
+};
+
+enum pn544_irq {
+ PN544_NONE,
+ PN544_INT,
+};
+
+struct pn544_info {
+ struct miscdevice miscdev;
+ struct i2c_client *i2c_dev;
+ struct regulator_bulk_data regs[3];
+
+ enum pn544_state state;
+ wait_queue_head_t read_wait;
+ loff_t read_offset;
+ enum pn544_irq read_irq;
+ struct mutex read_mutex; /* Serialize read_irq access */
+ struct mutex mutex; /* Serialize info struct access */
+ u8 *buf;
+ size_t buflen;
+};
+
+static const char reg_vdd_io[] = "Vdd_IO";
+static const char reg_vbat[] = "VBat";
+static const char reg_vsim[] = "VSim";
+
+/* sysfs interface */
+static ssize_t pn544_test(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pn544_info *info = dev_get_drvdata(dev);
+ struct i2c_client *client = info->i2c_dev;
+ struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
+}
+
+static int pn544_enable(struct pn544_info *info, int mode)
+{
+ struct pn544_nfc_platform_data *pdata;
+ struct i2c_client *client = info->i2c_dev;
+
+ int r;
+
+ r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
+ if (r < 0)
+ return r;
+
+ pdata = client->dev.platform_data;
+ info->read_irq = PN544_NONE;
+ if (pdata->enable)
+ pdata->enable(mode);
+
+ if (mode) {
+ info->state = PN544_ST_FW_READY;
+ dev_dbg(&client->dev, "now in FW-mode\n");
+ } else {
+ info->state = PN544_ST_READY;
+ dev_dbg(&client->dev, "now in HCI-mode\n");
+ }
+
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static void pn544_disable(struct pn544_info *info)
+{
+ struct pn544_nfc_platform_data *pdata;
+ struct i2c_client *client = info->i2c_dev;
+
+ pdata = client->dev.platform_data;
+ if (pdata->disable)
+ pdata->disable();
+
+ info->state = PN544_ST_COLD;
+
+ dev_dbg(&client->dev, "Now in OFF-mode\n");
+
+ msleep(PN544_RESETVEN_TIME);
+
+ info->read_irq = PN544_NONE;
+ regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+ u8 len;
+ u16 crc;
+
+ len = buf[0] + 1;
+ if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
+ pr_err(PN544_DRIVER_NAME
+ ": CRC; corrupt packet len %u (%d)\n", len, buflen);
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ crc = crc_ccitt(0xffff, buf, len - 2);
+ crc = ~crc;
+
+ if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
+ pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+ crc, buf[len-1], buf[len-2]);
+
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ return 0;
+}
+
+static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+ int r;
+
+ if (len < 4 || len != (buf[0] + 1)) {
+ dev_err(&client->dev, "%s: Illegal message length: %d\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ if (check_crc(buf, len))
+ return -EINVAL;
+
+ usleep_range(3000, 6000);
+
+ r = i2c_master_send(client, buf, len);
+ dev_dbg(&client->dev, "send: %d\n", r);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, buf, len);
+ dev_dbg(&client->dev, "send2: %d\n", r);
+ }
+
+ if (r != len)
+ return -EREMOTEIO;
+
+ return r;
+}
+
+static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+ int r;
+ u8 len;
+
+ /*
+	 * You could read a packet in one go, but then you'd need to read the
+	 * maximum size and the rest would be 0xff fill, so we do split reads.
+ */
+ r = i2c_master_recv(client, &len, 1);
+ dev_dbg(&client->dev, "recv1: %d\n", r);
+
+ if (r != 1)
+ return -EREMOTEIO;
+
+ if (len < PN544_LLC_HCI_OVERHEAD)
+ len = PN544_LLC_HCI_OVERHEAD;
+ else if (len > (PN544_MSG_MAX_SIZE - 1))
+ len = PN544_MSG_MAX_SIZE - 1;
+
+ if (1 + len > buflen) /* len+(data+crc16) */
+ return -EMSGSIZE;
+
+ buf[0] = len;
+
+ r = i2c_master_recv(client, buf + 1, len);
+ dev_dbg(&client->dev, "recv2: %d\n", r);
+
+ if (r != len)
+ return -EREMOTEIO;
+
+ usleep_range(3000, 6000);
+
+ return r + 1;
+}
+
+static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
+{
+ int r;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ if (len < PN544_FW_HEADER_SIZE ||
+ (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
+ return -EINVAL;
+
+ r = i2c_master_send(client, buf, len);
+ dev_dbg(&client->dev, "fw send: %d\n", r);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, buf, len);
+ dev_dbg(&client->dev, "fw send2: %d\n", r);
+ }
+
+ if (r != len)
+ return -EREMOTEIO;
+
+ return r;
+}
+
+static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+ int r, len;
+
+ if (buflen < PN544_FW_HEADER_SIZE)
+ return -EINVAL;
+
+ r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
+ dev_dbg(&client->dev, "FW recv1: %d\n", r);
+
+ if (r < 0)
+ return r;
+
+ if (r < PN544_FW_HEADER_SIZE)
+ return -EINVAL;
+
+ len = (buf[1] << 8) + buf[2];
+ if (len == 0) /* just header, no additional data */
+ return r;
+
+ if (len > buflen - PN544_FW_HEADER_SIZE)
+ return -EMSGSIZE;
+
+ r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
+ dev_dbg(&client->dev, "fw recv2: %d\n", r);
+
+ if (r != len)
+ return -EINVAL;
+
+ return r + PN544_FW_HEADER_SIZE;
+}
+
+static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
+{
+ struct pn544_info *info = dev_id;
+ struct i2c_client *client = info->i2c_dev;
+
+ BUG_ON(!info);
+ BUG_ON(irq != info->i2c_dev->irq);
+
+ dev_dbg(&client->dev, "IRQ\n");
+
+ mutex_lock(&info->read_mutex);
+ info->read_irq = PN544_INT;
+ mutex_unlock(&info->read_mutex);
+
+ wake_up_interruptible(&info->read_wait);
+
+ return IRQ_HANDLED;
+}
+
+static enum pn544_irq pn544_irq_state(struct pn544_info *info)
+{
+ enum pn544_irq irq;
+
+ mutex_lock(&info->read_mutex);
+ irq = info->read_irq;
+ mutex_unlock(&info->read_mutex);
+ /*
+ * XXX: should we check GPIO-line status directly?
+ * return pdata->irq_status() ? PN544_INT : PN544_NONE;
+ */
+
+ return irq;
+}
+
+static ssize_t pn544_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+ enum pn544_irq irq;
+ size_t len;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
+ info, count);
+
+ mutex_lock(&info->mutex);
+
+ if (info->state == PN544_ST_COLD) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ irq = pn544_irq_state(info);
+ if (irq == PN544_NONE) {
+ if (file->f_flags & O_NONBLOCK) {
+ r = -EAGAIN;
+ goto out;
+ }
+
+ if (wait_event_interruptible(info->read_wait,
+ (info->read_irq == PN544_INT))) {
+ r = -ERESTARTSYS;
+ goto out;
+ }
+ }
+
+ if (info->state == PN544_ST_FW_READY) {
+ len = min(count, info->buflen);
+
+ mutex_lock(&info->read_mutex);
+ r = pn544_fw_read(info->i2c_dev, info->buf, len);
+ info->read_irq = PN544_NONE;
+ mutex_unlock(&info->read_mutex);
+
+ if (r < 0) {
+ dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
+ goto out;
+ }
+
+ print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
+ 16, 2, info->buf, r, false);
+
+ *offset += r;
+ if (copy_to_user(buf, info->buf, r)) {
+ r = -EFAULT;
+ goto out;
+ }
+ } else {
+ len = min(count, info->buflen);
+
+ mutex_lock(&info->read_mutex);
+ r = pn544_i2c_read(info->i2c_dev, info->buf, len);
+ info->read_irq = PN544_NONE;
+ mutex_unlock(&info->read_mutex);
+
+ if (r < 0) {
+ dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
+ goto out;
+ }
+ print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
+ 16, 2, info->buf, r, false);
+
+ *offset += r;
+ if (copy_to_user(buf, info->buf, r)) {
+ r = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&info->mutex);
+
+ return r;
+}
+
+static unsigned int pn544_poll(struct file *file, poll_table *wait)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
+
+ mutex_lock(&info->mutex);
+
+ if (info->state == PN544_ST_COLD) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ poll_wait(file, &info->read_wait, wait);
+
+ if (pn544_irq_state(info) == PN544_INT) {
+ r = POLLIN | POLLRDNORM;
+ goto out;
+ }
+out:
+ mutex_unlock(&info->mutex);
+
+ return r;
+}
+
+static ssize_t pn544_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+ ssize_t len;
+ int r;
+
+ dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
+ info, count);
+
+ mutex_lock(&info->mutex);
+
+ if (info->state == PN544_ST_COLD) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ /*
+	 * XXX: should we detect reset-writes and clean up possible
+	 * read_irq state?
+ */
+ if (info->state == PN544_ST_FW_READY) {
+ size_t fw_len;
+
+ if (count < PN544_FW_HEADER_SIZE) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ len = min(count, info->buflen);
+ if (copy_from_user(info->buf, buf, len)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
+ 16, 2, info->buf, len, false);
+
+ fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
+ info->buf[2];
+
+ if (len > fw_len) /* 1 msg at a time */
+ len = fw_len;
+
+ r = pn544_fw_write(info->i2c_dev, info->buf, len);
+ } else {
+ if (count < PN544_LLC_MIN_SIZE) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ len = min(count, info->buflen);
+ if (copy_from_user(info->buf, buf, len)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
+ 16, 2, info->buf, len, false);
+
+ if (len > (info->buf[0] + 1)) /* 1 msg at a time */
+ len = info->buf[0] + 1;
+
+ r = pn544_i2c_write(info->i2c_dev, info->buf, len);
+ }
+out:
+ mutex_unlock(&info->mutex);
+
+ return r;
+
+}
+
+static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+ struct pn544_nfc_platform_data *pdata;
+ unsigned int val;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
+
+ mutex_lock(&info->mutex);
+
+ if (info->state == PN544_ST_COLD) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ pdata = info->i2c_dev->dev.platform_data;
+ switch (cmd) {
+ case PN544_GET_FW_MODE:
+ dev_dbg(&client->dev, "%s: PN544_GET_FW_MODE\n", __func__);
+
+ val = (info->state == PN544_ST_FW_READY);
+ if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ break;
+
+ case PN544_SET_FW_MODE:
+ dev_dbg(&client->dev, "%s: PN544_SET_FW_MODE\n", __func__);
+
+ if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ if (val) {
+ if (info->state == PN544_ST_FW_READY)
+ break;
+
+ pn544_disable(info);
+ r = pn544_enable(info, FW_MODE);
+ if (r < 0)
+ goto out;
+ } else {
+ if (info->state == PN544_ST_READY)
+ break;
+ pn544_disable(info);
+ r = pn544_enable(info, HCI_MODE);
+ if (r < 0)
+ goto out;
+ }
+ file->f_pos = info->read_offset;
+ break;
+
+ case TCGETS:
+ dev_dbg(&client->dev, "%s: TCGETS\n", __func__);
+
+ r = -ENOIOCTLCMD;
+ break;
+
+ default:
+ dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
+ r = -ENOIOCTLCMD;
+ break;
+ }
+
+out:
+ mutex_unlock(&info->mutex);
+
+ return r;
+}
+
+static int pn544_open(struct inode *inode, struct file *file)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+ info, info->i2c_dev);
+
+ mutex_lock(&info->mutex);
+
+ /*
+ * Only 1 at a time.
+ * XXX: maybe user (counter) would work better
+ */
+ if (info->state != PN544_ST_COLD) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ file->f_pos = info->read_offset;
+ r = pn544_enable(info, HCI_MODE);
+
+out:
+ mutex_unlock(&info->mutex);
+ return r;
+}
+
+static int pn544_close(struct inode *inode, struct file *file)
+{
+ struct pn544_info *info = container_of(file->private_data,
+ struct pn544_info, miscdev);
+ struct i2c_client *client = info->i2c_dev;
+
+ dev_dbg(&client->dev, "%s: info: %p, client %p\n",
+ __func__, info, info->i2c_dev);
+
+ mutex_lock(&info->mutex);
+ pn544_disable(info);
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
+
+static const struct file_operations pn544_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = pn544_read,
+ .write = pn544_write,
+ .poll = pn544_poll,
+ .open = pn544_open,
+ .release = pn544_close,
+ .unlocked_ioctl = pn544_ioctl,
+};
+
+#ifdef CONFIG_PM
+static int pn544_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pn544_info *info;
+ int r = 0;
+
+ dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
+
+ info = i2c_get_clientdata(client);
+ dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
+ info, client);
+
+ mutex_lock(&info->mutex);
+
+ switch (info->state) {
+ case PN544_ST_FW_READY:
+ /* Do not suspend while upgrading FW, please! */
+ r = -EPERM;
+ break;
+
+ case PN544_ST_READY:
+ /*
+ * CHECK: Device should be in standby-mode. No way to check?
+ * Allowing low power mode for the regulator is potentially
+		 * dangerous if the pn544 does not actually suspend.
+ */
+ break;
+
+ case PN544_ST_COLD:
+ break;
+ };
+
+ mutex_unlock(&info->mutex);
+ return r;
+}
+
+static int pn544_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pn544_info *info = i2c_get_clientdata(client);
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+ info, client);
+
+ mutex_lock(&info->mutex);
+
+ switch (info->state) {
+ case PN544_ST_READY:
+ /*
+ * CHECK: If regulator low power mode is allowed in
+ * pn544_suspend, we should go back to normal mode
+ * here.
+ */
+ break;
+
+ case PN544_ST_COLD:
+ break;
+
+ case PN544_ST_FW_READY:
+ break;
+ };
+
+ mutex_unlock(&info->mutex);
+
+ return r;
+}
+
+static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
+#endif
+
+static struct device_attribute pn544_attr =
+ __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
+
+static int __devinit pn544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pn544_info *info;
+ struct pn544_nfc_platform_data *pdata;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ /* private data allocation */
+ info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&client->dev,
+ "Cannot allocate memory for pn544_info.\n");
+ r = -ENOMEM;
+ goto err_info_alloc;
+ }
+
+ info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
+ info->buf = kzalloc(info->buflen, GFP_KERNEL);
+ if (!info->buf) {
+ dev_err(&client->dev,
+ "Cannot allocate memory for pn544_info->buf.\n");
+ r = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ info->regs[0].supply = reg_vdd_io;
+ info->regs[1].supply = reg_vbat;
+ info->regs[2].supply = reg_vsim;
+ r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
+ info->regs);
+ if (r < 0)
+ goto err_kmalloc;
+
+ info->i2c_dev = client;
+ info->state = PN544_ST_COLD;
+ info->read_irq = PN544_NONE;
+ mutex_init(&info->read_mutex);
+ mutex_init(&info->mutex);
+ init_waitqueue_head(&info->read_wait);
+ i2c_set_clientdata(client, info);
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "No platform data\n");
+ r = -EINVAL;
+ goto err_reg;
+ }
+
+ if (!pdata->request_resources) {
+ dev_err(&client->dev, "request_resources() missing\n");
+ r = -EINVAL;
+ goto err_reg;
+ }
+
+ r = pdata->request_resources(client);
+ if (r) {
+ dev_err(&client->dev, "Cannot get platform resources\n");
+ goto err_reg;
+ }
+
+ r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
+ IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
+ info);
+ if (r < 0) {
+ dev_err(&client->dev, "Unable to register IRQ handler\n");
+ goto err_res;
+ }
+
+ /* If we don't have the test we don't need the sysfs file */
+ if (pdata->test) {
+ r = device_create_file(&client->dev, &pn544_attr);
+ if (r) {
+ dev_err(&client->dev,
+ "sysfs registration failed, error %d\n", r);
+ goto err_irq;
+ }
+ }
+
+ info->miscdev.minor = MISC_DYNAMIC_MINOR;
+ info->miscdev.name = PN544_DRIVER_NAME;
+ info->miscdev.fops = &pn544_fops;
+ info->miscdev.parent = &client->dev;
+ r = misc_register(&info->miscdev);
+ if (r < 0) {
+ dev_err(&client->dev, "Device registration failed\n");
+ goto err_sysfs;
+ }
+
+ dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
+ __func__, info, pdata, client);
+
+ return 0;
+
+err_sysfs:
+ if (pdata->test)
+ device_remove_file(&client->dev, &pn544_attr);
+err_irq:
+ free_irq(client->irq, info);
+err_res:
+ if (pdata->free_resources)
+ pdata->free_resources();
+err_reg:
+ regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+err_kmalloc:
+ kfree(info->buf);
+err_buf_alloc:
+ kfree(info);
+err_info_alloc:
+ return r;
+}
+
+static __devexit int pn544_remove(struct i2c_client *client)
+{
+ struct pn544_info *info = i2c_get_clientdata(client);
+ struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ misc_deregister(&info->miscdev);
+ if (pdata->test)
+ device_remove_file(&client->dev, &pn544_attr);
+
+ if (info->state != PN544_ST_COLD) {
+ if (pdata->disable)
+ pdata->disable();
+
+ info->read_irq = PN544_NONE;
+ }
+
+ free_irq(client->irq, info);
+ if (pdata->free_resources)
+ pdata->free_resources();
+
+ regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+ kfree(info->buf);
+ kfree(info);
+
+ return 0;
+}
+
+static struct i2c_driver pn544_driver = {
+ .driver = {
+ .name = PN544_DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &pn544_pm_ops,
+#endif
+ },
+ .probe = pn544_probe,
+ .id_table = pn544_id_table,
+ .remove = __devexit_p(pn544_remove),
+};
+
+static int __init pn544_init(void)
+{
+ int r;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+ r = i2c_add_driver(&pn544_driver);
+ if (r) {
+ pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit pn544_exit(void)
+{
+ i2c_del_driver(&pn544_driver);
+ pr_info(DRIVER_DESC ", Exiting.\n");
+}
+
+module_init(pn544_init);
+module_exit(pn544_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
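
The driver above relies entirely on board-supplied hooks reached through client->dev.platform_data: request_resources(), enable(mode), disable(), free_resources() and the optional test(). A rough sketch of how a board file might wire those up, assuming the callback signatures implied by the calls in pn544.c and the struct definition from <linux/nfc/pn544.h>; the stubbed callbacks, the board_* names and the I2C address are purely illustrative:

#include <linux/i2c.h>
#include <linux/nfc/pn544.h>

/* Stubs standing in for real GPIO/clock handling on the board. */
static int board_pn544_request_resources(struct i2c_client *client)
{
	return 0;			/* claim enable/firmware GPIOs here */
}

static void board_pn544_free_resources(void) { }
static void board_pn544_enable(int fw) { }	/* fw != 0 selects firmware-update mode */
static void board_pn544_disable(void) { }

static struct pn544_nfc_platform_data board_pn544_data = {
	.request_resources	= board_pn544_request_resources,
	.free_resources		= board_pn544_free_resources,
	.enable			= board_pn544_enable,
	.disable		= board_pn544_disable,
	/* .test left NULL, so the nfc_test sysfs file is not created */
};

static struct i2c_board_info board_i2c_nfc[] __initdata = {
	{
		I2C_BOARD_INFO(PN544_DRIVER_NAME, 0x2b),   /* address is illustrative */
		.platform_data	= &board_pn544_data,
		/* .irq is normally mapped from a GPIO by the board code */
	},
};

The probe path also expects client->irq to be valid, since it requests a rising-edge threaded interrupt with it.
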
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index aa675ebd8eb3..3c6e100a3ad0 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -19,6 +19,10 @@ config OF_FLATTREE
bool
select DTC
+config OF_EARLY_FLATTREE
+ bool
+ select OF_FLATTREE
+
config OF_PROMTREE
bool
@@ -49,6 +53,10 @@ config OF_I2C
help
OpenFirmware I2C accessors
+config OF_NET
+ depends on NETDEVICES
+ def_bool y
+
config OF_SPI
def_tristate SPI
depends on SPI && !SPARC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 7888155bea08..3ab21a0a4907 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
+obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 3a1c7e70b192..b4559c58c095 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -12,13 +12,13 @@
(ns) > 0)
static struct of_bus *of_match_bus(struct device_node *np);
-static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
- u64 size, unsigned int flags,
+static int __of_address_to_resource(struct device_node *dev,
+ const __be32 *addrp, u64 size, unsigned int flags,
struct resource *r);
/* Debug utility */
#ifdef DEBUG
-static void of_dump_addr(const char *s, const u32 *addr, int na)
+static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
printk(KERN_DEBUG "%s", s);
while (na--)
@@ -26,7 +26,7 @@ static void of_dump_addr(const char *s, const u32 *addr, int na)
printk("\n");
}
#else
-static void of_dump_addr(const char *s, const u32 *addr, int na) { }
+static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
/* Callbacks for bus specific translators */
@@ -36,10 +36,10 @@ struct of_bus {
int (*match)(struct device_node *parent);
void (*count_cells)(struct device_node *child,
int *addrc, int *sizec);
- u64 (*map)(u32 *addr, const u32 *range,
+ u64 (*map)(u32 *addr, const __be32 *range,
int na, int ns, int pna);
int (*translate)(u32 *addr, u64 offset, int na);
- unsigned int (*get_flags)(const u32 *addr);
+ unsigned int (*get_flags)(const __be32 *addr);
};
/*
@@ -55,7 +55,7 @@ static void of_bus_default_count_cells(struct device_node *dev,
*sizec = of_n_size_cells(dev);
}
-static u64 of_bus_default_map(u32 *addr, const u32 *range,
+static u64 of_bus_default_map(u32 *addr, const __be32 *range,
int na, int ns, int pna)
{
u64 cp, s, da;
@@ -85,7 +85,7 @@ static int of_bus_default_translate(u32 *addr, u64 offset, int na)
return 0;
}
-static unsigned int of_bus_default_get_flags(const u32 *addr)
+static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
return IORESOURCE_MEM;
}
@@ -110,10 +110,10 @@ static void of_bus_pci_count_cells(struct device_node *np,
*sizec = 2;
}
-static unsigned int of_bus_pci_get_flags(const u32 *addr)
+static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
unsigned int flags = 0;
- u32 w = addr[0];
+ u32 w = be32_to_cpup(addr);
switch((w >> 24) & 0x03) {
case 0x01:
@@ -129,7 +129,8 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr)
return flags;
}
-static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+static u64 of_bus_pci_map(u32 *addr, const __be32 *range, int na, int ns,
+ int pna)
{
u64 cp, s, da;
unsigned int af, rf;
@@ -160,7 +161,7 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
return of_bus_default_translate(addr + 1, offset, na - 1);
}
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{
const __be32 *prop;
@@ -207,7 +208,7 @@ EXPORT_SYMBOL(of_get_pci_address);
int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
{
- const u32 *addrp;
+ const __be32 *addrp;
u64 size;
unsigned int flags;
@@ -237,12 +238,13 @@ static void of_bus_isa_count_cells(struct device_node *child,
*sizec = 1;
}
-static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+static u64 of_bus_isa_map(u32 *addr, const __be32 *range, int na, int ns,
+ int pna)
{
u64 cp, s, da;
/* Check address type match */
- if ((addr[0] ^ range[0]) & 0x00000001)
+ if ((addr[0] ^ range[0]) & cpu_to_be32(1))
return OF_BAD_ADDR;
/* Read address values, skipping high cell */
@@ -264,10 +266,10 @@ static int of_bus_isa_translate(u32 *addr, u64 offset, int na)
return of_bus_default_translate(addr + 1, offset, na - 1);
}
-static unsigned int of_bus_isa_get_flags(const u32 *addr)
+static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
unsigned int flags = 0;
- u32 w = addr[0];
+ u32 w = be32_to_cpup(addr);
if (w & 1)
flags |= IORESOURCE_IO;
@@ -330,7 +332,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
struct of_bus *pbus, u32 *addr,
int na, int ns, int pna, const char *rprop)
{
- const u32 *ranges;
+ const __be32 *ranges;
unsigned int rlen;
int rone;
u64 offset = OF_BAD_ADDR;
@@ -398,7 +400,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* that can be mapped to a cpu physical address). This is not really specified
* that way, but this is traditionally the way IBM at least do things
*/
-u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
+u64 __of_translate_address(struct device_node *dev, const __be32 *in_addr,
const char *rprop)
{
struct device_node *parent = NULL;
@@ -475,22 +477,22 @@ u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
return result;
}
-u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
return __of_translate_address(dev, in_addr, "ranges");
}
EXPORT_SYMBOL(of_translate_address);
-u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
+u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
return __of_translate_address(dev, in_addr, "dma-ranges");
}
EXPORT_SYMBOL(of_translate_dma_address);
-const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
unsigned int *flags)
{
- const u32 *prop;
+ const __be32 *prop;
unsigned int psize;
struct device_node *parent;
struct of_bus *bus;
@@ -525,8 +527,8 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
-static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
- u64 size, unsigned int flags,
+static int __of_address_to_resource(struct device_node *dev,
+ const __be32 *addrp, u64 size, unsigned int flags,
struct resource *r)
{
u64 taddr;
@@ -564,7 +566,7 @@ static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
- const u32 *addrp;
+ const __be32 *addrp;
u64 size;
unsigned int flags;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c1360e02f921..af824e7e0367 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,10 +11,12 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#ifdef CONFIG_PPC
#include <asm/machdep.h>
@@ -22,104 +24,19 @@
#include <asm/page.h>
-int __initdata dt_root_addr_cells;
-int __initdata dt_root_size_cells;
-
-struct boot_param_header *initial_boot_params;
-
-char *find_flat_dt_string(u32 offset)
+char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
{
- return ((char *)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_dt_strings) + offset;
+ return ((char *)blob) +
+ be32_to_cpu(blob->off_dt_strings) + offset;
}
/**
- * of_scan_flat_dt - scan flattened tree blob and call callback on each.
- * @it: callback function
- * @data: context data pointer
- *
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory information at boot before we can
- * unflatten the tree
+ * of_fdt_get_property - Given a node in the given flat blob, return
+ * the property ptr
*/
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data)
-{
- unsigned long p = ((unsigned long)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_dt_struct);
- int rc = 0;
- int depth = -1;
-
- do {
- u32 tag = be32_to_cpup((__be32 *)p);
- char *pathp;
-
- p += 4;
- if (tag == OF_DT_END_NODE) {
- depth--;
- continue;
- }
- if (tag == OF_DT_NOP)
- continue;
- if (tag == OF_DT_END)
- break;
- if (tag == OF_DT_PROP) {
- u32 sz = be32_to_cpup((__be32 *)p);
- p += 8;
- if (be32_to_cpu(initial_boot_params->version) < 0x10)
- p = ALIGN(p, sz >= 8 ? 8 : 4);
- p += sz;
- p = ALIGN(p, 4);
- continue;
- }
- if (tag != OF_DT_BEGIN_NODE) {
- pr_err("Invalid tag %x in flat device tree!\n", tag);
- return -EINVAL;
- }
- depth++;
- pathp = (char *)p;
- p = ALIGN(p + strlen(pathp) + 1, 4);
- if ((*pathp) == '/') {
- char *lp, *np;
- for (lp = NULL, np = pathp; *np; np++)
- if ((*np) == '/')
- lp = np+1;
- if (lp != NULL)
- pathp = lp;
- }
- rc = it(p, pathp, depth, data);
- if (rc != 0)
- break;
- } while (1);
-
- return rc;
-}
-
-/**
- * of_get_flat_dt_root - find the root node in the flat blob
- */
-unsigned long __init of_get_flat_dt_root(void)
-{
- unsigned long p = ((unsigned long)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_dt_struct);
-
- while (be32_to_cpup((__be32 *)p) == OF_DT_NOP)
- p += 4;
- BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
- p += 4;
- return ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
- *
- * This function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size)
+void *of_fdt_get_property(struct boot_param_header *blob,
+ unsigned long node, const char *name,
+ unsigned long *size)
{
unsigned long p = node;
@@ -137,10 +54,10 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
sz = be32_to_cpup((__be32 *)p);
noff = be32_to_cpup((__be32 *)(p + 4));
p += 8;
- if (be32_to_cpu(initial_boot_params->version) < 0x10)
+ if (be32_to_cpu(blob->version) < 0x10)
p = ALIGN(p, sz >= 8 ? 8 : 4);
- nstr = find_flat_dt_string(noff);
+ nstr = of_fdt_get_string(blob, noff);
if (nstr == NULL) {
pr_warning("Can't find property index name !\n");
return NULL;
@@ -156,21 +73,28 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
}
/**
- * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
+ * of_fdt_is_compatible - Return true if given node from the given blob has
+ * compat in its compatible list
+ * @blob: A device tree blob
* @node: node to test
* @compat: compatible string to compare with compatible list.
+ *
+ * On match, returns a non-zero value with smaller values returned for more
+ * specific compatible values.
*/
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+int of_fdt_is_compatible(struct boot_param_header *blob,
+ unsigned long node, const char *compat)
{
const char *cp;
- unsigned long cplen, l;
+ unsigned long cplen, l, score = 0;
- cp = of_get_flat_dt_prop(node, "compatible", &cplen);
+ cp = of_fdt_get_property(blob, node, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
+ score++;
if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
- return 1;
+ return score;
l = strlen(cp) + 1;
cp += l;
cplen -= l;
@@ -179,7 +103,28 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
return 0;
}
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+/**
+ * of_fdt_match - Return true if node matches a list of compatible values
+ */
+int of_fdt_match(struct boot_param_header *blob, unsigned long node,
+ const char **compat)
+{
+ unsigned int tmp, score = 0;
+
+ if (!compat)
+ return 0;
+
+ while (*compat) {
+ tmp = of_fdt_is_compatible(blob, node, *compat);
+ if (tmp && (score == 0 || (tmp < score)))
+ score = tmp;
+ compat++;
+ }
+
+ return score;
+}
+
+static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
unsigned long align)
{
void *res;
@@ -193,16 +138,18 @@ static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
/**
* unflatten_dt_node - Alloc and populate a device_node from the flat tree
+ * @blob: The parent device tree blob
* @p: pointer to node in flat tree
* @dad: Parent struct device_node
* @allnextpp: pointer to ->allnext from last allocated device_node
* @fpsize: Size of the node path up at the current depth.
*/
-unsigned long __init unflatten_dt_node(unsigned long mem,
- unsigned long *p,
- struct device_node *dad,
- struct device_node ***allnextpp,
- unsigned long fpsize)
+unsigned long unflatten_dt_node(struct boot_param_header *blob,
+ unsigned long mem,
+ unsigned long *p,
+ struct device_node *dad,
+ struct device_node ***allnextpp,
+ unsigned long fpsize)
{
struct device_node *np;
struct property *pp, **prev_pp = NULL;
@@ -298,10 +245,10 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
sz = be32_to_cpup((__be32 *)(*p));
noff = be32_to_cpup((__be32 *)((*p) + 4));
*p += 8;
- if (be32_to_cpu(initial_boot_params->version) < 0x10)
+ if (be32_to_cpu(blob->version) < 0x10)
*p = ALIGN(*p, sz >= 8 ? 8 : 4);
- pname = find_flat_dt_string(noff);
+ pname = of_fdt_get_string(blob, noff);
if (pname == NULL) {
pr_info("Can't find property name in list !\n");
break;
@@ -380,7 +327,8 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
if (tag == OF_DT_NOP)
*p += 4;
else
- mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+ mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
+ fpsize);
tag = be32_to_cpup((__be32 *)(*p));
}
if (tag != OF_DT_END_NODE) {
@@ -391,6 +339,211 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
return mem;
}
+/**
+ * __unflatten_device_tree - create tree of device_nodes from flat blob
+ *
+ * unflattens a device-tree, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used.
+ * @blob: The blob to expand
+ * @mynodes: The device_node tree created by the call
+ * @dt_alloc: An allocator that provides a virtual address to memory
+ * for the resulting tree
+ */
+void __unflatten_device_tree(struct boot_param_header *blob,
+ struct device_node **mynodes,
+ void * (*dt_alloc)(u64 size, u64 align))
+{
+ unsigned long start, mem, size;
+ struct device_node **allnextp = mynodes;
+
+ pr_debug(" -> unflatten_device_tree()\n");
+
+ if (!blob) {
+ pr_debug("No device tree pointer\n");
+ return;
+ }
+
+ pr_debug("Unflattening device tree:\n");
+ pr_debug("magic: %08x\n", be32_to_cpu(blob->magic));
+ pr_debug("size: %08x\n", be32_to_cpu(blob->totalsize));
+ pr_debug("version: %08x\n", be32_to_cpu(blob->version));
+
+ if (be32_to_cpu(blob->magic) != OF_DT_HEADER) {
+ pr_err("Invalid device tree blob header\n");
+ return;
+ }
+
+ /* First pass, scan for size */
+ start = ((unsigned long)blob) +
+ be32_to_cpu(blob->off_dt_struct);
+ size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+ size = (size | 3) + 1;
+
+ pr_debug(" size is %lx, allocating...\n", size);
+
+ /* Allocate memory for the expanded device tree */
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+
+ /* Second pass, do actual unflattening */
+ start = ((unsigned long)blob) +
+ be32_to_cpu(blob->off_dt_struct);
+ unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
+ if (be32_to_cpup((__be32 *)start) != OF_DT_END)
+ pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
+ if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+ pr_warning("End of tree marker overwritten: %08x\n",
+ be32_to_cpu(((__be32 *)mem)[size / 4]));
+ *allnextp = NULL;
+
+ pr_debug(" <- unflatten_device_tree()\n");
+}
+
+static void *kernel_tree_alloc(u64 size, u64 align)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+/**
+ * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
+ *
+ * unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used.
+ */
+void of_fdt_unflatten_tree(unsigned long *blob,
+ struct device_node **mynodes)
+{
+ struct boot_param_header *device_tree =
+ (struct boot_param_header *)blob;
+ __unflatten_device_tree(device_tree, mynodes, &kernel_tree_alloc);
+}
+EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
+
+/* Everything below here references initial_boot_params directly. */
+int __initdata dt_root_addr_cells;
+int __initdata dt_root_size_cells;
+
+struct boot_param_header *initial_boot_params;
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+/**
+ * of_scan_flat_dt - scan flattened tree blob and call callback on each.
+ * @it: callback function
+ * @data: context data pointer
+ *
+ * This function is used to scan the flattened device-tree, it is
+ * used to extract the memory information at boot before we can
+ * unflatten the tree
+ */
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data)
+{
+ unsigned long p = ((unsigned long)initial_boot_params) +
+ be32_to_cpu(initial_boot_params->off_dt_struct);
+ int rc = 0;
+ int depth = -1;
+
+ do {
+ u32 tag = be32_to_cpup((__be32 *)p);
+ char *pathp;
+
+ p += 4;
+ if (tag == OF_DT_END_NODE) {
+ depth--;
+ continue;
+ }
+ if (tag == OF_DT_NOP)
+ continue;
+ if (tag == OF_DT_END)
+ break;
+ if (tag == OF_DT_PROP) {
+ u32 sz = be32_to_cpup((__be32 *)p);
+ p += 8;
+ if (be32_to_cpu(initial_boot_params->version) < 0x10)
+ p = ALIGN(p, sz >= 8 ? 8 : 4);
+ p += sz;
+ p = ALIGN(p, 4);
+ continue;
+ }
+ if (tag != OF_DT_BEGIN_NODE) {
+ pr_err("Invalid tag %x in flat device tree!\n", tag);
+ return -EINVAL;
+ }
+ depth++;
+ pathp = (char *)p;
+ p = ALIGN(p + strlen(pathp) + 1, 4);
+ if ((*pathp) == '/') {
+ char *lp, *np;
+ for (lp = NULL, np = pathp; *np; np++)
+ if ((*np) == '/')
+ lp = np+1;
+ if (lp != NULL)
+ pathp = lp;
+ }
+ rc = it(p, pathp, depth, data);
+ if (rc != 0)
+ break;
+ } while (1);
+
+ return rc;
+}
+
+/**
+ * of_get_flat_dt_root - find the root node in the flat blob
+ */
+unsigned long __init of_get_flat_dt_root(void)
+{
+ unsigned long p = ((unsigned long)initial_boot_params) +
+ be32_to_cpu(initial_boot_params->off_dt_struct);
+
+ while (be32_to_cpup((__be32 *)p) == OF_DT_NOP)
+ p += 4;
+ BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
+ p += 4;
+ return ALIGN(p + strlen((char *)p) + 1, 4);
+}
+
+/**
+ * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
+ *
+ * This function can be used within scan_flattened_dt callback to get
+ * access to properties
+ */
+void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size)
+{
+ return of_fdt_get_property(initial_boot_params, node, name, size);
+}
+
+/**
+ * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
+ * @node: node to test
+ * @compat: compatible string to compare with compatible list.
+ */
+int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+{
+ return of_fdt_is_compatible(initial_boot_params, node, compat);
+}
+
+/**
+ * of_flat_dt_match - Return true if node matches a list of compatible values
+ */
+int __init of_flat_dt_match(unsigned long node, const char **compat)
+{
+ return of_fdt_match(initial_boot_params, node, compat);
+}
+
#ifdef CONFIG_BLK_DEV_INITRD
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
@@ -549,58 +702,13 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
*/
void __init unflatten_device_tree(void)
{
- unsigned long start, mem, size;
- struct device_node **allnextp = &allnodes;
-
- pr_debug(" -> unflatten_device_tree()\n");
-
- if (!initial_boot_params) {
- pr_debug("No device tree pointer\n");
- return;
- }
-
- pr_debug("Unflattening device tree:\n");
- pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic));
- pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize));
- pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version));
-
- if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
- pr_err("Invalid device tree blob header\n");
- return;
- }
-
- /* First pass, scan for size */
- start = ((unsigned long)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_dt_struct);
- size = unflatten_dt_node(0, &start, NULL, NULL, 0);
- size = (size | 3) + 1;
-
- pr_debug(" size is %lx, allocating...\n", size);
-
- /* Allocate memory for the expanded device tree */
- mem = early_init_dt_alloc_memory_arch(size + 4,
- __alignof__(struct device_node));
- mem = (unsigned long) __va(mem);
-
- ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
- pr_debug(" unflattening %lx...\n", mem);
-
- /* Second pass, do actual unflattening */
- start = ((unsigned long)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_dt_struct);
- unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
- if (be32_to_cpup((__be32 *)start) != OF_DT_END)
- pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
- if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
- pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpu(((__be32 *)mem)[size / 4]));
- *allnextp = NULL;
+ __unflatten_device_tree(initial_boot_params, &allnodes,
+ early_init_dt_alloc_memory_arch);
/* Get pointer to OF "/chosen" node for use everywhere */
of_chosen = of_find_node_by_path("/chosen");
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");
-
- pr_debug(" <- unflatten_device_tree()\n");
}
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
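
of_fdt_is_compatible() now returns a 1-based position in the node's compatible list, so a lower score means a more specific match, and of_fdt_match()/of_flat_dt_match() return the best (smallest non-zero) score across a NULL-terminated list of candidates. A small, hypothetical early-boot scan callback built on the new of_flat_dt_match(); the compatible strings and the callback name are only examples:

static const char *example_intc_compat[] = {
	"acme,super-intc",	/* candidate strings; order here does not affect the score */
	"acme,intc",
	NULL			/* list must be NULL-terminated */
};

static int __init early_find_intc(unsigned long node, const char *uname,
				  int depth, void *data)
{
	int score = of_flat_dt_match(node, example_intc_compat);

	if (!score)
		return 0;	/* no match, keep scanning */

	pr_info("intc candidate %s (compatible match score %d)\n", uname, score);
	return 1;		/* non-zero return stops of_scan_flat_dt() */
}

/* Typically invoked from early setup code: of_scan_flat_dt(early_find_intc, NULL); */
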
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1fce00eb421b..dcd7857784f2 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -52,27 +52,35 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* Loop over the child nodes and register a phy_device for each one */
for_each_child_of_node(np, child) {
- const __be32 *addr;
+ const __be32 *paddr;
+ u32 addr;
int len;
/* A PHY must have a reg property in the range [0-31] */
- addr = of_get_property(child, "reg", &len);
- if (!addr || len < sizeof(*addr) || *addr >= 32 || *addr < 0) {
+ paddr = of_get_property(child, "reg", &len);
+ if (!paddr || len < sizeof(*paddr)) {
dev_err(&mdio->dev, "%s has invalid PHY address\n",
child->full_name);
continue;
}
+ addr = be32_to_cpup(paddr);
+ if (addr >= 32) {
+ dev_err(&mdio->dev, "%s PHY address %i is too large\n",
+ child->full_name, addr);
+ continue;
+ }
+
if (mdio->irq) {
- mdio->irq[*addr] = irq_of_parse_and_map(child, 0);
- if (!mdio->irq[*addr])
- mdio->irq[*addr] = PHY_POLL;
+ mdio->irq[addr] = irq_of_parse_and_map(child, 0);
+ if (!mdio->irq[addr])
+ mdio->irq[addr] = PHY_POLL;
}
- phy = get_phy_device(mdio, be32_to_cpup(addr));
+ phy = get_phy_device(mdio, addr);
if (!phy || IS_ERR(phy)) {
dev_err(&mdio->dev, "error probing PHY at address %i\n",
- *addr);
+ addr);
continue;
}
phy_scan_fixups(phy);
@@ -91,7 +99,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, *addr);
+ child->name, addr);
}
return 0;
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
new file mode 100644
index 000000000000..86f334a2769c
--- /dev/null
+++ b/drivers/of/of_net.c
@@ -0,0 +1,48 @@
+/*
+ * OF helpers for network devices.
+ *
+ * This file is released under the GPLv2
+ *
+ * Initially copied out of arch/powerpc/kernel/prom_parse.c
+ */
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/of_net.h>
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+*/
+const void *of_get_mac_address(struct device_node *np)
+{
+ struct property *pp;
+
+ pp = of_find_property(np, "mac-address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ pp = of_find_property(np, "local-mac-address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ pp = of_find_property(np, "address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_mac_address);
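
A typical caller copies the returned bytes rather than keeping the pointer, since it aliases property data owned by the OF core. A brief usage sketch for a hypothetical OF-aware network driver (function and variable names are illustrative):

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_init_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (mac)
		memcpy(ndev->dev_addr, mac, ETH_ALEN);	/* copy out of the property */
	else
		random_ether_addr(ndev->dev_addr);	/* fall back to a random address */
}
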
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0a50f6..4d87b5dc9284 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
(p)->unique_id = of_pdt_unique_id++; \
} while (0)
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->path_component_name;
+ int len, ourlen, plen;
+ char *n;
+
+ dp->path_component_name = build_path_component(dp);
+
+ plen = strlen(dp->parent->full_name);
+ ourlen = strlen(dp->path_component_name);
+ len = ourlen + plen + 2;
+
+ n = prom_early_alloc(len);
+ strcpy(n, dp->parent->full_name);
+ if (!of_node_is_root(dp->parent)) {
+ strcpy(n + plen, "/");
+ plen++;
+ }
+ strcpy(n + plen, dp->path_component_name);
+
+ return n;
}
-#else
+#else /* CONFIG_SPARC */
static inline void of_pdt_incr_unique_id(void *p) { }
static inline void irq_trans_init(struct device_node *dp) { }
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->name;
+ static int failsafe_id = 0; /* for generating unique names on failure */
+ char *buf;
+ int len;
+
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+ goto failsafe;
+
+ buf = prom_early_alloc(len + 1);
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+ goto failsafe;
+ return buf;
+
+ failsafe:
+ buf = prom_early_alloc(strlen(dp->parent->full_name) +
+ strlen(dp->name) + 16);
+ sprintf(buf, "%s/%s@unknown%i",
+ of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+ dp->name, failsafe_id++);
+ pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+ return buf;
}
#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
return buf;
}
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
- char *res, *buf = NULL;
- int len;
-
- if (!of_pdt_prom_ops->pkg2path)
- return NULL;
-
- if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
- return NULL;
- buf = prom_early_alloc(len + 1);
- if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
- pr_err("%s: package-to-path failed\n", __func__);
- return NULL;
- }
-
- res = strrchr(buf, '/');
- if (!res) {
- pr_err("%s: couldn't find / in %s\n", __func__, buf);
- return NULL;
- }
- return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
- char *buf;
-
- buf = of_pdt_try_pkg2path(node);
- if (!buf)
- buf = of_pdt_get_one_property(node, "name");
-
- return buf;
-}
-
static struct device_node * __init of_pdt_create_node(phandle node,
struct device_node *parent)
{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
kref_init(&dp->kref);
- dp->name = of_pdt_build_name(node);
+ dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return dp;
}
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
- int len, ourlen, plen;
- char *n;
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(of_pdt_node_name(dp));
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, of_pdt_node_name(dp));
-
- return n;
-}
-
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
phandle node,
struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
*(*nextp) = dp;
*nextp = &dp->allnext;
-#if defined(CONFIG_SPARC)
- dp->path_component_name = build_path_component(dp);
-#endif
dp->full_name = of_pdt_build_full_name(dp);
dp->child = of_pdt_build_tree(dp,
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b4a07f1220e..c01cd1ac7617 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -633,6 +633,9 @@ EXPORT_SYMBOL(of_device_alloc);
* @np: pointer to node to create device for
* @bus_id: name to assign device
* @parent: Linux device model parent device.
+ *
+ * Returns pointer to created platform device, or NULL if a device was not
+ * registered. Unavailable devices will not get registered.
*/
struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
@@ -640,6 +643,9 @@ struct platform_device *of_platform_device_create(struct device_node *np,
{
struct platform_device *dev;
+ if (!of_device_is_available(np))
+ return NULL;
+
dev = of_device_alloc(np, bus_id, parent);
if (!dev)
return NULL;
@@ -683,8 +689,9 @@ static int of_platform_bus_create(const struct device_node *bus,
pr_debug(" create child: %s\n", child->full_name);
dev = of_platform_device_create(child, NULL, parent);
if (dev == NULL)
- rc = -ENOMEM;
- else if (!of_match_node(matches, child))
+ continue;
+
+ if (!of_match_node(matches, child))
continue;
if (rc == 0) {
pr_debug(" and sub busses\n");
@@ -733,10 +740,9 @@ int of_platform_bus_probe(struct device_node *root,
if (of_match_node(matches, root)) {
pr_debug(" root match, create all sub devices\n");
dev = of_platform_device_create(root, NULL, parent);
- if (dev == NULL) {
- rc = -ENOMEM;
+ if (dev == NULL)
goto bail;
- }
+
pr_debug(" create all sub busses\n");
rc = of_platform_bus_create(root, matches, &dev->dev);
goto bail;
@@ -748,9 +754,9 @@ int of_platform_bus_probe(struct device_node *root,
pr_debug(" match: %s\n", child->full_name);
dev = of_platform_device_create(child, NULL, parent);
if (dev == NULL)
- rc = -ENOMEM;
- else
- rc = of_platform_bus_create(child, matches, &dev->dev);
+ continue;
+
+ rc = of_platform_bus_create(child, matches, &dev->dev);
if (rc) {
of_node_put(child);
break;
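
With the hunks above, of_platform_device_create() returns NULL both for
allocation failures and for nodes whose DT status is not "okay", and the bus
scanning code treats NULL as "skip this node" instead of reporting -ENOMEM.
A caller-side sketch under those assumptions (names are hypothetical):

    #include <linux/of.h>
    #include <linux/of_platform.h>

    static void example_create_devices(struct device_node *root)
    {
            struct device_node *child;

            for_each_child_of_node(root, child) {
                    struct platform_device *pdev;

                    pdev = of_platform_device_create(child, NULL, NULL);
                    if (!pdev)
                            continue;       /* disabled or not created: skip */
                    /* ... keep pdev around if it must be unregistered later ... */
            }
    }
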
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index a2d9d1e59260..a848e02e6be3 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -678,7 +678,7 @@ void parport_unregister_device(struct pardevice *dev)
/* Make sure we haven't left any pointers around in the wait
* list. */
- spin_lock (&port->waitlist_lock);
+ spin_lock_irq(&port->waitlist_lock);
if (dev->waitprev || dev->waitnext || port->waithead == dev) {
if (dev->waitprev)
dev->waitprev->waitnext = dev->waitnext;
@@ -689,7 +689,7 @@ void parport_unregister_device(struct pardevice *dev)
else
port->waittail = dev->waitprev;
}
- spin_unlock (&port->waitlist_lock);
+ spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
kfree(dev);
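
The switch to spin_lock_irq() here presumably reflects that the wait list
protected by waitlist_lock is also manipulated from interrupt-driven code, so
the process-context side must keep interrupts off while holding the lock. A
generic sketch of that pattern, with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* protects data shared with an IRQ path */

    static void example_irq_side(void)
    {
            /* Interrupts are already disabled on this CPU. */
            spin_lock(&example_lock);
            /* ... unlink or wake a waiter ... */
            spin_unlock(&example_lock);
    }

    static void example_task_side(void)
    {
            /*
             * Without _irq, the IRQ path above could interrupt us while we
             * hold the lock and then spin on it forever (deadlock).
             */
            spin_lock_irq(&example_lock);
            /* ... remove ourselves from the wait list ... */
            spin_unlock_irq(&example_lock);
    }
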
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e4e9e3..a9523fdc6911 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -45,6 +45,7 @@ config XEN_PCIDEV_FRONTEND
depends on PCI && X86 && XEN
select HOTPLUG
select PCI_XEN
+ select XEN_XENBUS_FRONTEND
default y
help
The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index bab52047baa8..7722108e78df 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -36,7 +36,6 @@
#define _ACPIPHP_H
#include <linux/acpi.h>
-#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/pci_hotplug.h>
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 2ea9cf1a8d02..b283bbea6d24 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -24,7 +24,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/string.h>
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7c24dcef2989..44b0aeee83e5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -168,8 +168,9 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
- mask_bits &= ~1;
- mask_bits |= flag;
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc->mask_base + offset);
return mask_bits;
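
The open-coded constant 1 becomes PCI_MSIX_ENTRY_CTRL_MASKBIT; together with
the PCI_MSIX_ENTRY_* offsets dropped from drivers/pci/msi.h below, these
definitions presumably move to a shared header (the corresponding hunk is not
part of this section). A sketch of the definitions and of masking a vector
with them; the helper name is hypothetical:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Assumed to be provided by the shared PCI register header. */
    #define PCI_MSIX_ENTRY_SIZE             16
    #define PCI_MSIX_ENTRY_VECTOR_CTRL      12
    #define PCI_MSIX_ENTRY_CTRL_MASKBIT     1

    /* Set or clear the per-vector Mask bit of MSI-X entry 'nr' in a
     * mapped MSI-X table at 'base'. */
    static void example_msix_mask(void __iomem *base, unsigned int nr, bool mask)
    {
            void __iomem *ctrl = base + nr * PCI_MSIX_ENTRY_SIZE +
                                 PCI_MSIX_ENTRY_VECTOR_CTRL;
            u32 val = readl(ctrl);

            if (mask)
                    val |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
            else
                    val &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
            writel(val, ctrl);
    }
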
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index feff3bee6fe5..65c42f80f23e 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,12 +6,6 @@
#ifndef MSI_H
#define MSI_H
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4
-#define PCI_MSIX_ENTRY_DATA 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
-
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 24e19c594e57..6fe0772e0e7d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,9 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
struct pci_dev *pci_dev = context;
if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+ pci_wakeup_event(pci_dev);
pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
- pci_wakeup_event(pci_dev);
if (pci_dev->subordinate)
pci_pme_wakeup_bus(pci_dev->subordinate);
}
@@ -399,6 +399,7 @@ static int __init acpi_pci_init(void)
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+ pcie_clear_aspm();
pcie_no_aspm();
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a6f797de8e5..88246dd46452 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -338,7 +338,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
}
/**
- * __pci_device_probe()
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
* @drv: driver to call to check if it wants the PCI device
* @pci_dev: PCI device being probed
*
@@ -449,7 +449,8 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return error;
}
- return pci_restore_state(pci_dev);
+ pci_restore_state(pci_dev);
+ return 0;
}
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index f7b68ca6cc98..775e933c2225 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -47,6 +47,10 @@ static int __init pci_stub_init(void)
if (rc)
return rc;
+ /* no IDs were actually passed */
+ if (ids[0] == '\0')
+ return 0;
+
/* add ids specified in the module parameter */
p = ids;
while ((id = strsep(&p, ","))) {
@@ -54,6 +58,9 @@ static int __init pci_stub_init(void)
subdevice = PCI_ANY_ID, class=0, class_mask=0;
int fields;
+ if (!strlen(id))
+ continue;
+
fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
&vendor, &device, &subvendor, &subdevice,
&class, &class_mask);
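
With the two checks added above, loading pci-stub without an ids= argument,
or with empty fields produced by stray commas, no longer feeds an empty
string to sscanf(). The parameter format itself is unchanged; a hypothetical
invocation for reference (the IDs below are examples only):

    /*
     * Kernel command line or modprobe option; only vendor and device are
     * required, the remaining fields default to PCI_ANY_ID / 0:
     *
     *     pci-stub.ids=8086:10f5,1af4:1000:1af4:0001
     */
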
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 63d5042f2079..ea25e5bfcf23 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
+#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include "pci.h"
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+ if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
@@ -1149,7 +1150,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
sysfs_bin_attr_init(attr);
attr->size = rom_size;
attr->attr.name = "rom";
- attr->attr.mode = S_IRUSR;
+ attr->attr.mode = S_IRUSR | S_IWUSR;
attr->read = pci_read_rom;
attr->write = pci_write_rom;
retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 710c8a29be0d..b714d787bddd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -937,14 +937,13 @@ pci_save_state(struct pci_dev *dev)
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
-int
-pci_restore_state(struct pci_dev *dev)
+void pci_restore_state(struct pci_dev *dev)
{
int i;
u32 val;
if (!dev->state_saved)
- return 0;
+ return;
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
@@ -968,8 +967,6 @@ pci_restore_state(struct pci_dev *dev)
pci_restore_iov_state(dev);
dev->state_saved = false;
-
- return 0;
}
static int do_pci_enable_device(struct pci_dev *dev, int bars)
@@ -1300,22 +1297,6 @@ bool pci_check_pme_status(struct pci_dev *dev)
return ret;
}
-/*
- * Time to wait before the system can be put into a sleep state after reporting
- * a wakeup event signaled by a PCI device.
- */
-#define PCI_WAKEUP_COOLDOWN 100
-
-/**
- * pci_wakeup_event - Report a wakeup event related to a given PCI device.
- * @dev: Device to report the wakeup event for.
- */
-void pci_wakeup_event(struct pci_dev *dev)
-{
- if (device_may_wakeup(&dev->dev))
- pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
-}
-
/**
* pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
* @dev: Device to handle.
@@ -1327,8 +1308,8 @@ void pci_wakeup_event(struct pci_dev *dev)
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
if (pci_check_pme_status(dev)) {
- pm_request_resume(&dev->dev);
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
}
return 0;
}
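
Since pci_restore_state() can no longer fail (it simply returns early when no
state was saved), callers such as pci_restore_standard_config() above stop
checking a return value. For any other caller the shape becomes, in a
hypothetical resume path:

    #include <linux/pci.h>

    static int example_resume(struct pci_dev *pdev)
    {
            /* Silently does nothing unless pci_save_state() ran earlier. */
            pci_restore_state(pdev);

            return pci_enable_device(pdev);
    }
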
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7d33f6673868..f69d6e0fda75 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -74,6 +74,12 @@ extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+static inline void pci_wakeup_event(struct pci_dev *dev)
+{
+ /* Wait 100 ms before the system can be put into a sleep state. */
+ pm_wakeup_event(&dev->dev, 100);
+}
+
static inline bool pci_is_bridge(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
@@ -140,14 +146,6 @@ static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
-#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
-bool pci_aer_available(void);
-#else
-static inline void pci_no_aer(void) { }
-static inline bool pci_aer_available(void) { return false; }
-#endif
-
static inline int pci_no_d1d2(struct pci_dev *dev)
{
unsigned int parent_dstates = 0;
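
pci_wakeup_event() is now a trivial inline wrapper around pm_wakeup_event()
with a 100 ms grace period, and the callers touched by this patch consistently
report the wakeup event before requesting a resume, so the PM core is told to
hold off system sleep before any resume work is queued. A condensed sketch of
that calling order (the handler itself is hypothetical):

    #include <linux/pci.h>
    #include <linux/pm_runtime.h>
    #include "pci.h"        /* pci_check_pme_status(), pci_wakeup_event() */

    static void example_handle_pme(struct pci_dev *dev)
    {
            if (pci_check_pme_status(dev)) {
                    pci_wakeup_event(dev);          /* register the wakeup first */
                    pm_request_resume(&dev->dev);   /* then schedule the resume */
            }
    }
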
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda70981b7a6..dc29348264c6 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,7 +31,7 @@ source "drivers/pci/pcie/aer/Kconfig"
# PCI Express ASPM
#
config PCIEASPM
- bool "PCI Express ASPM control" if EMBEDDED
+ bool "PCI Express ASPM control" if EXPERT
depends on PCI && PCIEPORTBUS
default y
help
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2b2b6508efde..58ad7917553c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 9656e3060412..80c11d131499 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,7 +132,6 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
#ifdef CONFIG_ACPI_APEI
extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-extern bool aer_acpi_firmware_first(void);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
@@ -140,8 +139,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return pci_dev->__aer_firmware_first;
return 0;
}
-
-static inline bool aer_acpi_firmware_first(void) { return false; }
#endif
static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 71222814c1ec..3188cd96b338 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@ struct pcie_link_state {
struct aspm_latency acceptable[8];
};
-static int aspm_disabled, aspm_force;
+static int aspm_disabled, aspm_force, aspm_clear_state;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -139,7 +139,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/* Don't enable Clock PM if the link is not Clock PM capable */
if (!link->clkpm_capable && enable)
- return;
+ enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
return;
@@ -498,6 +498,10 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
struct pci_dev *child;
int pos;
u32 reg32;
+
+ if (aspm_clear_state)
+ return -EINVAL;
+
/*
* Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
@@ -563,12 +567,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
+ if (!pci_is_pcie(pdev) || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
+ if (aspm_disabled && !aspm_clear_state)
+ return;
+
/* VIA has a strange chipset, root port is under a bridge */
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
@@ -641,7 +648,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pci_is_pcie(pdev) ||
+ if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
!parent || !parent->link_state)
return;
if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
@@ -899,6 +906,12 @@ static int __init pcie_aspm_disable(char *str)
__setup("pcie_aspm=", pcie_aspm_disable);
+void pcie_clear_aspm(void)
+{
+ if (!aspm_force)
+ aspm_clear_state = 1;
+}
+
void pcie_no_aspm(void)
{
if (!aspm_force)
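
pcie_clear_aspm() is the entry point used by the FADT check in pci-acpi.c
above: unlike pcie_no_aspm() it does not forbid ASPM configuration outright,
it requests that the pre-boot ASPM state be cleared (still honouring
pcie_aspm=force). The matching header change is not in this section;
presumably include/linux/pci-aspm.h gains a declaration plus a stub for
!CONFIG_PCIEASPM along these lines:

    /* Sketch of the assumed include/linux/pci-aspm.h counterpart. */
    #ifdef CONFIG_PCIEASPM
    extern void pcie_clear_aspm(void);
    extern void pcie_no_aspm(void);
    #else
    static inline void pcie_clear_aspm(void)
    {
    }
    static inline void pcie_no_aspm(void)
    {
    }
    #endif
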
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2f3c90407227..0057344a3fcb 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -26,9 +26,6 @@
#include "../pci.h"
#include "portdrv.h"
-#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
-
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -74,22 +71,6 @@ void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
}
/**
- * pcie_pme_clear_status - Clear root port PME interrupt status.
- * @dev: PCIe root port or event collector.
- */
-static void pcie_pme_clear_status(struct pci_dev *dev)
-{
- int rtsta_pos;
- u32 rtsta;
-
- rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
-
- pci_read_config_dword(dev, rtsta_pos, &rtsta);
- rtsta |= PCI_EXP_RTSTA_PME;
- pci_write_config_dword(dev, rtsta_pos, rtsta);
-}
-
-/**
* pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
* @bus: PCI bus to scan.
*
@@ -103,8 +84,8 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
- pm_request_resume(&dev->dev);
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
ret = true;
}
@@ -206,8 +187,8 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found) {
- pm_request_resume(&dev->dev);
pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
}
pci_dev_put(dev);
} else if (devfn) {
@@ -253,7 +234,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
* Clear PME status of the port. If there are other
* pending PMEs, the status will be set again.
*/
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
spin_unlock_irq(&data->lock);
pcie_pme_handle_request(port, rtsta & 0xffff);
@@ -378,7 +359,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
port = srv->port;
pcie_pme_interrupt_enable(port, false);
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
@@ -402,7 +383,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
spin_lock_irq(&data->lock);
pcie_pme_interrupt_enable(port, false);
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
data->noirq = true;
spin_unlock_irq(&data->lock);
@@ -422,7 +403,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
spin_lock_irq(&data->lock);
data->noirq = false;
- pcie_pme_clear_status(port);
+ pcie_clear_root_pme_status(port);
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7b5aba0a3291..bd00a01aef14 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,9 +20,6 @@
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
-extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
-
extern struct bus_type pcie_port_bus_type;
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
@@ -35,6 +32,8 @@ extern void pcie_port_bus_unregister(void);
struct pci_dev;
+extern void pcie_clear_root_pme_status(struct pci_dev *dev);
+
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 5982b6a63b89..a86b56e5f2f2 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -33,7 +33,7 @@
*/
int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
{
- acpi_status status;
+ struct acpi_pci_root *root;
acpi_handle handle;
u32 flags;
@@ -44,26 +44,11 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
if (!handle)
return -EINVAL;
- flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
- | OSC_PCI_EXPRESS_PME_CONTROL;
-
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
- else
- flags |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
-
- status = acpi_pci_osc_control_set(handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_FAILURE(status)) {
- dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
- status);
+ root = acpi_pci_find_root(handle);
+ if (!root)
return -ENODEV;
- }
- dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+ flags = root->osc_control_set;
*srv_mask = PCIE_PORT_SERVICE_VC;
if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a9c222d79ebc..5130d0d22390 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -241,17 +241,17 @@ static int get_port_device_capability(struct pci_dev *dev)
int cap_mask;
int err;
+ if (pcie_ports_disabled)
+ return 0;
+
err = pcie_port_platform_notify(dev, &cap_mask);
- if (pcie_ports_auto) {
- if (err) {
- pcie_no_aspm();
- return 0;
- }
- } else {
+ if (!pcie_ports_auto) {
cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
| PCIE_PORT_SERVICE_VC;
if (pci_aer_available())
cap_mask |= PCIE_PORT_SERVICE_AER;
+ } else if (err) {
+ return 0;
}
pos = pci_pcie_cap(dev);
@@ -349,15 +349,18 @@ int pcie_port_device_register(struct pci_dev *dev)
int status, capabilities, i, nr_service;
int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
- /* Get and check PCI Express port services */
- capabilities = get_port_device_capability(dev);
- if (!capabilities)
- return -ENODEV;
-
/* Enable PCI Express port device */
status = pci_enable_device(dev);
if (status)
return status;
+
+ /* Get and check PCI Express port services */
+ capabilities = get_port_device_capability(dev);
+ if (!capabilities) {
+ pcie_no_aspm();
+ return 0;
+ }
+
pci_set_master(dev);
/*
* Initialize service irqs. Don't use service devices that
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9033e190fb6..e0610bda1dea 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -57,6 +57,22 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
+/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
+ int rtsta_pos;
+ u32 rtsta;
+
+ rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
+
+ pci_read_config_dword(dev, rtsta_pos, &rtsta);
+ rtsta |= PCI_EXP_RTSTA_PME;
+ pci_write_config_dword(dev, rtsta_pos, rtsta);
+}
+
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
@@ -69,6 +85,20 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
+static int pcie_port_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ /*
+ * Some BIOSes forget to clear Root PME Status bits after system wakeup
+ * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
+ * bits now just in case (shouldn't hurt).
+ */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_clear_root_pme_status(pdev);
+ return 0;
+}
+
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -76,6 +106,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
+ .resume_noirq = pcie_port_resume_noirq,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -327,10 +358,8 @@ static int __init pcie_portdrv_init(void)
{
int retval;
- if (pcie_ports_disabled) {
- pcie_no_aspm();
- return -EACCES;
- }
+ if (pcie_ports_disabled)
+ return pci_register_driver(&pcie_portdriver);
dmi_check_system(pcie_portdrv_dmi_table);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index de886f3dfd39..6e318ce41136 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,7 @@ comment "PC-card bridges"
config YENTA
tristate "CardBus yenta-compatible bridge support"
depends on PCI
- select CARDBUS if !EMBEDDED
+ select CARDBUS if !EXPERT
select PCCARD_NONSTATIC if PCMCIA != n
---help---
This option enables support for CardBus host bridges. Virtually
@@ -84,27 +84,27 @@ config YENTA
config YENTA_O2
default y
- bool "Special initialization for O2Micro bridges" if EMBEDDED
+ bool "Special initialization for O2Micro bridges" if EXPERT
depends on YENTA
config YENTA_RICOH
default y
- bool "Special initialization for Ricoh bridges" if EMBEDDED
+ bool "Special initialization for Ricoh bridges" if EXPERT
depends on YENTA
config YENTA_TI
default y
- bool "Special initialization for TI and EnE bridges" if EMBEDDED
+ bool "Special initialization for TI and EnE bridges" if EXPERT
depends on YENTA
config YENTA_ENE_TUNE
default y
- bool "Auto-tune EnE bridges for CB cards" if EMBEDDED
+ bool "Auto-tune EnE bridges for CB cards" if EXPERT
depends on YENTA_TI && CARDBUS
config YENTA_TOSHIBA
default y
- bool "Special initialization for Toshiba ToPIC bridges" if EMBEDDED
+ bool "Special initialization for Toshiba ToPIC bridges" if EXPERT
depends on YENTA
config PD6729
diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h
index 8146e3bee2e8..f558e1adf954 100644
--- a/drivers/pcmcia/m32r_cfc.h
+++ b/drivers/pcmcia/m32r_cfc.h
@@ -9,7 +9,7 @@
#endif
/*
- * M32R PC Card Controler
+ * M32R PC Card Controller
*/
#define M32R_PCC0_BASE 0x00ef7000
#define M32R_PCC1_BASE 0x00ef7020
diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h
index e4fffe417ba9..f95c58563bc8 100644
--- a/drivers/pcmcia/m32r_pcc.h
+++ b/drivers/pcmcia/m32r_pcc.h
@@ -5,7 +5,7 @@
#define M32R_MAX_PCC 2
/*
- * M32R PC Card Controler
+ * M32R PC Card Controller
*/
#define M32R_PCC0_BASE 0x00ef7000
#define M32R_PCC1_BASE 0x00ef7020
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 99d4f23cb435..0db482771fb5 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1198,7 +1198,7 @@ static int __init m8xx_probe(struct platform_device *ofdev,
out_be32(M8XX_PGCRX(1),
M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
- /* intialize the fixed memory windows */
+ /* initialize the fixed memory windows */
for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3ed55..42fbf1a75576 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
flags |= CONF_ENABLE_IOCARD;
if (flags & CONF_ENABLE_IOCARD)
s->socket.flags |= SS_IOCARD;
+ if (flags & CONF_ENABLE_ZVCARD)
+ s->socket.flags |= SS_ZVCARD | SS_IOCARD;
if (flags & CONF_ENABLE_SPKR) {
s->socket.flags |= SS_SPKR_ENA;
status = CCSR_AUDIO_ENA;
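
CONF_ENABLE_ZVCARD lets a client driver ask for the socket to be put into
zoomed-video mode, which as the hunk above shows also implies I/O card mode.
A hypothetical driver would only need to set the flag before enabling the
device; everything but the flag name below is illustrative:

    #include <pcmcia/ds.h>

    static int example_zv_config(struct pcmcia_device *p_dev)
    {
            /* Request ZV routing (and, implicitly, I/O card mode). */
            p_dev->config_flags |= CONF_ENABLE_ZVCARD;

            return pcmcia_enable_device(p_dev);
    }
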
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c8c715..2c540542b5af 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
{
struct pcmcia_low_level *ops = dev->platform_data;
/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea87b8f9..b609b45469ed 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8fb42bd..25afe637c657 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev);
ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
pxa2xx_drv_pcmcia_add_one);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index faec777b1ed4..a59af5b24f0a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -18,12 +18,14 @@ if X86_PLATFORM_DEVICES
config ACER_WMI
tristate "Acer WMI Laptop Extras"
depends on ACPI
- depends on LEDS_CLASS
- depends on NEW_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
+ depends on INPUT
depends on RFKILL || RFKILL = n
- select ACPI_WMI
+ depends on ACPI_WMI
+ select INPUT_SPARSEKMAP
---help---
This is a driver for newer Acer (and Wistron) laptops. It adds
wireless radio and bluetooth control, and on some laptops,
@@ -131,7 +133,7 @@ config TC1100_WMI
depends on !X86_64
depends on EXPERIMENTAL
depends on ACPI
- select ACPI_WMI
+ depends on ACPI_WMI
---help---
This is a driver for the WMI extensions (wireless and bluetooth power
control) of the HP Compaq TC1100 tablet.
@@ -225,7 +227,8 @@ config SONYPI_COMPAT
config IDEAPAD_LAPTOP
tristate "Lenovo IdeaPad Laptop Extras"
depends on ACPI
- depends on RFKILL
+ depends on RFKILL && INPUT
+ select INPUT_SPARSEKMAP
help
This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
@@ -425,7 +428,10 @@ config EEEPC_WMI
depends on INPUT
depends on EXPERIMENTAL
depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL || RFKILL = n
select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
---help---
Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
@@ -510,8 +516,8 @@ config TOPSTAR_LAPTOP
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
- depends on LEDS_CLASS
- depends on NEW_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on RFKILL || RFKILL = n
@@ -576,6 +582,15 @@ config INTEL_SCU_IPC
some embedded Intel x86 platforms. This is not needed for PC-type
machines.
+config INTEL_SCU_IPC_UTIL
+ tristate "Intel SCU IPC utility driver"
+ depends on INTEL_SCU_IPC
+ default y
+ ---help---
+ The IPC Util driver provides an interface to the SCU, enabling
+ low-level access for debug work and firmware updates. Say N
+ unless you will be doing this on an Intel MID platform.
+
config GPIO_INTEL_PMIC
bool "Intel PMIC GPIO support"
depends on INTEL_SCU_IPC && GPIOLIB
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9950ccc940b5..4ec4ff8f9182 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o
obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c8c65375bfe2..38b34a73866a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -37,6 +37,9 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
#include <acpi/acpi_drivers.h>
@@ -48,6 +51,7 @@ MODULE_LICENSE("GPL");
#define ACER_ERR KERN_ERR ACER_LOGPREFIX
#define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
#define ACER_INFO KERN_INFO ACER_LOGPREFIX
+#define ACER_WARNING KERN_WARNING ACER_LOGPREFIX
/*
* Magic Number
@@ -80,11 +84,84 @@ MODULE_LICENSE("GPL");
*/
#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
+#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
+
+/*
+ * Acer ACPI event GUIDs
+ */
+#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
+
+enum acer_wmi_event_ids {
+ WMID_HOTKEY_EVENT = 0x1,
+};
+
+static const struct key_entry acer_wmi_keymap[] = {
+ {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
+ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
+ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
+ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
+ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+ {KE_KEY, 0x82, {KEY_F22} }, /* Touch Pad On/Off */
+ {KE_END, 0}
+};
+
+static struct input_dev *acer_wmi_input_dev;
+
+struct event_return_value {
+ u8 function;
+ u8 key_num;
+ u16 device_state;
+ u32 reserved;
+} __attribute__((packed));
+
+/*
+ * GUID3 Get Device Status device flags
+ */
+#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
+#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
+#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
+
+struct lm_input_params {
+ u8 function_num; /* Function Number */
+ u16 commun_devices; /* Communication type devices default status */
+ u16 devices; /* Other type devices default status */
+ u8 lm_status; /* Launch Manager Status */
+ u16 reserved;
+} __attribute__((packed));
+
+struct lm_return_value {
+ u8 error_code; /* Error Code */
+ u8 ec_return_value; /* EC Return Value */
+ u16 reserved;
+} __attribute__((packed));
+
+struct wmid3_gds_input_param { /* Get Device Status input parameter */
+ u8 function_num; /* Function Number */
+ u8 hotkey_number; /* Hotkey Number */
+ u16 devices; /* Get Device */
+} __attribute__((packed));
+
+struct wmid3_gds_return_value { /* Get Device Status return value*/
+ u8 error_code; /* Error Code */
+ u8 ec_return_value; /* EC Return Value */
+ u16 devices; /* Current Device Status */
+ u32 reserved;
+} __attribute__((packed));
+
+struct hotkey_function_type_aa {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u16 commun_func_bitmap;
+} __attribute__((packed));
/*
* Interface capability flags
@@ -116,15 +193,19 @@ static int mailled = -1;
static int brightness = -1;
static int threeg = -1;
static int force_series;
+static bool ec_raw_mode;
+static bool has_type_aa;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
module_param(threeg, int, 0444);
module_param(force_series, int, 0444);
+module_param(ec_raw_mode, bool, 0444);
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
MODULE_PARM_DESC(force_series, "Force a different laptop series");
+MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
struct acer_data {
int mailled;
@@ -140,6 +221,7 @@ struct acer_debug {
static struct rfkill *wireless_rfkill;
static struct rfkill *bluetooth_rfkill;
+static struct rfkill *threeg_rfkill;
/* Each low-level interface must define at least some of the following */
struct wmi_interface {
@@ -753,6 +835,28 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
return WMI_execute_u32(method_id, (u32)value, NULL);
}
+static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
+{
+ struct hotkey_function_type_aa *type_aa;
+
+ /* We are looking for OEM-specific Type AAh */
+ if (header->type != 0xAA)
+ return;
+
+ has_type_aa = true;
+ type_aa = (struct hotkey_function_type_aa *) header;
+
+ printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n",
+ type_aa->commun_func_bitmap);
+
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
+ interface->capability |= ACER_CAP_WIRELESS;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG)
+ interface->capability |= ACER_CAP_THREEG;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+}
+
static acpi_status WMID_set_capabilities(void)
{
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -773,16 +877,17 @@ static acpi_status WMID_set_capabilities(void)
return AE_ERROR;
}
- /* Not sure on the meaning of the relevant bits yet to detect these */
- interface->capability |= ACER_CAP_WIRELESS;
- interface->capability |= ACER_CAP_THREEG;
+ dmi_walk(type_aa_dmi_decode, NULL);
+ if (!has_type_aa) {
+ interface->capability |= ACER_CAP_WIRELESS;
+ interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x10)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+ }
/* WMID always provides brightness methods */
interface->capability |= ACER_CAP_BRIGHTNESS;
- if (devices & 0x10)
- interface->capability |= ACER_CAP_BLUETOOTH;
-
if (!(devices & 0x20))
max_brightness = 0x9;
@@ -861,7 +966,8 @@ static void __init acer_commandline_init(void)
* capability isn't available on the given interface
*/
set_u32(mailled, ACER_CAP_MAILLED);
- set_u32(threeg, ACER_CAP_THREEG);
+ if (!has_type_aa)
+ set_u32(threeg, ACER_CAP_THREEG);
set_u32(brightness, ACER_CAP_BRIGHTNESS);
}
@@ -915,7 +1021,7 @@ static int update_bl_status(struct backlight_device *bd)
return 0;
}
-static struct backlight_ops acer_bl_ops = {
+static const struct backlight_ops acer_bl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
@@ -948,6 +1054,79 @@ static void acer_backlight_exit(void)
backlight_device_unregister(acer_backlight_device);
}
+static acpi_status wmid3_get_device_status(u32 *value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = device,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ printk(ACER_WARNING "Unknown buffer length %d\n",
+ obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ printk(ACER_WARNING "Get Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+ else
+ *value = !!(return_value.devices & device);
+
+ return status;
+}
+
+static acpi_status get_device_status(u32 *value, u32 cap)
+{
+ if (wmi_has_guid(WMID_GUID3)) {
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_get_device_status(value, device);
+
+ } else {
+ return get_u32(value, cap);
+ }
+}
+
/*
* Rfkill devices
*/
@@ -968,6 +1147,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
rfkill_set_sw_state(bluetooth_rfkill, !state);
}
+ if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) {
+ status = wmid3_get_device_status(&state,
+ ACER_WMID3_GDS_THREEG);
+ if (ACPI_SUCCESS(status))
+ rfkill_set_sw_state(threeg_rfkill, !state);
+ }
+
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
}
@@ -991,6 +1177,8 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
{
int err;
struct rfkill *rfkill_dev;
+ u32 state;
+ acpi_status status;
rfkill_dev = rfkill_alloc(name, dev, type,
&acer_rfkill_ops,
@@ -998,6 +1186,10 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
if (!rfkill_dev)
return ERR_PTR(-ENOMEM);
+ status = get_device_status(&state, cap);
+ if (ACPI_SUCCESS(status))
+ rfkill_init_sw_state(rfkill_dev, !state);
+
err = rfkill_register(rfkill_dev);
if (err) {
rfkill_destroy(rfkill_dev);
@@ -1024,6 +1216,19 @@ static int acer_rfkill_init(struct device *dev)
}
}
+ if (has_cap(ACER_CAP_THREEG)) {
+ threeg_rfkill = acer_rfkill_register(dev,
+ RFKILL_TYPE_WWAN, "acer-threeg",
+ ACER_CAP_THREEG);
+ if (IS_ERR(threeg_rfkill)) {
+ rfkill_unregister(wireless_rfkill);
+ rfkill_destroy(wireless_rfkill);
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ return PTR_ERR(threeg_rfkill);
+ }
+ }
+
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
return 0;
@@ -1040,6 +1245,11 @@ static void acer_rfkill_exit(void)
rfkill_unregister(bluetooth_rfkill);
rfkill_destroy(bluetooth_rfkill);
}
+
+ if (has_cap(ACER_CAP_THREEG)) {
+ rfkill_unregister(threeg_rfkill);
+ rfkill_destroy(threeg_rfkill);
+ }
return;
}
@@ -1050,7 +1260,12 @@ static ssize_t show_bool_threeg(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 result; \
- acpi_status status = get_u32(&result, ACER_CAP_THREEG);
+ acpi_status status;
+ if (wmi_has_guid(WMID_GUID3))
+ status = wmid3_get_device_status(&result,
+ ACER_WMID3_GDS_THREEG);
+ else
+ status = get_u32(&result, ACER_CAP_THREEG);
if (ACPI_SUCCESS(status))
return sprintf(buf, "%u\n", result);
return sprintf(buf, "Read error\n");
@@ -1065,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
return -EINVAL;
return count;
}
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
set_bool_threeg);
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
@@ -1085,6 +1300,178 @@ static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL);
+static void acer_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ struct event_return_value return_value;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(ACER_WARNING "bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (!obj)
+ return;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ printk(ACER_WARNING "Unknown response received %d\n",
+ obj->type);
+ kfree(obj);
+ return;
+ }
+ if (obj->buffer.length != 8) {
+ printk(ACER_WARNING "Unknown buffer length %d\n",
+ obj->buffer.length);
+ kfree(obj);
+ return;
+ }
+
+ return_value = *((struct event_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ switch (return_value.function) {
+ case WMID_HOTKEY_EVENT:
+ if (!sparse_keymap_report_event(acer_wmi_input_dev,
+ return_value.key_num, 1, true))
+ printk(ACER_WARNING "Unknown key number - 0x%x\n",
+ return_value.key_num);
+ break;
+ default:
+ printk(ACER_WARNING "Unknown function number - %d - %d\n",
+ return_value.function, return_value.key_num);
+ break;
+ }
+}
+
+static acpi_status
+wmid3_set_lm_mode(struct lm_input_params *params,
+ struct lm_return_value *return_value)
+{
+ acpi_status status;
+ union acpi_object *obj;
+
+ struct acpi_buffer input = { sizeof(struct lm_input_params), params };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ printk(ACER_WARNING "Unknown buffer length %d\n",
+ obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ *return_value = *((struct lm_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ return status;
+}
+
+static int acer_wmi_enable_ec_raw(void)
+{
+ struct lm_return_value return_value;
+ acpi_status status;
+ struct lm_input_params params = {
+ .function_num = 0x1,
+ .commun_devices = 0xFFFF,
+ .devices = 0xFFFF,
+ .lm_status = 0x00, /* Launch Manager deactivated */
+ };
+
+ status = wmid3_set_lm_mode(&params, &return_value);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ printk(ACER_WARNING "Enabling EC raw mode failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+ else
+ printk(ACER_INFO "Enabled EC raw mode\n");
+
+ return status;
+}
+
+static int acer_wmi_enable_lm(void)
+{
+ struct lm_return_value return_value;
+ acpi_status status;
+ struct lm_input_params params = {
+ .function_num = 0x1,
+ .commun_devices = 0xFFFF,
+ .devices = 0xFFFF,
+ .lm_status = 0x01, /* Launch Manager Active */
+ };
+
+ status = wmid3_set_lm_mode(&params, &return_value);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ printk(ACER_WARNING "Enabling Launch Manager failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static int __init acer_wmi_input_setup(void)
+{
+ acpi_status status;
+ int err;
+
+ acer_wmi_input_dev = input_allocate_device();
+ if (!acer_wmi_input_dev)
+ return -ENOMEM;
+
+ acer_wmi_input_dev->name = "Acer WMI hotkeys";
+ acer_wmi_input_dev->phys = "wmi/input0";
+ acer_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
+ acer_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+ goto err_free_keymap;
+ }
+
+ err = input_register_device(acer_wmi_input_dev);
+ if (err)
+ goto err_uninstall_notifier;
+
+ return 0;
+
+err_uninstall_notifier:
+ wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+err_free_keymap:
+ sparse_keymap_free(acer_wmi_input_dev);
+err_free_dev:
+ input_free_device(acer_wmi_input_dev);
+ return err;
+}
+
+static void acer_wmi_input_destroy(void)
+{
+ wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+ sparse_keymap_free(acer_wmi_input_dev);
+ input_unregister_device(acer_wmi_input_dev);
+}
+
/*
* debugfs functions
*/
@@ -1327,6 +1714,26 @@ static int __init acer_wmi_init(void)
"generic video driver\n");
}
+ if (wmi_has_guid(WMID_GUID3)) {
+ if (ec_raw_mode) {
+ if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) {
+ printk(ACER_ERR "Cannot enable EC raw mode\n");
+ return -ENODEV;
+ }
+ } else if (ACPI_FAILURE(acer_wmi_enable_lm())) {
+ printk(ACER_ERR "Cannot enable Launch Manager mode\n");
+ return -ENODEV;
+ }
+ } else if (ec_raw_mode) {
+ printk(ACER_INFO "No WMID EC raw mode enable method\n");
+ }
+
+ if (wmi_has_guid(ACERWMID_EVENT_GUID)) {
+ err = acer_wmi_input_setup();
+ if (err)
+ return err;
+ }
+
err = platform_driver_register(&acer_platform_driver);
if (err) {
printk(ACER_ERR "Unable to register platform driver.\n");
@@ -1368,11 +1775,17 @@ error_device_add:
error_device_alloc:
platform_driver_unregister(&acer_platform_driver);
error_platform_register:
+ if (wmi_has_guid(ACERWMID_EVENT_GUID))
+ acer_wmi_input_destroy();
+
return err;
}
static void __exit acer_wmi_exit(void)
{
+ if (wmi_has_guid(ACERWMID_EVENT_GUID))
+ acer_wmi_input_destroy();
+
remove_sysfs(acer_platform_device);
remove_debugfs();
platform_device_unregister(acer_platform_device);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d235f44fd7a3..f3aa6a7fdab6 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -640,7 +640,7 @@ static int update_bl_status(struct backlight_device *bd)
return asus_lcd_set(asus, value);
}
-static struct backlight_ops asusbl_ops = {
+static const struct backlight_ops asusbl_ops = {
.get_brightness = asus_read_brightness,
.update_status = update_bl_status,
};
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ca05aefd03bf..fe495939c307 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
struct proc_dir_entry *proc;
mode_t mode;
- /*
- * If parameter uid or gid is not changed, keep the default setting for
- * our proc entries (-rw-rw-rw-) else, it means we care about security,
- * and then set to -rw-rw----
- */
-
if ((asus_uid == 0) && (asus_gid == 0)) {
- mode = S_IFREG | S_IRUGO | S_IWUGO;
+ mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
@@ -1467,7 +1461,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
return 0;
}
-static struct backlight_ops asus_backlight_data = {
+static const struct backlight_ops asus_backlight_data = {
.get_brightness = read_brightness,
.update_status = set_brightness_status,
};
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 341cbfef93ee..911135425224 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -522,18 +522,20 @@ static int cmpc_rfkill_block(void *data, bool blocked)
acpi_status status;
acpi_handle handle;
unsigned long long state;
+ bool is_blocked;
handle = data;
status = cmpc_get_rfkill_wlan(handle, &state);
if (ACPI_FAILURE(status))
return -ENODEV;
- if (blocked)
- state &= ~1;
- else
- state |= 1;
- status = cmpc_set_rfkill_wlan(handle, state);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ /* Check if we really need to call cmpc_set_rfkill_wlan */
+ is_blocked = state & 1 ? false : true;
+ if (is_blocked != blocked) {
+ state = blocked ? 0 : 1;
+ status = cmpc_set_rfkill_wlan(handle, state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ }
return 0;
}
@@ -653,8 +655,9 @@ static void cmpc_keys_handler(struct acpi_device *dev, u32 event)
if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes))
code = cmpc_keys_codes[event & 0x0F];
- inputdev = dev_get_drvdata(&dev->dev);;
+ inputdev = dev_get_drvdata(&dev->dev);
input_report_key(inputdev, code, !(event & 0x10));
+ input_sync(inputdev);
}
static void cmpc_keys_idev_init(struct input_dev *inputdev)
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 097083cac413..034572b980c9 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -872,6 +872,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
},
.callback = dmi_check_cb_extra
},
+ {
+ .ident = "KHLB2",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "KHLB2"),
+ DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
+ },
+ .callback = dmi_check_cb_extra
+ },
{ }
};
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index cf8a89a0d8f5..ad24ef36f9f7 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state */
+ switch is disabled, don't allow changing the software state.
+ If the hardware switch is reported as not supported, always
+ fire the SMI to toggle the killswitch. */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16))) {
+ !(buffer->output[1] & BIT(16)) &&
+ (buffer->output[1] & BIT(0))) {
ret = -EINVAL;
goto out;
}
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+ release_buffer();
+
+ /* if hardware rfkill is not supported, set it explicitly */
+ if (!(status & BIT(0))) {
+ if (wifi_rfkill)
+ dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+ if (bluetooth_rfkill)
+ dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+ if (wwan_rfkill)
+ dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+ }
+
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
@@ -546,7 +566,7 @@ out:
return buffer->output[1];
}
-static struct backlight_ops dell_ops = {
+static const struct backlight_ops dell_ops = {
.get_brightness = dell_get_intensity,
.update_status = dell_send_intensity,
};
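
The new dell_update_rfkill() logic interprets the status word returned in
buffer->output[1] by dell_send_request(buffer, 17, 11). Judging purely from
the tests in this patch (not from Dell documentation), the bits appear to
mean the following; the macro names are invented for illustration:

    #include <linux/bitops.h>

    /* Inferred layout of the class 17 / select 11 status word. */
    #define DELL_HW_SWITCH_SUPPORTED        BIT(0)  /* hardware killswitch present */
    #define DELL_HW_SWITCH_ON               BIT(16) /* switch position allows radios */
    #define DELL_WIFI_ENABLED               BIT(17)
    #define DELL_BLUETOOTH_ENABLED          BIT(18)
    #define DELL_WWAN_ENABLED               BIT(19)
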
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index b2edfdcdcb84..49d9ad708f89 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -529,6 +529,15 @@ static void tpd_led_set(struct led_classdev *led_cdev,
queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
}
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+ struct eeepc_laptop *eeepc;
+
+ eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
+
+ return get_acpi(eeepc, CM_ASL_TPD);
+}
+
static int eeepc_led_init(struct eeepc_laptop *eeepc)
{
int rv;
@@ -543,6 +552,8 @@ static int eeepc_led_init(struct eeepc_laptop *eeepc)
eeepc->tpd_led.name = "eeepc::touchpad";
eeepc->tpd_led.brightness_set = tpd_led_set;
+ if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
+ eeepc->tpd_led.brightness_get = tpd_led_get;
eeepc->tpd_led.max_brightness = 1;
rv = led_classdev_register(&eeepc->platform_device->dev,
@@ -1115,7 +1126,7 @@ static int update_bl_status(struct backlight_device *bd)
return set_brightness(bd, bd->props.brightness);
}
-static struct backlight_ops eeepcbl_ops = {
+static const struct backlight_ops eeepcbl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 0d50fbbe2478..4d38f98aa976 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -2,6 +2,7 @@
* Eee PC WMI hotkey driver
*
* Copyright(C) 2010 Intel Corporation.
+ * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -34,6 +35,10 @@
#include <linux/input/sparse-keymap.h>
#include <linux/fb.h>
#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -44,6 +49,8 @@ MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
+#define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */
+
#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
@@ -60,6 +67,10 @@ MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
#define EEEPC_WMI_METHODID_CFVS 0x53564643
#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
+#define EEEPC_WMI_DEVID_TPDLED 0x00100011
+#define EEEPC_WMI_DEVID_WLAN 0x00010011
+#define EEEPC_WMI_DEVID_BLUETOOTH 0x00010013
+#define EEEPC_WMI_DEVID_WWAN3G 0x00010019
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
@@ -83,11 +94,37 @@ struct bios_args {
u32 ctrl_param;
};
+/*
+ * eeepc-wmi/ - debugfs root directory
+ * dev_id - current dev_id
+ * ctrl_param - current ctrl_param
+ * devs - call DEVS(dev_id, ctrl_param) and print result
+ * dsts - call DSTS(dev_id) and print result
+ */
+struct eeepc_wmi_debug {
+ struct dentry *root;
+ u32 dev_id;
+ u32 ctrl_param;
+};
+
struct eeepc_wmi {
struct input_dev *inputdev;
struct backlight_device *backlight_device;
+ struct platform_device *platform_device;
+
+ struct led_classdev tpd_led;
+ int tpd_led_wk;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct tpd_led_work;
+
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+
+ struct eeepc_wmi_debug debug;
};
+/* Only used in eeepc_wmi_init() and eeepc_wmi_exit() */
static struct platform_device *platform_device;
static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
@@ -101,7 +138,7 @@ static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
eeepc->inputdev->name = "Eee PC WMI hotkeys";
eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
eeepc->inputdev->id.bustype = BUS_HOST;
- eeepc->inputdev->dev.parent = &platform_device->dev;
+ eeepc->inputdev->dev.parent = &eeepc->platform_device->dev;
err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
if (err)
@@ -130,7 +167,7 @@ static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
eeepc->inputdev = NULL;
}
-static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
+static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *retval)
{
struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -150,8 +187,8 @@ static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
else
tmp = 0;
- if (ctrl_param)
- *ctrl_param = tmp;
+ if (retval)
+ *retval = tmp;
kfree(obj);
@@ -159,7 +196,8 @@ static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
}
-static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
+static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
+ u32 *retval)
{
struct bios_args args = {
.dev_id = dev_id,
@@ -168,34 +206,281 @@ static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
acpi_status status;
- status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
- 1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
+ if (!retval) {
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
+ EEEPC_WMI_METHODID_DEVS,
+ &input, NULL);
+ } else {
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ u32 tmp;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
+ EEEPC_WMI_METHODID_DEVS,
+ &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32)obj->integer.value;
+ else
+ tmp = 0;
+
+ *retval = tmp;
+
+ kfree(obj);
+ }
return status;
}
+/*
+ * LEDs
+ */
+/*
+ * These functions actually update the LEDs, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Eeepc ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
+{
+ int ctrl_param;
+ struct eeepc_wmi *eeepc;
+
+ eeepc = container_of(work, struct eeepc_wmi, tpd_led_work);
+
+ ctrl_param = eeepc->tpd_led_wk;
+ eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_TPDLED, ctrl_param, NULL);
+}
+
+static void tpd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct eeepc_wmi *eeepc;
+
+ eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
+
+ eeepc->tpd_led_wk = !!value;
+ queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
+}
+
+static int read_tpd_state(struct eeepc_wmi *eeepc)
+{
+ u32 retval;
+ acpi_status status;
+
+ status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_TPDLED, &retval);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+ else if (!retval || retval == 0x00060000)
+ /*
+ * If the touchpad LED is present, DSTS will set some bits,
+ * usually 0x00020000.
+ * 0x00060000 means that the device is not supported
+ */
+ return -ENODEV;
+ else
+ /* Status is stored in the first bit */
+ return retval & 0x1;
+}
+
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+ struct eeepc_wmi *eeepc;
+
+ eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
+
+ return read_tpd_state(eeepc);
+}
+
+static int eeepc_wmi_led_init(struct eeepc_wmi *eeepc)
+{
+ int rv;
+
+ if (read_tpd_state(eeepc) < 0)
+ return 0;
+
+ eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!eeepc->led_workqueue)
+ return -ENOMEM;
+ INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
+
+ eeepc->tpd_led.name = "eeepc::touchpad";
+ eeepc->tpd_led.brightness_set = tpd_led_set;
+ eeepc->tpd_led.brightness_get = tpd_led_get;
+ eeepc->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&eeepc->platform_device->dev,
+ &eeepc->tpd_led);
+ if (rv) {
+ destroy_workqueue(eeepc->led_workqueue);
+ return rv;
+ }
+
+ return 0;
+}
+
+static void eeepc_wmi_led_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->tpd_led.dev)
+ led_classdev_unregister(&eeepc->tpd_led);
+ if (eeepc->led_workqueue)
+ destroy_workqueue(eeepc->led_workqueue);
+}
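
Once registered, the LED is controlled like any other led_classdev through sysfs; a minimal userspace sketch, assuming the class device appears under the name set above ("eeepc::touchpad"):

/* Illustrative only: switch the touchpad LED on or off via sysfs. */
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *val = (argc > 1 && argv[1][0] == '0') ? "0" : "1";
	int fd = open("/sys/class/leds/eeepc::touchpad/brightness", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, val, 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
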
+
+/*
+ * Rfkill devices
+ */
+static int eeepc_rfkill_set(void *data, bool blocked)
+{
+ int dev_id = (unsigned long)data;
+ u32 ctrl_param = !blocked;
+
+ return eeepc_wmi_set_devstate(dev_id, ctrl_param, NULL);
+}
+
+static void eeepc_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ int dev_id = (unsigned long)data;
+ u32 retval;
+ acpi_status status;
+
+ status = eeepc_wmi_get_devstate(dev_id, &retval);
+
+ if (ACPI_FAILURE(status))
+ return;
+
+ rfkill_set_sw_state(rfkill, !(retval & 0x1));
+}
+
+static const struct rfkill_ops eeepc_rfkill_ops = {
+ .set_block = eeepc_rfkill_set,
+ .query = eeepc_rfkill_query,
+};
+
+static int eeepc_new_rfkill(struct eeepc_wmi *eeepc,
+ struct rfkill **rfkill,
+ const char *name,
+ enum rfkill_type type, int dev_id)
+{
+ int result;
+ u32 retval;
+ acpi_status status;
+
+ status = eeepc_wmi_get_devstate(dev_id, &retval);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ /* If the device is present, DSTS will always set some bits
+ * 0x00070000 - 1110000000000000000 - device supported
+ * 0x00060000 - 1100000000000000000 - not supported
+ * 0x00020000 - 0100000000000000000 - device supported
+ * 0x00010000 - 0010000000000000000 - not supported / special mode ?
+ */
+ if (!retval || retval == 0x00060000)
+ return -ENODEV;
+
+ *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
+ &eeepc_rfkill_ops, (void *)(long)dev_id);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, !(retval & 0x1));
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+static void eeepc_wmi_rfkill_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->wlan_rfkill) {
+ rfkill_unregister(eeepc->wlan_rfkill);
+ rfkill_destroy(eeepc->wlan_rfkill);
+ eeepc->wlan_rfkill = NULL;
+ }
+ if (eeepc->bluetooth_rfkill) {
+ rfkill_unregister(eeepc->bluetooth_rfkill);
+ rfkill_destroy(eeepc->bluetooth_rfkill);
+ eeepc->bluetooth_rfkill = NULL;
+ }
+ if (eeepc->wwan3g_rfkill) {
+ rfkill_unregister(eeepc->wwan3g_rfkill);
+ rfkill_destroy(eeepc->wwan3g_rfkill);
+ eeepc->wwan3g_rfkill = NULL;
+ }
+}
+
+static int eeepc_wmi_rfkill_init(struct eeepc_wmi *eeepc)
+{
+ int result = 0;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
+ "eeepc-wlan", RFKILL_TYPE_WLAN,
+ EEEPC_WMI_DEVID_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
+ "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
+ EEEPC_WMI_DEVID_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
+ "eeepc-wwan3g", RFKILL_TYPE_WWAN,
+ EEEPC_WMI_DEVID_WWAN3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_wmi_rfkill_exit(eeepc);
+
+ if (result == -ENODEV)
+ result = 0;
+
+ return result;
+}
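
The rfkill switches registered here show up through the generic /dev/rfkill interface; a minimal userspace sketch that watches state changes (struct rfkill_event comes from <linux/rfkill.h>), for illustration only:

/* Illustrative only: print rfkill state-change events as they arrive. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx %u type %u soft %u hard %u\n",
		       ev.idx, (unsigned)ev.type,
		       (unsigned)ev.soft, (unsigned)ev.hard);
	close(fd);
	return 0;
}
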
+
+/*
+ * Backlight
+ */
static int read_brightness(struct backlight_device *bd)
{
- static u32 ctrl_param;
+ u32 retval;
acpi_status status;
- status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
+ status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &retval);
if (ACPI_FAILURE(status))
return -1;
else
- return ctrl_param & 0xFF;
+ return retval & 0xFF;
}
static int update_bl_status(struct backlight_device *bd)
{
- static u32 ctrl_param;
+ u32 ctrl_param;
acpi_status status;
ctrl_param = bd->props.brightness;
- status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
+ status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT,
+ ctrl_param, NULL);
if (ACPI_FAILURE(status))
return -1;
@@ -234,7 +519,7 @@ static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 15;
bd = backlight_device_register(EEEPC_WMI_FILE,
- &platform_device->dev, eeepc,
+ &eeepc->platform_device->dev, eeepc,
&eeepc_wmi_bl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register backlight device\n");
@@ -321,65 +606,240 @@ static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
+static struct attribute *platform_attributes[] = {
+ &dev_attr_cpufv.attr,
+ NULL
+};
+
+static struct attribute_group platform_attribute_group = {
+ .attrs = platform_attributes
+};
+
static void eeepc_wmi_sysfs_exit(struct platform_device *device)
{
- device_remove_file(&device->dev, &dev_attr_cpufv);
+ sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
}
static int eeepc_wmi_sysfs_init(struct platform_device *device)
{
- int retval = -ENOMEM;
+ return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
+}
- retval = device_create_file(&device->dev, &dev_attr_cpufv);
- if (retval)
- goto error_sysfs;
+/*
+ * Platform device
+ */
+static int __init eeepc_wmi_platform_init(struct eeepc_wmi *eeepc)
+{
+ int err;
+ eeepc->platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
+ if (!eeepc->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(eeepc->platform_device, eeepc);
+
+ err = platform_device_add(eeepc->platform_device);
+ if (err)
+ goto fail_platform_device;
+
+ err = eeepc_wmi_sysfs_init(eeepc->platform_device);
+ if (err)
+ goto fail_sysfs;
return 0;
-error_sysfs:
- eeepc_wmi_sysfs_exit(platform_device);
- return retval;
+fail_sysfs:
+ platform_device_del(eeepc->platform_device);
+fail_platform_device:
+ platform_device_put(eeepc->platform_device);
+ return err;
}
-static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
+static void eeepc_wmi_platform_exit(struct eeepc_wmi *eeepc)
{
+ eeepc_wmi_sysfs_exit(eeepc->platform_device);
+ platform_device_unregister(eeepc->platform_device);
+}
+
+/*
+ * debugfs
+ */
+struct eeepc_wmi_debugfs_node {
struct eeepc_wmi *eeepc;
- int err;
+ char *name;
+ int (*show)(struct seq_file *m, void *data);
+};
+
+static int show_dsts(struct seq_file *m, void *data)
+{
+ struct eeepc_wmi *eeepc = m->private;
acpi_status status;
+ u32 retval = -1;
- eeepc = platform_get_drvdata(device);
+ status = eeepc_wmi_get_devstate(eeepc->debug.dev_id, &retval);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ seq_printf(m, "DSTS(%x) = %x\n", eeepc->debug.dev_id, retval);
+
+ return 0;
+}
+
+static int show_devs(struct seq_file *m, void *data)
+{
+ struct eeepc_wmi *eeepc = m->private;
+ acpi_status status;
+ u32 retval = -1;
+
+ status = eeepc_wmi_set_devstate(eeepc->debug.dev_id,
+ eeepc->debug.ctrl_param, &retval);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ seq_printf(m, "DEVS(%x, %x) = %x\n", eeepc->debug.dev_id,
+ eeepc->debug.ctrl_param, retval);
+
+ return 0;
+}
+
+static struct eeepc_wmi_debugfs_node eeepc_wmi_debug_files[] = {
+ { NULL, "devs", show_devs },
+ { NULL, "dsts", show_dsts },
+};
+
+static int eeepc_wmi_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct eeepc_wmi_debugfs_node *node = inode->i_private;
+
+ return single_open(file, node->show, node->eeepc);
+}
+
+static const struct file_operations eeepc_wmi_debugfs_io_ops = {
+ .owner = THIS_MODULE,
+ .open = eeepc_wmi_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void eeepc_wmi_debugfs_exit(struct eeepc_wmi *eeepc)
+{
+ debugfs_remove_recursive(eeepc->debug.root);
+}
+
+static int eeepc_wmi_debugfs_init(struct eeepc_wmi *eeepc)
+{
+ struct dentry *dent;
+ int i;
+
+ eeepc->debug.root = debugfs_create_dir(EEEPC_WMI_FILE, NULL);
+ if (!eeepc->debug.root) {
+ pr_err("failed to create debugfs directory");
+ goto error_debugfs;
+ }
+
+ dent = debugfs_create_x32("dev_id", S_IRUGO|S_IWUSR,
+ eeepc->debug.root, &eeepc->debug.dev_id);
+ if (!dent)
+ goto error_debugfs;
+
+ dent = debugfs_create_x32("ctrl_param", S_IRUGO|S_IWUSR,
+ eeepc->debug.root, &eeepc->debug.ctrl_param);
+ if (!dent)
+ goto error_debugfs;
+
+ for (i = 0; i < ARRAY_SIZE(eeepc_wmi_debug_files); i++) {
+ struct eeepc_wmi_debugfs_node *node = &eeepc_wmi_debug_files[i];
+
+ node->eeepc = eeepc;
+ dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
+ eeepc->debug.root, node,
+ &eeepc_wmi_debugfs_io_ops);
+ if (!dent) {
+ pr_err("failed to create debug file: %s\n", node->name);
+ goto error_debugfs;
+ }
+ }
+
+ return 0;
+
+error_debugfs:
+ eeepc_wmi_debugfs_exit(eeepc);
+ return -ENOMEM;
+}
+
+/*
+ * WMI Driver
+ */
+static struct platform_device * __init eeepc_wmi_add(void)
+{
+ struct eeepc_wmi *eeepc;
+ acpi_status status;
+ int err;
+
+ eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
+ if (!eeepc)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ */
+ err = eeepc_wmi_platform_init(eeepc);
+ if (err)
+ goto fail_platform;
err = eeepc_wmi_input_init(eeepc);
if (err)
- goto error_input;
+ goto fail_input;
+
+ err = eeepc_wmi_led_init(eeepc);
+ if (err)
+ goto fail_leds;
+
+ err = eeepc_wmi_rfkill_init(eeepc);
+ if (err)
+ goto fail_rfkill;
if (!acpi_video_backlight_support()) {
err = eeepc_wmi_backlight_init(eeepc);
if (err)
- goto error_backlight;
+ goto fail_backlight;
} else
pr_info("Backlight controlled by ACPI video driver\n");
status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
- eeepc_wmi_notify, eeepc);
+ eeepc_wmi_notify, eeepc);
if (ACPI_FAILURE(status)) {
pr_err("Unable to register notify handler - %d\n",
status);
err = -ENODEV;
- goto error_wmi;
+ goto fail_wmi_handler;
}
- return 0;
+ err = eeepc_wmi_debugfs_init(eeepc);
+ if (err)
+ goto fail_debugfs;
-error_wmi:
+ return eeepc->platform_device;
+
+fail_debugfs:
+ wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+fail_wmi_handler:
eeepc_wmi_backlight_exit(eeepc);
-error_backlight:
+fail_backlight:
+ eeepc_wmi_rfkill_exit(eeepc);
+fail_rfkill:
+ eeepc_wmi_led_exit(eeepc);
+fail_leds:
eeepc_wmi_input_exit(eeepc);
-error_input:
- return err;
+fail_input:
+ eeepc_wmi_platform_exit(eeepc);
+fail_platform:
+ kfree(eeepc);
+ return ERR_PTR(err);
}
-static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
+static int eeepc_wmi_remove(struct platform_device *device)
{
struct eeepc_wmi *eeepc;
@@ -387,7 +847,12 @@ static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
eeepc_wmi_backlight_exit(eeepc);
eeepc_wmi_input_exit(eeepc);
+ eeepc_wmi_led_exit(eeepc);
+ eeepc_wmi_rfkill_exit(eeepc);
+ eeepc_wmi_debugfs_exit(eeepc);
+ eeepc_wmi_platform_exit(eeepc);
+ kfree(eeepc);
return 0;
}
@@ -396,13 +861,31 @@ static struct platform_driver platform_driver = {
.name = EEEPC_WMI_FILE,
.owner = THIS_MODULE,
},
- .probe = eeepc_wmi_platform_probe,
- .remove = __devexit_p(eeepc_wmi_platform_remove),
};
+static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level,
+ void *context, void **retval)
+{
+ pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
+ *(bool *)context = true;
+ return AE_CTRL_TERMINATE;
+}
+
+static int __init eeepc_wmi_check_atkd(void)
+{
+ acpi_status status;
+ bool found = false;
+
+ status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device,
+ &found, NULL);
+
+ if (ACPI_FAILURE(status) || !found)
+ return 0;
+ return -1;
+}
+
static int __init eeepc_wmi_init(void)
{
- struct eeepc_wmi *eeepc;
int err;
if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
@@ -411,58 +894,40 @@ static int __init eeepc_wmi_init(void)
return -ENODEV;
}
- eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
- if (!eeepc)
- return -ENOMEM;
-
- platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
- if (!platform_device) {
- pr_warning("Unable to allocate platform device\n");
- err = -ENOMEM;
- goto fail_platform;
+ if (eeepc_wmi_check_atkd()) {
+ pr_warning("WMI device present, but legacy ATKD device is also "
+ "present and enabled.");
+ pr_warning("You probably booted with acpi_osi=\"Linux\" or "
+ "acpi_osi=\"!Windows 2009\"");
+ pr_warning("Can't load eeepc-wmi, use default acpi_osi "
+ "(preferred) or eeepc-laptop");
+ return -ENODEV;
}
- err = platform_device_add(platform_device);
- if (err) {
- pr_warning("Unable to add platform device\n");
- goto put_dev;
+ platform_device = eeepc_wmi_add();
+ if (IS_ERR(platform_device)) {
+ err = PTR_ERR(platform_device);
+ goto fail_eeepc_wmi;
}
- platform_set_drvdata(platform_device, eeepc);
-
err = platform_driver_register(&platform_driver);
if (err) {
pr_warning("Unable to register platform driver\n");
- goto del_dev;
+ goto fail_platform_driver;
}
- err = eeepc_wmi_sysfs_init(platform_device);
- if (err)
- goto del_sysfs;
-
return 0;
-del_sysfs:
- eeepc_wmi_sysfs_exit(platform_device);
-del_dev:
- platform_device_del(platform_device);
-put_dev:
- platform_device_put(platform_device);
-fail_platform:
- kfree(eeepc);
-
+fail_platform_driver:
+ eeepc_wmi_remove(platform_device);
+fail_eeepc_wmi:
return err;
}
static void __exit eeepc_wmi_exit(void)
{
- struct eeepc_wmi *eeepc;
-
- eeepc_wmi_sysfs_exit(platform_device);
- eeepc = platform_get_drvdata(platform_device);
+ eeepc_wmi_remove(platform_device);
platform_driver_unregister(&platform_driver);
- platform_device_unregister(platform_device);
- kfree(eeepc);
}
module_init(eeepc_wmi_init);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index f44cd2620ff9..95e3b0948e9c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -437,7 +437,7 @@ static int bl_update_status(struct backlight_device *b)
return ret;
}
-static struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsubl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
@@ -689,7 +689,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
+ result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
if (result) {
printk(KERN_ERR "Error reading power state\n");
goto err_unregister_input_dev;
@@ -857,7 +857,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
+ result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
if (result) {
printk(KERN_ERR "Error reading power state\n");
goto err_unregister_input_dev;
@@ -1240,7 +1240,7 @@ MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
-static struct pnp_device_id pnp_ids[] = {
+static struct pnp_device_id pnp_ids[] __used = {
{.id = "FUJ02bf"},
{.id = "FUJ02B1"},
{.id = "FUJ02E3"},
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5ff12205aa6b..114d95247cdf 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1,5 +1,5 @@
/*
- * ideapad_acpi.c - Lenovo IdeaPad ACPI Extras
+ * ideapad-laptop.c - Lenovo IdeaPad ACPI Extras
*
* Copyright © 2010 Intel Corporation
* Copyright © 2010 David Woodhouse <dwmw2@infradead.org>
@@ -27,31 +27,19 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
-#define IDEAPAD_DEV_CAMERA 0
-#define IDEAPAD_DEV_WLAN 1
-#define IDEAPAD_DEV_BLUETOOTH 2
-#define IDEAPAD_DEV_3G 3
-#define IDEAPAD_DEV_KILLSW 4
+#define IDEAPAD_RFKILL_DEV_NUM (3)
struct ideapad_private {
- acpi_handle handle;
- struct rfkill *rfk[5];
-} *ideapad_priv;
-
-static struct {
- char *name;
- int cfgbit;
- int opcode;
- int type;
-} ideapad_rfk_data[] = {
- { "ideapad_camera", 19, 0x1E, NUM_RFKILL_TYPES },
- { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
- { "ideapad_killsw", 0, 0, RFKILL_TYPE_WLAN }
+ struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+ struct platform_device *platform_device;
+ struct input_dev *inputdev;
};
+static acpi_handle ideapad_handle;
static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -163,17 +151,17 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
pr_err("timeout in write_ec_cmd\n");
return -1;
}
-/* the above is ACPI helpers */
+/*
+ * camera power
+ */
static ssize_t show_ideapad_cam(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct ideapad_private *priv = dev_get_drvdata(dev);
- acpi_handle handle = priv->handle;
unsigned long result;
- if (read_ec_data(handle, 0x1D, &result))
+ if (read_ec_data(ideapad_handle, 0x1D, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -182,15 +170,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ideapad_private *priv = dev_get_drvdata(dev);
- acpi_handle handle = priv->handle;
int ret, state;
if (!count)
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = write_ec_cmd(handle, 0x1E, state);
+ ret = write_ec_cmd(ideapad_handle, 0x1E, state);
if (ret < 0)
return ret;
return count;
@@ -198,16 +184,27 @@ static ssize_t store_ideapad_cam(struct device *dev,
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
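
Together with the platform device and attribute group added further down in this patch, camera power becomes a plain sysfs file; a minimal userspace sketch, assuming the attribute ends up at /sys/devices/platform/ideapad/camera_power (the path is an assumption based on the "ideapad" platform device name):

/* Illustrative only: read the current camera state and toggle it. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char cur = '0';
	int fd = open("/sys/devices/platform/ideapad/camera_power", O_RDWR);

	if (fd < 0)
		return 1;
	if (read(fd, &cur, 1) != 1)
		cur = '0';
	lseek(fd, 0, SEEK_SET);
	if (write(fd, cur == '0' ? "1" : "0", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
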
+/*
+ * Rfkill
+ */
+struct ideapad_rfk_data {
+ char *name;
+ int cfgbit;
+ int opcode;
+ int type;
+};
+
+const struct ideapad_rfk_data ideapad_rfk_data[] = {
+ { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
+};
+
static int ideapad_rfk_set(void *data, bool blocked)
{
- int device = (unsigned long)data;
+ unsigned long opcode = (unsigned long)data;
- if (device == IDEAPAD_DEV_KILLSW)
- return -EINVAL;
-
- return write_ec_cmd(ideapad_priv->handle,
- ideapad_rfk_data[device].opcode,
- !blocked);
+ return write_ec_cmd(ideapad_handle, opcode, !blocked);
}
static struct rfkill_ops ideapad_rfk_ops = {
@@ -217,20 +214,20 @@ static struct rfkill_ops ideapad_rfk_ops = {
static void ideapad_sync_rfk_state(struct acpi_device *adevice)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
- acpi_handle handle = priv->handle;
unsigned long hw_blocked;
int i;
- if (read_ec_data(handle, 0x23, &hw_blocked))
+ if (read_ec_data(ideapad_handle, 0x23, &hw_blocked))
return;
hw_blocked = !hw_blocked;
- for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
if (priv->rfk[i])
rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
+ int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
@@ -239,7 +236,7 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
if (no_bt_rfkill &&
(ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
/* Force to enable bluetooth when no_bt_rfkill=1 */
- write_ec_cmd(ideapad_priv->handle,
+ write_ec_cmd(ideapad_handle,
ideapad_rfk_data[dev].opcode, 1);
return 0;
}
@@ -250,7 +247,7 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
if (!priv->rfk[dev])
return -ENOMEM;
- if (read_ec_data(ideapad_priv->handle, ideapad_rfk_data[dev].opcode-1,
+ if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
&sw_blocked)) {
rfkill_init_sw_state(priv->rfk[dev], 0);
} else {
@@ -266,7 +263,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
return 0;
}
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
+ int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
@@ -277,73 +275,177 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
rfkill_destroy(priv->rfk[dev]);
}
+/*
+ * Platform device
+ */
+static struct attribute *ideapad_attributes[] = {
+ &dev_attr_camera_power.attr,
+ NULL
+};
+
+static struct attribute_group ideapad_attribute_group = {
+ .attrs = ideapad_attributes
+};
+
+static int __devinit ideapad_platform_init(struct ideapad_private *priv)
+{
+ int result;
+
+ priv->platform_device = platform_device_alloc("ideapad", -1);
+ if (!priv->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(priv->platform_device, priv);
+
+ result = platform_device_add(priv->platform_device);
+ if (result)
+ goto fail_platform_device;
+
+ result = sysfs_create_group(&priv->platform_device->dev.kobj,
+ &ideapad_attribute_group);
+ if (result)
+ goto fail_sysfs;
+ return 0;
+
+fail_sysfs:
+ platform_device_del(priv->platform_device);
+fail_platform_device:
+ platform_device_put(priv->platform_device);
+ return result;
+}
+
+static void ideapad_platform_exit(struct ideapad_private *priv)
+{
+ sysfs_remove_group(&priv->platform_device->dev.kobj,
+ &ideapad_attribute_group);
+ platform_device_unregister(priv->platform_device);
+}
+
+/*
+ * input device
+ */
+static const struct key_entry ideapad_keymap[] = {
+ { KE_KEY, 0x06, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x0D, { KEY_WLAN } },
+ { KE_END, 0 },
+};
+
+static int __devinit ideapad_input_init(struct ideapad_private *priv)
+{
+ struct input_dev *inputdev;
+ int error;
+
+ inputdev = input_allocate_device();
+ if (!inputdev) {
+ pr_info("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ inputdev->name = "Ideapad extra buttons";
+ inputdev->phys = "ideapad/input0";
+ inputdev->id.bustype = BUS_HOST;
+ inputdev->dev.parent = &priv->platform_device->dev;
+
+ error = sparse_keymap_setup(inputdev, ideapad_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(inputdev);
+ if (error) {
+ pr_err("Unable to register input device\n");
+ goto err_free_keymap;
+ }
+
+ priv->inputdev = inputdev;
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(inputdev);
+err_free_dev:
+ input_free_device(inputdev);
+ return error;
+}
+
+static void __devexit ideapad_input_exit(struct ideapad_private *priv)
+{
+ sparse_keymap_free(priv->inputdev);
+ input_unregister_device(priv->inputdev);
+ priv->inputdev = NULL;
+}
+
+static void ideapad_input_report(struct ideapad_private *priv,
+ unsigned long scancode)
+{
+ sparse_keymap_report_event(priv->inputdev, scancode, 1, true);
+}
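
The reported keys arrive through a regular evdev node named "Ideapad extra buttons"; a minimal userspace sketch that dumps key events (the /dev/input/eventN path is an assumption and depends on enumeration order):

/* Illustrative only: dump EV_KEY events from the ideapad input device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(int argc, char **argv)
{
	struct input_event ev;
	const char *node = (argc > 1) ? argv[1] : "/dev/input/event0";
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_KEY)
			printf("key %u %s\n", (unsigned)ev.code,
			       ev.value ? "pressed" : "released");
	}
	close(fd);
	return 0;
}
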
+
+/*
+ * module init/exit
+ */
static const struct acpi_device_id ideapad_device_ids[] = {
{ "VPC2004", 0},
{ "", 0},
};
MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
- int i, cfg;
- int devs_present[5];
+ int ret, i, cfg;
struct ideapad_private *priv;
if (read_method_int(adevice->handle, "_CFG", &cfg))
return -ENODEV;
- for (i = IDEAPAD_DEV_CAMERA; i < IDEAPAD_DEV_KILLSW; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
- devs_present[i] = 1;
- else
- devs_present[i] = 0;
- }
-
- /* The hardware switch is always present */
- devs_present[IDEAPAD_DEV_KILLSW] = 1;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ dev_set_drvdata(&adevice->dev, priv);
+ ideapad_handle = adevice->handle;
- if (devs_present[IDEAPAD_DEV_CAMERA]) {
- int ret = device_create_file(&adevice->dev, &dev_attr_camera_power);
- if (ret) {
- kfree(priv);
- return ret;
- }
- }
+ ret = ideapad_platform_init(priv);
+ if (ret)
+ goto platform_failed;
- priv->handle = adevice->handle;
- dev_set_drvdata(&adevice->dev, priv);
- ideapad_priv = priv;
- for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) {
- if (!devs_present[i])
- continue;
+ ret = ideapad_input_init(priv);
+ if (ret)
+ goto input_failed;
- ideapad_register_rfkill(adevice, i);
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
+ if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+ ideapad_register_rfkill(adevice, i);
+ else
+ priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(adevice);
+
return 0;
+
+input_failed:
+ ideapad_platform_exit(priv);
+platform_failed:
+ kfree(priv);
+ return ret;
}
-static int ideapad_acpi_remove(struct acpi_device *adevice, int type)
+static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int i;
- device_remove_file(&adevice->dev, &dev_attr_camera_power);
-
- for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(adevice, i);
-
+ ideapad_input_exit(priv);
+ ideapad_platform_exit(priv);
dev_set_drvdata(&adevice->dev, NULL);
kfree(priv);
+
return 0;
}
static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
acpi_handle handle = adevice->handle;
unsigned long vpc1, vpc2, vpc_bit;
@@ -357,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
if (test_bit(vpc_bit, &vpc1)) {
if (vpc_bit == 9)
ideapad_sync_rfk_state(adevice);
+ else
+ ideapad_input_report(priv, vpc_bit);
}
}
}
@@ -371,19 +475,14 @@ static struct acpi_driver ideapad_acpi_driver = {
.owner = THIS_MODULE,
};
-
static int __init ideapad_acpi_module_init(void)
{
- acpi_bus_register_driver(&ideapad_acpi_driver);
-
- return 0;
+ return acpi_bus_register_driver(&ideapad_acpi_driver);
}
-
static void __exit ideapad_acpi_module_exit(void)
{
acpi_bus_unregister_driver(&ideapad_acpi_driver);
-
}
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f0b3ad13c273..1294a39373ba 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1474,7 +1474,7 @@ ips_gpu_turbo_enabled(struct ips_driver *ips)
}
void
-ips_link_to_i915_driver()
+ips_link_to_i915_driver(void)
{
/* We can't cleanly get at the various ips_driver structs from
* this caller (the i915 driver), so just set a flag saying
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index e61db9dfebef..61433d492862 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
#define GPOSW_DOU 0x08
#define GPOSW_RDRV 0x30
+#define GPIO_UPDATE_TYPE 0x80000000
#define NUM_GPIO 24
-struct pmic_gpio_irq {
- spinlock_t lock;
- u32 trigger[NUM_GPIO];
- u32 dirty;
- struct work_struct work;
-};
-
-
struct pmic_gpio {
+ struct mutex buslock;
struct gpio_chip chip;
- struct pmic_gpio_irq irqtypes;
void *gpiointr;
int irq;
unsigned irq_base;
+ unsigned int update_type;
+ u32 trigger_type;
};
-static void pmic_program_irqtype(int gpio, int type)
-{
- if (type & IRQ_TYPE_EDGE_RISING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
- struct pmic_gpio_irq *t =
- container_of(work, struct pmic_gpio_irq, work);
- unsigned long flags;
- int i;
- u16 type;
-
- spin_lock_irqsave(&t->lock, flags);
- /* As we drop the lock, we may need multiple scans if we race the
- pmic_irq_type function */
- while (t->dirty) {
- /*
- * For each pin that has the dirty bit set send an IPC
- * message to configure the hardware via the PMIC
- */
- for (i = 0; i < NUM_GPIO; i++) {
- if (!(t->dirty & (1 << i)))
- continue;
- t->dirty &= ~(1 << i);
- /* We can't trust the array entry or dirty
- once the lock is dropped */
- type = t->trigger[i];
- spin_unlock_irqrestore(&t->lock, flags);
- pmic_program_irqtype(i, type);
- spin_lock_irqsave(&t->lock, flags);
- }
- }
- spin_unlock_irqrestore(&t->lock, flags);
-}
-
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1 << (offset - 16));
}
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We cannot access the SCU bus here, so we
+ * store the change and apply it in the bus_sync_unlock() function below.
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
{
- struct pmic_gpio *pg = get_irq_chip_data(irq);
- u32 gpio = irq - pg->irq_base;
- unsigned long flags;
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+ u32 gpio = data->irq - pg->irq_base;
if (gpio >= pg->chip.ngpio)
return -EINVAL;
- spin_lock_irqsave(&pg->irqtypes.lock, flags);
- pg->irqtypes.trigger[gpio] = type;
- pg->irqtypes.dirty |= (1 << gpio);
- spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
- schedule_work(&pg->irqtypes.work);
+ pg->trigger_type = type;
+ pg->update_type = gpio | GPIO_UPDATE_TYPE;
return 0;
}
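
The bus_lock/bus_sync_unlock callbacks the comment above refers to are not part of this excerpt; as a rough sketch of the pattern (an illustration, not the literal hunks of this patch), the chip would take pg->buslock in irq_bus_lock() and flush the cached trigger type to the PMIC once genirq drops the descriptor lock:

/* Sketch only: how the deferred update described above could be wired up. */
static void pmic_bus_lock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	mutex_lock(&pg->buslock);
}

static void pmic_bus_sync_unlock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	if (pg->update_type & GPIO_UPDATE_TYPE) {
		/*
		 * Safe to touch the SCU bus here: program pg->trigger_type
		 * for gpio (pg->update_type & ~GPIO_UPDATE_TYPE) using the
		 * intel_scu_ipc register calls, then clear the pending flag.
		 */
		pg->update_type = 0;
	}
	mutex_unlock(&pg->buslock);
}

These would be hooked into pmic_irqchip as .irq_bus_lock and .irq_bus_sync_unlock.
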
-
-
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,34 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
}
/* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
.name = "PMIC-GPIO",
- .mask = pmic_irq_mask,
- .unmask = pmic_irq_unmask,
- .set_type = pmic_irq_type,
+ .irq_mask = pmic_irq_mask,
+ .irq_unmask = pmic_irq_unmask,
+ .irq_set_type = pmic_irq_type,
};
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
{
- struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+ struct pmic_gpio *pg = data;
u8 intsts = *((u8 *)pg->gpiointr + 4);
int gpio;
+ irqreturn_t ret = IRQ_NONE;
for (gpio = 0; gpio < 8; gpio++) {
if (intsts & (1 << gpio)) {
pr_debug("pmic pin %d triggered\n", gpio);
generic_handle_irq(pg->irq_base + gpio);
+ ret = IRQ_HANDLED;
}
}
- desc->chip->eoi(irq);
+ return ret;
}
static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -293,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.can_sleep = 1;
pg->chip.dev = dev;
- INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
- spin_lock_init(&pg->irqtypes.lock);
+ mutex_init(&pg->buslock);
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
@@ -302,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
goto err;
}
- set_irq_data(pg->irq, pg);
- set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+ retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+ if (retval) {
+ printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ goto err;
+ }
+
for (i = 0; i < 8; i++) {
set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
handle_simple_irq, "demux");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index ca35b0ce944a..a91d510a798b 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,7 +26,6 @@
#include <linux/sfi.h>
#include <asm/mrst.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
/* IPC defines the following message types */
#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
@@ -161,7 +160,7 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
{
int i, nc, bytes, d;
u32 offset = 0;
- u32 err = 0;
+ int err;
u8 cbuf[IPC_WWBUF_SIZE] = { };
u32 *wbuf = (u32 *)&cbuf;
@@ -404,7 +403,7 @@ EXPORT_SYMBOL(intel_scu_ipc_update_register);
*/
int intel_scu_ipc_simple_command(int cmd, int sub)
{
- u32 err = 0;
+ int err;
mutex_lock(&ipclock);
if (ipcdev.pdev == NULL) {
@@ -434,8 +433,7 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen)
{
- u32 err = 0;
- int i = 0;
+ int i, err;
mutex_lock(&ipclock);
if (ipcdev.pdev == NULL) {
@@ -497,7 +495,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
mutex_unlock(&ipclock);
- return -1;
+ return -EIO;
}
mutex_unlock(&ipclock);
return 0;
@@ -642,7 +640,7 @@ update_end:
if (status == IPC_FW_UPDATE_SUCCESS)
return 0;
- return -1;
+ return -EIO;
}
EXPORT_SYMBOL(intel_scu_ipc_fw_update);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
new file mode 100644
index 000000000000..b93a03259c16
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -0,0 +1,133 @@
+/*
+ * intel_scu_ipcutil.c: Utility driver for the Intel SCU IPC mechanism
+ *
+ * (C) Copyright 2008-2010 Intel Corporation
+ * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This driver provides an ioctl interface to call the Intel SCU IPC driver API.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/intel_scu_ipc.h>
+
+static u32 major;
+
+#define MAX_FW_SIZE 264192
+
+/* ioctl commands */
+#define INTE_SCU_IPC_REGISTER_READ 0
+#define INTE_SCU_IPC_REGISTER_WRITE 1
+#define INTE_SCU_IPC_REGISTER_UPDATE 2
+#define INTE_SCU_IPC_FW_UPDATE 0xA2
+
+struct scu_ipc_data {
+ u32 count; /* No. of registers */
+ u16 addr[5]; /* Register addresses */
+ u8 data[5]; /* Register data */
+ u8 mask; /* Valid for read-modify-write */
+};
+
+/**
+ * scu_reg_access - implement register access ioctls
+ * @cmd: command we are doing (read/write/update)
+ * @data: kernel copy of ioctl data
+ *
+ * Allow the user to perform register accesses on the SCU via the
+ * kernel interface
+ */
+
+static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
+{
+ int count = data->count;
+
+ if (count == 0 || count == 3 || count > 4)
+ return -EINVAL;
+
+ switch (cmd) {
+ case INTE_SCU_IPC_REGISTER_READ:
+ return intel_scu_ipc_readv(data->addr, data->data, count);
+ case INTE_SCU_IPC_REGISTER_WRITE:
+ return intel_scu_ipc_writev(data->addr, data->data, count);
+ case INTE_SCU_IPC_REGISTER_UPDATE:
+ return intel_scu_ipc_update_register(data->addr[0],
+ data->data[0], data->mask);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/**
+ * scu_ipc_ioctl - control ioctls for the SCU
+ * @fp: file handle of the SCU device
+ * @cmd: ioctl code
+ * @arg: pointer to user passed structure
+ *
+ * Support the I/O and firmware flashing interfaces of the SCU
+ */
+static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct scu_ipc_data data;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (cmd == INTE_SCU_IPC_FW_UPDATE) {
+ u8 *fwbuf = kmalloc(MAX_FW_SIZE, GFP_KERNEL);
+ if (fwbuf == NULL)
+ return -ENOMEM;
+ if (copy_from_user(fwbuf, (u8 *)arg, MAX_FW_SIZE)) {
+ kfree(fwbuf);
+ return -EFAULT;
+ }
+ ret = intel_scu_ipc_fw_update(fwbuf, MAX_FW_SIZE);
+ kfree(fwbuf);
+ return ret;
+ } else {
+ if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ ret = scu_reg_access(cmd, &data);
+ if (ret < 0)
+ return ret;
+ if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+ return -EFAULT;
+ return 0;
+ }
+}
+
+static const struct file_operations scu_ipc_fops = {
+ .unlocked_ioctl = scu_ipc_ioctl,
+};
+
+static int __init ipc_module_init(void)
+{
+ return register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
+}
+
+static void __exit ipc_module_exit(void)
+{
+ unregister_chrdev(major, "intel_mid_scu");
+}
+
+module_init(ipc_module_init);
+module_exit(ipc_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Utility driver for intel scu ipc");
+MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
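
A minimal userspace sketch of the register-read path, assuming a device node has been created by hand (e.g. with mknod) for the dynamic major that register_chrdev() returns for "intel_mid_scu"; the /dev path below is an assumption, the struct and command value mirror the definitions above, and the ioctl requires CAP_SYS_RAWIO:

/* Illustrative only: read one register through the SCU ioctl interface. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define INTE_SCU_IPC_REGISTER_READ	0

struct scu_ipc_data {
	unsigned int count;	/* No. of registers */
	unsigned short addr[5];	/* Register addresses */
	unsigned char data[5];	/* Register data */
	unsigned char mask;	/* Valid for read-modify-write */
};

int main(void)
{
	struct scu_ipc_data data = { .count = 1, .addr = { 0x00 } };
	int fd = open("/dev/intel_mid_scu", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, INTE_SCU_IPC_REGISTER_READ, &data) == 0)
		printf("reg 0x%x = 0x%x\n", data.addr[0], data.data[0]);
	close(fd);
	return 0;
}
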
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index f200677851b8..5e83370b0812 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -235,6 +235,7 @@ static int sony_laptop_input_index[] = {
57, /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
-1, /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
58, /* 72 SONYPI_EVENT_MEDIA_PRESSED */
+ 59, /* 73 SONYPI_EVENT_VENDOR_PRESSED */
};
static int sony_laptop_input_keycode_map[] = {
@@ -297,6 +298,7 @@ static int sony_laptop_input_keycode_map[] = {
KEY_VOLUMEUP, /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
KEY_MEDIA, /* 58 SONYPI_EVENT_MEDIA_PRESSED */
+ KEY_VENDOR, /* 59 SONYPI_EVENT_VENDOR_PRESSED */
};
/* release buttons after a short delay if pressed */
@@ -856,7 +858,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
}
static struct backlight_device *sony_backlight_device;
-static struct backlight_ops sony_backlight_ops = {
+static const struct backlight_ops sony_backlight_ops = {
.update_status = sony_backlight_update_status,
.get_brightness = sony_backlight_get_brightness,
};
@@ -894,10 +896,18 @@ static struct sony_nc_event sony_100_events[] = {
{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
{ 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
{ 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED },
+ { 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa5, SONYPI_EVENT_VENDOR_PRESSED },
+ { 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa6, SONYPI_EVENT_HELP_PRESSED },
+ { 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0, 0 },
};
@@ -1131,7 +1141,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
return err;
}
-static void sony_nc_rfkill_update()
+static void sony_nc_rfkill_update(void)
{
enum sony_nc_rfkill i;
int result;
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1feff71..865ef78d6f1a 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
return -EINVAL; \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e8c21994b36d..eb9922385ef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -589,6 +589,7 @@ static int acpi_evalf(acpi_handle handle,
default:
printk(TPACPI_ERR "acpi_evalf() called "
"with invalid format character '%c'\n", c);
+ va_end(ap);
return 0;
}
}
@@ -2274,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
if (keycode != KEY_RESERVED) {
mutex_lock(&tpacpi_inputdev_send_mutex);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 1);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 0);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
mutex_unlock(&tpacpi_inputdev_send_mutex);
@@ -6109,7 +6106,7 @@ static void tpacpi_brightness_notify_change(void)
BACKLIGHT_UPDATE_HOTKEY);
}
-static struct backlight_ops ibm_backlight_data = {
+static const struct backlight_ops ibm_backlight_data = {
.get_brightness = brightness_get,
.update_status = brightness_update_status,
};
@@ -6345,7 +6342,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
"as change notification\n");
tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
| TP_ACPI_HKEY_BRGHTUP_MASK
- | TP_ACPI_HKEY_BRGHTDWN_MASK);;
+ | TP_ACPI_HKEY_BRGHTDWN_MASK);
return 0;
}
@@ -7193,7 +7190,7 @@ static struct ibm_struct volume_driver_data = {
* TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
*
* FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
- * boot. Apparently the EC does not intialize it, so unless ACPI DSDT
+ * boot. Apparently the EC does not initialize it, so unless ACPI DSDT
* does so, its initial value is meaningless (0x07).
*
* For firmware bugs, refer to:
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 4276da7291b8..209cced786c6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -841,7 +841,7 @@ static void remove_toshiba_proc_entries(void)
remove_proc_entry("version", toshiba_proc_dir);
}
-static struct backlight_ops toshiba_backlight_data = {
+static const struct backlight_ops toshiba_backlight_data = {
.get_brightness = get_lcd,
.update_status = set_lcd_status,
};
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aecd9a9b549f..05cc79672a8b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -549,21 +549,34 @@ acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data)
{
struct wmi_block *block;
- acpi_status status;
+ acpi_status status = AE_NOT_EXIST;
+ char tmp[16], guid_input[16];
+ struct list_head *p;
if (!guid || !handler)
return AE_BAD_PARAMETER;
- if (!find_guid(guid, &block))
- return AE_NOT_EXIST;
+ wmi_parse_guid(guid, tmp);
+ wmi_swap_bytes(tmp, guid_input);
- if (block->handler && block->handler != wmi_notify_debug)
- return AE_ALREADY_ACQUIRED;
+ list_for_each(p, &wmi_block_list) {
+ acpi_status wmi_status;
+ block = list_entry(p, struct wmi_block, list);
- block->handler = handler;
- block->handler_data = data;
+ if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+ if (block->handler &&
+ block->handler != wmi_notify_debug)
+ return AE_ALREADY_ACQUIRED;
- status = wmi_method_enable(block, 1);
+ block->handler = handler;
+ block->handler_data = data;
+
+ wmi_status = wmi_method_enable(block, 1);
+ if ((wmi_status != AE_OK) ||
+ ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
+ status = wmi_status;
+ }
+ }
return status;
}
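
For reference, a caller of this API registers a handler per GUID and removes it on unload; a minimal sketch (the GUID string is a placeholder, not a real device GUID, and the wmi_* prototypes are assumed to come in via <linux/acpi.h> as in the drivers above):

/* Sketch only: registering and removing a WMI notify handler. */
#include <linux/module.h>
#include <linux/acpi.h>

#define EXAMPLE_WMI_EVENT_GUID "00000000-0000-0000-0000-000000000000" /* placeholder */

static void example_wmi_notify(u32 value, void *context)
{
	pr_info("example: WMI event 0x%x\n", value);
}

static int __init example_init(void)
{
	acpi_status status;

	status = wmi_install_notify_handler(EXAMPLE_WMI_EVENT_GUID,
					    example_wmi_notify, NULL);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}

static void __exit example_exit(void)
{
	wmi_remove_notify_handler(EXAMPLE_WMI_EVENT_GUID);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
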
@@ -577,24 +590,40 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
- acpi_status status = AE_OK;
+ acpi_status status = AE_NOT_EXIST;
+ char tmp[16], guid_input[16];
+ struct list_head *p;
if (!guid)
return AE_BAD_PARAMETER;
- if (!find_guid(guid, &block))
- return AE_NOT_EXIST;
+ wmi_parse_guid(guid, tmp);
+ wmi_swap_bytes(tmp, guid_input);
- if (!block->handler || block->handler == wmi_notify_debug)
- return AE_NULL_ENTRY;
+ list_for_each(p, &wmi_block_list) {
+ acpi_status wmi_status;
+ block = list_entry(p, struct wmi_block, list);
- if (debug_event) {
- block->handler = wmi_notify_debug;
- } else {
- status = wmi_method_enable(block, 0);
- block->handler = NULL;
- block->handler_data = NULL;
+ if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+ if (!block->handler ||
+ block->handler == wmi_notify_debug)
+ return AE_NULL_ENTRY;
+
+ if (debug_event) {
+ block->handler = wmi_notify_debug;
+ status = AE_OK;
+ } else {
+ wmi_status = wmi_method_enable(block, 0);
+ block->handler = NULL;
+ block->handler_data = NULL;
+ if ((wmi_status != AE_OK) ||
+ ((wmi_status == AE_OK) &&
+ (status == AE_NOT_EXIST)))
+ status = wmi_status;
+ }
+ }
}
+
return status;
}
EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
@@ -705,22 +734,11 @@ static struct class wmi_class = {
.dev_attrs = wmi_dev_attrs,
};
-static struct wmi_block *wmi_create_device(const struct guid_block *gblock,
- acpi_handle handle)
+static int wmi_create_device(const struct guid_block *gblock,
+ struct wmi_block *wblock, acpi_handle handle)
{
- struct wmi_block *wblock;
- int error;
char guid_string[37];
- wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
- if (!wblock) {
- error = -ENOMEM;
- goto err_out;
- }
-
- wblock->handle = handle;
- wblock->gblock = *gblock;
-
wblock->dev.class = &wmi_class;
wmi_gtoa(gblock->guid, guid_string);
@@ -728,17 +746,7 @@ static struct wmi_block *wmi_create_device(const struct guid_block *gblock,
dev_set_drvdata(&wblock->dev, wblock);
- error = device_register(&wblock->dev);
- if (error)
- goto err_free;
-
- list_add_tail(&wblock->list, &wmi_block_list);
- return wblock;
-
-err_free:
- kfree(wblock);
-err_out:
- return ERR_PTR(error);
+ return device_register(&wblock->dev);
}
static void wmi_free_devices(void)
@@ -747,7 +755,8 @@ static void wmi_free_devices(void)
/* Delete devices for all the GUIDs */
list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
- device_unregister(&wblock->dev);
+ if (wblock->dev.class)
+ device_unregister(&wblock->dev);
}
static bool guid_already_parsed(const char *guid_string)
@@ -770,7 +779,6 @@ static acpi_status parse_wdg(acpi_handle handle)
union acpi_object *obj;
const struct guid_block *gblock;
struct wmi_block *wblock;
- char guid_string[37];
acpi_status status;
int retval;
u32 i, total;
@@ -792,28 +800,31 @@ static acpi_status parse_wdg(acpi_handle handle)
total = obj->buffer.length / sizeof(struct guid_block);
for (i = 0; i < total; i++) {
+ if (debug_dump_wdg)
+ wmi_dump_wdg(&gblock[i]);
+
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock)
+ return AE_NO_MEMORY;
+
+ wblock->handle = handle;
+ wblock->gblock = gblock[i];
+
/*
Some WMI devices, like those for nVidia hooks, have a
duplicate GUID. It's not clear what we should do in this
- case yet, so for now, we'll just ignore the duplicate.
- Anyone who wants to add support for that device can come
- up with a better workaround for the mess then.
+ case yet, so for now, we'll just ignore the duplicate
+ for device creation.
*/
- if (guid_already_parsed(gblock[i].guid) == true) {
- wmi_gtoa(gblock[i].guid, guid_string);
- pr_info("Skipping duplicate GUID %s\n", guid_string);
- continue;
+ if (!guid_already_parsed(gblock[i].guid)) {
+ retval = wmi_create_device(&gblock[i], wblock, handle);
+ if (retval) {
+ wmi_free_devices();
+ goto out_free_pointer;
+ }
}
- if (debug_dump_wdg)
- wmi_dump_wdg(&gblock[i]);
-
- wblock = wmi_create_device(&gblock[i], handle);
- if (IS_ERR(wblock)) {
- retval = PTR_ERR(wblock);
- wmi_free_devices();
- break;
- }
+ list_add_tail(&wblock->list, &wmi_block_list);
if (debug_event) {
wblock->handler = wmi_notify_debug;
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 8de3775ec242..bfba893cb321 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,11 +2,13 @@
# Makefile for the Linux Plug-and-Play Support.
#
-obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
+obj-y := pnp.o
+
+pnp-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
obj-$(CONFIG_PNPACPI) += pnpacpi/
obj-$(CONFIG_PNPBIOS) += pnpbios/
obj-$(CONFIG_ISAPNP) += isapnp/
# pnp_system_init goes after pnpacpi/pnpbios init
-obj-y += system.o
+pnp-y += system.o
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 0f34d962fd3c..cb6ce42f8e77 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -220,10 +220,5 @@ subsys_initcall(pnp_init);
int pnp_debug;
#if defined(CONFIG_PNP_DEBUG_MESSAGES)
-static int __init pnp_debug_setup(char *__unused)
-{
- pnp_debug = 1;
- return 1;
-}
-__setup("pnp.debug", pnp_debug_setup);
+module_param_named(debug, pnp_debug, int, 0644);
#endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index d1dbb9df53fa..00e94032531a 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -189,8 +189,11 @@ static int pnp_bus_resume(struct device *dev)
if (!pnp_drv)
return 0;
- if (pnp_dev->protocol->resume)
- pnp_dev->protocol->resume(pnp_dev);
+ if (pnp_dev->protocol->resume) {
+ error = pnp_dev->protocol->resume(pnp_dev);
+ if (error)
+ return error;
+ }
if (pnp_can_write(pnp_dev)) {
error = pnp_start_dev(pnp_dev);
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index cac18bbfb817..6e607aa33aa3 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for the kernel ISAPNP driver.
#
+obj-y += pnp.o
+pnp-y := core.o compat.o
-isapnp-proc-$(CONFIG_PROC_FS) = proc.o
-
-obj-y := core.o compat.o $(isapnp-proc-y)
+pnp-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 905326fcca85..40c93da18252 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -1,5 +1,6 @@
#
# Makefile for the kernel PNPACPI driver.
#
+obj-y += pnp.o
-obj-y := core.o rsparser.o
+pnp-y := core.o rsparser.o
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 57313f4658bc..ca84d5099ce7 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -81,12 +81,19 @@ static int pnpacpi_get_resources(struct pnp_dev *dev)
static int pnpacpi_set_resources(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev = dev->data;
- acpi_handle handle = acpi_dev->handle;
+ struct acpi_device *acpi_dev;
+ acpi_handle handle;
struct acpi_buffer buffer;
int ret;
pnp_dbg(&dev->dev, "set resources\n");
+
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+ return -ENODEV;
+ }
+
ret = pnpacpi_build_resource_template(dev, &buffer);
if (ret)
return ret;
@@ -105,12 +112,18 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev = dev->data;
- acpi_handle handle = acpi_dev->handle;
+ struct acpi_device *acpi_dev;
+ acpi_handle handle;
int ret;
dev_dbg(&dev->dev, "disable resources\n");
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+ return 0;
+ }
+
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
ret = 0;
if (acpi_bus_power_manageable(handle))
@@ -124,46 +137,74 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
#ifdef CONFIG_ACPI_SLEEP
static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev = dev->data;
- acpi_handle handle = acpi_dev->handle;
+ struct acpi_device *acpi_dev;
+ acpi_handle handle;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+ return false;
+ }
return acpi_bus_can_wakeup(handle);
}
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
- struct acpi_device *acpi_dev = dev->data;
- acpi_handle handle = acpi_dev->handle;
- int power_state;
+ struct acpi_device *acpi_dev;
+ acpi_handle handle;
+ int error = 0;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+ return 0;
+ }
if (device_can_wakeup(&dev->dev)) {
- int rc = acpi_pm_device_sleep_wake(&dev->dev,
+ error = acpi_pm_device_sleep_wake(&dev->dev,
device_may_wakeup(&dev->dev));
+ if (error)
+ return error;
+ }
+
+ if (acpi_bus_power_manageable(handle)) {
+ int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
+
+ if (power_state < 0)
+ power_state = (state.event == PM_EVENT_ON) ?
+ ACPI_STATE_D0 : ACPI_STATE_D3;
- if (rc)
- return rc;
+ /*
+ * acpi_bus_set_power() often fails (keyboard port can't be
+ * powered-down?), and in any case, our return value is ignored
+ * by pnp_bus_suspend(). Hence we don't revert the wakeup
+ * setting if the set_power fails.
+ */
+ error = acpi_bus_set_power(handle, power_state);
}
- power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
- if (power_state < 0)
- power_state = (state.event == PM_EVENT_ON) ?
- ACPI_STATE_D0 : ACPI_STATE_D3;
-
- /* acpi_bus_set_power() often fails (keyboard port can't be
- * powered-down?), and in any case, our return value is ignored
- * by pnp_bus_suspend(). Hence we don't revert the wakeup
- * setting if the set_power fails.
- */
- return acpi_bus_set_power(handle, power_state);
+
+ return error;
}
static int pnpacpi_resume(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev = dev->data;
- acpi_handle handle = acpi_dev->handle;
+ struct acpi_device *acpi_dev;
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ int error = 0;
+
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+ dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+ return -ENODEV;
+ }
if (device_may_wakeup(&dev->dev))
acpi_pm_device_sleep_wake(&dev->dev, false);
- return acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+ if (acpi_bus_power_manageable(handle))
+ error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+ return error;
}
#endif
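
The hunks above repeat one lookup pattern: resolve the ACPI handle from the struct device at call time instead of trusting the acpi_device cached in dev->data. A minimal sketch of that pattern factored into a helper follows; the helper name is hypothetical and not part of the patch, and only the DEVICE_ACPI_HANDLE() and acpi_bus_get_device() calls already used above appear in it.

/* Hypothetical helper illustrating the lookup pattern used above. */
static acpi_handle pnpacpi_resolve_handle(struct pnp_dev *dev)
{
	struct acpi_device *acpi_dev;
	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);

	/* No ACPI companion means the PNP device has lost its binding. */
	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
		return NULL;

	return handle;
}
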
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 3cd3ed760605..240b0ffb83ca 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for the kernel PNPBIOS driver.
#
+obj-y := pnp.o
-pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
+pnp-y := core.o bioscalls.o rsparser.o
-obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
+pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 60d83d983a36..61bf5d724139 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -136,6 +136,16 @@ config BATTERY_MAX17040
in handheld and portable equipment. The MAX17040 is configured
to operate with a single lithium cell
+config BATTERY_MAX17042
+ tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+ depends on I2C
+ help
+ MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
+ in handheld and portable equipment. The MAX17042 is configured
+ to operate with a single lithium cell. MAX8997 and MAX8966 are
+ multi-function devices that include fuel gauges compatible
+ with the MAX17042.
+
config BATTERY_Z2
tristate "Z2 battery driver"
depends on I2C && MACH_ZIPIT2
@@ -185,4 +195,14 @@ config CHARGER_TWL4030
help
Say Y here to enable support for TWL4030 Battery Charge Interface.
+config CHARGER_GPIO
+ tristate "GPIO charger"
+ depends on GPIOLIB
+ help
+ Say Y to include support for chargers which report their online status
+ through a GPIO pin.
+
+ This driver can be built as a module. If so, the module will be
+ called gpio-charger.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c75772eb157c..8385bfae8728 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
@@ -32,3 +33,4 @@ obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
+obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 039f41ae217d..548d263b1ad0 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -295,7 +295,7 @@ static struct {
static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
{
/* flush all pending status updates */
- flush_scheduled_work();
+ flush_work_sync(&bat_work);
return 0;
}
@@ -362,7 +362,7 @@ err_psy_reg_bu:
err_psy_reg_main:
/* see comment in collie_bat_remove */
- flush_scheduled_work();
+ cancel_work_sync(&bat_work);
i--;
err_gpio:
@@ -382,12 +382,11 @@ static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
power_supply_unregister(&collie_bat_main.psy);
/*
- * now flush all pending work.
- * we won't get any more schedules, since all
- * sources (isr and external_power_changed)
- * are unregistered now.
+ * Now cancel the bat_work. We won't get any more schedules,
+ * since all sources (isr and external_power_changed) are
+ * unregistered now.
*/
- flush_scheduled_work();
+ cancel_work_sync(&bat_work);
for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
gpio_free(gpios[i].gpio);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e7f89785beef..e534290f3256 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
if (di->rem_capacity > 100)
di->rem_capacity = 100;
- if (di->current_uA >= 100L)
+ if (di->current_uA < -100L)
di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
/ (di->current_uA / 100L);
else
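
A quick numeric reading of the corrected condition, with hypothetical values: the time-to-empty estimate is only meaningful while discharging, i.e. when current_uA is more negative than -100 µA. For accum_current_uAh = 1,000,000, empty_uAh = 200,000 and current_uA = -400,000, life_sec = -((1,000,000 - 200,000) * 36) / (-400,000 / 100) = 7200, i.e. two hours to empty; the old ">= 100L" test would instead have run this calculation while charging.
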
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
new file mode 100644
index 000000000000..25b88ac1d44c
--- /dev/null
+++ b/drivers/power/gpio-charger.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Driver for chargers which report their online status through a GPIO pin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <linux/power/gpio-charger.h>
+
+struct gpio_charger {
+ const struct gpio_charger_platform_data *pdata;
+ unsigned int irq;
+
+ struct power_supply charger;
+};
+
+static irqreturn_t gpio_charger_irq(int irq, void *devid)
+{
+ struct power_supply *charger = devid;
+
+ power_supply_changed(charger);
+
+ return IRQ_HANDLED;
+}
+
+static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy)
+{
+ return container_of(psy, struct gpio_charger, charger);
+}
+
+static int gpio_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy);
+ const struct gpio_charger_platform_data *pdata = gpio_charger->pdata;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = gpio_get_value(pdata->gpio);
+ val->intval ^= pdata->gpio_active_low;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property gpio_charger_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int __devinit gpio_charger_probe(struct platform_device *pdev)
+{
+ const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_charger *gpio_charger;
+ struct power_supply *charger;
+ int ret;
+ int irq;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(pdata->gpio)) {
+ dev_err(&pdev->dev, "Invalid gpio pin\n");
+ return -EINVAL;
+ }
+
+ gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL);
+ if (!gpio_charger) {
+ dev_err(&pdev->dev, "Failed to alloc driver structure\n");
+ return -ENOMEM;
+ }
+
+ charger = &gpio_charger->charger;
+
+ charger->name = pdata->name ? pdata->name : "gpio-charger";
+ charger->type = pdata->type;
+ charger->properties = gpio_charger_properties;
+ charger->num_properties = ARRAY_SIZE(gpio_charger_properties);
+ charger->get_property = gpio_charger_get_property;
+ charger->supplied_to = pdata->supplied_to;
+ charger->num_supplicants = pdata->num_supplicants;
+
+ ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
+ goto err_free;
+ }
+ ret = gpio_direction_input(pdata->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
+ goto err_gpio_free;
+ }
+
+ gpio_charger->pdata = pdata;
+
+ ret = power_supply_register(&pdev->dev, charger);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register power supply: %d\n",
+ ret);
+ goto err_gpio_free;
+ }
+
+ irq = gpio_to_irq(pdata->gpio);
+ if (irq > 0) {
+ ret = request_any_context_irq(irq, gpio_charger_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ dev_name(&pdev->dev), charger);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
+ else
+ gpio_charger->irq = irq;
+ }
+
+ platform_set_drvdata(pdev, gpio_charger);
+
+ return 0;
+
+err_gpio_free:
+ gpio_free(pdata->gpio);
+err_free:
+ kfree(gpio_charger);
+ return ret;
+}
+
+static int __devexit gpio_charger_remove(struct platform_device *pdev)
+{
+ struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+ if (gpio_charger->irq)
+ free_irq(gpio_charger->irq, &gpio_charger->charger);
+
+ power_supply_unregister(&gpio_charger->charger);
+
+ gpio_free(gpio_charger->pdata->gpio);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(gpio_charger);
+
+ return 0;
+}
+
+static struct platform_driver gpio_charger_driver = {
+ .probe = gpio_charger_probe,
+ .remove = __devexit_p(gpio_charger_remove),
+ .driver = {
+ .name = "gpio-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_charger_init(void)
+{
+ return platform_driver_register(&gpio_charger_driver);
+}
+module_init(gpio_charger_init);
+
+static void __exit gpio_charger_exit(void)
+{
+ platform_driver_unregister(&gpio_charger_driver);
+}
+module_exit(gpio_charger_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-charger");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 36cf402c0677..bce3a01da2f0 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -765,7 +765,7 @@ static int __devexit platform_pmic_battery_remove(struct platform_device *pdev)
power_supply_unregister(&pbi->usb);
power_supply_unregister(&pbi->batt);
- flush_scheduled_work();
+ cancel_work_sync(&pbi->handler);
kfree(pbi);
return 0;
}
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 72512185f3e2..2ad9b14a5ce3 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -59,11 +59,61 @@ struct isp1704_charger {
struct notifier_block nb;
struct work_struct work;
- char model[7];
+ /* properties */
+ char model[8];
unsigned present:1;
+ unsigned online:1;
+ unsigned current_max;
+
+ /* temp storage variables */
+ unsigned long event;
+ unsigned max_power;
};
/*
+ * Determine whether the charging port is DCP (dedicated charger) or CDP
+ * (Host/HUB charger).
+ *
+ * REVISIT: The method is defined in the Battery Charging Specification and is
+ * applicable to any ULPI transceiver. Nothing isp170x-specific here.
+ */
+static inline int isp1704_charger_type(struct isp1704_charger *isp)
+{
+ u8 reg;
+ u8 func_ctrl;
+ u8 otg_ctrl;
+ int type = POWER_SUPPLY_TYPE_USB_DCP;
+
+ func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
+ otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL);
+
+ /* disable pulldowns */
+ reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN;
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg);
+
+ /* full speed */
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_XCVRSEL_MASK);
+ otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_FULL_SPEED);
+
+ /* Enable strong pull-up on DP (1.5K) and reset */
+ reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
+ otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg);
+ usleep_range(1000, 2000);
+
+ reg = otg_io_read(isp->otg, ULPI_DEBUG);
+ if ((reg & 3) != 3)
+ type = POWER_SUPPLY_TYPE_USB_CDP;
+
+ /* recover original state */
+ otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl);
+ otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl);
+
+ return type;
+}
+
+/*
* ISP1704 detects PS/2 adapters as charger. To make sure the detected charger
* is actually a dedicated charger, the following steps need to be taken.
*/
@@ -127,16 +177,19 @@ static inline int isp1704_charger_verify(struct isp1704_charger *isp)
static inline int isp1704_charger_detect(struct isp1704_charger *isp)
{
unsigned long timeout;
- u8 r;
+ u8 pwr_ctrl;
int ret = 0;
+ pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL);
+
/* set SW control bit in PWR_CTRL register */
otg_io_write(isp->otg, ISP1704_PWR_CTRL,
ISP1704_PWR_CTRL_SWCTRL);
/* enable manual charger detection */
- r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
- otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
+ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
+ ISP1704_PWR_CTRL_SWCTRL
+ | ISP1704_PWR_CTRL_DPVSRC_EN);
usleep_range(1000, 2000);
timeout = jiffies + msecs_to_jiffies(300);
@@ -147,7 +200,10 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
ret = isp1704_charger_verify(isp);
break;
}
- } while (!time_after(jiffies, timeout));
+ } while (!time_after(jiffies, timeout) && isp->online);
+
+ /* recover original state */
+ otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl);
return ret;
}
@@ -155,52 +211,92 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
static void isp1704_charger_work(struct work_struct *data)
{
int detect;
+ unsigned long event;
+ unsigned power;
struct isp1704_charger *isp =
container_of(data, struct isp1704_charger, work);
+ static DEFINE_MUTEX(lock);
- /*
- * FIXME Only supporting dedicated chargers even though isp1704 can
- * detect HUB and HOST chargers. If the device has already been
- * enumerated, the detection will break the connection.
- */
- if (isp->otg->state != OTG_STATE_B_IDLE)
- return;
+ event = isp->event;
+ power = isp->max_power;
- /* disable data pullups */
- if (isp->otg->gadget)
- usb_gadget_disconnect(isp->otg->gadget);
+ mutex_lock(&lock);
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ isp->online = true;
+
+ /* detect charger */
+ detect = isp1704_charger_detect(isp);
+
+ if (detect) {
+ isp->present = detect;
+ isp->psy.type = isp1704_charger_type(isp);
+ }
- /* detect charger */
- detect = isp1704_charger_detect(isp);
- if (detect) {
- isp->present = detect;
- power_supply_changed(&isp->psy);
+ switch (isp->psy.type) {
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ isp->current_max = 1800;
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ /*
+ * Only 500mA here or high speed chirp
+ * handshaking may break
+ */
+ isp->current_max = 500;
+ /* FALLTHROUGH */
+ case POWER_SUPPLY_TYPE_USB:
+ default:
+ /* enable data pullups */
+ if (isp->otg->gadget)
+ usb_gadget_connect(isp->otg->gadget);
+ }
+ break;
+ case USB_EVENT_NONE:
+ isp->online = false;
+ isp->current_max = 0;
+ isp->present = 0;
+ isp->psy.type = POWER_SUPPLY_TYPE_USB;
+
+ /*
+ * Disable data pullups. We need to prevent the controller from
+ * enumerating.
+ *
+ * FIXME: This is here to allow charger detection with Host/HUB
+ * chargers. The pullups may be enabled elsewhere, so this can
+ * not be the final solution.
+ */
+ if (isp->otg->gadget)
+ usb_gadget_disconnect(isp->otg->gadget);
+ break;
+ case USB_EVENT_ENUMERATED:
+ if (isp->present)
+ isp->current_max = 1800;
+ else
+ isp->current_max = power;
+ break;
+ default:
+ goto out;
}
- /* enable data pullups */
- if (isp->otg->gadget)
- usb_gadget_connect(isp->otg->gadget);
+ power_supply_changed(&isp->psy);
+out:
+ mutex_unlock(&lock);
}
static int isp1704_notifier_call(struct notifier_block *nb,
- unsigned long event, void *unused)
+ unsigned long event, void *power)
{
struct isp1704_charger *isp =
container_of(nb, struct isp1704_charger, nb);
- switch (event) {
- case USB_EVENT_VBUS:
- schedule_work(&isp->work);
- break;
- case USB_EVENT_NONE:
- if (isp->present) {
- isp->present = 0;
- power_supply_changed(&isp->psy);
- }
- break;
- default:
- return NOTIFY_DONE;
- }
+ isp->event = event;
+
+ if (power)
+ isp->max_power = *((unsigned *)power);
+
+ schedule_work(&isp->work);
return NOTIFY_OK;
}
@@ -216,6 +312,12 @@ static int isp1704_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
val->intval = isp->present;
break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = isp->online;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = isp->current_max;
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = isp->model;
break;
@@ -230,6 +332,8 @@ static int isp1704_charger_get_property(struct power_supply *psy,
static enum power_supply_property power_props[] = {
POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
};
@@ -287,13 +391,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
if (!isp->otg)
goto fail0;
+ isp->dev = &pdev->dev;
+ platform_set_drvdata(pdev, isp);
+
ret = isp1704_test_ulpi(isp);
if (ret < 0)
goto fail1;
- isp->dev = &pdev->dev;
- platform_set_drvdata(pdev, isp);
-
isp->psy.name = "isp1704";
isp->psy.type = POWER_SUPPLY_TYPE_USB;
isp->psy.properties = power_props;
@@ -318,6 +422,23 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
dev_info(isp->dev, "registered with product id %s\n", isp->model);
+ /*
+ * Taking over the D+ pullup.
+ *
+ * FIXME: The device will be disconnected if it was already
+ * enumerated. The charger driver should be always loaded before any
+ * gadget is loaded.
+ */
+ if (isp->otg->gadget)
+ usb_gadget_disconnect(isp->otg->gadget);
+
+ /* Detect charger if VBUS is valid (the cable was already plugged). */
+ ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+ if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
+ isp->event = USB_EVENT_VBUS;
+ schedule_work(&isp->work);
+ }
+
return 0;
fail2:
power_supply_unregister(&isp->psy);
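
In summary, the reworked worker above sets POWER_SUPPLY_PROP_CURRENT_MAX according to what was detected: 1800 mA for a dedicated charger (and for any detected charger once enumeration has happened), 500 mA for a CDP before enumeration so high-speed chirp handshaking is not disturbed, the maximum power value passed in by the transceiver notifier for an ordinary host connection after enumeration, and 0 when VBUS goes away.
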
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index a8108a73593e..02414db6a94c 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/delay.h>
#include <linux/gpio.h>
@@ -47,6 +48,8 @@ struct jz_battery {
struct power_supply battery;
struct delayed_work work;
+
+ struct mutex lock;
};
static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
@@ -68,6 +71,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
unsigned long val;
long voltage;
+ mutex_lock(&battery->lock);
+
INIT_COMPLETION(battery->read_completion);
enable_irq(battery->irq);
@@ -91,6 +96,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
battery->cell->disable(battery->pdev);
disable_irq(battery->irq);
+ mutex_unlock(&battery->lock);
+
return voltage;
}
@@ -240,6 +247,11 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
struct jz_battery *jz_battery;
struct power_supply *battery;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform_data supplied\n");
+ return -ENXIO;
+ }
+
jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
if (!jz_battery) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
@@ -291,6 +303,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
jz_battery->pdev = pdev;
init_completion(&jz_battery->read_completion);
+ mutex_init(&jz_battery->lock);
INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
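
The new mutex serializes jz_battery_read_voltage(): concurrent callers would otherwise share the single read_completion and the ADC cell, so each conversion now runs from enable to disable under battery->lock.
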
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
new file mode 100644
index 000000000000..c5c8805156cb
--- /dev/null
+++ b/drivers/power/max17042_battery.c
@@ -0,0 +1,239 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ * Note that the Maxim 8966 and 8997 are MFDs and this is one of their subdevices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max17040_battery.c
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/power_supply.h>
+#include <linux/power/max17042_battery.h>
+
+enum max17042_register {
+ MAX17042_STATUS = 0x00,
+ MAX17042_VALRT_Th = 0x01,
+ MAX17042_TALRT_Th = 0x02,
+ MAX17042_SALRT_Th = 0x03,
+ MAX17042_AtRate = 0x04,
+ MAX17042_RepCap = 0x05,
+ MAX17042_RepSOC = 0x06,
+ MAX17042_Age = 0x07,
+ MAX17042_TEMP = 0x08,
+ MAX17042_VCELL = 0x09,
+ MAX17042_Current = 0x0A,
+ MAX17042_AvgCurrent = 0x0B,
+ MAX17042_Qresidual = 0x0C,
+ MAX17042_SOC = 0x0D,
+ MAX17042_AvSOC = 0x0E,
+ MAX17042_RemCap = 0x0F,
+ MAX17402_FullCAP = 0x10,
+ MAX17042_TTE = 0x11,
+ MAX17042_V_empty = 0x12,
+
+ MAX17042_RSLOW = 0x14,
+
+ MAX17042_AvgTA = 0x16,
+ MAX17042_Cycles = 0x17,
+ MAX17042_DesignCap = 0x18,
+ MAX17042_AvgVCELL = 0x19,
+ MAX17042_MinMaxTemp = 0x1A,
+ MAX17042_MinMaxVolt = 0x1B,
+ MAX17042_MinMaxCurr = 0x1C,
+ MAX17042_CONFIG = 0x1D,
+ MAX17042_ICHGTerm = 0x1E,
+ MAX17042_AvCap = 0x1F,
+ MAX17042_ManName = 0x20,
+ MAX17042_DevName = 0x21,
+ MAX17042_DevChem = 0x22,
+
+ MAX17042_TempNom = 0x24,
+ MAX17042_TempCold = 0x25,
+ MAX17042_TempHot = 0x26,
+ MAX17042_AIN = 0x27,
+ MAX17042_LearnCFG = 0x28,
+ MAX17042_SHFTCFG = 0x29,
+ MAX17042_RelaxCFG = 0x2A,
+ MAX17042_MiscCFG = 0x2B,
+ MAX17042_TGAIN = 0x2C,
+ MAx17042_TOFF = 0x2D,
+ MAX17042_CGAIN = 0x2E,
+ MAX17042_COFF = 0x2F,
+
+ MAX17042_Q_empty = 0x33,
+ MAX17042_T_empty = 0x34,
+
+ MAX17042_RCOMP0 = 0x38,
+ MAX17042_TempCo = 0x39,
+ MAX17042_Rx = 0x3A,
+ MAX17042_T_empty0 = 0x3B,
+ MAX17042_TaskPeriod = 0x3C,
+ MAX17042_FSTAT = 0x3D,
+
+ MAX17042_SHDNTIMER = 0x3F,
+
+ MAX17042_VFRemCap = 0x4A,
+
+ MAX17042_QH = 0x4D,
+ MAX17042_QL = 0x4E,
+};
+
+struct max17042_chip {
+ struct i2c_client *client;
+ struct power_supply battery;
+ struct max17042_platform_data *pdata;
+};
+
+static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
+{
+ int ret = i2c_smbus_write_word_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int max17042_read_reg(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_word_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static enum power_supply_property max17042_battery_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int max17042_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max17042_chip *chip = container_of(psy,
+ struct max17042_chip, battery);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_AvgVCELL) * 83;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_SOC) / 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __devinit max17042_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct max17042_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->pdata = client->dev.platform_data;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->battery.name = "max17042_battery";
+ chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->battery.get_property = max17042_get_property;
+ chip->battery.properties = max17042_battery_props;
+ chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
+
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+ }
+
+ if (!chip->pdata->enable_current_sense) {
+ max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
+ max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
+ max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ }
+
+ return 0;
+}
+
+static int __devexit max17042_remove(struct i2c_client *client)
+{
+ struct max17042_chip *chip = i2c_get_clientdata(client);
+
+ power_supply_unregister(&chip->battery);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+static const struct i2c_device_id max17042_id[] = {
+ { "max17042", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max17042_id);
+
+static struct i2c_driver max17042_i2c_driver = {
+ .driver = {
+ .name = "max17042",
+ },
+ .probe = max17042_probe,
+ .remove = __devexit_p(max17042_remove),
+ .id_table = max17042_id,
+};
+
+static int __init max17042_init(void)
+{
+ return i2c_add_driver(&max17042_i2c_driver);
+}
+module_init(max17042_init);
+
+static void __exit max17042_exit(void)
+{
+ i2c_del_driver(&max17042_i2c_driver);
+}
+module_exit(max17042_exit);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
+MODULE_LICENSE("GPL");
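
For completeness, the driver binds by I2C device name, so a board would typically declare it via i2c_board_info. The sketch below is hypothetical: the bus number and the 0x36 slave address are assumptions to be confirmed against the actual schematic, and only the enable_current_sense field referenced by probe() is set.

#include <linux/i2c.h>
#include <linux/power/max17042_battery.h>

static struct max17042_platform_data example_max17042_pdata = {
	.enable_current_sense = false,	/* probe() then programs CGAIN/MiscCFG/LearnCFG */
};

static struct i2c_board_info example_fuel_gauge_info __initdata = {
	I2C_BOARD_INFO("max17042", 0x36),	/* address is an assumption; check the board */
	.platform_data = &example_max17042_pdata,
};

/* registered from board init, e.g.:
 * i2c_register_board_info(0, &example_fuel_gauge_info, 1);
 */
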
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5bc1dcf7785e..0b0ff3a936a6 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -201,6 +201,72 @@ static int olpc_bat_get_tech(union power_supply_propval *val)
return ret;
}
+static int olpc_bat_get_charge_full_design(union power_supply_propval *val)
+{
+ uint8_t ec_byte;
+ union power_supply_propval tech;
+ int ret, mfr;
+
+ ret = olpc_bat_get_tech(&tech);
+ if (ret)
+ return ret;
+
+ ec_byte = BAT_ADDR_MFR_TYPE;
+ ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
+ if (ret)
+ return ret;
+
+ mfr = ec_byte >> 4;
+
+ switch (tech.intval) {
+ case POWER_SUPPLY_TECHNOLOGY_NiMH:
+ switch (mfr) {
+ case 1: /* Gold Peak */
+ val->intval = 3000000*.8;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+
+ case POWER_SUPPLY_TECHNOLOGY_LiFe:
+ switch (mfr) {
+ case 1: /* Gold Peak */
+ val->intval = 2800000;
+ break;
+ case 2: /* BYD */
+ val->intval = 3100000;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int olpc_bat_get_charge_now(union power_supply_propval *val)
+{
+ uint8_t soc;
+ union power_supply_propval full;
+ int ret;
+
+ ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &soc, 1);
+ if (ret)
+ return ret;
+
+ ret = olpc_bat_get_charge_full_design(&full);
+ if (ret)
+ return ret;
+
+ val->intval = soc * (full.intval / 100);
+ return 0;
+}
+
/*********************************************************************
* Battery properties
*********************************************************************/
@@ -267,6 +333,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
return ret;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
@@ -274,6 +341,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
@@ -294,6 +362,16 @@ static int olpc_bat_get_property(struct power_supply *psy,
else
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = olpc_bat_get_charge_full_design(val);
+ if (ret)
+ return ret;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = olpc_bat_get_charge_now(val);
+ if (ret)
+ return ret;
+ break;
case POWER_SUPPLY_PROP_TEMP:
ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
@@ -331,16 +409,20 @@ static int olpc_bat_get_property(struct power_supply *psy,
return ret;
}
-static enum power_supply_property olpc_bat_props[] = {
+static enum power_supply_property olpc_xo1_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -348,6 +430,27 @@ static enum power_supply_property olpc_bat_props[] = {
POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
+/* The XO-1.5 does not have the ambient temperature property */
+static enum power_supply_property olpc_xo15_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+};
+
/* EEPROM reading goes completely around the power_supply API, sadly */
#define EEPROM_START 0x20
@@ -419,8 +522,6 @@ static struct device_attribute olpc_bat_error = {
static struct platform_device *bat_pdev;
static struct power_supply olpc_bat = {
- .properties = olpc_bat_props,
- .num_properties = ARRAY_SIZE(olpc_bat_props),
.get_property = olpc_bat_get_property,
.use_for_apm = 1,
};
@@ -466,6 +567,13 @@ static int __init olpc_bat_init(void)
goto ac_failed;
olpc_bat.name = bat_pdev->name;
+ if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+ olpc_bat.properties = olpc_xo15_bat_props;
+ olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
+ } else { /* XO-1 */
+ olpc_bat.properties = olpc_xo1_bat_props;
+ olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
+ }
ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
if (ret)
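
As a sanity check on the new olpc_bat_get_charge_now() arithmetic: for a hypothetical BYD LiFe pack (charge_full_design = 3,100,000 µAh) whose EC reports a state of charge of 50, the result is 50 * (3,100,000 / 100) = 1,550,000 µAh, i.e. half of the design capacity.
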
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 91606bb55318..970f7335d3a7 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -190,10 +190,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
- device_unregister(psy->dev);
+ device_del(dev);
kobject_set_name_failed:
device_add_failed:
- kfree(dev);
+ put_device(dev);
success:
return rc;
}
@@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(power_supply_register);
void power_supply_unregister(struct power_supply *psy)
{
- flush_scheduled_work();
+ cancel_work_sync(&psy->changed_work);
power_supply_remove_triggers(psy);
device_unregister(psy->dev);
}
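
The error-path change above follows the usual driver-core rule: once the device has been registered, its embedded kobject owns the memory, so failure paths must drop the last reference with put_device() (which runs the release callback) rather than kfree(), and a device that was successfully added but must be torn down before that final put is removed with device_del().
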
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index fe16b482e912..4255f2358b13 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -1,5 +1,5 @@
/*
- * iPAQ h1930/h1940/rx1950 battery controler driver
+ * iPAQ h1930/h1940/rx1950 battery controller driver
* Copyright (c) Vasily Khoruzhick
* Based on h1940_battery.c by Arnaud Patard
*
@@ -112,6 +112,13 @@ static int calc_full_volt(int volt_val, int cur_val, int impedance)
return volt_val + cur_val * impedance / 1000;
}
+static int charge_finished(struct s3c_adc_bat *bat)
+{
+ return bat->pdata->gpio_inverted ?
+ !gpio_get_value(bat->pdata->gpio_charge_finished) :
+ gpio_get_value(bat->pdata->gpio_charge_finished);
+}
+
static int s3c_adc_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -140,7 +147,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
if (bat->cable_plugged &&
((bat->pdata->gpio_charge_finished < 0) ||
- !gpio_get_value(bat->pdata->gpio_charge_finished))) {
+ !charge_finished(bat))) {
lut = bat->pdata->lut_acin;
lut_size = bat->pdata->lut_acin_cnt;
}
@@ -236,8 +243,7 @@ static void s3c_adc_bat_work(struct work_struct *work)
}
} else {
if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
- is_charged = gpio_get_value(
- main_bat.pdata->gpio_charge_finished);
+ is_charged = charge_finished(&main_bat);
if (is_charged) {
if (bat->pdata->disable_charger)
bat->pdata->disable_charger();
@@ -427,5 +433,5 @@ static void __exit s3c_adc_bat_exit(void)
module_exit(s3c_adc_bat_exit);
MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
-MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver");
+MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index ee04936b2db5..53f0d3524fcd 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -332,7 +332,7 @@ static struct {
static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
{
/* flush all pending status updates */
- flush_scheduled_work();
+ flush_work_sync(&bat_work);
return 0;
}
@@ -422,7 +422,7 @@ err_psy_reg_jacket:
err_psy_reg_main:
/* see comment in tosa_bat_remove */
- flush_scheduled_work();
+ cancel_work_sync(&bat_work);
i--;
err_gpio:
@@ -445,12 +445,11 @@ static int __devexit tosa_bat_remove(struct platform_device *dev)
power_supply_unregister(&tosa_bat_main.psy);
/*
- * now flush all pending work.
- * we won't get any more schedules, since all
- * sources (isr and external_power_changed)
- * are unregistered now.
+ * Now cancel the bat_work. We won't get any more schedules,
+ * since all sources (isr and external_power_changed) are
+ * unregistered now.
*/
- flush_scheduled_work();
+ cancel_work_sync(&bat_work);
for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
gpio_free(gpios[i].gpio);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 5071d85ec12d..156559e56fa5 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -147,7 +147,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
#ifdef CONFIG_PM
static int wm97xx_bat_suspend(struct device *dev)
{
- flush_scheduled_work();
+ flush_work_sync(&bat_work);
return 0;
}
@@ -273,7 +273,7 @@ static int __devexit wm97xx_bat_remove(struct platform_device *dev)
free_irq(gpio_to_irq(pdata->charge_gpio), dev);
gpio_free(pdata->charge_gpio);
}
- flush_scheduled_work();
+ cancel_work_sync(&bat_work);
power_supply_unregister(&bat_ps);
kfree(prop);
return 0;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 85064a9f649e..e5ed52d71937 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -254,7 +254,7 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
struct z2_charger *charger = i2c_get_clientdata(client);
struct z2_battery_info *info = charger->info;
- flush_scheduled_work();
+ cancel_work_sync(&charger->bat_work);
power_supply_unregister(&charger->batt_ps);
kfree(charger->batt_ps.properties);
@@ -271,7 +271,9 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
#ifdef CONFIG_PM
static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
{
- flush_scheduled_work();
+ struct z2_charger *charger = i2c_get_clientdata(client);
+
+ flush_work_sync(&charger->bat_work);
return 0;
}
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 1afe4e03440f..f0d3376b58ba 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -30,6 +30,17 @@ config PPS_DEBUG
messages to the system log. Select this if you are having a
problem with PPS support and want to see more of what is going on.
+config NTP_PPS
+ bool "PPS kernel consumer support"
+ depends on PPS && !NO_HZ
+ help
+ This option adds support for direct in-kernel time
+ synchronization using an external PPS signal.
+
+ It doesn't work on tickless systems at the moment.
+
source drivers/pps/clients/Kconfig
+source drivers/pps/generators/Kconfig
+
endmenu
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index 98960ddd3188..4483eaadaddd 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -3,7 +3,8 @@
#
pps_core-y := pps.o kapi.o sysfs.o
+pps_core-$(CONFIG_NTP_PPS) += kc.o
obj-$(CONFIG_PPS) := pps_core.o
-obj-y += clients/
+obj-y += clients/ generators/
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 4e801bd7254f..8520a7f4dd62 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -22,4 +22,11 @@ config PPS_CLIENT_LDISC
If you say yes here you get support for a PPS source connected
with the CD (Carrier Detect) pin of your serial port.
+config PPS_CLIENT_PARPORT
+ tristate "Parallel port PPS client"
+ depends on PPS && PARPORT
+ help
+ If you say yes here you get support for a PPS source connected
+ with the interrupt pin of your parallel port.
+
endif
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 812c9b19b430..42517da07049 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_PPS_CLIENT_KTIMER) += pps-ktimer.o
obj-$(CONFIG_PPS_CLIENT_LDISC) += pps-ldisc.o
+obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
ifeq ($(CONFIG_PPS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index e7ef5b8186d0..82583b0ff82d 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -31,7 +32,7 @@
* Global variables
*/
-static int source;
+static struct pps_device *pps;
static struct timer_list ktimer;
/*
@@ -40,19 +41,12 @@ static struct timer_list ktimer;
static void pps_ktimer_event(unsigned long ptr)
{
- struct timespec __ts;
- struct pps_ktime ts;
+ struct pps_event_time ts;
/* First of all we get the time stamp... */
- getnstimeofday(&__ts);
+ pps_get_ts(&ts);
- pr_info("PPS event at %lu\n", jiffies);
-
- /* ... and translate it to PPS time data struct */
- ts.sec = __ts.tv_sec;
- ts.nsec = __ts.tv_nsec;
-
- pps_event(source, &ts, PPS_CAPTUREASSERT, NULL);
+ pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
mod_timer(&ktimer, jiffies + HZ);
}
@@ -61,12 +55,11 @@ static void pps_ktimer_event(unsigned long ptr)
* The echo function
*/
-static void pps_ktimer_echo(int source, int event, void *data)
+static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
{
- pr_info("echo %s %s for source %d\n",
+ dev_info(pps->dev, "echo %s %s\n",
event & PPS_CAPTUREASSERT ? "assert" : "",
- event & PPS_CAPTURECLEAR ? "clear" : "",
- source);
+ event & PPS_CAPTURECLEAR ? "clear" : "");
}
/*
@@ -89,30 +82,27 @@ static struct pps_source_info pps_ktimer_info = {
static void __exit pps_ktimer_exit(void)
{
- del_timer_sync(&ktimer);
- pps_unregister_source(source);
+ dev_info(pps->dev, "ktimer PPS source unregistered\n");
- pr_info("ktimer PPS source unregistered\n");
+ del_timer_sync(&ktimer);
+ pps_unregister_source(pps);
}
static int __init pps_ktimer_init(void)
{
- int ret;
-
- ret = pps_register_source(&pps_ktimer_info,
+ pps = pps_register_source(&pps_ktimer_info,
PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
- if (ret < 0) {
- printk(KERN_ERR "cannot register ktimer source\n");
- return ret;
+ if (pps == NULL) {
+ pr_err("cannot register PPS source\n");
+ return -ENOMEM;
}
- source = ret;
setup_timer(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
- pr_info("ktimer PPS source registered at %d\n", source);
+ dev_info(pps->dev, "ktimer PPS source registered\n");
- return 0;
+ return 0;
}
module_init(pps_ktimer_init);
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 8e1932d29fd4..79451f2dea6a 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -19,6 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
@@ -27,30 +29,18 @@
#define PPS_TTY_MAGIC 0x0001
static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
- struct timespec *ts)
+ struct pps_event_time *ts)
{
- int id = (long)tty->disc_data;
- struct timespec __ts;
- struct pps_ktime pps_ts;
-
- /* First of all we get the time stamp... */
- getnstimeofday(&__ts);
-
- /* Does caller give us a timestamp? */
- if (ts) { /* Yes. Let's use it! */
- pps_ts.sec = ts->tv_sec;
- pps_ts.nsec = ts->tv_nsec;
- } else { /* No. Do it ourself! */
- pps_ts.sec = __ts.tv_sec;
- pps_ts.nsec = __ts.tv_nsec;
- }
+ struct pps_device *pps = (struct pps_device *)tty->disc_data;
+
+ BUG_ON(pps == NULL);
/* Now do the PPS event report */
- pps_event(id, &pps_ts, status ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR,
- NULL);
+ pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+ PPS_CAPTURECLEAR, NULL);
- pr_debug("PPS %s at %lu on source #%d\n",
- status ? "assert" : "clear", jiffies, id);
+ dev_dbg(pps->dev, "PPS %s at %lu\n",
+ status ? "assert" : "clear", jiffies);
}
static int (*alias_n_tty_open)(struct tty_struct *tty);
@@ -60,6 +50,7 @@ static int pps_tty_open(struct tty_struct *tty)
struct pps_source_info info;
struct tty_driver *drv = tty->driver;
int index = tty->index + drv->name_base;
+ struct pps_device *pps;
int ret;
info.owner = THIS_MODULE;
@@ -70,34 +61,42 @@ static int pps_tty_open(struct tty_struct *tty)
PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
PPS_CANWAIT | PPS_TSFMT_TSPEC;
- ret = pps_register_source(&info, PPS_CAPTUREBOTH | \
+ pps = pps_register_source(&info, PPS_CAPTUREBOTH | \
PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
- if (ret < 0) {
+ if (pps == NULL) {
pr_err("cannot register PPS source \"%s\"\n", info.path);
- return ret;
+ return -ENOMEM;
}
- tty->disc_data = (void *)(long)ret;
+ tty->disc_data = pps;
/* Should open N_TTY ldisc too */
ret = alias_n_tty_open(tty);
- if (ret < 0)
- pps_unregister_source((long)tty->disc_data);
+ if (ret < 0) {
+ pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+ goto err_unregister;
+ }
- pr_info("PPS source #%d \"%s\" added\n", ret, info.path);
+ dev_info(pps->dev, "source \"%s\" added\n", info.path);
return 0;
+
+err_unregister:
+ tty->disc_data = NULL;
+ pps_unregister_source(pps);
+ return ret;
}
static void (*alias_n_tty_close)(struct tty_struct *tty);
static void pps_tty_close(struct tty_struct *tty)
{
- int id = (long)tty->disc_data;
+ struct pps_device *pps = (struct pps_device *)tty->disc_data;
- pps_unregister_source(id);
alias_n_tty_close(tty);
- pr_info("PPS source #%d removed\n", id);
+ tty->disc_data = NULL;
+ dev_info(pps->dev, "removed\n");
+ pps_unregister_source(pps);
}
static struct tty_ldisc_ops pps_ldisc_ops;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
new file mode 100644
index 000000000000..c571d6dd8f61
--- /dev/null
+++ b/drivers/pps/clients/pps_parport.c
@@ -0,0 +1,258 @@
+/*
+ * pps_parport.c -- kernel parallel port PPS client
+ *
+ *
+ * Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * implement echo over SEL pin
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irqnr.h>
+#include <linux/time.h>
+#include <linux/parport.h>
+#include <linux/pps_kernel.h>
+
+#define DRVDESC "parallel port PPS client"
+
+/* module parameters */
+
+#define CLEAR_WAIT_MAX 100
+#define CLEAR_WAIT_MAX_ERRORS 5
+
+static unsigned int clear_wait = 100;
+MODULE_PARM_DESC(clear_wait,
+ "Maximum number of port reads when polling for signal clear,"
+ " zero turns clear edge capture off entirely");
+module_param(clear_wait, uint, 0);
+
+
+/* internal per port structure */
+struct pps_client_pp {
+ struct pardevice *pardev; /* parport device */
+ struct pps_device *pps; /* PPS device */
+ unsigned int cw; /* port clear timeout */
+ unsigned int cw_err; /* number of timeouts */
+};
+
+static inline int signal_is_set(struct parport *port)
+{
+ return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0;
+}
+
+/* parport interrupt handler */
+static void parport_irq(void *handle)
+{
+ struct pps_event_time ts_assert, ts_clear;
+ struct pps_client_pp *dev = handle;
+ struct parport *port = dev->pardev->port;
+ unsigned int i;
+ unsigned long flags;
+
+ /* first of all we get the time stamp... */
+ pps_get_ts(&ts_assert);
+
+ if (dev->cw == 0)
+ /* clear edge capture disabled */
+ goto out_assert;
+
+ /* try capture the clear edge */
+
+ /* We have to disable interrupts here. The idea is to prevent
+ * other interrupts on the same processor from introducing random
+ * lags while polling the port. Reading from an IO port is known
+ * to take approximately 1us, while other interrupt handlers can
+ * potentially take much longer.
+ *
+ * Interrupts won't be disabled for a long time because the
+ * number of polls is limited by clear_wait parameter which is
+ * kept rather low. So it should never be an issue.
+ */
+ local_irq_save(flags);
+ /* check the signal (no signal means the pulse is lost this time) */
+ if (!signal_is_set(port)) {
+ local_irq_restore(flags);
+ dev_err(dev->pps->dev, "lost the signal\n");
+ goto out_assert;
+ }
+
+ /* poll the port until the signal is unset */
+ for (i = dev->cw; i; i--)
+ if (!signal_is_set(port)) {
+ pps_get_ts(&ts_clear);
+ local_irq_restore(flags);
+ dev->cw_err = 0;
+ goto out_both;
+ }
+ local_irq_restore(flags);
+
+ /* timeout */
+ dev->cw_err++;
+ if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+ dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+ " timeouts\n", dev->cw_err);
+ dev->cw = 0;
+ dev->cw_err = 0;
+ }
+
+out_assert:
+ /* fire assert event */
+ pps_event(dev->pps, &ts_assert,
+ PPS_CAPTUREASSERT, NULL);
+ return;
+
+out_both:
+ /* fire assert event */
+ pps_event(dev->pps, &ts_assert,
+ PPS_CAPTUREASSERT, NULL);
+ /* fire clear event */
+ pps_event(dev->pps, &ts_clear,
+ PPS_CAPTURECLEAR, NULL);
+ return;
+}
+
+/* the PPS echo function */
+static void pps_echo(struct pps_device *pps, int event, void *data)
+{
+ dev_info(pps->dev, "echo %s %s\n",
+ event & PPS_CAPTUREASSERT ? "assert" : "",
+ event & PPS_CAPTURECLEAR ? "clear" : "");
+}
+
+static void parport_attach(struct parport *port)
+{
+ struct pps_client_pp *device;
+ struct pps_source_info info = {
+ .name = KBUILD_MODNAME,
+ .path = "",
+ .mode = PPS_CAPTUREBOTH | \
+ PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
+ PPS_ECHOASSERT | PPS_ECHOCLEAR | \
+ PPS_CANWAIT | PPS_TSFMT_TSPEC,
+ .echo = pps_echo,
+ .owner = THIS_MODULE,
+ .dev = NULL
+ };
+
+ device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
+ if (!device) {
+ pr_err("memory allocation failed, not attaching\n");
+ return;
+ }
+
+ device->pardev = parport_register_device(port, KBUILD_MODNAME,
+ NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
+ if (!device->pardev) {
+ pr_err("couldn't register with %s\n", port->name);
+ goto err_free;
+ }
+
+ if (parport_claim_or_block(device->pardev) < 0) {
+ pr_err("couldn't claim %s\n", port->name);
+ goto err_unregister_dev;
+ }
+
+ device->pps = pps_register_source(&info,
+ PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
+ if (device->pps == NULL) {
+ pr_err("couldn't register PPS source\n");
+ goto err_release_dev;
+ }
+
+ device->cw = clear_wait;
+
+ port->ops->enable_irq(port);
+
+ pr_info("attached to %s\n", port->name);
+
+ return;
+
+err_release_dev:
+ parport_release(device->pardev);
+err_unregister_dev:
+ parport_unregister_device(device->pardev);
+err_free:
+ kfree(device);
+}
+
+static void parport_detach(struct parport *port)
+{
+ struct pardevice *pardev = port->cad;
+ struct pps_client_pp *device;
+
+ /* FIXME: oooh, this is ugly! */
+ if (strcmp(pardev->name, KBUILD_MODNAME))
+ /* not our port */
+ return;
+
+ device = pardev->private;
+
+ port->ops->disable_irq(port);
+ pps_unregister_source(device->pps);
+ parport_release(pardev);
+ parport_unregister_device(pardev);
+ kfree(device);
+}
+
+static struct parport_driver pps_parport_driver = {
+ .name = KBUILD_MODNAME,
+ .attach = parport_attach,
+ .detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_parport_init(void)
+{
+ int ret;
+
+ pr_info(DRVDESC "\n");
+
+ if (clear_wait > CLEAR_WAIT_MAX) {
+ pr_err("clear_wait value should be not greater"
+ " then %d\n", CLEAR_WAIT_MAX);
+ return -EINVAL;
+ }
+
+ ret = parport_register_driver(&pps_parport_driver);
+ if (ret) {
+ pr_err("unable to register with parport\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit pps_parport_exit(void)
+{
+ parport_unregister_driver(&pps_parport_driver);
+}
+
+module_init(pps_parport_init);
+module_exit(pps_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
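
On the clear_wait handling above: the module parameter bounds how many status-register polls are spent waiting for the clear edge after each assert interrupt; loading with clear_wait=0 turns clear-edge capture off entirely, and after CLEAR_WAIT_MAX_ERRORS (5) consecutive polling timeouts the driver disables it on its own by zeroing dev->cw.
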
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
new file mode 100644
index 000000000000..e4c4f3dc0728
--- /dev/null
+++ b/drivers/pps/generators/Kconfig
@@ -0,0 +1,13 @@
+#
+# PPS generators configuration
+#
+
+comment "PPS generators support"
+
+config PPS_GENERATOR_PARPORT
+ tristate "Parallel port PPS signal generator"
+ depends on PARPORT && BROKEN
+ help
+ If you say yes here you get support for a PPS signal generator which
+ utilizes the STROBE pin of a parallel port to send PPS signals. It uses
+ the parport abstraction layer and hrtimers to precisely control the signal.
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
new file mode 100644
index 000000000000..303304a6b8ec
--- /dev/null
+++ b/drivers/pps/generators/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PPS generators.
+#
+
+obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+
+ifeq ($(CONFIG_PPS_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
new file mode 100644
index 000000000000..b93af3ebb5ba
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -0,0 +1,282 @@
+/*
+ * pps_gen_parport.c -- kernel parallel port PPS signal generator
+ *
+ *
+ * Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * fix issues when realtime clock is adjusted in a leap
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/parport.h>
+
+#define DRVDESC "parallel port PPS signal generator"
+
+#define SIGNAL 0
+#define NO_SIGNAL PARPORT_CONTROL_STROBE
+
+/* module parameters */
+
+#define SEND_DELAY_MAX 100000
+
+static unsigned int send_delay = 30000;
+MODULE_PARM_DESC(delay,
+ "Delay between setting and dropping the signal (ns)");
+module_param_named(delay, send_delay, uint, 0);
+
+
+#define SAFETY_INTERVAL 3000 /* set the hrtimer earlier for safety (ns) */
+
+/* internal per port structure */
+struct pps_generator_pp {
+ struct pardevice *pardev; /* parport device */
+ struct hrtimer timer;
+ long port_write_time; /* calibrated port write time (ns) */
+};
+
+static struct pps_generator_pp device = {
+ .pardev = NULL,
+};
+
+static int attached;
+
+/* calibrated time between a hrtimer event and the reaction */
+static long hrtimer_error = SAFETY_INTERVAL;
+
+/* the kernel hrtimer event */
+static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
+{
+ struct timespec expire_time, ts1, ts2, ts3, dts;
+ struct pps_generator_pp *dev;
+ struct parport *port;
+ long lim, delta;
+ unsigned long flags;
+
+ /* We have to disable interrupts here. The idea is to prevent
+ * other interrupts on the same processor from introducing random
+ * lags while polling the clock. getnstimeofday() takes <1us on
+ * most machines, while other interrupt handlers can potentially
+ * take much longer.
+ *
+ * NB: approx time with blocked interrupts =
+ * send_delay + 3 * SAFETY_INTERVAL
+ */
+ local_irq_save(flags);
+
+ /* first of all we get the time stamp... */
+ getnstimeofday(&ts1);
+ expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+ dev = container_of(timer, struct pps_generator_pp, timer);
+ lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
+
+ /* check if we are late */
+ if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
+ local_irq_restore(flags);
+ pr_err("we are late this time %ld.%09ld\n",
+ ts1.tv_sec, ts1.tv_nsec);
+ goto done;
+ }
+
+ /* busy loop until the time is right for an assert edge */
+ do {
+ getnstimeofday(&ts2);
+ } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+ /* set the signal */
+ port = dev->pardev->port;
+ port->ops->write_control(port, SIGNAL);
+
+ /* busy loop until the time is right for a clear edge */
+ lim = NSEC_PER_SEC - dev->port_write_time;
+ do {
+ getnstimeofday(&ts2);
+ } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+ /* unset the signal */
+ port->ops->write_control(port, NO_SIGNAL);
+
+ getnstimeofday(&ts3);
+
+ local_irq_restore(flags);
+
+ /* update calibrated port write time */
+ dts = timespec_sub(ts3, ts2);
+ dev->port_write_time =
+ (dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+
+done:
+ /* update calibrated hrtimer error */
+ dts = timespec_sub(ts1, expire_time);
+ delta = timespec_to_ns(&dts);
+ /* If the new error value is bigger than the old one, use the new
+ * value; if not, slowly move towards the new value. This
+ * way it should be safe in bad conditions and efficient in
+ * good conditions.
+ */
+ if (delta >= hrtimer_error)
+ hrtimer_error = delta;
+ else
+ hrtimer_error = (3 * hrtimer_error + delta) >> 2;
+
+ /* update the hrtimer expire time */
+ hrtimer_set_expires(timer,
+ ktime_set(expire_time.tv_sec + 1,
+ NSEC_PER_SEC - (send_delay +
+ dev->port_write_time + SAFETY_INTERVAL +
+ 2 * hrtimer_error)));
+
+ return HRTIMER_RESTART;
+}
+
+/* calibrate port write time */
+#define PORT_NTESTS_SHIFT 5
+static void calibrate_port(struct pps_generator_pp *dev)
+{
+ struct parport *port = dev->pardev->port;
+ int i;
+ long acc = 0;
+
+ for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
+ struct timespec a, b;
+ unsigned long irq_flags;
+
+ local_irq_save(irq_flags);
+ getnstimeofday(&a);
+ port->ops->write_control(port, NO_SIGNAL);
+ getnstimeofday(&b);
+ local_irq_restore(irq_flags);
+
+ b = timespec_sub(b, a);
+ acc += timespec_to_ns(&b);
+ }
+
+ dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
+ pr_info("port write takes %ldns\n", dev->port_write_time);
+}
+
+static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
+{
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+
+ return ktime_set(ts.tv_sec +
+ ((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
+ NSEC_PER_SEC - (send_delay +
+ dev->port_write_time + 3 * SAFETY_INTERVAL));
+}
+
+static void parport_attach(struct parport *port)
+{
+ if (attached) {
+ /* we already have a port */
+ return;
+ }
+
+ device.pardev = parport_register_device(port, KBUILD_MODNAME,
+ NULL, NULL, NULL, PARPORT_FLAG_EXCL, &device);
+ if (!device.pardev) {
+ pr_err("couldn't register with %s\n", port->name);
+ return;
+ }
+
+ if (parport_claim_or_block(device.pardev) < 0) {
+ pr_err("couldn't claim %s\n", port->name);
+ goto err_unregister_dev;
+ }
+
+ pr_info("attached to %s\n", port->name);
+ attached = 1;
+
+ calibrate_port(&device);
+
+ hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ device.timer.function = hrtimer_event;
+#ifdef CONFIG_PREEMPT_RT
+ /* hrtimer interrupt will run in the interrupt context with this */
+ device.timer.irqsafe = 1;
+#endif
+
+ hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
+
+ return;
+
+err_unregister_dev:
+ parport_unregister_device(device.pardev);
+}
+
+static void parport_detach(struct parport *port)
+{
+ if (port->cad != device.pardev)
+ return; /* not our port */
+
+ hrtimer_cancel(&device.timer);
+ parport_release(device.pardev);
+ parport_unregister_device(device.pardev);
+}
+
+static struct parport_driver pps_gen_parport_driver = {
+ .name = KBUILD_MODNAME,
+ .attach = parport_attach,
+ .detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_gen_parport_init(void)
+{
+ int ret;
+
+ pr_info(DRVDESC "\n");
+
+ if (send_delay > SEND_DELAY_MAX) {
+ pr_err("delay value should be not greater"
+ " then %d\n", SEND_DELAY_MAX);
+ return -EINVAL;
+ }
+
+ ret = parport_register_driver(&pps_gen_parport_driver);
+ if (ret) {
+ pr_err("unable to register with parport\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit pps_gen_parport_exit(void)
+{
+ parport_unregister_driver(&pps_gen_parport_driver);
+ pr_info("hrtimer avg error is %ldns\n", hrtimer_error);
+}
+
+module_init(pps_gen_parport_init);
+module_exit(pps_gen_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 1aa02db3ff4e..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -19,24 +19,20 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/time.h>
+#include <linux/timex.h>
#include <linux/spinlock.h>
-#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
-/*
- * Global variables
- */
-
-DEFINE_SPINLOCK(pps_idr_lock);
-DEFINE_IDR(pps_idr);
+#include "kc.h"
/*
* Local functions
@@ -60,60 +56,6 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
* Exported functions
*/
-/* pps_get_source - find a PPS source
- * @source: the PPS source ID.
- *
- * This function is used to find an already registered PPS source into the
- * system.
- *
- * The function returns NULL if found nothing, otherwise it returns a pointer
- * to the PPS source data struct (the refcounter is incremented by 1).
- */
-
-struct pps_device *pps_get_source(int source)
-{
- struct pps_device *pps;
- unsigned long flags;
-
- spin_lock_irqsave(&pps_idr_lock, flags);
-
- pps = idr_find(&pps_idr, source);
- if (pps != NULL)
- atomic_inc(&pps->usage);
-
- spin_unlock_irqrestore(&pps_idr_lock, flags);
-
- return pps;
-}
-
-/* pps_put_source - free the PPS source data
- * @pps: a pointer to the PPS source.
- *
- * This function is used to free a PPS data struct if its refcount is 0.
- */
-
-void pps_put_source(struct pps_device *pps)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pps_idr_lock, flags);
- BUG_ON(atomic_read(&pps->usage) == 0);
-
- if (!atomic_dec_and_test(&pps->usage)) {
- pps = NULL;
- goto exit;
- }
-
- /* No more reference to the PPS source. We can safely remove the
- * PPS data struct.
- */
- idr_remove(&pps_idr, pps->id);
-
-exit:
- spin_unlock_irqrestore(&pps_idr_lock, flags);
- kfree(pps);
-}
-
/* pps_register_source - add a PPS source in the system
* @info: the PPS info struct
* @default_params: the default PPS parameters of the new source
@@ -122,31 +64,31 @@ exit:
* source is described by info's fields and it will have, as default PPS
* parameters, the ones specified into default_params.
*
- * The function returns, in case of success, the PPS source ID.
+ * The function returns, in case of success, the PPS device. Otherwise NULL.
*/
-int pps_register_source(struct pps_source_info *info, int default_params)
+struct pps_device *pps_register_source(struct pps_source_info *info,
+ int default_params)
{
struct pps_device *pps;
- int id;
int err;
/* Sanity checks */
if ((info->mode & default_params) != default_params) {
- printk(KERN_ERR "pps: %s: unsupported default parameters\n",
+ pr_err("%s: unsupported default parameters\n",
info->name);
err = -EINVAL;
goto pps_register_source_exit;
}
if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
info->echo == NULL) {
- printk(KERN_ERR "pps: %s: echo function is not defined\n",
+ pr_err("%s: echo function is not defined\n",
info->name);
err = -EINVAL;
goto pps_register_source_exit;
}
if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
- printk(KERN_ERR "pps: %s: unspecified time format\n",
+ pr_err("%s: unspecified time format\n",
info->name);
err = -EINVAL;
goto pps_register_source_exit;
@@ -168,94 +110,48 @@ int pps_register_source(struct pps_source_info *info, int default_params)
init_waitqueue_head(&pps->queue);
spin_lock_init(&pps->lock);
- atomic_set(&pps->usage, 1);
-
- /* Get new ID for the new PPS source */
- if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
- err = -ENOMEM;
- goto kfree_pps;
- }
-
- spin_lock_irq(&pps_idr_lock);
-
- /* Now really allocate the PPS source.
- * After idr_get_new() calling the new source will be freely available
- * into the kernel.
- */
- err = idr_get_new(&pps_idr, pps, &id);
- if (err < 0) {
- spin_unlock_irq(&pps_idr_lock);
- goto kfree_pps;
- }
-
- id = id & MAX_ID_MASK;
- if (id >= PPS_MAX_SOURCES) {
- spin_unlock_irq(&pps_idr_lock);
-
- printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
- info->name);
- err = -EBUSY;
- goto free_idr;
- }
- pps->id = id;
-
- spin_unlock_irq(&pps_idr_lock);
/* Create the char device */
err = pps_register_cdev(pps);
if (err < 0) {
- printk(KERN_ERR "pps: %s: unable to create char device\n",
+ pr_err("%s: unable to create char device\n",
info->name);
- goto free_idr;
+ goto kfree_pps;
}
- pr_info("new PPS source %s at ID %d\n", info->name, id);
+ dev_info(pps->dev, "new PPS source %s\n", info->name);
- return id;
-
-free_idr:
- spin_lock_irq(&pps_idr_lock);
- idr_remove(&pps_idr, id);
- spin_unlock_irq(&pps_idr_lock);
+ return pps;
kfree_pps:
kfree(pps);
pps_register_source_exit:
- printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
+ pr_err("%s: unable to register source\n", info->name);
- return err;
+ return NULL;
}
EXPORT_SYMBOL(pps_register_source);
/* pps_unregister_source - remove a PPS source from the system
- * @source: the PPS source ID
+ * @pps: the PPS source
*
* This function is used to remove a previously registered PPS source from
* the system.
*/
-void pps_unregister_source(int source)
+void pps_unregister_source(struct pps_device *pps)
{
- struct pps_device *pps;
-
- spin_lock_irq(&pps_idr_lock);
- pps = idr_find(&pps_idr, source);
-
- if (!pps) {
- BUG();
- spin_unlock_irq(&pps_idr_lock);
- return;
- }
- spin_unlock_irq(&pps_idr_lock);
-
+ pps_kc_remove(pps);
pps_unregister_cdev(pps);
- pps_put_source(pps);
+
+ /* don't have to kfree(pps) here because it will be done on
+ * device destruction */
}
EXPORT_SYMBOL(pps_unregister_source);
/* pps_event - register a PPS event into the system
- * @source: the PPS source ID
+ * @pps: the PPS device
* @ts: the event timestamp
* @event: the event type
* @data: userdef pointer
@@ -263,78 +159,72 @@ EXPORT_SYMBOL(pps_unregister_source);
* This function is used by each PPS client in order to register a new
* PPS event into the system (it's usually called inside an IRQ handler).
*
- * If an echo function is associated with the PPS source it will be called
+ * If an echo function is associated with the PPS device it will be called
* as:
- * pps->info.echo(source, event, data);
+ * pps->info.echo(pps, event, data);
*/
-
-void pps_event(int source, struct pps_ktime *ts, int event, void *data)
+void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ void *data)
{
- struct pps_device *pps;
unsigned long flags;
int captured = 0;
+ struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
- if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
- printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
- event, source);
- return;
- }
+ /* check event type */
+ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
- pps = pps_get_source(source);
- if (!pps)
- return;
+ dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
+ ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
- pr_debug("PPS event on source %d at %llu.%06u\n",
- pps->id, (unsigned long long) ts->sec, ts->nsec);
+ timespec_to_pps_ktime(&ts_real, ts->ts_real);
spin_lock_irqsave(&pps->lock, flags);
/* Must call the echo function? */
if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
- pps->info.echo(source, event, data);
+ pps->info.echo(pps, event, data);
/* Check the event */
pps->current_mode = pps->params.mode;
- if ((event & PPS_CAPTUREASSERT) &
- (pps->params.mode & PPS_CAPTUREASSERT)) {
+ if (event & pps->params.mode & PPS_CAPTUREASSERT) {
/* We have to add an offset? */
if (pps->params.mode & PPS_OFFSETASSERT)
- pps_add_offset(ts, &pps->params.assert_off_tu);
+ pps_add_offset(&ts_real,
+ &pps->params.assert_off_tu);
/* Save the time stamp */
- pps->assert_tu = *ts;
+ pps->assert_tu = ts_real;
pps->assert_sequence++;
- pr_debug("capture assert seq #%u for source %d\n",
- pps->assert_sequence, source);
+ dev_dbg(pps->dev, "capture assert seq #%u\n",
+ pps->assert_sequence);
captured = ~0;
}
- if ((event & PPS_CAPTURECLEAR) &
- (pps->params.mode & PPS_CAPTURECLEAR)) {
+ if (event & pps->params.mode & PPS_CAPTURECLEAR) {
/* We have to add an offset? */
if (pps->params.mode & PPS_OFFSETCLEAR)
- pps_add_offset(ts, &pps->params.clear_off_tu);
+ pps_add_offset(&ts_real,
+ &pps->params.clear_off_tu);
/* Save the time stamp */
- pps->clear_tu = *ts;
+ pps->clear_tu = ts_real;
pps->clear_sequence++;
- pr_debug("capture clear seq #%u for source %d\n",
- pps->clear_sequence, source);
+ dev_dbg(pps->dev, "capture clear seq #%u\n",
+ pps->clear_sequence);
captured = ~0;
}
- /* Wake up iif captured somthing */
+ pps_kc_event(pps, ts, event);
+
+ /* Wake up if captured something */
if (captured) {
- pps->go = ~0;
- wake_up_interruptible(&pps->queue);
+ pps->last_ev++;
+ wake_up_interruptible_all(&pps->queue);
kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
}
spin_unlock_irqrestore(&pps->lock, flags);
-
- /* Now we can release the PPS source for (possible) deregistration */
- pps_put_source(pps);
}
EXPORT_SYMBOL(pps_event);
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
new file mode 100644
index 000000000000..079e930b1938
--- /dev/null
+++ b/drivers/pps/kc.c
@@ -0,0 +1,122 @@
+/*
+ * PPS kernel consumer API
+ *
+ * Copyright (C) 2009-2010 Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pps_kernel.h>
+
+#include "kc.h"
+
+/*
+ * Global variables
+ */
+
+/* state variables to bind kernel consumer */
+DEFINE_SPINLOCK(pps_kc_hardpps_lock);
+/* PPS API (RFC 2783): current source and mode for kernel consumer */
+struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */
+int pps_kc_hardpps_mode; /* mode bits for kernel consumer */
+
+/* pps_kc_bind - control PPS kernel consumer binding
+ * @pps: the PPS source
+ * @bind_args: kernel consumer bind parameters
+ *
+ * This function is used to bind or unbind PPS kernel consumer according to
+ * supplied parameters. Should not be called in interrupt context.
+ */
+int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+{
+ /* Check if another consumer is already bound */
+ spin_lock_irq(&pps_kc_hardpps_lock);
+
+ if (bind_args->edge == 0)
+ if (pps_kc_hardpps_dev == pps) {
+ pps_kc_hardpps_mode = 0;
+ pps_kc_hardpps_dev = NULL;
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+ dev_info(pps->dev, "unbound kernel"
+ " consumer\n");
+ } else {
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+ dev_err(pps->dev, "selected kernel consumer"
+ " is not bound\n");
+ return -EINVAL;
+ }
+ else
+ if (pps_kc_hardpps_dev == NULL ||
+ pps_kc_hardpps_dev == pps) {
+ pps_kc_hardpps_mode = bind_args->edge;
+ pps_kc_hardpps_dev = pps;
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+ dev_info(pps->dev, "bound kernel consumer: "
+ "edge=0x%x\n", bind_args->edge);
+ } else {
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+ dev_err(pps->dev, "another kernel consumer"
+ " is already bound\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* pps_kc_remove - unbind kernel consumer on PPS source removal
+ * @pps: the PPS source
+ *
+ * This function is used to disable the kernel consumer on PPS source removal
+ * if this source was bound to the PPS kernel consumer. Can be called on any
+ * source safely. Should not be called in interrupt context.
+ */
+void pps_kc_remove(struct pps_device *pps)
+{
+ spin_lock_irq(&pps_kc_hardpps_lock);
+ if (pps == pps_kc_hardpps_dev) {
+ pps_kc_hardpps_mode = 0;
+ pps_kc_hardpps_dev = NULL;
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+ dev_info(pps->dev, "unbound kernel consumer"
+ " on device removal\n");
+ } else
+ spin_unlock_irq(&pps_kc_hardpps_lock);
+}
+
+/* pps_kc_event - call hardpps() on PPS event
+ * @pps: the PPS source
+ * @ts: PPS event timestamp
+ * @event: PPS event edge
+ *
+ * This function calls hardpps() when an event from bound PPS source occurs.
+ */
+void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts,
+ int event)
+{
+ unsigned long flags;
+
+ /* Pass some events to kernel consumer if activated */
+ spin_lock_irqsave(&pps_kc_hardpps_lock, flags);
+ if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode)
+ hardpps(&ts->ts_real, &ts->ts_raw);
+ spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags);
+}
diff --git a/drivers/pps/kc.h b/drivers/pps/kc.h
new file mode 100644
index 000000000000..d296fcd0a175
--- /dev/null
+++ b/drivers/pps/kc.h
@@ -0,0 +1,46 @@
+/*
+ * PPS kernel consumer API header
+ *
+ * Copyright (C) 2009-2010 Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_PPS_KC_H
+#define LINUX_PPS_KC_H
+
+#include <linux/errno.h>
+#include <linux/pps_kernel.h>
+
+#ifdef CONFIG_NTP_PPS
+
+extern int pps_kc_bind(struct pps_device *pps,
+ struct pps_bind_args *bind_args);
+extern void pps_kc_remove(struct pps_device *pps);
+extern void pps_kc_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event);
+
+
+#else /* CONFIG_NTP_PPS */
+
+static inline int pps_kc_bind(struct pps_device *pps,
+ struct pps_bind_args *bind_args) { return -EOPNOTSUPP; }
+static inline void pps_kc_remove(struct pps_device *pps) {}
+static inline void pps_kc_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event) {}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KC_H */
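
For reference, the binding path that pps_cdev_ioctl() validates further down (tsformat must be PPS_TSFMT_TSPEC, the edge mask a subset of PPS_CAPTUREBOTH, the consumer PPS_KC_HARDPPS) can be driven from user space roughly as sketched below. The device node /dev/pps0 is assumed for the example, the process needs CAP_SYS_TIME, and a <linux/pps.h> that already exports PPS_KC_BIND and struct pps_bind_args is assumed to be installed.

/* Hypothetical user-space example: bind /dev/pps0 to the hardpps()
 * kernel consumer on assert edges. Error handling kept minimal. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_bind_args bind_args = {
		.tsformat = PPS_TSFMT_TSPEC,
		.edge     = PPS_CAPTUREASSERT,
		.consumer = PPS_KC_HARDPPS,
	};
	int fd = open("/dev/pps0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, PPS_KC_BIND, &bind_args) < 0)
		perror("PPS_KC_BIND");
	close(fd);
	return 0;
}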
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ca5183bdad85..2baadd21b7a6 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -26,9 +27,13 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
+#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/pps_kernel.h>
+#include <linux/slab.h>
+
+#include "kc.h"
/*
* Local variables
@@ -37,6 +42,9 @@
static dev_t pps_devt;
static struct class *pps_class;
+static DEFINE_MUTEX(pps_idr_lock);
+static DEFINE_IDR(pps_idr);
+
/*
* Char device methods
*/
@@ -61,15 +69,13 @@ static long pps_cdev_ioctl(struct file *file,
{
struct pps_device *pps = file->private_data;
struct pps_kparams params;
- struct pps_fdata fdata;
- unsigned long ticks;
void __user *uarg = (void __user *) arg;
int __user *iuarg = (int __user *) arg;
int err;
switch (cmd) {
case PPS_GETPARAMS:
- pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
+ dev_dbg(pps->dev, "PPS_GETPARAMS\n");
spin_lock_irq(&pps->lock);
@@ -85,7 +91,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_SETPARAMS:
- pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
+ dev_dbg(pps->dev, "PPS_SETPARAMS\n");
/* Check the capabilities */
if (!capable(CAP_SYS_TIME))
@@ -95,14 +101,14 @@ static long pps_cdev_ioctl(struct file *file,
if (err)
return -EFAULT;
if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
- pr_debug("capture mode unspecified (%x)\n",
+ dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
params.mode);
return -EINVAL;
}
/* Check for supported capabilities */
if ((params.mode & ~pps->info.mode) != 0) {
- pr_debug("unsupported capabilities (%x)\n",
+ dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
params.mode);
return -EINVAL;
}
@@ -115,7 +121,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Restore the read only parameters */
if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
/* section 3.3 of RFC 2783 interpreted */
- pr_debug("time format unspecified (%x)\n",
+ dev_dbg(pps->dev, "time format unspecified (%x)\n",
params.mode);
pps->params.mode |= PPS_TSFMT_TSPEC;
}
@@ -128,7 +134,7 @@ static long pps_cdev_ioctl(struct file *file,
break;
case PPS_GETCAP:
- pr_debug("PPS_GETCAP: source %d\n", pps->id);
+ dev_dbg(pps->dev, "PPS_GETCAP\n");
err = put_user(pps->info.mode, iuarg);
if (err)
@@ -136,20 +142,26 @@ static long pps_cdev_ioctl(struct file *file,
break;
- case PPS_FETCH:
- pr_debug("PPS_FETCH: source %d\n", pps->id);
+ case PPS_FETCH: {
+ struct pps_fdata fdata;
+ unsigned int ev;
+
+ dev_dbg(pps->dev, "PPS_FETCH\n");
err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
if (err)
return -EFAULT;
- pps->go = 0;
+ ev = pps->last_ev;
/* Manage the timeout */
if (fdata.timeout.flags & PPS_TIME_INVALID)
- err = wait_event_interruptible(pps->queue, pps->go);
+ err = wait_event_interruptible(pps->queue,
+ ev != pps->last_ev);
else {
- pr_debug("timeout %lld.%09d\n",
+ unsigned long ticks;
+
+ dev_dbg(pps->dev, "timeout %lld.%09d\n",
(long long) fdata.timeout.sec,
fdata.timeout.nsec);
ticks = fdata.timeout.sec * HZ;
@@ -157,7 +169,9 @@ static long pps_cdev_ioctl(struct file *file,
if (ticks != 0) {
err = wait_event_interruptible_timeout(
- pps->queue, pps->go, ticks);
+ pps->queue,
+ ev != pps->last_ev,
+ ticks);
if (err == 0)
return -ETIMEDOUT;
}
@@ -165,7 +179,7 @@ static long pps_cdev_ioctl(struct file *file,
/* Check for pending signals */
if (err == -ERESTARTSYS) {
- pr_debug("pending signal caught\n");
+ dev_dbg(pps->dev, "pending signal caught\n");
return -EINTR;
}
@@ -185,10 +199,44 @@ static long pps_cdev_ioctl(struct file *file,
return -EFAULT;
break;
+ }
+ case PPS_KC_BIND: {
+ struct pps_bind_args bind_args;
+
+ dev_dbg(pps->dev, "PPS_KC_BIND\n");
+
+ /* Check the capabilities */
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if (copy_from_user(&bind_args, uarg,
+ sizeof(struct pps_bind_args)))
+ return -EFAULT;
+ /* Check for supported capabilities */
+ if ((bind_args.edge & ~pps->info.mode) != 0) {
+ dev_err(pps->dev, "unsupported capabilities (%x)\n",
+ bind_args.edge);
+ return -EINVAL;
+ }
+
+ /* Validate parameters roughly */
+ if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+ (bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+ bind_args.consumer != PPS_KC_HARDPPS) {
+ dev_err(pps->dev, "invalid kernel consumer bind"
+ " parameters (%x)\n", bind_args.edge);
+ return -EINVAL;
+ }
+
+ err = pps_kc_bind(pps, &bind_args);
+ if (err < 0)
+ return err;
+
+ break;
+ }
default:
return -ENOTTY;
- break;
}
return 0;
@@ -198,12 +246,6 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
{
struct pps_device *pps = container_of(inode->i_cdev,
struct pps_device, cdev);
- int found;
-
- found = pps_get_source(pps->id) != 0;
- if (!found)
- return -ENODEV;
-
file->private_data = pps;
return 0;
@@ -211,11 +253,6 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
static int pps_cdev_release(struct inode *inode, struct file *file)
{
- struct pps_device *pps = file->private_data;
-
- /* Free the PPS source and wake up (possible) deregistration */
- pps_put_source(pps);
-
return 0;
}
@@ -233,25 +270,67 @@ static const struct file_operations pps_cdev_fops = {
.release = pps_cdev_release,
};
+static void pps_device_destruct(struct device *dev)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ /* release the id only here so that it cannot be reused while
+ * the device is still in use */
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ mutex_unlock(&pps_idr_lock);
+
+ kfree(dev);
+ kfree(pps);
+}
+
int pps_register_cdev(struct pps_device *pps)
{
int err;
+ dev_t devt;
+
+ mutex_lock(&pps_idr_lock);
+ /* Get new ID for the new PPS source */
+ if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
+ mutex_unlock(&pps_idr_lock);
+ return -ENOMEM;
+ }
+
+ /* Now really allocate the PPS source.
+ * After idr_get_new() is called the new source will be freely
+ * available inside the kernel.
+ */
+ err = idr_get_new(&pps_idr, pps, &pps->id);
+ mutex_unlock(&pps_idr_lock);
+
+ if (err < 0)
+ return err;
+
+ pps->id &= MAX_ID_MASK;
+ if (pps->id >= PPS_MAX_SOURCES) {
+ pr_err("%s: too many PPS sources in the system\n",
+ pps->info.name);
+ err = -EBUSY;
+ goto free_idr;
+ }
+
+ devt = MKDEV(MAJOR(pps_devt), pps->id);
- pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
cdev_init(&pps->cdev, &pps_cdev_fops);
pps->cdev.owner = pps->info.owner;
- err = cdev_add(&pps->cdev, pps->devno, 1);
+ err = cdev_add(&pps->cdev, devt, 1);
if (err) {
- printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
+ pr_err("%s: failed to add char device %d:%d\n",
pps->info.name, MAJOR(pps_devt), pps->id);
- return err;
+ goto free_idr;
}
- pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
+ pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
"pps%d", pps->id);
if (IS_ERR(pps->dev))
goto del_cdev;
- dev_set_drvdata(pps->dev, pps);
+
+ pps->dev->release = pps_device_destruct;
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
MAJOR(pps_devt), pps->id);
@@ -261,12 +340,17 @@ int pps_register_cdev(struct pps_device *pps)
del_cdev:
cdev_del(&pps->cdev);
+free_idr:
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ mutex_unlock(&pps_idr_lock);
+
return err;
}
void pps_unregister_cdev(struct pps_device *pps)
{
- device_destroy(pps_class, pps->devno);
+ device_destroy(pps_class, pps->dev->devt);
cdev_del(&pps->cdev);
}
@@ -286,14 +370,14 @@ static int __init pps_init(void)
pps_class = class_create(THIS_MODULE, "pps");
if (!pps_class) {
- printk(KERN_ERR "pps: failed to allocate class\n");
+ pr_err("failed to allocate class\n");
return -ENOMEM;
}
pps_class->dev_attrs = pps_attrs;
err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
if (err < 0) {
- printk(KERN_ERR "pps: failed to allocate char device region\n");
+ pr_err("failed to allocate char device region\n");
goto remove_class;
}
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c11c19..50cb1e1b4a12 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
-ps3av_mod-objs += ps3av.o ps3av_cmd.o
+ps3av_mod-y := ps3av.o ps3av_cmd.o
obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 1eb82c4c712e..a50391b6ba2a 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -46,7 +46,6 @@ static void rio_init_em(struct rio_dev *rdev);
DEFINE_SPINLOCK(rio_global_list_lock);
static int next_destid = 0;
-static int next_switchid = 0;
static int next_net = 0;
static int next_comptag = 1;
@@ -378,12 +377,30 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
struct rio_dev *rdev;
struct rio_switch *rswitch = NULL;
int result, rdid;
+ size_t size;
+ u32 swpinfo = 0;
- rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
+ size = sizeof(struct rio_dev);
+ if (rio_mport_read_config_32(port, destid, hopcount,
+ RIO_PEF_CAR, &result))
+ return NULL;
+
+ if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_SWP_INFO_CAR, &swpinfo);
+ if (result & RIO_PEF_SWITCH) {
+ size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+ sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+ }
+ }
+
+ rdev = kzalloc(size, GFP_KERNEL);
if (!rdev)
return NULL;
rdev->net = net;
+ rdev->pef = result;
+ rdev->swpinfo = swpinfo;
rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
&result);
rdev->did = result >> 16;
@@ -397,8 +414,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
&result);
rdev->asm_rev = result >> 16;
- rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
- &rdev->pef);
if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = result & 0xffff;
rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
@@ -408,11 +423,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
hopcount, RIO_EFB_ERR_MGMNT);
}
- if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
- rio_mport_read_config_32(port, destid, hopcount,
- RIO_SWP_INFO_CAR, &rdev->swpinfo);
- }
-
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
&rdev->src_ops);
rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
@@ -427,6 +437,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rio_mport_write_config_32(port, destid, hopcount,
RIO_COMPONENT_TAG_CSR, next_comptag);
rdev->comp_tag = next_comptag++;
+ } else {
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_COMPONENT_TAG_CSR,
+ &rdev->comp_tag);
}
if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
@@ -437,21 +451,20 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
next_destid++;
} else
rdev->destid = rio_get_device_id(port, destid, hopcount);
- } else
- /* Switch device has an associated destID */
- rdev->destid = RIO_INVALID_DESTID;
+
+ rdev->hopcount = 0xff;
+ } else {
+ /* Switch device has an associated destID which
+ * will be adjusted later
+ */
+ rdev->destid = destid;
+ rdev->hopcount = hopcount;
+ }
/* If a PE has both switch and other functions, show it as a switch */
if (rio_is_switch(rdev)) {
- rswitch = kzalloc(sizeof(*rswitch) +
- RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
- sizeof(rswitch->nextdev[0]),
- GFP_KERNEL);
- if (!rswitch)
- goto cleanup;
- rswitch->switchid = next_switchid;
- rswitch->hopcount = hopcount;
- rswitch->destid = destid;
+ rswitch = rdev->rswitch;
+ rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID;
rswitch->port_ok = 0;
rswitch->route_table = kzalloc(sizeof(u8)*
RIO_MAX_ROUTE_ENTRIES(port->sys_size),
@@ -462,15 +475,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
rdid++)
rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
- rdev->rswitch = rswitch;
- rswitch->rdev = rdev;
dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
- rdev->rswitch->switchid);
+ rswitch->switchid);
rio_switch_init(rdev, do_enum);
- if (do_enum && rdev->rswitch->clr_table)
- rdev->rswitch->clr_table(port, destid, hopcount,
- RIO_GLOBAL_TABLE);
+ if (do_enum && rswitch->clr_table)
+ rswitch->clr_table(port, destid, hopcount,
+ RIO_GLOBAL_TABLE);
list_add_tail(&rswitch->node, &rio_switches);
@@ -506,10 +517,9 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
return rdev;
cleanup:
- if (rswitch) {
+ if (rswitch->route_table)
kfree(rswitch->route_table);
- kfree(rswitch);
- }
+
kfree(rdev);
return NULL;
}
@@ -632,8 +642,7 @@ rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
/**
* rio_route_add_entry- Add a route entry to a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
* @table: Routing table ID
* @route_destid: Destination ID to be routed
* @route_port: Port number to be routed
@@ -647,31 +656,31 @@ rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
* on failure.
*/
static int
-rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+rio_route_add_entry(struct rio_dev *rdev,
u16 table, u16 route_destid, u8 route_port, int lock)
{
int rc;
if (lock) {
- rc = rio_lock_device(mport, rswitch->destid,
- rswitch->hopcount, 1000);
+ rc = rio_lock_device(rdev->net->hport, rdev->destid,
+ rdev->hopcount, 1000);
if (rc)
return rc;
}
- rc = rswitch->add_entry(mport, rswitch->destid,
- rswitch->hopcount, table,
- route_destid, route_port);
+ rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid,
+ rdev->hopcount, table,
+ route_destid, route_port);
if (lock)
- rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+ rio_unlock_device(rdev->net->hport, rdev->destid,
+ rdev->hopcount);
return rc;
}
/**
* rio_route_get_entry- Read a route entry in a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
* @table: Routing table ID
* @route_destid: Destination ID to be routed
* @route_port: Pointer to read port number into
@@ -685,23 +694,24 @@ rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
* on failure.
*/
static int
-rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
+rio_route_get_entry(struct rio_dev *rdev, u16 table,
u16 route_destid, u8 *route_port, int lock)
{
int rc;
if (lock) {
- rc = rio_lock_device(mport, rswitch->destid,
- rswitch->hopcount, 1000);
+ rc = rio_lock_device(rdev->net->hport, rdev->destid,
+ rdev->hopcount, 1000);
if (rc)
return rc;
}
- rc = rswitch->get_entry(mport, rswitch->destid,
- rswitch->hopcount, table,
- route_destid, route_port);
+ rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid,
+ rdev->hopcount, table,
+ route_destid, route_port);
if (lock)
- rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+ rio_unlock_device(rdev->net->hport, rdev->destid,
+ rdev->hopcount);
return rc;
}
@@ -809,16 +819,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
return -1;
if (rio_is_switch(rdev)) {
- next_switchid++;
sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
- rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+ rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
port->host_deviceid, sw_inport, 0);
rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
for (destid = 0; destid < next_destid; destid++) {
if (destid == port->host_deviceid)
continue;
- rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+ rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
destid, sw_inport, 0);
rdev->rswitch->route_table[destid] = sw_inport;
}
@@ -850,8 +859,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
"RIO: scanning device on port %d\n",
port_num);
rdev->rswitch->port_ok |= (1 << port_num);
- rio_route_add_entry(port, rdev->rswitch,
- RIO_GLOBAL_TABLE,
+ rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
RIO_ANY_DESTID(port->sys_size),
port_num, 0);
@@ -865,7 +873,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
destid < next_destid; destid++) {
if (destid == port->host_deviceid)
continue;
- rio_route_add_entry(port, rdev->rswitch,
+ rio_route_add_entry(rdev,
RIO_GLOBAL_TABLE,
destid,
port_num,
@@ -904,7 +912,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
next_destid++;
}
- rdev->rswitch->destid = sw_destid;
+ rdev->destid = sw_destid;
} else
pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
rio_name(rdev), rdev->vid, rdev->did);
@@ -935,13 +943,15 @@ static int rio_enum_complete(struct rio_mport *port)
* @port: Master port to send transactions
* @destid: Current destination ID in network
* @hopcount: Number of hops into the network
+ * @prev: previous rio_dev
+ * @prev_port: previous port number
*
* Recursively discovers a RIO network. Transactions are sent via the
* master port passed in @port.
*/
static int __devinit
rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
- u8 hopcount)
+ u8 hopcount, struct rio_dev *prev, int prev_port)
{
u8 port_num, route_port;
struct rio_dev *rdev;
@@ -951,14 +961,15 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
/* Add device to the global and bus/net specific list. */
list_add_tail(&rdev->net_list, &net->devices);
+ rdev->prev = prev;
+ if (prev && rio_is_switch(prev))
+ prev->rswitch->nextdev[prev_port] = rdev;
} else
return -1;
if (rio_is_switch(rdev)) {
- next_switchid++;
-
/* Associated destid is how we accessed this switch */
- rdev->rswitch->destid = destid;
+ rdev->destid = destid;
pr_debug(
"RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
@@ -981,7 +992,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
for (ndestid = 0;
ndestid < RIO_ANY_DESTID(port->sys_size);
ndestid++) {
- rio_route_get_entry(port, rdev->rswitch,
+ rio_route_get_entry(rdev,
RIO_GLOBAL_TABLE,
ndestid,
&route_port, 0);
@@ -992,8 +1003,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
if (ndestid == RIO_ANY_DESTID(port->sys_size))
continue;
rio_unlock_device(port, destid, hopcount);
- if (rio_disc_peer
- (net, port, ndestid, hopcount + 1) < 0)
+ if (rio_disc_peer(net, port, ndestid,
+ hopcount + 1, rdev, port_num) < 0)
return -1;
}
}
@@ -1069,14 +1080,14 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port)
*/
static void rio_update_route_tables(struct rio_mport *port)
{
- struct rio_dev *rdev;
+ struct rio_dev *rdev, *swrdev;
struct rio_switch *rswitch;
u8 sport;
u16 destid;
list_for_each_entry(rdev, &rio_devices, global_list) {
- destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid;
+ destid = rdev->destid;
list_for_each_entry(rswitch, &rio_switches, node) {
@@ -1084,14 +1095,16 @@ static void rio_update_route_tables(struct rio_mport *port)
continue;
if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+ swrdev = sw_to_rio_dev(rswitch);
+
/* Skip if destid ends in empty switch*/
- if (rswitch->destid == destid)
+ if (swrdev->destid == destid)
continue;
- sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
+ sport = RIO_GET_PORT_NUM(swrdev->swpinfo);
if (rswitch->add_entry) {
- rio_route_add_entry(port, rswitch,
+ rio_route_add_entry(swrdev,
RIO_GLOBAL_TABLE, destid,
sport, 0);
rswitch->route_table[destid] = sport;
@@ -1203,21 +1216,20 @@ static void rio_build_route_tables(void)
list_for_each_entry(rdev, &rio_devices, global_list)
if (rio_is_switch(rdev)) {
- rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
- rdev->rswitch->hopcount, 1000);
+ rio_lock_device(rdev->net->hport, rdev->destid,
+ rdev->hopcount, 1000);
for (i = 0;
i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
i++) {
- if (rio_route_get_entry
- (rdev->net->hport, rdev->rswitch,
- RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+ if (rio_route_get_entry(rdev,
+ RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
continue;
rdev->rswitch->route_table[i] = sport;
}
rio_unlock_device(rdev->net->hport,
- rdev->rswitch->destid,
- rdev->rswitch->hopcount);
+ rdev->destid,
+ rdev->hopcount);
}
}
@@ -1284,7 +1296,7 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
mport->host_deviceid);
if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
- 0) < 0) {
+ 0, NULL, 0) < 0) {
printk(KERN_INFO
"RIO: master port %d device has failed discovery\n",
mport->id);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 137ed93ee33f..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
- size = 0x200000;
+ size = RIO_MAINT_SPACE_SZ;
- if (off > size)
+ if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
- if (off > 0x200000)
+ if (off >= RIO_MAINT_SPACE_SZ)
return 0;
- if (off + count > 0x200000) {
- size = 0x200000 - off;
+ if (off + count > RIO_MAINT_SPACE_SZ) {
+ size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
- .size = 0x200000,
+ .size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
@@ -217,7 +217,7 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
err = device_create_bin_file(&rdev->dev, &rio_config_attr);
- if (!err && rdev->rswitch) {
+ if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
err = device_create_file(&rdev->dev, &dev_attr_routes);
if (!err && rdev->rswitch->sw_sysfs)
err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
@@ -239,7 +239,7 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
{
device_remove_bin_file(&rdev->dev, &rio_config_attr);
- if (rdev->rswitch) {
+ if (rdev->pef & RIO_PEF_SWITCH) {
device_remove_file(&rdev->dev, &dev_attr_routes);
if (rdev->rswitch->sw_sysfs)
rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 7b5080c45569..cc2a3b74d0f0 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -471,16 +471,9 @@ exit:
*/
int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
{
- u8 hopcount = 0xff;
- u16 destid = rdev->destid;
u32 regval;
- if (rdev->rswitch) {
- destid = rdev->rswitch->destid;
- hopcount = rdev->rswitch->hopcount;
- }
-
- rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
&regval);
if (lock)
@@ -488,7 +481,7 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
else
regval &= ~RIO_PORT_N_CTL_LOCKOUT;
- rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
regval);
return 0;
@@ -507,7 +500,7 @@ static int
rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
{
u32 result;
- int p_port, dstid, rc = -EIO;
+ int p_port, rc = -EIO;
struct rio_dev *prev = NULL;
/* Find switch with failed RIO link */
@@ -522,9 +515,7 @@ rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
if (prev == NULL)
goto err_out;
- dstid = (rdev->pef & RIO_PEF_SWITCH) ?
- rdev->rswitch->destid : rdev->destid;
- p_port = prev->rswitch->route_table[dstid];
+ p_port = prev->rswitch->route_table[rdev->destid];
if (p_port != RIO_INVALID_ROUTE) {
pr_debug("RIO: link failed on [%s]-P%d\n",
@@ -567,15 +558,8 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
*/
static int rio_chk_dev_access(struct rio_dev *rdev)
{
- u8 hopcount = 0xff;
- u16 destid = rdev->destid;
-
- if (rdev->rswitch) {
- destid = rdev->rswitch->destid;
- hopcount = rdev->rswitch->hopcount;
- }
-
- return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+ return rio_mport_chk_dev_access(rdev->net->hport,
+ rdev->destid, rdev->hopcount);
}
/**
@@ -588,23 +572,20 @@ static int rio_chk_dev_access(struct rio_dev *rdev)
static int
rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
int checkcount;
if (lnkresp) {
/* Read from link maintenance response register
* to clear valid bit */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
&regval);
udelay(50);
}
/* Issue Input-status command */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
RIO_MNT_REQ_CMD_IS);
@@ -615,7 +596,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
checkcount = 3;
while (checkcount--) {
udelay(50);
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
@@ -635,15 +616,12 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
*/
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
u32 regval;
u32 far_ackid, far_linkstat, near_ackid;
if (err_status == 0)
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
&err_status);
@@ -661,7 +639,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
pnum, regval);
far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
&regval);
pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
@@ -679,9 +657,8 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
/* Align near outstanding/outbound ackIDs with
* far inbound.
*/
- rio_mport_write_config_32(mport, destid,
- hopcount, rdev->phys_efptr +
- RIO_PORT_N_ACK_STS_CSR(pnum),
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
(near_ackid << 24) |
(far_ackid << 8) | far_ackid);
/* Align far outstanding/outbound ackIDs with
@@ -698,7 +675,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
}
rd_err:
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
&err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -710,7 +687,7 @@ rd_err:
RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
udelay(50);
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
&err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -730,13 +707,10 @@ rd_err:
int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
struct rio_dev *rdev;
- struct rio_mport *mport;
- u8 hopcount;
- u16 destid;
u32 err_status, em_perrdet, em_ltlerrdet;
int rc, portnum;
- rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+ rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
if (rdev == NULL) {
/* Device removed or enumeration error */
pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
@@ -800,17 +774,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
return 0;
}
- mport = rdev->net->hport;
- destid = rdev->rswitch->destid;
- hopcount = rdev->rswitch->hopcount;
-
/*
* Process the port-write notification from switch
*/
if (rdev->rswitch->em_handle)
rdev->rswitch->em_handle(rdev, portnum);
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
&err_status);
pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
@@ -840,7 +810,7 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
rdev->rswitch->port_ok &= ~(1 << portnum);
rio_set_port_lockout(rdev, portnum, 1);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_ACK_STS_CSR(portnum),
RIO_PORT_N_ACK_CLEAR);
@@ -851,28 +821,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
}
}
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
if (em_perrdet) {
pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
portnum, em_perrdet);
/* Clear EM Port N Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
}
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
if (em_ltlerrdet) {
pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
em_ltlerrdet);
/* Clear EM L/T Layer Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
}
/* Clear remaining error bits and Port-Write Pending bit */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
err_status);
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 0bb871cb5c40..095016a9dec1 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -209,9 +209,6 @@ idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
static int
idtg2_em_init(struct rio_dev *rdev)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
int i, tmp;
@@ -220,29 +217,25 @@ idtg2_em_init(struct rio_dev *rdev)
* All standard EM configuration should be performed at upper level.
*/
- pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_PW_INFO_CSR, 0x0000e000);
+ rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
/*
* Configure LT LAYER error reporting.
*/
/* Enable standard (RIO.p8) error reporting */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_LT_ERR_REPORT_EN,
+ rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
REM_LTL_ERR_UNSUPTR);
/* Use Port-Writes for LT layer error reporting.
* Enable per-port reset
*/
- rio_mport_read_config_32(mport, destid, hopcount,
- IDT_DEV_CTRL_1, &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_DEV_CTRL_1,
+ rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+ rio_write_config_32(rdev, IDT_DEV_CTRL_1,
regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
/*
@@ -250,45 +243,40 @@ idtg2_em_init(struct rio_dev *rdev)
*/
/* Report all RIO.p8 errors supported by device */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+ rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
/* Configure reporting of implementation specific errors/events */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+ rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+ IDT_PORT_INIT_TX_ACQUIRED);
/* Use Port-Writes for port error reporting and enable error logging */
tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
for (i = 0; i < tmp; i++) {
- rio_mport_read_config_32(mport, destid, hopcount,
- IDT_PORT_OPS(i), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+ rio_write_config_32(rdev,
IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
IDT_PORT_OPS_PL_ELOG |
IDT_PORT_OPS_LL_ELOG |
IDT_PORT_OPS_LT_ELOG);
}
/* Overwrite error log if full */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+ rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
/*
* Configure LANE error reporting.
*/
/* Disable line error reporting */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_LANE_ERR_REPORT_EN_BC, 0);
+ rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
/* Use Port-Writes for lane error reporting (when enabled)
* (do per-lane update because lanes may have different configuration)
*/
tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
for (i = 0; i < tmp; i++) {
- rio_mport_read_config_32(mport, destid, hopcount,
- IDT_LANE_CTRL(i), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+ rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+ rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+ regval | IDT_LANE_CTRL_GENPW);
}
/*
@@ -296,41 +284,32 @@ idtg2_em_init(struct rio_dev *rdev)
*/
/* Disable JTAG and I2C Error capture */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_AUX_PORT_ERR_CAP_EN, 0);
+ rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
/* Disable JTAG and I2C Error reporting/logging */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_AUX_ERR_REPORT_EN, 0);
+ rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
/* Disable Port-Write notification from JTAG */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_JTAG_CTRL, 0);
+ rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
/* Disable Port-Write notification from I2C */
- rio_mport_read_config_32(mport, destid, hopcount,
- IDT_I2C_MCTRL, &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_I2C_MCTRL,
- regval & ~IDT_I2C_MCTRL_GENPW);
+ rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+ rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
/*
* Configure CFG_BLK error reporting.
*/
/* Disable Configuration Block error capture */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+ rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
/* Disable Port-Writes for Configuration Block error reporting */
- rio_mport_read_config_32(mport, destid, hopcount,
- IDT_CFGBLK_ERR_REPORT, &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_CFGBLK_ERR_REPORT,
- regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+ rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
+ rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
+ regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
/* set TVAL = ~50us */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
return 0;
@@ -339,18 +318,15 @@ idtg2_em_init(struct rio_dev *rdev)
static int
idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 regval, em_perrdet, em_ltlerrdet;
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
if (em_ltlerrdet) {
/* Service Logical/Transport Layer Error(s) */
if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
/* Implementation specific error reported */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
IDT_ISLTL_ADDRESS_CAP, &regval);
pr_debug("RIO: %s Implementation Specific LTL errors" \
@@ -358,13 +334,12 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
rio_name(rdev), em_ltlerrdet, regval);
/* Clear implementation specific address capture CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
- IDT_ISLTL_ADDRESS_CAP, 0);
+ rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
}
}
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
if (em_perrdet) {
/* Service Port-Level Error(s) */
@@ -372,14 +347,14 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
/* Implementation Specific port error reported */
/* Get IS errors reported */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
IDT_PORT_ISERR_DET(portnum), &regval);
pr_debug("RIO: %s Implementation Specific Port" \
" errors 0x%x\n", rio_name(rdev), regval);
/* Clear all implementation specific events */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
IDT_PORT_ISERR_DET(portnum), 0);
}
}
@@ -391,14 +366,10 @@ static ssize_t
idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
ssize_t len = 0;
u32 regval;
- while (!rio_mport_read_config_32(mport, destid, hopcount,
- IDT_ERR_RD, &regval)) {
+ while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
if (!regval) /* 0 = end of log */
break;
len += snprintf(buf + len, PAGE_SIZE - len,
@@ -445,3 +416,5 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index fc9f6374f759..3a971077e7bf 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,10 +117,6 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
-
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
rdev->rswitch->add_entry = idtcps_route_add_entry;
rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -132,7 +128,7 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
if (do_enum) {
/* set TVAL = ~50us */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
}
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index b9a389b9f812..3994c00aa01f 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -113,22 +113,17 @@ tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
static int
tsi568_em_init(struct rio_dev *rdev)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
int portnum;
- pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+ pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
/* Make sure that Port-Writes are disabled (for all ports) */
for (portnum = 0;
portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI568_SP_MODE(portnum), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI568_SP_MODE(portnum),
- regval | TSI568_SP_MODE_PW_DIS);
+ rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
+ rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
+ regval | TSI568_SP_MODE_PW_DIS);
}
return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2003fb63c404..1a62934bfebc 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -158,48 +158,45 @@ tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
static int
tsi57x_em_init(struct rio_dev *rdev)
{
- struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
int portnum;
- pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+ pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
for (portnum = 0;
portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
/* Make sure that Port-Writes are enabled (for all ports) */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
TSI578_SP_MODE(portnum), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
TSI578_SP_MODE(portnum),
regval & ~TSI578_SP_MODE_PW_DIS);
/* Clear all pending interrupts */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_ERR_STS_CSR(portnum),
&regval);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_ERR_STS_CSR(portnum),
regval & 0x07120214);
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
TSI578_SP_INT_STATUS(portnum), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
TSI578_SP_INT_STATUS(portnum),
regval & 0x000700bd);
/* Enable all interrupts to allow ports to send a port-write */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
TSI578_SP_CTL_INDEP(portnum), &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
TSI578_SP_CTL_INDEP(portnum),
regval | 0x000b0000);
/* Skip next (odd) port if the current port is in x4 mode */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
&regval);
if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
@@ -207,7 +204,7 @@ tsi57x_em_init(struct rio_dev *rdev)
}
/* set TVAL = ~50us */
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
return 0;
@@ -217,14 +214,12 @@ static int
tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
{
struct rio_mport *mport = rdev->net->hport;
- u16 destid = rdev->rswitch->destid;
- u8 hopcount = rdev->rswitch->hopcount;
u32 intstat, err_status;
int sendcount, checkcount;
u8 route_port;
u32 regval;
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
&err_status);
@@ -232,15 +227,15 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
(err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
RIO_PORT_N_ERR_STS_PW_INP_ES))) {
/* Remove any queued packets by locking/unlocking port */
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
&regval);
if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
regval | RIO_PORT_N_CTL_LOCKOUT);
udelay(50);
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
regval);
}
@@ -248,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
/* Read from link maintenance response register to clear
* valid bit
*/
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
&regval);
@@ -257,13 +252,12 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
*/
sendcount = 3;
while (sendcount) {
- rio_mport_write_config_32(mport, destid, hopcount,
+ rio_write_config_32(rdev,
TSI578_SP_CS_TX(portnum), 0x40fc8000);
checkcount = 3;
while (checkcount--) {
udelay(50);
- rio_mport_read_config_32(
- mport, destid, hopcount,
+ rio_read_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_MNT_RSP_CSR(portnum),
&regval);
@@ -277,25 +271,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
exit_es:
/* Clear implementation specific error status bits */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI578_SP_INT_STATUS(portnum), &intstat);
+ rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
- destid, hopcount, portnum, intstat);
+ rdev->destid, rdev->hopcount, portnum, intstat);
if (intstat & 0x10000) {
- rio_mport_read_config_32(mport, destid, hopcount,
+ rio_read_config_32(rdev,
TSI578_SP_LUT_PEINF(portnum), &regval);
regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
route_port = rdev->rswitch->route_table[regval];
pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
rio_name(rdev), portnum, regval);
- tsi57x_route_add_entry(mport, destid, hopcount,
+ tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
RIO_GLOBAL_TABLE, regval, route_port);
}
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI578_SP_INT_STATUS(portnum),
- intstat & 0x000700bd);
+ rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
+ intstat & 0x000700bd);
return 0;
}
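The switch hunks above replace the raw rio_mport_{read,write}_config_32() calls, which had to carry the mport/destid/hopcount triple everywhere, with the per-device rio_read_config_32()/rio_write_config_32() wrappers. A minimal sketch of the resulting read-modify-write pattern follows; the register offset and control bit are made-up placeholders, not part of this patch:

#include <linux/rio.h>
#include <linux/rio_drv.h>

/* Illustrative placeholders only -- not registers of any real switch. */
#define EXAMPLE_SP_CTL(n)	(0x100 + (n) * 0x20)
#define EXAMPLE_CTL_BIT		0x00000001

/*
 * Read-modify-write of a per-port register using the rio_dev-based
 * accessors the hunks above switch to; the caller no longer needs any
 * mport/destid/hopcount bookkeeping.
 */
static int example_port_ctl_set(struct rio_dev *rdev, int portnum)
{
	u32 regval;
	int ret;

	ret = rio_read_config_32(rdev, EXAMPLE_SP_CTL(portnum), &regval);
	if (ret)
		return ret;

	return rio_write_config_32(rdev, EXAMPLE_SP_CTL(portnum),
				   regval | EXAMPLE_CTL_BIT);
}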
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2ce2eb71d0f5..dd6308499bd4 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -249,7 +249,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
}
static int pm8607_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
uint8_t val, mask;
@@ -263,6 +263,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
ret = choose_voltage(rdev, min_uV, max_uV);
if (ret < 0)
return -EINVAL;
+ *selector = ret;
val = (uint8_t)(ret << info->vol_shift);
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
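Every regulator driver touched in this patch gains an extra "unsigned *selector" output argument on its set_voltage callback, so the core can learn which voltage step was actually programmed. A minimal sketch of a callback under the new prototype; struct example_info, its linear min/step fields and example_write_vsel() are invented for illustration and stand in for a real chip's register interface:

#include <linux/kernel.h>
#include <linux/regulator/driver.h>

/* Hypothetical per-regulator data for a simple linear voltage map. */
struct example_info {
	int min_uV;
	int max_uV;
	int step_uV;
};

static int example_write_vsel(struct regulator_dev *rdev, unsigned sel)
{
	/* A real driver would program its voltage-select register here. */
	return 0;
}

static int example_set_voltage(struct regulator_dev *rdev,
			       int min_uV, int max_uV, unsigned *selector)
{
	struct example_info *info = rdev_get_drvdata(rdev);
	unsigned sel;

	if (min_uV < info->min_uV || min_uV > info->max_uV)
		return -EINVAL;

	/* Round up to the first step at or above min_uV... */
	sel = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
	/* ...and make sure that step still fits below max_uV. */
	if (info->min_uV + (int)sel * info->step_uV > max_uV)
		return -EINVAL;

	/* Report the chosen step back to the core, as the drivers above now do. */
	*selector = sel;

	return example_write_vsel(rdev, sel);
}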
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dd30e883d4a7..e1d943619ab8 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -186,13 +186,25 @@ config REGULATOR_PCAP
This driver provides support for the voltage regulators of the
PCAP2 PMIC.
+config REGULATOR_MC13XXX_CORE
+ tristate
+
config REGULATOR_MC13783
tristate "Support regulators on Freescale MC13783 PMIC"
depends on MFD_MC13783
+ select REGULATOR_MC13XXX_CORE
help
Say y here to support the regulators found on the Freescale MC13783
PMIC.
+config REGULATOR_MC13892
+ tristate "Support regulators on Freescale MC13892 PMIC"
+ depends on MFD_MC13XXX
+ select REGULATOR_MC13XXX_CORE
+ help
+ Say y here to support the regulators found on the Freescale MC13892
+ PMIC.
+
config REGULATOR_AB3100
tristate "ST-Ericsson AB3100 Regulator functions"
depends on AB3100_CORE
@@ -250,5 +262,15 @@ config REGULATOR_TPS6586X
help
This driver supports TPS6586X voltage regulator chips.
+config REGULATOR_TPS6524X
+ tristate "TI TPS6524X Power regulators"
+ depends on SPI
+ help
+ This driver supports TPS6524X voltage regulator chips. TPS6524X
+ provides three step-down converters and two general-purpose LDO
+ voltage regulators. This device is interfaced using a customized
+ serial interface currently supported on the sequencer serial
+ port controller.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bff815736780..0b5e88c2b8d7 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,10 +30,13 @@ obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
+obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
+obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349266a43de..ed6feaf9398d 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -362,7 +362,8 @@ static int ab3100_get_best_voltage_index(struct regulator_dev *reg,
}
static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct ab3100_regulator *abreg = reg->reg_data;
u8 regval;
@@ -373,6 +374,8 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
if (bestindex < 0)
return bestindex;
+ *selector = bestindex;
+
err = abx500_get_register_interruptible(abreg->dev, 0,
abreg->regreg, &regval);
if (err) {
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index db6b70f20511..d9a052c53aec 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -3,18 +3,13 @@
*
* License Terms: GNU General Public License v2
*
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
*
* AB8500 peripheral regulators
*
- * AB8500 supports the following regulators,
- * LDOs - VAUDIO, VANAMIC2/2, VDIGMIC, VINTCORE12, VTVOUT,
- * VAUX1/2/3, VANA
- *
- * for DB8500 cut 1.0 and previous versions of the silicon, all accesses
- * to registers are through the DB8500 SPI. In cut 1.1 onwards, these
- * accesses are through the DB8500 PRCMU I2C
- *
+ * AB8500 supports the following regulators:
+ * VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -28,38 +23,37 @@
/**
* struct ab8500_regulator_info - ab8500 regulator information
+ * @dev: device pointer
* @desc: regulator description
- * @ab8500: ab8500 parent
* @regulator_dev: regulator device
* @max_uV: maximum voltage (for variable voltage supplies)
* @min_uV: minimum voltage (for variable voltage supplies)
* @fixed_uV: typical voltage (for fixed voltage supplies)
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
- * @mask: mask to enable/disable regulator
- * @enable: bits to enable the regulator in normal(high power) mode
+ * @update_mask: mask to enable/disable regulator
+ * @update_val_enable: bits to enable the regulator in normal (high power) mode
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
- * @supported_voltages: supported voltage table
+ * @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
- struct ab8500 *ab8500;
struct regulator_dev *regulator;
int max_uV;
int min_uV;
int fixed_uV;
u8 update_bank;
u8 update_reg;
- u8 mask;
- u8 enable;
+ u8 update_mask;
+ u8 update_val_enable;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
- int const *supported_voltages;
+ int const *voltages;
int voltages_len;
};
@@ -83,6 +77,17 @@ static const int ldo_vauxn_voltages[] = {
3300000,
};
+static const int ldo_vaux3_voltages[] = {
+ 1200000,
+ 1500000,
+ 1800000,
+ 2100000,
+ 2500000,
+ 2750000,
+ 2790000,
+ 2910000,
+};
+
static const int ldo_vintcore_voltages[] = {
1200000,
1225000,
@@ -95,57 +100,80 @@ static const int ldo_vintcore_voltages[] = {
static int ab8500_regulator_enable(struct regulator_dev *rdev)
{
- int regulator_id, ret;
+ int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
ret = abx500_mask_and_set_register_interruptible(info->dev,
- info->update_bank, info->update_reg, info->mask, info->enable);
+ info->update_bank, info->update_reg,
+ info->update_mask, info->update_val_enable);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, info->update_val_enable);
+
return ret;
}
static int ab8500_regulator_disable(struct regulator_dev *rdev)
{
- int regulator_id, ret;
+ int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
ret = abx500_mask_and_set_register_interruptible(info->dev,
- info->update_bank, info->update_reg, info->mask, 0x0);
+ info->update_bank, info->update_reg,
+ info->update_mask, 0x0);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, 0x0);
+
return ret;
}
static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
- int regulator_id, ret;
+ int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- u8 value;
+ u8 regval;
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
ret = abx500_get_register_interruptible(info->dev,
- info->update_bank, info->update_reg, &value);
+ info->update_bank, info->update_reg, &regval);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read 0x%x register\n", info->update_reg);
return ret;
}
- if (value & info->mask)
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+ " 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ if (regval & info->update_mask)
return true;
else
return false;
@@ -153,12 +181,12 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
- int regulator_id;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
/* return the uV for the fixed regulators */
if (info->fixed_uV)
@@ -167,33 +195,40 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
if (selector >= info->voltages_len)
return -EINVAL;
- return info->supported_voltages[selector];
+ return info->voltages[selector];
}
static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
{
- int regulator_id, ret;
+ int ret, val;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- u8 value;
+ u8 regval;
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
- ret = abx500_get_register_interruptible(info->dev, info->voltage_bank,
- info->voltage_reg, &value);
+ ret = abx500_get_register_interruptible(info->dev,
+ info->voltage_bank, info->voltage_reg, &regval);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't read voltage reg for regulator\n");
return ret;
}
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+ " 0x%x\n",
+ info->desc.name, info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
+
/* vintcore has a different layout */
- value &= info->voltage_mask;
- if (regulator_id == AB8500_LDO_INTCORE)
- ret = info->supported_voltages[value >> 0x3];
+ val = regval & info->voltage_mask;
+ if (info->desc.id == AB8500_LDO_INTCORE)
+ ret = info->voltages[val >> 0x3];
else
- ret = info->supported_voltages[value];
+ ret = info->voltages[val];
return ret;
}
@@ -206,8 +241,8 @@ static int ab8500_get_best_voltage_index(struct regulator_dev *rdev,
/* check the supported voltage */
for (i = 0; i < info->voltages_len; i++) {
- if ((info->supported_voltages[i] >= min_uV) &&
- (info->supported_voltages[i] <= max_uV))
+ if ((info->voltages[i] >= min_uV) &&
+ (info->voltages[i] <= max_uV))
return i;
}
@@ -215,14 +250,17 @@ static int ab8500_get_best_voltage_index(struct regulator_dev *rdev,
}
static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
- int regulator_id, ret;
+ int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
/* get the appropriate voltages within the range */
ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
@@ -232,14 +270,23 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
return ret;
}
+ *selector = ret;
+
/* set the registers for the request */
+ regval = (u8)ret;
ret = abx500_mask_and_set_register_interruptible(info->dev,
- info->voltage_bank, info->voltage_reg,
- info->voltage_mask, (u8)ret);
+ info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set voltage reg for regulator\n");
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+ " 0x%x\n",
+ info->desc.name, info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
+
return ret;
}
@@ -254,17 +301,17 @@ static struct regulator_ops ab8500_regulator_ops = {
static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
{
- int regulator_id;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= AB8500_NUM_REGULATORS)
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
return -EINVAL;
+ }
return info->fixed_uV;
}
-static struct regulator_ops ab8500_ldo_fixed_ops = {
+static struct regulator_ops ab8500_regulator_fixed_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
@@ -272,88 +319,197 @@ static struct regulator_ops ab8500_ldo_fixed_ops = {
.list_voltage = ab8500_list_voltage,
};
-#define AB8500_LDO(_id, min, max, bank, reg, reg_mask, \
- reg_enable, volt_bank, volt_reg, volt_mask, \
- voltages, len_volts) \
-{ \
- .desc = { \
- .name = "LDO-" #_id, \
- .ops = &ab8500_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = AB8500_LDO_##_id, \
- .owner = THIS_MODULE, \
- }, \
- .min_uV = (min) * 1000, \
- .max_uV = (max) * 1000, \
- .update_bank = bank, \
- .update_reg = reg, \
- .mask = reg_mask, \
- .enable = reg_enable, \
- .voltage_bank = volt_bank, \
- .voltage_reg = volt_reg, \
- .voltage_mask = volt_mask, \
- .supported_voltages = voltages, \
- .voltages_len = len_volts, \
- .fixed_uV = 0, \
-}
-
-#define AB8500_FIXED_LDO(_id, fixed, bank, reg, \
- reg_mask, reg_enable) \
-{ \
- .desc = { \
- .name = "LDO-" #_id, \
- .ops = &ab8500_ldo_fixed_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = AB8500_LDO_##_id, \
- .owner = THIS_MODULE, \
- }, \
- .fixed_uV = fixed * 1000, \
- .update_bank = bank, \
- .update_reg = reg, \
- .mask = reg_mask, \
- .enable = reg_enable, \
-}
-
-static struct ab8500_regulator_info ab8500_regulator_info[] = {
+static struct ab8500_regulator_info
+ ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
- * Variable Voltage LDOs
- * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask,
- * volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table,
- * num supported volts
+ * Variable Voltage Regulators
+ * name, min mV, max mV,
+ * update bank, reg, mask, enable val
+ * volt bank, reg, mask, table, table length
*/
- AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf,
- ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(AUX2, 1100, 3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf,
- ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf,
- ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
- AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38,
- ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
+ [AB8500_LDO_AUX1] = {
+ .desc = {
+ .name = "LDO-AUX1",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX1,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x03,
+ .update_val_enable = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x1f,
+ .voltage_mask = 0x0f,
+ .voltages = ldo_vauxn_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ [AB8500_LDO_AUX2] = {
+ .desc = {
+ .name = "LDO-AUX2",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX2,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x0c,
+ .update_val_enable = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x20,
+ .voltage_mask = 0x0f,
+ .voltages = ldo_vauxn_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ [AB8500_LDO_AUX3] = {
+ .desc = {
+ .name = "LDO-AUX3",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX3,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .update_bank = 0x04,
+ .update_reg = 0x0a,
+ .update_mask = 0x03,
+ .update_val_enable = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x21,
+ .voltage_mask = 0x07,
+ .voltages = ldo_vaux3_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages),
+ },
+ [AB8500_LDO_INTCORE] = {
+ .desc = {
+ .name = "LDO-INTCORE",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_INTCORE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x44,
+ .update_val_enable = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x80,
+ .voltage_mask = 0x38,
+ .voltages = ldo_vintcore_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages),
+ },
/*
- * Fixed Voltage LDOs
- * name, o/p uV, ctrl bank, ctrl reg, enable, disable
+ * Fixed Voltage Regulators
+ * name, fixed mV,
+ * update bank, reg, mask, enable val
*/
- AB8500_FIXED_LDO(TVOUT, 2000, 0x03, 0x80, 0x2, 0x2),
- AB8500_FIXED_LDO(AUDIO, 2000, 0x03, 0x83, 0x2, 0x2),
- AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03, 0x83, 0x4, 0x4),
- AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03, 0x83, 0x8, 0x8),
- AB8500_FIXED_LDO(DMIC, 1800, 0x03, 0x83, 0x10, 0x10),
- AB8500_FIXED_LDO(ANA, 1200, 0x03, 0x83, 0xc, 0x4),
-};
+ [AB8500_LDO_TVOUT] = {
+ .desc = {
+ .name = "LDO-TVOUT",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_TVOUT,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2000000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x82,
+ .update_val_enable = 0x02,
+ },
+ [AB8500_LDO_AUDIO] = {
+ .desc = {
+ .name = "LDO-AUDIO",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2000000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x02,
+ .update_val_enable = 0x02,
+ },
+ [AB8500_LDO_ANAMIC1] = {
+ .desc = {
+ .name = "LDO-ANAMIC1",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2050000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x08,
+ .update_val_enable = 0x08,
+ },
+ [AB8500_LDO_ANAMIC2] = {
+ .desc = {
+ .name = "LDO-ANAMIC2",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2050000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x10,
+ .update_val_enable = 0x10,
+ },
+ [AB8500_LDO_DMIC] = {
+ .desc = {
+ .name = "LDO-DMIC",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_DMIC,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1800000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x04,
+ .update_val_enable = 0x04,
+ },
+ [AB8500_LDO_ANA] = {
+ .desc = {
+ .name = "LDO-ANA",
+ .ops = &ab8500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANA,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1200000,
+ .update_bank = 0x04,
+ .update_reg = 0x06,
+ .update_mask = 0x0c,
+ .update_val_enable = 0x04,
+ },
-static inline struct ab8500_regulator_info *find_regulator_info(int id)
-{
- struct ab8500_regulator_info *info;
- int i;
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
- info = &ab8500_regulator_info[i];
- if (info->desc.id == id)
- return info;
- }
- return NULL;
-}
+};
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
@@ -366,6 +522,16 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
pdata = dev_get_platdata(ab8500->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "null pdata\n");
+ return -EINVAL;
+ }
+
+ /* make sure the platform data has the correct size */
+ if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+ dev_err(&pdev->dev, "platform configuration error\n");
+ return -EINVAL;
+ }
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -374,10 +540,22 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* assign per-regulator data */
info = &ab8500_regulator_info[i];
info->dev = &pdev->dev;
- info->ab8500 = ab8500;
+ /* fix for hardware before ab8500v2.0 */
+ if (abx500_get_chip_id(info->dev) < 0x20) {
+ if (info->desc.id == AB8500_LDO_AUX3) {
+ info->desc.n_voltages =
+ ARRAY_SIZE(ldo_vauxn_voltages);
+ info->voltages = ldo_vauxn_voltages;
+ info->voltages_len =
+ ARRAY_SIZE(ldo_vauxn_voltages);
+ info->voltage_mask = 0xf;
+ }
+ }
+
+ /* register regulator with framework */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata->regulator[i], info);
+ &pdata->regulator[i], info);
if (IS_ERR(info->regulator)) {
err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -389,6 +567,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
}
return err;
}
+
+ dev_vdbg(rdev_get_dev(info->regulator),
+ "%s-probed\n", info->desc.name);
}
return 0;
@@ -401,6 +582,10 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
struct ab8500_regulator_info *info = NULL;
info = &ab8500_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->regulator),
+ "%s-remove\n", info->desc.name);
+
regulator_unregister(info->regulator);
}
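With the change above, ab8500_regulator_probe() rejects platform data whose regulator array is not exactly AB8500_NUM_REGULATORS entries long and registers entry i of that array against regulator i of ab8500_regulator_info[]. A minimal sketch of matching board-side init data, assuming the AB8500_LDO_* IDs come from <linux/regulator/ab8500.h>; the consumer supply and device name are invented:

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>

/* Hypothetical consumer of LDO-AUX1; the supply and dev_name are made up. */
static struct regulator_consumer_supply example_vaux1_consumers[] = {
	{ .supply = "vdd_sdcard", .dev_name = "example-mmc.0" },
};

/* The array must have exactly AB8500_NUM_REGULATORS entries, indexed by ID. */
static struct regulator_init_data example_ab8500_regulators[AB8500_NUM_REGULATORS] = {
	[AB8500_LDO_AUX1] = {
		.constraints = {
			.name = "V-AUX1",
			.min_uV = 1100000,
			.max_uV = 3300000,
			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		},
		.num_consumer_supplies = ARRAY_SIZE(example_vaux1_consumers),
		.consumer_supplies = example_vaux1_consumers,
	},
	/* the remaining entries may stay zeroed; probe only checks the count */
};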
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ba521f0f0fac..9fa20957847d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -13,8 +13,11 @@
*
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -25,16 +28,30 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/regulator.h>
+
#include "dummy.h"
-#define REGULATOR_VERSION "0.5"
+#define rdev_err(rdev, fmt, ...) \
+ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...) \
+ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...) \
+ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...) \
+ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
-static int has_full_constraints;
+static bool has_full_constraints;
static bool board_wants_dummy_regulator;
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_root;
+#endif
+
/*
* struct regulator_map
*
@@ -71,6 +88,8 @@ static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -111,13 +130,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
BUG_ON(*min_uV > *max_uV);
if (!rdev->constraints) {
- printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev_get_name(rdev));
+ rdev_err(rdev, "no constraints\n");
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
- printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
@@ -132,6 +149,27 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* Make sure we select a voltage that suits the needs of all
+ * regulator consumers
+ */
+static int regulator_check_consumers(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV)
+{
+ struct regulator *regulator;
+
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ if (*max_uV > regulator->max_uV)
+ *max_uV = regulator->max_uV;
+ if (*min_uV < regulator->min_uV)
+ *min_uV = regulator->min_uV;
+ }
+
+ if (*min_uV > *max_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
/* current constraint check */
static int regulator_check_current_limit(struct regulator_dev *rdev,
int *min_uA, int *max_uA)
@@ -139,13 +177,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
BUG_ON(*min_uA > *max_uA);
if (!rdev->constraints) {
- printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev_get_name(rdev));
+ rdev_err(rdev, "no constraints\n");
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
- printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
@@ -174,18 +210,15 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
}
if (!rdev->constraints) {
- printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev_get_name(rdev));
+ rdev_err(rdev, "no constraints\n");
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
- printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
if (!(rdev->constraints->valid_modes_mask & mode)) {
- printk(KERN_ERR "%s: invalid mode %x for %s\n",
- __func__, mode, rdev_get_name(rdev));
+ rdev_err(rdev, "invalid mode %x\n", mode);
return -EINVAL;
}
return 0;
@@ -195,13 +228,11 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
static int regulator_check_drms(struct regulator_dev *rdev)
{
if (!rdev->constraints) {
- printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev_get_name(rdev));
+ rdev_err(rdev, "no constraints\n");
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "operation not allowed\n");
return -EPERM;
}
return 0;
@@ -553,18 +584,21 @@ static void drms_uA_update(struct regulator_dev *rdev)
err = regulator_check_drms(rdev);
if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
- !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode)
+ (!rdev->desc->ops->get_voltage &&
+ !rdev->desc->ops->get_voltage_sel) ||
+ !rdev->desc->ops->set_mode)
return;
/* get output voltage */
- output_uV = rdev->desc->ops->get_voltage(rdev);
+ output_uV = _regulator_get_voltage(rdev);
if (output_uV <= 0)
return;
/* get input voltage */
- if (rdev->supply && rdev->supply->desc->ops->get_voltage)
- input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
- else
+ input_uV = 0;
+ if (rdev->supply)
+ input_uV = _regulator_get_voltage(rdev->supply);
+ if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0)
return;
@@ -598,20 +632,17 @@ static int suspend_set_state(struct regulator_dev *rdev,
*/
if (!rstate->enabled && !rstate->disabled) {
if (can_set_state)
- printk(KERN_WARNING "%s: No configuration for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_warn(rdev, "No configuration\n");
return 0;
}
if (rstate->enabled && rstate->disabled) {
- printk(KERN_ERR "%s: invalid configuration for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "invalid configuration\n");
return -EINVAL;
}
if (!can_set_state) {
- printk(KERN_ERR "%s: no way to set suspend state\n",
- __func__);
+ rdev_err(rdev, "no way to set suspend state\n");
return -EINVAL;
}
@@ -620,15 +651,14 @@ static int suspend_set_state(struct regulator_dev *rdev,
else
ret = rdev->desc->ops->set_suspend_disable(rdev);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to enabled/disable\n", __func__);
+ rdev_err(rdev, "failed to enabled/disable\n");
return ret;
}
if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to set voltage\n",
- __func__);
+ rdev_err(rdev, "failed to set voltage\n");
return ret;
}
}
@@ -636,7 +666,7 @@ static int suspend_set_state(struct regulator_dev *rdev,
if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to set mode\n", __func__);
+ rdev_err(rdev, "failed to set mode\n");
return ret;
}
}
@@ -714,29 +744,27 @@ static void print_constraints(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += sprintf(buf + count, "standby");
- printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
+ rdev_info(rdev, "%s\n", buf);
}
static int machine_constraints_voltage(struct regulator_dev *rdev,
struct regulation_constraints *constraints)
{
struct regulator_ops *ops = rdev->desc->ops;
- const char *name = rdev_get_name(rdev);
int ret;
/* do we need to apply the constraint voltage */
if (rdev->constraints->apply_uV &&
- rdev->constraints->min_uV == rdev->constraints->max_uV &&
- ops->set_voltage) {
- ret = ops->set_voltage(rdev,
- rdev->constraints->min_uV, rdev->constraints->max_uV);
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
- __func__,
- rdev->constraints->min_uV, name);
- rdev->constraints = NULL;
- return ret;
- }
+ rdev->constraints->min_uV == rdev->constraints->max_uV) {
+ ret = _regulator_do_set_voltage(rdev,
+ rdev->constraints->min_uV,
+ rdev->constraints->max_uV);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to apply %duV constraint\n",
+ rdev->constraints->min_uV);
+ rdev->constraints = NULL;
+ return ret;
+ }
}
/* constrain machine-level voltage specs to fit
@@ -765,8 +793,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
/* else require explicit machine-level constraints */
if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
- pr_err("%s: %s '%s' voltage constraints\n",
- __func__, "invalid", name);
+ rdev_err(rdev, "invalid voltage constraints\n");
return -EINVAL;
}
@@ -787,22 +814,19 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
/* final: [min_uV..max_uV] valid iff constraints valid */
if (max_uV < min_uV) {
- pr_err("%s: %s '%s' voltage constraints\n",
- __func__, "unsupportable", name);
+ rdev_err(rdev, "unsupportable voltage constraints\n");
return -EINVAL;
}
/* use regulator's subset of machine constraints */
if (constraints->min_uV < min_uV) {
- pr_debug("%s: override '%s' %s, %d -> %d\n",
- __func__, name, "min_uV",
- constraints->min_uV, min_uV);
+ rdev_dbg(rdev, "override min_uV, %d -> %d\n",
+ constraints->min_uV, min_uV);
constraints->min_uV = min_uV;
}
if (constraints->max_uV > max_uV) {
- pr_debug("%s: override '%s' %s, %d -> %d\n",
- __func__, name, "max_uV",
- constraints->max_uV, max_uV);
+ rdev_dbg(rdev, "override max_uV, %d -> %d\n",
+ constraints->max_uV, max_uV);
constraints->max_uV = max_uV;
}
}
@@ -822,26 +846,25 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
* set_mode.
*/
static int set_machine_constraints(struct regulator_dev *rdev,
- struct regulation_constraints *constraints)
+ const struct regulation_constraints *constraints)
{
int ret = 0;
- const char *name;
struct regulator_ops *ops = rdev->desc->ops;
- rdev->constraints = constraints;
-
- name = rdev_get_name(rdev);
+ rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+ GFP_KERNEL);
+ if (!rdev->constraints)
+ return -ENOMEM;
- ret = machine_constraints_voltage(rdev, constraints);
+ ret = machine_constraints_voltage(rdev, rdev->constraints);
if (ret != 0)
goto out;
/* do we need to setup our suspend state */
if (constraints->initial_state) {
- ret = suspend_prepare(rdev, constraints->initial_state);
+ ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to set suspend state for %s\n",
- __func__, name);
+ rdev_err(rdev, "failed to set suspend state\n");
rdev->constraints = NULL;
goto out;
}
@@ -849,17 +872,14 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (constraints->initial_mode) {
if (!ops->set_mode) {
- printk(KERN_ERR "%s: no set_mode operation for %s\n",
- __func__, name);
+ rdev_err(rdev, "no set_mode operation\n");
ret = -EINVAL;
goto out;
}
- ret = ops->set_mode(rdev, constraints->initial_mode);
+ ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
if (ret < 0) {
- printk(KERN_ERR
- "%s: failed to set initial mode for %s: %d\n",
- __func__, name, ret);
+ rdev_err(rdev, "failed to set initial mode: %d\n", ret);
goto out;
}
}
@@ -867,11 +887,11 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
- if ((constraints->always_on || constraints->boot_on) && ops->enable) {
+ if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+ ops->enable) {
ret = ops->enable(rdev);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to enable %s\n",
- __func__, name);
+ rdev_err(rdev, "failed to enable\n");
rdev->constraints = NULL;
goto out;
}
@@ -899,9 +919,8 @@ static int set_supply(struct regulator_dev *rdev,
err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
"supply");
if (err) {
- printk(KERN_ERR
- "%s: could not add device link %s err %d\n",
- __func__, supply_rdev->dev.kobj.name, err);
+ rdev_err(rdev, "could not add device link %s err %d\n",
+ supply_rdev->dev.kobj.name, err);
goto out;
}
rdev->supply = supply_rdev;
@@ -957,10 +976,10 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
continue;
dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
- dev_name(&node->regulator->dev),
- node->regulator->desc->name,
- supply,
- dev_name(&rdev->dev), rdev_get_name(rdev));
+ dev_name(&node->regulator->dev),
+ node->regulator->desc->name,
+ supply,
+ dev_name(&rdev->dev), rdev_get_name(rdev));
return -EBUSY;
}
@@ -1031,8 +1050,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
regulator->dev_attr.show = device_requested_uA_show;
err = device_create_file(dev, &regulator->dev_attr);
if (err < 0) {
- printk(KERN_WARNING "%s: could not add regulator_dev"
- " load sysfs\n", __func__);
+ rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
goto attr_name_err;
}
@@ -1049,9 +1067,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
buf);
if (err) {
- printk(KERN_WARNING
- "%s: could not add device link %s err %d\n",
- __func__, dev->kobj.name, err);
+ rdev_warn(rdev, "could not add device link %s err %d\n",
+ dev->kobj.name, err);
goto link_name_err;
}
}
@@ -1088,7 +1105,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
int ret;
if (id == NULL) {
- printk(KERN_ERR "regulator: get() with no identifier\n");
+ pr_err("get() with no identifier\n");
return regulator;
}
@@ -1122,8 +1139,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* substitute in a dummy regulator so consumers can continue.
*/
if (!has_full_constraints) {
- pr_warning("%s supply %s not found, using dummy regulator\n",
- devname, id);
+ pr_warn("%s supply %s not found, using dummy regulator\n",
+ devname, id);
rdev = dummy_regulator_rdev;
goto found;
}
@@ -1274,8 +1291,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
ret = _regulator_enable(rdev->supply);
mutex_unlock(&rdev->supply->mutex);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to enable %s: %d\n",
- __func__, rdev_get_name(rdev), ret);
+ rdev_err(rdev, "failed to enable: %d\n", ret);
return ret;
}
}
@@ -1302,13 +1318,13 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (ret >= 0) {
delay = ret;
} else {
- printk(KERN_WARNING
- "%s: enable_time() failed for %s: %d\n",
- __func__, rdev_get_name(rdev),
- ret);
+ rdev_warn(rdev, "enable_time() failed: %d\n",
+ ret);
delay = 0;
}
+ trace_regulator_enable(rdev_get_name(rdev));
+
/* Allow the regulator to ramp; it would be useful
* to extend this for bulk operations so that the
* regulators can ramp together. */
@@ -1316,6 +1332,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
+ trace_regulator_enable_delay(rdev_get_name(rdev));
+
if (delay >= 1000) {
mdelay(delay / 1000);
udelay(delay % 1000);
@@ -1323,9 +1341,10 @@ static int _regulator_enable(struct regulator_dev *rdev)
udelay(delay);
}
+ trace_regulator_enable_complete(rdev_get_name(rdev));
+
} else if (ret < 0) {
- printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
- __func__, rdev_get_name(rdev), ret);
+ rdev_err(rdev, "is_enabled() failed: %d\n", ret);
return ret;
}
/* Fallthrough on positive return values - already enabled */
@@ -1367,8 +1386,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
*supply_rdev_ptr = NULL;
if (WARN(rdev->use_count <= 0,
- "unbalanced disables for %s\n",
- rdev_get_name(rdev)))
+ "unbalanced disables for %s\n", rdev_get_name(rdev)))
return -EIO;
/* are we the last user and permitted to disable ? */
@@ -1378,13 +1396,16 @@ static int _regulator_disable(struct regulator_dev *rdev,
/* we are last user */
if (_regulator_can_change_status(rdev) &&
rdev->desc->ops->disable) {
+ trace_regulator_disable(rdev_get_name(rdev));
+
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to disable %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "failed to disable\n");
return ret;
}
+ trace_regulator_disable_complete(rdev_get_name(rdev));
+
_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
NULL);
}
@@ -1451,8 +1472,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
/* ah well, who wants to live forever... */
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to force disable %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "failed to force disable\n");
return ret;
}
/* notify other consumers that power has been forced off */
@@ -1605,6 +1625,62 @@ int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int ret;
+ unsigned int selector;
+
+ trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+
+ if (rdev->desc->ops->set_voltage) {
+ ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
+ &selector);
+
+ if (rdev->desc->ops->list_voltage)
+ selector = rdev->desc->ops->list_voltage(rdev,
+ selector);
+ else
+ selector = -1;
+ } else if (rdev->desc->ops->set_voltage_sel) {
+ int best_val = INT_MAX;
+ int i;
+
+ selector = 0;
+
+ /* Find the smallest voltage that falls within the specified
+ * range.
+ */
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ ret = rdev->desc->ops->list_voltage(rdev, i);
+ if (ret < 0)
+ continue;
+
+ if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+ best_val = ret;
+ selector = i;
+ }
+ }
+
+ if (best_val != INT_MAX) {
+ ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+ selector = best_val;
+ } else {
+ ret = -EINVAL;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (ret == 0)
+ _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
+ NULL);
+
+ trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
+
+ return ret;
+}
+
/**
* regulator_set_voltage - set regulator output voltage
* @regulator: regulator source
@@ -1626,12 +1702,20 @@ int regulator_is_supported_voltage(struct regulator *regulator,
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
struct regulator_dev *rdev = regulator->rdev;
- int ret;
+ int ret = 0;
mutex_lock(&rdev->mutex);
+ /* If we're setting the same range as last time the change
+ * should be a noop (some cpufreq implementations use the same
+ * voltage for multiple frequencies, for example).
+ */
+ if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ goto out;
+
/* sanity check */
- if (!rdev->desc->ops->set_voltage) {
+ if (!rdev->desc->ops->set_voltage &&
+ !rdev->desc->ops->set_voltage_sel) {
ret = -EINVAL;
goto out;
}
@@ -1642,18 +1726,76 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
goto out;
regulator->min_uV = min_uV;
regulator->max_uV = max_uV;
- ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV);
+
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ if (ret < 0)
+ goto out;
+
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
out:
- _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
mutex_unlock(&rdev->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+/**
+ * regulator_sync_voltage - re-apply last regulator output voltage
+ * @regulator: regulator source
+ *
+ * Re-apply the last configured voltage. This is intended to be used
+ * where some external control source the consumer is cooperating with
+ * has caused the configured voltage to change.
+ */
+int regulator_sync_voltage(struct regulator *regulator)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret, min_uV, max_uV;
+
+ mutex_lock(&rdev->mutex);
+
+ if (!rdev->desc->ops->set_voltage &&
+ !rdev->desc->ops->set_voltage_sel) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* This is only going to work if we've had a voltage configured. */
+ if (!regulator->min_uV && !regulator->max_uV) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ min_uV = regulator->min_uV;
+ max_uV = regulator->max_uV;
+
+ /* This should be a paranoia check... */
+ ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+ if (ret < 0)
+ goto out;
+
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ if (ret < 0)
+ goto out;
+
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+
+out:
+ mutex_unlock(&rdev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_sync_voltage);
+
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
- /* sanity check */
+ int sel;
+
+ if (rdev->desc->ops->get_voltage_sel) {
+ sel = rdev->desc->ops->get_voltage_sel(rdev);
+ if (sel < 0)
+ return sel;
+ return rdev->desc->ops->list_voltage(rdev, sel);
+ }
if (rdev->desc->ops->get_voltage)
return rdev->desc->ops->get_voltage(rdev);
else
@@ -1880,21 +2022,20 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
goto out;
/* get output voltage */
- output_uV = rdev->desc->ops->get_voltage(rdev);
+ output_uV = _regulator_get_voltage(rdev);
if (output_uV <= 0) {
- printk(KERN_ERR "%s: invalid output voltage found for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "invalid output voltage found\n");
goto out;
}
/* get input voltage */
- if (rdev->supply && rdev->supply->desc->ops->get_voltage)
- input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
- else
+ input_uV = 0;
+ if (rdev->supply)
+ input_uV = _regulator_get_voltage(rdev->supply);
+ if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
- printk(KERN_ERR "%s: invalid input voltage found for %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "invalid input voltage found\n");
goto out;
}
@@ -1907,16 +2048,14 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
total_uA_load);
ret = regulator_check_mode(rdev, mode);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to get optimum mode for %s @"
- " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
- total_uA_load, input_uV, output_uV);
+ rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+ total_uA_load, input_uV, output_uV);
goto out;
}
ret = rdev->desc->ops->set_mode(rdev, mode);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
- __func__, mode, rdev_get_name(rdev));
+ rdev_err(rdev, "failed to set optimum mode %x\n", mode);
goto out;
}
ret = mode;
@@ -2047,7 +2186,7 @@ int regulator_bulk_enable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+ pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
for (--i; i >= 0; --i)
regulator_disable(consumers[i].consumer);
@@ -2082,8 +2221,7 @@ int regulator_bulk_disable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
- ret);
+ pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
for (--i; i >= 0; --i)
regulator_enable(consumers[i].consumer);
@@ -2166,7 +2304,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
int status = 0;
/* some attributes need specific methods to be displayed */
- if (ops->get_voltage) {
+ if (ops->get_voltage || ops->get_voltage_sel) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -2207,7 +2345,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
return status;
/* constraints need specific supporting methods */
- if (ops->set_voltage) {
+ if (ops->set_voltage || ops->set_voltage_sel) {
status = device_create_file(dev, &dev_attr_min_microvolts);
if (status < 0)
return status;
@@ -2271,6 +2409,23 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
return status;
}
+static void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+#ifdef CONFIG_DEBUG_FS
+ rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
+ if (IS_ERR(rdev->debugfs) || !rdev->debugfs) {
+ rdev_warn(rdev, "Failed to create debugfs directory\n");
+ rdev->debugfs = NULL;
+ return;
+ }
+
+ debugfs_create_u32("use_count", 0444, rdev->debugfs,
+ &rdev->use_count);
+ debugfs_create_u32("open_count", 0444, rdev->debugfs,
+ &rdev->open_count);
+#endif
+}
+
/**
* regulator_register - register regulator
* @regulator_desc: regulator to register
@@ -2282,7 +2437,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
* Returns 0 on success.
*/
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
- struct device *dev, struct regulator_init_data *init_data,
+ struct device *dev, const struct regulator_init_data *init_data,
void *driver_data)
{
static atomic_t regulator_no = ATOMIC_INIT(0);
@@ -2302,6 +2457,22 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (!init_data)
return ERR_PTR(-EINVAL);
+ /* Only one of each should be implemented */
+ WARN_ON(regulator_desc->ops->get_voltage &&
+ regulator_desc->ops->get_voltage_sel);
+ WARN_ON(regulator_desc->ops->set_voltage &&
+ regulator_desc->ops->set_voltage_sel);
+
+ /* If we're using selectors we must implement list_voltage. */
+ if (regulator_desc->ops->get_voltage_sel &&
+ !regulator_desc->ops->list_voltage) {
+ return ERR_PTR(-EINVAL);
+ }
+ if (regulator_desc->ops->set_voltage_sel &&
+ !regulator_desc->ops->list_voltage) {
+ return ERR_PTR(-EINVAL);
+ }
+
rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
if (rdev == NULL)
return ERR_PTR(-ENOMEM);
@@ -2399,6 +2570,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
}
list_add(&rdev->list, &regulator_list);
+
+ rdev_init_debugfs(rdev);
out:
mutex_unlock(&regulator_list_mutex);
return rdev;
@@ -2431,12 +2604,16 @@ void regulator_unregister(struct regulator_dev *rdev)
return;
mutex_lock(&regulator_list_mutex);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(rdev->debugfs);
+#endif
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
if (rdev->supply)
sysfs_remove_link(&rdev->dev.kobj, "supply");
device_unregister(&rdev->dev);
+ kfree(rdev->constraints);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -2465,8 +2642,7 @@ int regulator_suspend_prepare(suspend_state_t state)
mutex_unlock(&rdev->mutex);
if (ret < 0) {
- printk(KERN_ERR "%s: failed to prepare %s\n",
- __func__, rdev_get_name(rdev));
+ rdev_err(rdev, "failed to prepare\n");
goto out;
}
}
@@ -2572,10 +2748,16 @@ static int __init regulator_init(void)
{
int ret;
- printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
-
ret = class_register(&regulator_class);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_root = debugfs_create_dir("regulator", NULL);
+ if (IS_ERR(debugfs_root) || !debugfs_root) {
+ pr_warn("regulator: Failed to create debugfs directory\n");
+ debugfs_root = NULL;
+ }
+#endif
+
regulator_dummy_init();
return ret;
@@ -2590,7 +2772,6 @@ static int __init regulator_init_complete(void)
struct regulator_ops *ops;
struct regulation_constraints *c;
int enabled, ret;
- const char *name;
mutex_lock(&regulator_list_mutex);
@@ -2602,8 +2783,6 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- name = rdev_get_name(rdev);
-
if (!ops->disable || (c && c->always_on))
continue;
@@ -2624,13 +2803,10 @@ static int __init regulator_init_complete(void)
if (has_full_constraints) {
/* We log since this may kill the system if it
* goes wrong. */
- printk(KERN_INFO "%s: disabling %s\n",
- __func__, name);
+ rdev_info(rdev, "disabling\n");
ret = ops->disable(rdev);
if (ret != 0) {
- printk(KERN_ERR
- "%s: couldn't disable %s: %d\n",
- __func__, name, ret);
+ rdev_err(rdev, "couldn't disable: %d\n", ret);
}
} else {
/* The intention is that in future we will
@@ -2638,9 +2814,7 @@ static int __init regulator_init_complete(void)
* so warn even if we aren't going to do
* anything here.
*/
- printk(KERN_WARNING
- "%s: incomplete constraints, leaving %s on\n",
- __func__, name);
+ rdev_warn(rdev, "incomplete constraints, leaving on\n");
}
unlock:
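The core changes above add a selector-based path: a driver may now provide set_voltage_sel()/get_voltage_sel() together with a mandatory list_voltage(), and _regulator_do_set_voltage() walks the table to pick the lowest in-range entry itself. A minimal sketch of such a driver's ops; the chip, its four-entry voltage table and the stubbed register accesses are invented:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Hypothetical voltage table; not taken from any real chip. */
static const int example_voltages[] = {
	1200000, 1800000, 2500000, 3300000,
};

static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= ARRAY_SIZE(example_voltages))
		return -EINVAL;
	return example_voltages[selector];
}

static int example_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	/* A real driver would write 'selector' into its VSEL register. */
	return 0;
}

static int example_get_voltage_sel(struct regulator_dev *rdev)
{
	/* A real driver would read the VSEL register back here. */
	return 0;
}

static struct regulator_ops example_ops = {
	.list_voltage	 = example_list_voltage,
	.set_voltage_sel = example_set_voltage_sel,
	.get_voltage_sel = example_get_voltage_sel,
};

static struct regulator_desc example_desc = {
	.name		= "example-ldo",
	.id		= 0,
	.n_voltages	= ARRAY_SIZE(example_voltages),
	.ops		= &example_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

Registration still goes through regulator_register(); per the checks added above, supplying either *_sel op without list_voltage() makes registration fail with -EINVAL, and consumers whose supply was retuned externally can re-apply their last request with the new regulator_sync_voltage().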
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f8c4661a7a81..362e08221085 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,7 +107,7 @@ static inline int check_range(struct da903x_regulator_info *info,
/* DA9030/DA9034 common operations */
static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
struct device *da9034_dev = to_da903x_dev(rdev);
@@ -119,6 +119,7 @@ static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
}
val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+ *selector = val;
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
@@ -187,7 +188,8 @@ static int da903x_list_voltage(struct regulator_dev *rdev, unsigned selector)
/* DA9030 specific operations */
static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
struct device *da903x_dev = to_da903x_dev(rdev);
@@ -200,6 +202,7 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
}
val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+ *selector = val;
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
@@ -214,7 +217,8 @@ static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
}
static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
struct device *da903x_dev = to_da903x_dev(rdev);
@@ -234,6 +238,7 @@ static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
val = (min_uV - thresh + info->step_uV - 1) / info->step_uV;
}
+ *selector = val;
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
@@ -263,7 +268,7 @@ static int da9030_get_ldo14_voltage(struct regulator_dev *rdev)
/* DA9034 specific operations */
static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
struct device *da9034_dev = to_da903x_dev(rdev);
@@ -276,6 +281,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
}
val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+ *selector = val;
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
@@ -289,7 +295,7 @@ static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
}
static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
struct device *da9034_dev = to_da903x_dev(rdev);
@@ -302,6 +308,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
+ *selector = val;
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index b8cc6389a541..e4b3592e8176 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -58,7 +58,9 @@ out:
return data;
}
-static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
+static int isl6271a_set_voltage(struct regulator_dev *dev,
+ int minuV, int maxuV,
+ unsigned *selector)
{
struct isl_pmic *pmic = rdev_get_drvdata(dev);
int vsel, err, data;
@@ -78,6 +80,8 @@ static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
/* Convert the microvolts to data for the chip */
data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP;
+ *selector = data;
+
mutex_lock(&pmic->mtx);
err = i2c_smbus_write_byte(pmic->client, data);
@@ -169,7 +173,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
init_data, pmic);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
- err = PTR_ERR(pmic->rdev);
+ err = PTR_ERR(pmic->rdev[i]);
goto error;
}
}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3bb82b624e19..0f22ef12601c 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -168,7 +168,8 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
}
static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned int *selector)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -187,6 +188,8 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
+ *selector = val;
+
return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
val << LDO_VOL_CONTR_SHIFT(ldo));
@@ -256,7 +259,8 @@ static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
}
static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned int *selector)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3971_DCDC1;
@@ -277,6 +281,8 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
+ *selector = val;
+
ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
BUCK_TARGET_VOL_MASK, val);
if (ret)
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index e07062fd0b42..6aa1b506fb5d 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -292,7 +292,8 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
}
static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned int *selector)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -313,6 +314,8 @@ static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
return -EINVAL;
+ *selector = val;
+
shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
@@ -416,7 +419,8 @@ static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
}
static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned int *selector)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
int buck = rdev_get_id(dev) - LP3972_DCDC1;
@@ -438,6 +442,8 @@ static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
vol_map[val] > max_vol)
return -EINVAL;
+ *selector = val;
+
ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
LP3972_BUCK_VOL_MASK, val);
if (ret)
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 559cfa271a44..3f49512c5134 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -63,12 +63,12 @@ static int max1586_v3_calc_voltage(struct max1586_data *max1586,
return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
}
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
unsigned range_uV = max1586->max_uV - max1586->min_uV;
- unsigned selector;
u8 v3_prog;
if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
@@ -76,15 +76,15 @@ static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
if (min_uV < max1586->min_uV)
min_uV = max1586->min_uV;
- selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
+ *selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
range_uV - 1) / range_uV;
- if (max1586_v3_calc_voltage(max1586, selector) > max_uV)
+ if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
return -EINVAL;
dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
- max1586_v3_calc_voltage(max1586, selector) / 1000);
+ max1586_v3_calc_voltage(max1586, *selector) / 1000);
- v3_prog = I2C_V3_SELECT | (u8) selector;
+ v3_prog = I2C_V3_SELECT | (u8) *selector;
return i2c_smbus_write_byte(client, v3_prog);
}
@@ -110,10 +110,10 @@ static int max1586_v6_calc_voltage(unsigned selector)
return voltages_uv[selector];
}
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned int *selector)
{
struct i2c_client *client = rdev_get_drvdata(rdev);
- unsigned selector;
u8 v6_prog;
if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
@@ -122,21 +122,21 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
return -EINVAL;
if (min_uV < 1800000)
- selector = 0;
+ *selector = 0;
else if (min_uV < 2500000)
- selector = 1;
+ *selector = 1;
else if (min_uV < 3000000)
- selector = 2;
+ *selector = 2;
else if (min_uV >= 3000000)
- selector = 3;
+ *selector = 3;
- if (max1586_v6_calc_voltage(selector) > max_uV)
+ if (max1586_v6_calc_voltage(*selector) > max_uV)
return -EINVAL;
dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
- max1586_v6_calc_voltage(selector) / 1000);
+ max1586_v6_calc_voltage(*selector) / 1000);
- v6_prog = I2C_V6_SELECT | (u8) selector;
+ v6_prog = I2C_V6_SELECT | (u8) *selector;
return i2c_smbus_write_byte(client, v6_prog);
}
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 6b60a9c0366b..30eb9e54f7ec 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -155,7 +155,7 @@ static int max8649_get_voltage(struct regulator_dev *rdev)
}
static int max8649_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
@@ -168,6 +168,7 @@ static int max8649_set_voltage(struct regulator_dev *rdev,
data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
/ MAX8649_DCDC_STEP;
mask = MAX8649_VOL_MASK;
+ *selector = data & mask;
return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
}
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index c570e6eb0db2..33f5d9a492ef 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -141,7 +141,8 @@ static int max8660_dcdc_get(struct regulator_dev *rdev)
return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
}
-static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned int *s)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 reg, selector, bits;
@@ -154,6 +155,7 @@ static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
/ MAX8660_DCDC_STEP;
+ *s = selector;
ret = max8660_dcdc_list(rdev, selector);
if (ret < 0 || ret > max_uV)
@@ -196,7 +198,8 @@ static int max8660_ldo5_get(struct regulator_dev *rdev)
return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
}
-static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned int *s)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 selector;
@@ -213,6 +216,8 @@ static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
if (ret < 0 || ret > max_uV)
return -EINVAL;
+ *s = selector;
+
ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
if (ret)
return ret;
@@ -270,7 +275,8 @@ static int max8660_ldo67_get(struct regulator_dev *rdev)
return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
}
-static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned int *s)
{
struct max8660 *max8660 = rdev_get_drvdata(rdev);
u8 selector;
@@ -288,6 +294,8 @@ static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
if (ret < 0 || ret > max_uV)
return -EINVAL;
+ *s = selector;
+
if (rdev_get_id(rdev) == MAX8660_V6)
return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
else
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 552cad85ae5a..8ae147549c6a 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -55,7 +55,7 @@ static int max8925_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int max8925_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned int *selector)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
@@ -66,6 +66,7 @@ static int max8925_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+ *selector = data;
data <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 0d5dda4fd911..a8f4ecfb0843 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -133,7 +133,7 @@ static int max8952_get_voltage(struct regulator_dev *rdev)
}
static int max8952_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
s8 vid = -1, i;
@@ -156,6 +156,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
max8952->vid0 = (vid % 2 == 1);
max8952->vid1 = (((vid >> 1) % 2) == 1);
+ *selector = vid;
gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
} else
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5c20756db607..0ec49ca527a8 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -304,7 +304,7 @@ static int max8998_get_voltage(struct regulator_dev *rdev)
}
static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
@@ -331,6 +331,8 @@ static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
if (desc->min + desc->step*i > max_vol)
return -EINVAL;
+ *selector = i;
+
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
@@ -352,7 +354,7 @@ static inline void buck2_gpio_set(int gpio, int v)
}
static int max8998_set_voltage_buck(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct max8998_platform_data *pdata =
@@ -384,6 +386,8 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
if (desc->min + desc->step*i > max_vol)
return -EINVAL;
+ *selector = i;
+
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
@@ -420,6 +424,9 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
}
}
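+	/* a register rewrite is needed below; refuse it when the platform
+	 * has locked the buck voltages to the predefined values */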
+ if (pdata->buck_voltage_lock)
+ return -EINVAL;
+
	/* no predefined regulator found */
max8998->buck1_idx = (buck1_last_val % 2) + 2;
dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
@@ -447,18 +454,26 @@ buck1_exit:
"BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
, i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
if (gpio_is_valid(pdata->buck2_set3)) {
- if (max8998->buck2_vol[0] == i) {
- max8998->buck1_idx = 0;
- buck2_gpio_set(pdata->buck2_set3, 0);
- } else {
- max8998->buck1_idx = 1;
- ret = max8998_get_voltage_register(rdev, &reg,
- &shift,
- &mask);
- ret = max8998_write_reg(i2c, reg, i);
- max8998->buck2_vol[1] = i;
- buck2_gpio_set(pdata->buck2_set3, 1);
+
+		/* check if the requested voltage value is already defined */
+ for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
+ if (max8998->buck2_vol[j] == i) {
+ max8998->buck2_idx = j;
+ buck2_gpio_set(pdata->buck2_set3, j);
+ goto buck2_exit;
+ }
}
+
+ if (pdata->buck_voltage_lock)
+ return -EINVAL;
+
+ max8998_get_voltage_register(rdev,
+ &reg, &shift, &mask);
+ ret = max8998_write_reg(i2c, reg, i);
+ max8998->buck2_vol[max8998->buck2_idx] = i;
+ buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
+buck2_exit:
dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
gpio_get_value(pdata->buck2_set3));
} else {
@@ -703,6 +718,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, max8998);
i2c = max8998->iodev->i2c;
+ max8998->buck1_idx = pdata->buck1_default_idx;
+ max8998->buck2_idx = pdata->buck2_default_idx;
+
	/* NOTE: */
	/* A WARN_ON will be displayed for any unused GPIO that is not */
	/* marked as -1 (and is therefore equal to 0). */
@@ -735,23 +753,46 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- != (pdata->buck1_max_voltage1 / 1000))
+ < (pdata->buck1_voltage1 / 1000))
i++;
- printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
max8998->buck1_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
+ if (ret)
+ return ret;
/* Set predefined value for BUCK1 register 2 */
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- != (pdata->buck1_max_voltage2 / 1000))
+ < (pdata->buck1_voltage2 / 1000))
i++;
max8998->buck1_vol[1] = i;
- printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
- ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i)
- + ret;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
+ if (ret)
+ return ret;
+
+ /* Set predefined value for BUCK1 register 3 */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ < (pdata->buck1_voltage3 / 1000))
+ i++;
+
+ max8998->buck1_vol[2] = i;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
+ if (ret)
+ return ret;
+
+ /* Set predefined value for BUCK1 register 4 */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ < (pdata->buck1_voltage4 / 1000))
+ i++;
+
+ max8998->buck1_vol[3] = i;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
if (ret)
return ret;
@@ -768,18 +809,28 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
gpio_direction_output(pdata->buck2_set3,
max8998->buck2_idx & 0x1);
- /* BUCK2 - set preset default voltage value to buck2_vol[0] */
+ /* BUCK2 register 1 */
i = 0;
while (buck12_voltage_map_desc.min +
buck12_voltage_map_desc.step*i
- != (pdata->buck2_max_voltage / 1000))
+ < (pdata->buck2_voltage1 / 1000))
i++;
- printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
max8998->buck2_vol[0] = i;
ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
if (ret)
return ret;
+ /* BUCK2 register 2 */
+ i = 0;
+ while (buck12_voltage_map_desc.min +
+ buck12_voltage_map_desc.step*i
+ < (pdata->buck2_voltage2 / 1000))
+ i++;
+ printk(KERN_ERR "i2:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
+ max8998->buck2_vol[1] = i;
+ ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
+ if (ret)
+ return ret;
}
for (i = 0; i < pdata->num_regulators; i++) {
@@ -831,6 +882,12 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id max8998_pmic_id[] = {
+ { "max8998-pmic", TYPE_MAX8998 },
+ { "lp3974-pmic", TYPE_LP3974 },
+ { }
+};
+
static struct platform_driver max8998_pmic_driver = {
.driver = {
.name = "max8998-pmic",
@@ -838,6 +895,7 @@ static struct platform_driver max8998_pmic_driver = {
},
.probe = max8998_pmic_probe,
.remove = __devexit_p(max8998_pmic_remove),
+ .id_table = max8998_pmic_id,
};
static int __init max8998_pmic_init(void)
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ecd99f59dba8..3e5d0c3b4e53 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -1,6 +1,7 @@
/*
* Regulator Driver for Freescale MC13783 PMIC
*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
* Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
*
@@ -17,6 +18,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
+#include "mc13xxx.h"
#define MC13783_REG_SWITCHERS5 29
#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
@@ -89,154 +91,106 @@
#define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15)
-struct mc13783_regulator {
- struct regulator_desc desc;
- int reg;
- int enable_bit;
- int vsel_reg;
- int vsel_shift;
- int vsel_mask;
- int const *voltages;
-};
-
/* Voltage Values */
-static const int const mc13783_sw3_val[] = {
+static const int mc13783_sw3_val[] = {
5000000, 5000000, 5000000, 5500000,
};
-static const int const mc13783_vaudio_val[] = {
+static const int mc13783_vaudio_val[] = {
2775000,
};
-static const int const mc13783_viohi_val[] = {
+static const int mc13783_viohi_val[] = {
2775000,
};
-static const int const mc13783_violo_val[] = {
+static const int mc13783_violo_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int const mc13783_vdig_val[] = {
+static const int mc13783_vdig_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int const mc13783_vgen_val[] = {
+static const int mc13783_vgen_val[] = {
1200000, 1300000, 1500000, 1800000,
1100000, 2000000, 2775000, 2400000,
};
-static const int const mc13783_vrfdig_val[] = {
+static const int mc13783_vrfdig_val[] = {
1200000, 1500000, 1800000, 1875000,
};
-static const int const mc13783_vrfref_val[] = {
+static const int mc13783_vrfref_val[] = {
2475000, 2600000, 2700000, 2775000,
};
-static const int const mc13783_vrfcp_val[] = {
+static const int mc13783_vrfcp_val[] = {
2700000, 2775000,
};
-static const int const mc13783_vsim_val[] = {
+static const int mc13783_vsim_val[] = {
1800000, 2900000, 3000000,
};
-static const int const mc13783_vesim_val[] = {
+static const int mc13783_vesim_val[] = {
1800000, 2900000,
};
-static const int const mc13783_vcam_val[] = {
+static const int mc13783_vcam_val[] = {
1500000, 1800000, 2500000, 2550000,
2600000, 2750000, 2800000, 3000000,
};
-static const int const mc13783_vrfbg_val[] = {
+static const int mc13783_vrfbg_val[] = {
1250000,
};
-static const int const mc13783_vvib_val[] = {
+static const int mc13783_vvib_val[] = {
1300000, 1800000, 2000000, 3000000,
};
-static const int const mc13783_vmmc_val[] = {
+static const int mc13783_vmmc_val[] = {
1600000, 1800000, 2000000, 2600000,
2700000, 2800000, 2900000, 3000000,
};
-static const int const mc13783_vrf_val[] = {
+static const int mc13783_vrf_val[] = {
1500000, 1875000, 2700000, 2775000,
};
-static const int const mc13783_gpo_val[] = {
+static const int mc13783_gpo_val[] = {
3100000,
};
-static const int const mc13783_pwgtdrv_val[] = {
+static const int mc13783_pwgtdrv_val[] = {
5500000,
};
-static struct regulator_ops mc13783_regulator_ops;
-static struct regulator_ops mc13783_fixed_regulator_ops;
static struct regulator_ops mc13783_gpo_regulator_ops;
-#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \
- [MC13783_ ## prefix ## _ ## _name] = { \
- .desc = { \
- .name = #prefix "_" #_name, \
- .n_voltages = ARRAY_SIZE(_voltages), \
- .ops = &mc13783_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = MC13783_ ## prefix ## _ ## _name, \
- .owner = THIS_MODULE, \
- }, \
- .reg = MC13783_REG_ ## _reg, \
- .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
- .vsel_reg = MC13783_REG_ ## _vsel_reg, \
- .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
- .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
- .voltages = _voltages, \
- }
+#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+ mc13xxx_regulator_ops)
-#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \
- [MC13783_ ## prefix ## _ ## _name] = { \
- .desc = { \
- .name = #prefix "_" #_name, \
- .n_voltages = ARRAY_SIZE(_voltages), \
- .ops = &mc13783_fixed_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = MC13783_ ## prefix ## _ ## _name, \
- .owner = THIS_MODULE, \
- }, \
- .reg = MC13783_REG_ ## _reg, \
- .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
- }
+#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+ mc13xxx_fixed_regulator_ops)
-#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \
- [MC13783_ ## prefix ## _ ## _name] = { \
- .desc = { \
- .name = #prefix "_" #_name, \
- .n_voltages = ARRAY_SIZE(_voltages), \
- .ops = &mc13783_gpo_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = MC13783_ ## prefix ## _ ## _name, \
- .owner = THIS_MODULE, \
- }, \
- .reg = MC13783_REG_ ## _reg, \
- .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
- }
+#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+ mc13783_gpo_regulator_ops)
#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+ MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
+ MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
-static struct mc13783_regulator mc13783_regulators[] = {
+static struct mc13xxx_regulator mc13783_regulators[] = {
MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
- MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
- MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
mc13783_violo_val),
MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
@@ -255,7 +209,7 @@ static struct mc13783_regulator mc13783_regulators[] = {
mc13783_vesim_val),
MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
mc13783_vcam_val),
- MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
mc13783_vvib_val),
MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
@@ -266,215 +220,24 @@ static struct mc13783_regulator mc13783_regulators[] = {
mc13783_vmmc_val),
MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
mc13783_vmmc_val),
- MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
- MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
-};
-
-struct mc13783_regulator_priv {
- struct mc13783 *mc13783;
- u32 powermisc_pwgt_state;
- struct regulator_dev *regulators[];
-};
-
-static int mc13783_regulator_enable(struct regulator_dev *rdev)
-{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
- int ret;
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit,
- mc13783_regulators[id].enable_bit);
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static int mc13783_regulator_disable(struct regulator_dev *rdev)
-{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
- int ret;
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit, 0);
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
-{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
- int ret, id = rdev_get_id(rdev);
- unsigned int val;
-
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
- mc13783_unlock(priv->mc13783);
-
- if (ret)
- return ret;
-
- return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- int id = rdev_get_id(rdev);
-
- if (selector >= mc13783_regulators[id].desc.n_voltages)
- return -EINVAL;
-
- return mc13783_regulators[id].voltages[selector];
-}
-
-static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int reg_id = rdev_get_id(rdev);
- int i;
- int bestmatch;
- int bestindex;
-
- /*
- * Locate the minimum voltage fitting the criteria on
- * this regulator. The switchable voltages are not
- * in strict falling order so we need to check them
- * all for the best match.
- */
- bestmatch = INT_MAX;
- bestindex = -1;
- for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
- if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
- mc13783_regulators[reg_id].voltages[i] < bestmatch) {
- bestmatch = mc13783_regulators[reg_id].voltages[i];
- bestindex = i;
- }
- }
-
- if (bestindex < 0 || bestmatch > max_uV) {
- dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
- min_uV, max_uV);
- return -EINVAL;
- }
- return bestindex;
-}
-
-static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
- int value, id = rdev_get_id(rdev);
- int ret;
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
- __func__, id, min_uV, max_uV);
-
- /* Find the best index */
- value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
- dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
- if (value < 0)
- return value;
-
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
- mc13783_regulators[id].vsel_mask,
- value << mc13783_regulators[id].vsel_shift);
- mc13783_unlock(priv->mc13783);
-
- return ret;
-}
-
-static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
-{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
- int ret, id = rdev_get_id(rdev);
- unsigned int val;
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_read(priv->mc13783,
- mc13783_regulators[id].vsel_reg, &val);
- mc13783_unlock(priv->mc13783);
-
- if (ret)
- return ret;
-
- val = (val & mc13783_regulators[id].vsel_mask)
- >> mc13783_regulators[id].vsel_shift;
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
-
- BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages);
-
- return mc13783_regulators[id].voltages[val];
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
- .enable = mc13783_regulator_enable,
- .disable = mc13783_regulator_disable,
- .is_enabled = mc13783_regulator_is_enabled,
- .list_voltage = mc13783_regulator_list_voltage,
- .set_voltage = mc13783_regulator_set_voltage,
- .get_voltage = mc13783_regulator_get_voltage,
-};
-
-static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
- __func__, id, min_uV, max_uV);
-
- if (min_uV >= mc13783_regulators[id].voltages[0] &&
- max_uV <= mc13783_regulators[id].voltages[0])
- return 0;
- else
- return -EINVAL;
-}
-
-static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
-{
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- return mc13783_regulators[id].voltages[0];
-}
-
-static struct regulator_ops mc13783_fixed_regulator_ops = {
- .enable = mc13783_regulator_enable,
- .disable = mc13783_regulator_disable,
- .is_enabled = mc13783_regulator_is_enabled,
- .list_voltage = mc13783_regulator_list_voltage,
- .set_voltage = mc13783_fixed_regulator_set_voltage,
- .get_voltage = mc13783_fixed_regulator_get_voltage,
+ MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
};
-static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
- u32 val)
+static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+ u32 val)
{
- struct mc13783 *mc13783 = priv->mc13783;
+ struct mc13xxx *mc13783 = priv->mc13xxx;
int ret;
u32 valread;
BUG_ON(val & ~mask);
- ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+ ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
if (ret)
return ret;
@@ -489,34 +252,36 @@ static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
priv->powermisc_pwgt_state;
- return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+ return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
}
static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
int ret;
- u32 en_val = mc13783_regulators[id].enable_bit;
+ u32 en_val = mc13xxx_regulators[id].enable_bit;
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
/* Power Gate enable value is 0 */
- if (id == MC13783_REGU_PWGT1SPI ||
- id == MC13783_REGU_PWGT2SPI)
+ if (id == MC13783_REG_PWGT1SPI ||
+ id == MC13783_REG_PWGT2SPI)
en_val = 0;
- mc13783_lock(priv->mc13783);
- ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
en_val);
- mc13783_unlock(priv->mc13783);
+ mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
int ret;
u32 dis_val = 0;
@@ -524,27 +289,28 @@ static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
/* Power Gate disable value is 1 */
- if (id == MC13783_REGU_PWGT1SPI ||
- id == MC13783_REGU_PWGT2SPI)
- dis_val = mc13783_regulators[id].enable_bit;
+ if (id == MC13783_REG_PWGT1SPI ||
+ id == MC13783_REG_PWGT2SPI)
+ dis_val = mc13xxx_regulators[id].enable_bit;
- mc13783_lock(priv->mc13783);
- ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
dis_val);
- mc13783_unlock(priv->mc13783);
+ mc13xxx_unlock(priv->mc13xxx);
return ret;
}
static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
{
- struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int ret, id = rdev_get_id(rdev);
unsigned int val;
- mc13783_lock(priv->mc13783);
- ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
- mc13783_unlock(priv->mc13783);
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
if (ret)
return ret;
@@ -554,22 +320,22 @@ static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
(priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
- return (val & mc13783_regulators[id].enable_bit) != 0;
+ return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
static struct regulator_ops mc13783_gpo_regulator_ops = {
.enable = mc13783_gpo_regulator_enable,
.disable = mc13783_gpo_regulator_disable,
.is_enabled = mc13783_gpo_regulator_is_enabled,
- .list_voltage = mc13783_regulator_list_voltage,
- .set_voltage = mc13783_fixed_regulator_set_voltage,
- .get_voltage = mc13783_fixed_regulator_get_voltage,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13xxx_fixed_regulator_set_voltage,
+ .get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
{
- struct mc13783_regulator_priv *priv;
- struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+ struct mc13xxx_regulator_priv *priv;
+ struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
struct mc13783_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
struct mc13783_regulator_init_data *init_data;
@@ -583,7 +349,8 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->mc13783 = mc13783;
+ priv->mc13xxx_regulators = mc13783_regulators;
+ priv->mc13xxx = mc13783;
for (i = 0; i < pdata->num_regulators; i++) {
init_data = &pdata->regulators[i];
@@ -613,7 +380,7 @@ err:
static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
{
- struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13783_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
int i;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
new file mode 100644
index 000000000000..1b8f7398a4a8
--- /dev/null
+++ b/drivers/regulator/mc13892-regulator.c
@@ -0,0 +1,635 @@
+/*
+ * Regulator Driver for Freescale MC13892 PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+#define MC13892_REVISION 7
+
+#define MC13892_POWERCTL0 13
+#define MC13892_POWERCTL0_USEROFFSPI 3
+#define MC13892_POWERCTL0_VCOINCELLVSEL 20
+#define MC13892_POWERCTL0_VCOINCELLVSEL_M (7<<20)
+#define MC13892_POWERCTL0_VCOINCELLEN (1<<23)
+
+#define MC13892_SWITCHERS0_SWxHI (1<<23)
+
+#define MC13892_SWITCHERS0 24
+#define MC13892_SWITCHERS0_SW1VSEL 0
+#define MC13892_SWITCHERS0_SW1VSEL_M (0x1f<<0)
+#define MC13892_SWITCHERS0_SW1HI (1<<23)
+#define MC13892_SWITCHERS0_SW1EN 0
+
+#define MC13892_SWITCHERS1 25
+#define MC13892_SWITCHERS1_SW2VSEL 0
+#define MC13892_SWITCHERS1_SW2VSEL_M (0x1f<<0)
+#define MC13892_SWITCHERS1_SW2HI (1<<23)
+#define MC13892_SWITCHERS1_SW2EN 0
+
+#define MC13892_SWITCHERS2 26
+#define MC13892_SWITCHERS2_SW3VSEL 0
+#define MC13892_SWITCHERS2_SW3VSEL_M (0x1f<<0)
+#define MC13892_SWITCHERS2_SW3HI (1<<23)
+#define MC13892_SWITCHERS2_SW3EN 0
+
+#define MC13892_SWITCHERS3 27
+#define MC13892_SWITCHERS3_SW4VSEL 0
+#define MC13892_SWITCHERS3_SW4VSEL_M (0x1f<<0)
+#define MC13892_SWITCHERS3_SW4HI (1<<23)
+#define MC13892_SWITCHERS3_SW4EN 0
+
+#define MC13892_SWITCHERS4 28
+#define MC13892_SWITCHERS4_SW1MODE 0
+#define MC13892_SWITCHERS4_SW1MODE_AUTO (8<<0)
+#define MC13892_SWITCHERS4_SW1MODE_M (0xf<<0)
+#define MC13892_SWITCHERS4_SW2MODE 10
+#define MC13892_SWITCHERS4_SW2MODE_AUTO (8<<10)
+#define MC13892_SWITCHERS4_SW2MODE_M (0xf<<10)
+
+#define MC13892_SWITCHERS5 29
+#define MC13892_SWITCHERS5_SW3MODE 0
+#define MC13892_SWITCHERS5_SW3MODE_AUTO (8<<0)
+#define MC13892_SWITCHERS5_SW3MODE_M (0xf<<0)
+#define MC13892_SWITCHERS5_SW4MODE 8
+#define MC13892_SWITCHERS5_SW4MODE_AUTO (8<<8)
+#define MC13892_SWITCHERS5_SW4MODE_M (0xf<<8)
+#define MC13892_SWITCHERS5_SWBSTEN (1<<20)
+
+#define MC13892_REGULATORSETTING0 30
+#define MC13892_REGULATORSETTING0_VGEN1VSEL 0
+#define MC13892_REGULATORSETTING0_VDIGVSEL 4
+#define MC13892_REGULATORSETTING0_VGEN2VSEL 6
+#define MC13892_REGULATORSETTING0_VPLLVSEL 9
+#define MC13892_REGULATORSETTING0_VUSB2VSEL 11
+#define MC13892_REGULATORSETTING0_VGEN3VSEL 14
+#define MC13892_REGULATORSETTING0_VCAMVSEL 16
+
+#define MC13892_REGULATORSETTING0_VGEN1VSEL_M (3<<0)
+#define MC13892_REGULATORSETTING0_VDIGVSEL_M (3<<4)
+#define MC13892_REGULATORSETTING0_VGEN2VSEL_M (7<<6)
+#define MC13892_REGULATORSETTING0_VPLLVSEL_M (3<<9)
+#define MC13892_REGULATORSETTING0_VUSB2VSEL_M (3<<11)
+#define MC13892_REGULATORSETTING0_VGEN3VSEL_M (1<<14)
+#define MC13892_REGULATORSETTING0_VCAMVSEL_M (3<<16)
+
+#define MC13892_REGULATORSETTING1 31
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL 2
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL 4
+#define MC13892_REGULATORSETTING1_VSDVSEL 6
+
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M (3<<2)
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M (3<<4)
+#define MC13892_REGULATORSETTING1_VSDVSEL_M (7<<6)
+
+#define MC13892_REGULATORMODE0 32
+#define MC13892_REGULATORMODE0_VGEN1EN (1<<0)
+#define MC13892_REGULATORMODE0_VGEN1STDBY (1<<1)
+#define MC13892_REGULATORMODE0_VGEN1MODE (1<<2)
+#define MC13892_REGULATORMODE0_VIOHIEN (1<<3)
+#define MC13892_REGULATORMODE0_VIOHISTDBY (1<<4)
+#define MC13892_REGULATORMODE0_VIOHIMODE (1<<5)
+#define MC13892_REGULATORMODE0_VDIGEN (1<<9)
+#define MC13892_REGULATORMODE0_VDIGSTDBY (1<<10)
+#define MC13892_REGULATORMODE0_VDIGMODE (1<<11)
+#define MC13892_REGULATORMODE0_VGEN2EN (1<<12)
+#define MC13892_REGULATORMODE0_VGEN2STDBY (1<<13)
+#define MC13892_REGULATORMODE0_VGEN2MODE (1<<14)
+#define MC13892_REGULATORMODE0_VPLLEN (1<<15)
+#define MC13892_REGULATORMODE0_VPLLSTDBY (1<<16)
+#define MC13892_REGULATORMODE0_VPLLMODE (1<<17)
+#define MC13892_REGULATORMODE0_VUSB2EN (1<<18)
+#define MC13892_REGULATORMODE0_VUSB2STDBY (1<<19)
+#define MC13892_REGULATORMODE0_VUSB2MODE (1<<20)
+
+#define MC13892_REGULATORMODE1 33
+#define MC13892_REGULATORMODE1_VGEN3EN (1<<0)
+#define MC13892_REGULATORMODE1_VGEN3STDBY (1<<1)
+#define MC13892_REGULATORMODE1_VGEN3MODE (1<<2)
+#define MC13892_REGULATORMODE1_VCAMEN (1<<6)
+#define MC13892_REGULATORMODE1_VCAMSTDBY (1<<7)
+#define MC13892_REGULATORMODE1_VCAMMODE (1<<8)
+#define MC13892_REGULATORMODE1_VCAMCONFIGEN (1<<9)
+#define MC13892_REGULATORMODE1_VVIDEOEN (1<<12)
+#define MC13892_REGULATORMODE1_VVIDEOSTDBY (1<<13)
+#define MC13892_REGULATORMODE1_VVIDEOMODE (1<<14)
+#define MC13892_REGULATORMODE1_VAUDIOEN (1<<15)
+#define MC13892_REGULATORMODE1_VAUDIOSTDBY (1<<16)
+#define MC13892_REGULATORMODE1_VAUDIOMODE (1<<17)
+#define MC13892_REGULATORMODE1_VSDEN (1<<18)
+#define MC13892_REGULATORMODE1_VSDSTDBY (1<<19)
+#define MC13892_REGULATORMODE1_VSDMODE (1<<20)
+
+#define MC13892_POWERMISC 34
+#define MC13892_POWERMISC_GPO1EN (1<<6)
+#define MC13892_POWERMISC_GPO2EN (1<<8)
+#define MC13892_POWERMISC_GPO3EN (1<<10)
+#define MC13892_POWERMISC_GPO4EN (1<<12)
+#define MC13892_POWERMISC_PWGT1SPIEN (1<<15)
+#define MC13892_POWERMISC_PWGT2SPIEN (1<<16)
+#define MC13892_POWERMISC_GPO4ADINEN (1<<21)
+
+#define MC13892_POWERMISC_PWGTSPI_M (3 << 15)
+
+#define MC13892_USB1 50
+#define MC13892_USB1_VUSBEN (1<<3)
+
+static const int mc13892_vcoincell[] = {
+ 2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
+ 3200000, 3300000,
+};
+
+static const int mc13892_sw1[] = {
+ 600000, 625000, 650000, 675000, 700000, 725000,
+ 750000, 775000, 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000, 1000000, 1025000,
+ 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+ 1350000, 1375000
+};
+
+static const int mc13892_sw[] = {
+ 600000, 625000, 650000, 675000, 700000, 725000,
+ 750000, 775000, 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000, 1000000, 1025000,
+ 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+ 1350000, 1375000, 1400000, 1425000, 1450000, 1475000,
+ 1500000, 1525000, 1550000, 1575000, 1600000, 1625000,
+ 1650000, 1675000, 1700000, 1725000, 1750000, 1775000,
+ 1800000, 1825000, 1850000, 1875000
+};
+
+static const int mc13892_swbst[] = {
+ 5000000,
+};
+
+static const int mc13892_viohi[] = {
+ 2775000,
+};
+
+static const int mc13892_vpll[] = {
+ 1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vdig[] = {
+ 1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vsd[] = {
+ 1800000, 2000000, 2600000, 2700000,
+ 2800000, 2900000, 3000000, 3150000,
+};
+
+static const int mc13892_vusb2[] = {
+ 2400000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13892_vvideo[] = {
+ 2700000, 2775000, 2500000, 2600000,
+};
+
+static const int mc13892_vaudio[] = {
+ 2300000, 2500000, 2775000, 3000000,
+};
+
+static const int mc13892_vcam[] = {
+ 2500000, 2600000, 2750000, 3000000,
+};
+
+static const int mc13892_vgen1[] = {
+ 1200000, 1500000, 2775000, 3150000,
+};
+
+static const int mc13892_vgen2[] = {
+ 1200000, 1500000, 1600000, 1800000,
+ 2700000, 2800000, 3000000, 3150000,
+};
+
+static const int mc13892_vgen3[] = {
+ 1800000, 2900000,
+};
+
+static const int mc13892_vusb[] = {
+ 3300000,
+};
+
+static const int mc13892_gpo[] = {
+ 2750000,
+};
+
+static const int mc13892_pwgtdrv[] = {
+ 5000000,
+};
+
+static struct regulator_ops mc13892_gpo_regulator_ops;
+/* sw regulators need special care due to the "hi bit" */
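+/* (with SWxHI clear a selector encodes 600 mV + 25 mV * sel, with SWxHI set
+ * it encodes 1100 mV + 25 mV * sel, so the get/set_voltage callbacks below
+ * have to handle the hi bit together with the selector field) */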
+static struct regulator_ops mc13892_sw_regulator_ops;
+
+
+#define MC13892_FIXED_DEFINE(name, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \
+ mc13xxx_fixed_regulator_ops)
+
+#define MC13892_GPO_DEFINE(name, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \
+ mc13892_gpo_regulator_ops)
+
+#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+ mc13892_sw_regulator_ops)
+
+#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+ mc13xxx_regulator_ops)
+
+static struct mc13xxx_regulator mc13892_regulators[] = {
+ MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+ MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+ MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+ MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+ MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+ MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
+ MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
+ MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \
+ mc13892_vpll),
+ MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13892_vdig),
+ MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \
+ mc13892_vsd),
+ MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \
+ mc13892_vusb2),
+ MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \
+ mc13892_vvideo),
+ MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \
+ mc13892_vaudio),
+ MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13892_vcam),
+ MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \
+ mc13892_vgen1),
+ MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \
+ mc13892_vgen2),
+ MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \
+ mc13892_vgen3),
+ MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
+ MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
+ MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+};
+
+static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+ u32 val)
+{
+ struct mc13xxx *mc13892 = priv->mc13xxx;
+ int ret;
+ u32 valread;
+
+ BUG_ON(val & ~mask);
+
+ ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread);
+ if (ret)
+ return ret;
+
+ /* Update the stored state for Power Gates. */
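+	/* (PWGTxSPIEN enable value is 0 -- see the GPO enable callback -- so
+	 * the requested state is cached here and reapplied on every write) */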
+ priv->powermisc_pwgt_state =
+ (priv->powermisc_pwgt_state & ~mask) | val;
+ priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M;
+
+ /* Construct the new register value */
+ valread = (valread & ~mask) | val;
+ /* Overwrite the PWGTxEN with the stored version */
+ valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) |
+ priv->powermisc_pwgt_state;
+
+ return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread);
+}
+
+static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 en_val = mc13892_regulators[id].enable_bit;
+ u32 mask = mc13892_regulators[id].enable_bit;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate enable value is 0 */
+ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+ en_val = 0;
+
+ if (id == MC13892_GPO4)
+ mask |= MC13892_POWERMISC_GPO4ADINEN;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13892_powermisc_rmw(priv, mask, en_val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 dis_val = 0;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate disable value is 1 */
+ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+ dis_val = mc13892_regulators[id].enable_bit;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit,
+ dis_val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ /* Power Gates state is stored in powermisc_pwgt_state
+ * where the meaning of bits is negated */
+ val = (val & ~MC13892_POWERMISC_PWGTSPI_M) |
+ (priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M);
+
+ return (val & mc13892_regulators[id].enable_bit) != 0;
+}
+
+
+static struct regulator_ops mc13892_gpo_regulator_ops = {
+ .enable = mc13892_gpo_regulator_enable,
+ .disable = mc13892_gpo_regulator_disable,
+ .is_enabled = mc13892_gpo_regulator_is_enabled,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13xxx_fixed_regulator_set_voltage,
+ .get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+
+static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val, hi;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx,
+ mc13892_regulators[id].vsel_reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+ if (ret)
+ return ret;
+
+ hi = val & MC13892_SWITCHERS0_SWxHI;
+ val = (val & mc13892_regulators[id].vsel_mask)
+ >> mc13892_regulators[id].vsel_shift;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+ if (hi)
+ val = (25000 * val) + 1100000;
+ else
+ val = (25000 * val) + 600000;
+
+ return val;
+}
+
+static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int hi, value, val, mask, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+ if (value < 0)
+ return value;
+
+	*selector = value;	/* report the chosen index back to the core */
+	value = mc13892_regulators[id].voltages[value];
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx,
+ mc13892_regulators[id].vsel_reg, &val);
+ if (ret)
+ goto err;
+
+ hi = val & MC13892_SWITCHERS0_SWxHI;
+	/* value is in microvolts here (taken from the voltages[] table) */
+	if (value > 1375000)
+		hi = 1;
+	if (value < 1100000)
+		hi = 0;
+
+ if (hi) {
+ value = (value - 1100000) / 25000;
+ value |= MC13892_SWITCHERS0_SWxHI;
+ } else
+ value = (value - 600000) / 25000;
+
+ mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+ mask, value << mc13892_regulators[id].vsel_shift);
+err:
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static struct regulator_ops mc13892_sw_regulator_ops = {
+ .is_enabled = mc13xxx_sw_regulator_is_enabled,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13892_sw_regulator_set_voltage,
+ .get_voltage = mc13892_sw_regulator_get_voltage,
+};
+
+static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int en_val = 0;
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+
+ if (mode == REGULATOR_MODE_FAST)
+ en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
+ MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+
+static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+{
+ struct mc13xxx_regulator_priv *priv;
+ struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
+ struct mc13xxx_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct mc13xxx_regulator_init_data *init_data;
+ int i, ret;
+ u32 val;
+
+ priv = kzalloc(sizeof(*priv) +
+ pdata->num_regulators * sizeof(priv->regulators[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13xxx_regulators = mc13892_regulators;
+ priv->mc13xxx = mc13892;
+
+ mc13xxx_lock(mc13892);
+ ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
+ if (ret)
+ goto err_free;
+
+ /* enable switch auto mode */
+ if ((val & 0x0000FFFF) == 0x45d0) {
+ ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
+ MC13892_SWITCHERS4_SW1MODE_M |
+ MC13892_SWITCHERS4_SW2MODE_M,
+ MC13892_SWITCHERS4_SW1MODE_AUTO |
+ MC13892_SWITCHERS4_SW2MODE_AUTO);
+ if (ret)
+ goto err_free;
+
+ ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
+ MC13892_SWITCHERS5_SW3MODE_M |
+ MC13892_SWITCHERS5_SW4MODE_M,
+ MC13892_SWITCHERS5_SW3MODE_AUTO |
+ MC13892_SWITCHERS5_SW4MODE_AUTO);
+ if (ret)
+ goto err_free;
+ }
+ mc13xxx_unlock(mc13892);
+
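+	/* only VCAM has a configurable (fast) mode, so hook its mode
+	 * callbacks into the shared ops here at probe time */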
+ mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+ = mc13892_vcam_set_mode;
+ mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+ = mc13892_vcam_get_mode;
+ for (i = 0; i < pdata->num_regulators; i++) {
+ init_data = &pdata->regulators[i];
+ priv->regulators[i] = regulator_register(
+ &mc13892_regulators[init_data->id].desc,
+ &pdev->dev, init_data->init_data, priv);
+
+ if (IS_ERR(priv->regulators[i])) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ mc13892_regulators[i].desc.name);
+ ret = PTR_ERR(priv->regulators[i]);
+ goto err;
+ }
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+err:
+ while (--i >= 0)
+ regulator_unregister(priv->regulators[i]);
+
+err_free:
+ mc13xxx_unlock(mc13892);
+ kfree(priv);
+
+ return ret;
+}
+
+static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
+{
+ struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13xxx_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ int i;
+
+ platform_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < pdata->num_regulators; i++)
+ regulator_unregister(priv->regulators[i]);
+
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver mc13892_regulator_driver = {
+ .driver = {
+ .name = "mc13892-regulator",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mc13892_regulator_remove),
+ .probe = mc13892_regulator_probe,
+};
+
+static int __init mc13892_regulator_init(void)
+{
+ return platform_driver_register(&mc13892_regulator_driver);
+}
+subsys_initcall(mc13892_regulator_init);
+
+static void __exit mc13892_regulator_exit(void)
+{
+ platform_driver_unregister(&mc13892_regulator_driver);
+}
+module_exit(mc13892_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
+MODULE_ALIAS("platform:mc13892-regulator");
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
new file mode 100644
index 000000000000..2bb5de1f2421
--- /dev/null
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator Driver for Freescale MC13xxx PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on the mc13783 regulator driver:
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register info taken from Freescale's mc13xxx drivers and the mc13xxx.pdf
+ * datasheet from Freescale.
+ */
+
+#include <linux/mfd/mc13xxx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+ mc13xxx_regulators[id].enable_bit,
+ mc13xxx_regulators[id].enable_bit);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+ mc13xxx_regulators[id].enable_bit, 0);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ return (val & mc13xxx_regulators[id].enable_bit) != 0;
+}
+
+int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ int id = rdev_get_id(rdev);
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+
+ if (selector >= mc13xxx_regulators[id].desc.n_voltages)
+ return -EINVAL;
+
+ return mc13xxx_regulators[id].voltages[selector];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
+
+int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int reg_id = rdev_get_id(rdev);
+ int i;
+ int bestmatch;
+ int bestindex;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ bestmatch = INT_MAX;
+ bestindex = -1;
+ for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
+ if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
+ mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
+ bestmatch = mc13xxx_regulators[reg_id].voltages[i];
+ bestindex = i;
+ }
+ }
+
+ if (bestindex < 0 || bestmatch > max_uV) {
+ dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ return bestindex;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
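
A standalone illustration of the search above, with an invented (deliberately non-monotonic) table: the scan keeps the lowest voltage that still satisfies min_uV, and only afterwards checks that candidate against max_uV.

#include <limits.h>
#include <stdio.h>

/* Invented table, intentionally not sorted, values in microvolts. */
static const int example_voltages[] = { 1200000, 1800000, 1550000, 3300000 };

static int example_best_index(int min_uV, int max_uV)
{
	int i, n = sizeof(example_voltages) / sizeof(example_voltages[0]);
	int bestmatch = INT_MAX, bestindex = -1;

	for (i = 0; i < n; i++) {
		if (example_voltages[i] >= min_uV &&
		    example_voltages[i] < bestmatch) {
			bestmatch = example_voltages[i];
			bestindex = i;
		}
	}
	if (bestindex < 0 || bestmatch > max_uV)
		return -1;
	return bestindex;
}

int main(void)
{
	/* Prints 2: 1550000 is the lowest entry inside [1500000, 1800000]. */
	printf("%d\n", example_best_index(1500000, 1800000));
	return 0;
}
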
+
+static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int value, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+ if (value < 0)
+ return value;
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
+ mc13xxx_regulators[id].vsel_mask,
+ value << mc13xxx_regulators[id].vsel_shift);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ return ret;
+}
+
+static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13xxx_lock(priv->mc13xxx);
+ ret = mc13xxx_reg_read(priv->mc13xxx,
+ mc13xxx_regulators[id].vsel_reg, &val);
+ mc13xxx_unlock(priv->mc13xxx);
+
+ if (ret)
+ return ret;
+
+ val = (val & mc13xxx_regulators[id].vsel_mask)
+ >> mc13xxx_regulators[id].vsel_shift;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+ BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
+
+ return mc13xxx_regulators[id].voltages[val];
+}
+
+struct regulator_ops mc13xxx_regulator_ops = {
+ .enable = mc13xxx_regulator_enable,
+ .disable = mc13xxx_regulator_disable,
+ .is_enabled = mc13xxx_regulator_is_enabled,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13xxx_regulator_set_voltage,
+ .get_voltage = mc13xxx_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
+
+int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ if (mc13xxx_regulators[id].voltages[0] >= min_uV &&
+ mc13xxx_regulators[id].voltages[0] <= max_uV)
+ return 0;
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
+
+int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+ struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ return mc13xxx_regulators[id].voltages[0];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
+
+struct regulator_ops mc13xxx_fixed_regulator_ops = {
+ .enable = mc13xxx_regulator_enable,
+ .disable = mc13xxx_regulator_disable,
+ .is_enabled = mc13xxx_regulator_is_enabled,
+ .list_voltage = mc13xxx_regulator_list_voltage,
+ .set_voltage = mc13xxx_fixed_regulator_set_voltage,
+ .get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
+
+int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
+MODULE_ALIAS("mc13xxx-regulator-core");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
new file mode 100644
index 000000000000..27758267e122
--- /dev/null
+++ b/drivers/regulator/mc13xxx.h
@@ -0,0 +1,101 @@
+/*
+ * mc13xxx.h - regulators for the Freescale mc13xxx PMIC
+ *
+ * Copyright (C) 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_REGULATOR_MC13XXX_H
+#define __LINUX_REGULATOR_MC13XXX_H
+
+#include <linux/regulator/driver.h>
+
+struct mc13xxx_regulator {
+ struct regulator_desc desc;
+ int reg;
+ int enable_bit;
+ int vsel_reg;
+ int vsel_shift;
+ int vsel_mask;
+ int hi_bit;
+ int const *voltages;
+};
+
+struct mc13xxx_regulator_priv {
+ struct mc13xxx *mc13xxx;
+ u32 powermisc_pwgt_state;
+ struct mc13xxx_regulator *mc13xxx_regulators;
+ struct regulator_dev *regulators[];
+};
+
+extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
+extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
+extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
+extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector);
+extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector);
+extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+
+extern struct regulator_ops mc13xxx_regulator_ops;
+extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+
+#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
+ [prefix ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = prefix ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = prefix ## _reg, \
+ .enable_bit = prefix ## _reg ## _ ## _name ## EN, \
+ .vsel_reg = prefix ## _vsel_reg, \
+ .vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
+ .vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
+ .voltages = _voltages, \
+ }
+
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+ [prefix ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = prefix ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = prefix ## _reg, \
+ .enable_bit = prefix ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
+ }
+
+#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+ [prefix ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = prefix ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = prefix ## _reg, \
+ .enable_bit = prefix ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
+ }
+
+#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+
+#endif
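
The value of these macros is the naming contract they enforce through token pasting: a chip driver only supplies short tokens, and the enum index, register name, enable bit and VSEL field names are all derived from them. With purely hypothetical tokens (CHIP, VFOO, REG1, REG2), MC13xxx_DEFINE(CHIP, VFOO, REG1, REG2, chip_vfoo_voltages, chip_ops) expands to roughly the array initializer below, so the chip header must already define CHIPVFOO, CHIPREG1, CHIPREG1_VFOOEN, CHIPREG2, CHIPREG2_VFOOVSEL and CHIPREG2_VFOOVSEL_M. This is the literal expansion, shown for illustration rather than as standalone code:

[CHIPVFOO] = {
	.desc = {
		.name		= "CHIP" "_" "VFOO",	/* i.e. "CHIP_VFOO" */
		.n_voltages	= ARRAY_SIZE(chip_vfoo_voltages),
		.ops		= &chip_ops,
		.type		= REGULATOR_VOLTAGE,
		.id		= CHIPVFOO,
		.owner		= THIS_MODULE,
	},
	.reg		= CHIPREG1,
	.enable_bit	= CHIPREG1_VFOOEN,
	.vsel_reg	= CHIPREG2,
	.vsel_shift	= CHIPREG2_VFOOVSEL,
	.vsel_mask	= CHIPREG2_VFOOVSEL_M,
	.voltages	= chip_vfoo_voltages,
},
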
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 29d0566379ae..31f6e11a7f16 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -151,7 +151,8 @@ static struct pcap_regulator vreg_table[] = {
};
static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
void *pcap = rdev_get_drvdata(rdev);
@@ -170,10 +171,12 @@ static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
i = 0;
uV = vreg->voltage_table[i] * 1000;
- if (min_uV <= uV && uV <= max_uV)
+ if (min_uV <= uV && uV <= max_uV) {
+ *selector = i;
return ezx_pcap_set_bits(pcap, vreg->reg,
(vreg->n_voltages - 1) << vreg->index,
i << vreg->index);
+ }
if (i == 0 && rdev_get_id(rdev) == V1)
i = vreg->n_voltages - 1;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index c8f41dc05b76..69a11d9dd87f 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -108,7 +108,8 @@ static unsigned int ldo_voltage_value(u8 bits)
}
static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct pcf50633 *pcf;
int regulator_id, millivolts;
@@ -147,6 +148,8 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
+ *selector = volt_bits;
+
return pcf50633_reg_write(pcf, regnr, volt_bits);
}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index cd6d4fc9d74f..60a7ca5409e9 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -321,7 +321,8 @@ static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
}
static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
@@ -346,6 +347,8 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
break;
}
+ *selector = vsel;
+
/* write to the register in case we found a match */
if (vsel == tps->info[dcdc]->table_len)
return -EINVAL;
@@ -371,7 +374,7 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
}
static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
@@ -396,6 +399,8 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[ldo]->table_len)
return -EINVAL;
+ *selector = vsel;
+
data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
if (data < 0)
return data;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 020f5878d7ff..064755290599 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -369,7 +369,8 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
}
static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, dcdc = rdev_get_id(dev);
@@ -415,6 +416,8 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
return -EINVAL;
+ *selector = vsel;
+
data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
@@ -450,7 +453,8 @@ static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev)
}
static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
@@ -483,6 +487,8 @@ static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[ldo]->table_len)
return -EINVAL;
+ *selector = vsel;
+
data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
new file mode 100644
index 000000000000..176a6be5a8ce
--- /dev/null
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -0,0 +1,693 @@
+/*
+ * Regulator driver for TPS6524x PMIC
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define REG_LDO_SET 0x0
+#define LDO_ILIM_MASK 1 /* 0 = 400-800, 1 = 900-1500 */
+#define LDO_VSEL_MASK 0x0f
+#define LDO2_ILIM_SHIFT 12
+#define LDO2_VSEL_SHIFT 4
+#define LDO1_ILIM_SHIFT 8
+#define LDO1_VSEL_SHIFT 0
+
+#define REG_BLOCK_EN 0x1
+#define BLOCK_MASK 1
+#define BLOCK_LDO1_SHIFT 0
+#define BLOCK_LDO2_SHIFT 1
+#define BLOCK_LCD_SHIFT 2
+#define BLOCK_USB_SHIFT 3
+
+#define REG_DCDC_SET 0x2
+#define DCDC_VDCDC_MASK 0x1f
+#define DCDC_VDCDC1_SHIFT 0
+#define DCDC_VDCDC2_SHIFT 5
+#define DCDC_VDCDC3_SHIFT 10
+
+#define REG_DCDC_EN 0x3
+#define DCDCDCDC_EN_MASK 0x1
+#define DCDCDCDC1_EN_SHIFT 0
+#define DCDCDCDC1_PG_MSK BIT(1)
+#define DCDCDCDC2_EN_SHIFT 2
+#define DCDCDCDC2_PG_MSK BIT(3)
+#define DCDCDCDC3_EN_SHIFT 4
+#define DCDCDCDC3_PG_MSK BIT(5)
+
+#define REG_USB 0x4
+#define USB_ILIM_SHIFT 0
+#define USB_ILIM_MASK 0x3
+#define USB_TSD_SHIFT 2
+#define USB_TSD_MASK 0x3
+#define USB_TWARN_SHIFT 4
+#define USB_TWARN_MASK 0x3
+#define USB_IWARN_SD BIT(6)
+#define USB_FAST_LOOP BIT(7)
+
+#define REG_ALARM 0x5
+#define ALARM_LDO1 BIT(0)
+#define ALARM_DCDC1 BIT(1)
+#define ALARM_DCDC2 BIT(2)
+#define ALARM_DCDC3 BIT(3)
+#define ALARM_LDO2 BIT(4)
+#define ALARM_USB_WARN BIT(5)
+#define ALARM_USB_ALARM BIT(6)
+#define ALARM_LCD BIT(9)
+#define ALARM_TEMP_WARM BIT(10)
+#define ALARM_TEMP_HOT BIT(11)
+#define ALARM_NRST BIT(14)
+#define ALARM_POWERUP BIT(15)
+
+#define REG_INT_ENABLE 0x6
+#define INT_LDO1 BIT(0)
+#define INT_DCDC1 BIT(1)
+#define INT_DCDC2 BIT(2)
+#define INT_DCDC3 BIT(3)
+#define INT_LDO2 BIT(4)
+#define INT_USB_WARN BIT(5)
+#define INT_USB_ALARM BIT(6)
+#define INT_LCD BIT(9)
+#define INT_TEMP_WARM BIT(10)
+#define INT_TEMP_HOT BIT(11)
+#define INT_GLOBAL_EN BIT(15)
+
+#define REG_INT_STATUS 0x7
+#define STATUS_LDO1 BIT(0)
+#define STATUS_DCDC1 BIT(1)
+#define STATUS_DCDC2 BIT(2)
+#define STATUS_DCDC3 BIT(3)
+#define STATUS_LDO2 BIT(4)
+#define STATUS_USB_WARN BIT(5)
+#define STATUS_USB_ALARM BIT(6)
+#define STATUS_LCD BIT(9)
+#define STATUS_TEMP_WARM BIT(10)
+#define STATUS_TEMP_HOT BIT(11)
+
+#define REG_SOFTWARE_RESET 0xb
+#define REG_WRITE_ENABLE 0xd
+#define REG_REV_ID 0xf
+
+#define N_DCDC 3
+#define N_LDO 2
+#define N_SWITCH 2
+#define N_REGULATORS (3 /* DCDC */ + \
+ 2 /* LDO */ + \
+ 2 /* switch */)
+
+#define FIXED_ILIMSEL BIT(0)
+#define FIXED_VOLTAGE BIT(1)
+
+#define CMD_READ(reg) ((reg) << 6)
+#define CMD_WRITE(reg) (BIT(5) | (reg) << 6)
+#define STAT_CLK BIT(3)
+#define STAT_WRITE BIT(2)
+#define STAT_INVALID BIT(1)
+#define STAT_WP BIT(0)
+
+struct field {
+ int reg;
+ int shift;
+ int mask;
+};
+
+struct supply_info {
+ const char *name;
+ int n_voltages;
+ const int *voltages;
+ int fixed_voltage;
+ int n_ilimsels;
+ const int *ilimsels;
+ int fixed_ilimsel;
+ int flags;
+ struct field enable, voltage, ilimsel;
+};
+
+struct tps6524x {
+ struct device *dev;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator_desc desc[N_REGULATORS];
+ struct regulator_dev *rdev[N_REGULATORS];
+};
+
+static int __read_reg(struct tps6524x *hw, int reg)
+{
+ int error = 0;
+ u16 cmd = CMD_READ(reg), in;
+ u8 status;
+ struct spi_message m;
+ struct spi_transfer t[3];
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = 2;
+ t[0].bits_per_word = 12;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = &in;
+ t[1].len = 2;
+ t[1].bits_per_word = 16;
+ spi_message_add_tail(&t[1], &m);
+
+ t[2].rx_buf = &status;
+ t[2].len = 1;
+ t[2].bits_per_word = 4;
+ spi_message_add_tail(&t[2], &m);
+
+ error = spi_sync(hw->spi, &m);
+ if (error < 0)
+ return error;
+
+ dev_dbg(hw->dev, "read reg %d, data %x, status %x\n",
+ reg, in, status);
+
+ if (!(status & STAT_CLK) || (status & STAT_WRITE))
+ return -EIO;
+
+ if (status & STAT_INVALID)
+ return -EINVAL;
+
+ return in;
+}
+
+static int read_reg(struct tps6524x *hw, int reg)
+{
+ int ret;
+
+ mutex_lock(&hw->lock);
+ ret = __read_reg(hw, reg);
+ mutex_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int __write_reg(struct tps6524x *hw, int reg, int val)
+{
+ int error = 0;
+ u16 cmd = CMD_WRITE(reg), out = val;
+ u8 status;
+ struct spi_message m;
+ struct spi_transfer t[3];
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = 2;
+ t[0].bits_per_word = 12;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = &out;
+ t[1].len = 2;
+ t[1].bits_per_word = 16;
+ spi_message_add_tail(&t[1], &m);
+
+ t[2].rx_buf = &status;
+ t[2].len = 1;
+ t[2].bits_per_word = 4;
+ spi_message_add_tail(&t[2], &m);
+
+ error = spi_sync(hw->spi, &m);
+ if (error < 0)
+ return error;
+
+ dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n",
+ reg, out, status);
+
+ if (!(status & STAT_CLK) || !(status & STAT_WRITE))
+ return -EIO;
+
+ if (status & (STAT_INVALID | STAT_WP))
+ return -EINVAL;
+
+ return error;
+}
+
+static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val)
+{
+ int ret;
+
+ ret = __read_reg(hw, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = __write_reg(hw, reg, ret);
+
+ return (ret < 0) ? ret : 0;
+}
+
+static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val)
+{
+ int ret;
+
+ mutex_lock(&hw->lock);
+
+ ret = __write_reg(hw, REG_WRITE_ENABLE, 1);
+ if (ret) {
+ dev_err(hw->dev, "failed to set write enable\n");
+ goto error;
+ }
+
+ ret = __rmw_reg(hw, reg, mask, val);
+ if (ret)
+ dev_err(hw->dev, "failed to rmw register %d\n", reg);
+
+ ret = __write_reg(hw, REG_WRITE_ENABLE, 0);
+ if (ret) {
+ dev_err(hw->dev, "failed to clear write enable\n");
+ goto error;
+ }
+
+error:
+ mutex_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int read_field(struct tps6524x *hw, const struct field *field)
+{
+ int tmp;
+
+ tmp = read_reg(hw, field->reg);
+ if (tmp < 0)
+ return tmp;
+
+ return (tmp >> field->shift) & field->mask;
+}
+
+static int write_field(struct tps6524x *hw, const struct field *field,
+ int val)
+{
+ if (val & ~field->mask)
+ return -EOVERFLOW;
+
+ return rmw_protect(hw, field->reg,
+ field->mask << field->shift,
+ val << field->shift);
+}
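
Putting the register map and these helpers together: writing a field is always a read-modify-write of the owning register, bracketed by the REG_WRITE_ENABLE toggling in rmw_protect(). For example, selecting entry 5 of ldo1_voltages (4.55 V, per the tables below) through write_field() on LDO1's voltage field is equivalent to the following sketch (supply_info[3] is the LDO1 entry defined further down):

/* Sketch only: same effect as write_field(hw, &supply_info[3].voltage, 5). */
static int example_set_ldo1_vsel5(struct tps6524x *hw)
{
	return rmw_protect(hw, REG_LDO_SET,
			   LDO_VSEL_MASK << LDO1_VSEL_SHIFT,
			   5 << LDO1_VSEL_SHIFT);
}
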
+
+static const int dcdc1_voltages[] = {
+ 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000,
+ 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000,
+ 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000,
+ 1500000, 1525000, 1550000, 1575000,
+};
+
+static const int dcdc2_voltages[] = {
+ 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000,
+ 1800000, 1850000, 1900000, 1950000,
+ 2000000, 2050000, 2100000, 2150000,
+ 2200000, 2250000, 2300000, 2350000,
+ 2400000, 2450000, 2500000, 2550000,
+ 2600000, 2650000, 2700000, 2750000,
+ 2800000, 2850000, 2900000, 2950000,
+};
+
+static const int dcdc3_voltages[] = {
+ 2400000, 2450000, 2500000, 2550000, 2600000,
+ 2650000, 2700000, 2750000, 2800000, 2850000,
+ 2900000, 2950000, 3000000, 3050000, 3100000,
+ 3150000, 3200000, 3250000, 3300000, 3350000,
+ 3400000, 3450000, 3500000, 3550000, 3600000,
+};
+
+static const int ldo1_voltages[] = {
+ 4300000, 4350000, 4400000, 4450000,
+ 4500000, 4550000, 4600000, 4650000,
+ 4700000, 4750000, 4800000, 4850000,
+ 4900000, 4950000, 5000000, 5050000,
+};
+
+static const int ldo2_voltages[] = {
+ 1100000, 1150000, 1200000, 1250000,
+ 1300000, 1700000, 1750000, 1800000,
+ 1850000, 1900000, 3150000, 3200000,
+ 3250000, 3300000, 3350000, 3400000,
+};
+
+static const int ldo_ilimsel[] = {
+ 400000, 1500000
+};
+
+static const int usb_ilimsel[] = {
+ 200000, 400000, 800000, 1000000
+};
+
+#define __MK_FIELD(_reg, _mask, _shift) \
+ { .reg = (_reg), .mask = (_mask), .shift = (_shift), }
+
+static const struct supply_info supply_info[N_REGULATORS] = {
+ {
+ .name = "DCDC1",
+ .flags = FIXED_ILIMSEL,
+ .n_voltages = ARRAY_SIZE(dcdc1_voltages),
+ .voltages = dcdc1_voltages,
+ .fixed_ilimsel = 2400000,
+ .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+ DCDCDCDC1_EN_SHIFT),
+ .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+ DCDC_VDCDC1_SHIFT),
+ },
+ {
+ .name = "DCDC2",
+ .flags = FIXED_ILIMSEL,
+ .n_voltages = ARRAY_SIZE(dcdc2_voltages),
+ .voltages = dcdc2_voltages,
+ .fixed_ilimsel = 1200000,
+ .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+ DCDCDCDC2_EN_SHIFT),
+ .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+ DCDC_VDCDC2_SHIFT),
+ },
+ {
+ .name = "DCDC3",
+ .flags = FIXED_ILIMSEL,
+ .n_voltages = ARRAY_SIZE(dcdc3_voltages),
+ .voltages = dcdc3_voltages,
+ .fixed_ilimsel = 1200000,
+ .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+ DCDCDCDC3_EN_SHIFT),
+ .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+ DCDC_VDCDC3_SHIFT),
+ },
+ {
+ .name = "LDO1",
+ .n_voltages = ARRAY_SIZE(ldo1_voltages),
+ .voltages = ldo1_voltages,
+ .n_ilimsels = ARRAY_SIZE(ldo_ilimsel),
+ .ilimsels = ldo_ilimsel,
+ .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+ BLOCK_LDO1_SHIFT),
+ .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+ LDO1_VSEL_SHIFT),
+ .ilimsel = __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+ LDO1_ILIM_SHIFT),
+ },
+ {
+ .name = "LDO2",
+ .n_voltages = ARRAY_SIZE(ldo2_voltages),
+ .voltages = ldo2_voltages,
+ .n_ilimsels = ARRAY_SIZE(ldo_ilimsel),
+ .ilimsels = ldo_ilimsel,
+ .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+ BLOCK_LDO2_SHIFT),
+ .voltage = __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+ LDO2_VSEL_SHIFT),
+ .ilimsel = __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+ LDO2_ILIM_SHIFT),
+ },
+ {
+ .name = "USB",
+ .flags = FIXED_VOLTAGE,
+ .fixed_voltage = 5000000,
+ .n_ilimsels = ARRAY_SIZE(usb_ilimsel),
+ .ilimsels = usb_ilimsel,
+ .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+ BLOCK_USB_SHIFT),
+ .ilimsel = __MK_FIELD(REG_USB, USB_ILIM_MASK,
+ USB_ILIM_SHIFT),
+ },
+ {
+ .name = "LCD",
+ .flags = FIXED_VOLTAGE | FIXED_ILIMSEL,
+ .fixed_voltage = 5000000,
+ .fixed_ilimsel = 400000,
+ .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+ BLOCK_LCD_SHIFT),
+ },
+};
+
+static int list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ if (info->flags & FIXED_VOLTAGE)
+ return selector ? -EINVAL : info->fixed_voltage;
+
+ return ((selector < info->n_voltages) ?
+ info->voltages[selector] : -EINVAL);
+}
+
+static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+ unsigned i;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ if (info->flags & FIXED_VOLTAGE)
+ return -EINVAL;
+
+ for (i = 0; i < info->n_voltages; i++)
+ if (min_uV <= info->voltages[i] &&
+ max_uV >= info->voltages[i])
+ break;
+
+ if (i >= info->n_voltages)
+ i = info->n_voltages - 1;
+
+ *selector = i;
+
+ return write_field(hw, &info->voltage, i);
+}
+
+static int get_voltage(struct regulator_dev *rdev)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+ int ret;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ if (info->flags & FIXED_VOLTAGE)
+ return info->fixed_voltage;
+
+ ret = read_field(hw, &info->voltage);
+ if (ret < 0)
+ return ret;
+ if (WARN_ON(ret >= info->n_voltages))
+ return -EIO;
+
+ return info->voltages[ret];
+}
+
+static int set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+ int i;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ if (info->flags & FIXED_ILIMSEL)
+ return -EINVAL;
+
+ for (i = 0; i < info->n_ilimsels; i++)
+ if (min_uA <= info->ilimsels[i] &&
+ max_uA >= info->ilimsels[i])
+ break;
+
+ if (i >= info->n_ilimsels)
+ return -EINVAL;
+
+ return write_field(hw, &info->ilimsel, i);
+}
+
+static int get_current_limit(struct regulator_dev *rdev)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+ int ret;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ if (info->flags & FIXED_ILIMSEL)
+ return info->fixed_ilimsel;
+
+ ret = read_field(hw, &info->ilimsel);
+ if (ret < 0)
+ return ret;
+ if (WARN_ON(ret >= info->n_ilimsels))
+ return -EIO;
+
+ return info->ilimsels[ret];
+}
+
+static int enable_supply(struct regulator_dev *rdev)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ return write_field(hw, &info->enable, 1);
+}
+
+static int disable_supply(struct regulator_dev *rdev)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ return write_field(hw, &info->enable, 0);
+}
+
+static int is_supply_enabled(struct regulator_dev *rdev)
+{
+ const struct supply_info *info;
+ struct tps6524x *hw;
+
+ hw = rdev_get_drvdata(rdev);
+ info = &supply_info[rdev_get_id(rdev)];
+
+ return read_field(hw, &info->enable);
+}
+
+static struct regulator_ops regulator_ops = {
+ .is_enabled = is_supply_enabled,
+ .enable = enable_supply,
+ .disable = disable_supply,
+ .get_voltage = get_voltage,
+ .set_voltage = set_voltage,
+ .list_voltage = list_voltage,
+ .set_current_limit = set_current_limit,
+ .get_current_limit = get_current_limit,
+};
+
+static int __devexit pmic_remove(struct spi_device *spi)
+{
+ struct tps6524x *hw = spi_get_drvdata(spi);
+ int i;
+
+ if (!hw)
+ return 0;
+ for (i = 0; i < N_REGULATORS; i++) {
+ if (hw->rdev[i])
+ regulator_unregister(hw->rdev[i]);
+ hw->rdev[i] = NULL;
+ }
+ spi_set_drvdata(spi, NULL);
+ kfree(hw);
+ return 0;
+}
+
+static int __devinit pmic_probe(struct spi_device *spi)
+{
+ struct tps6524x *hw;
+ struct device *dev = &spi->dev;
+ const struct supply_info *info = supply_info;
+ struct regulator_init_data *init_data;
+ int ret = 0, i;
+
+ init_data = dev->platform_data;
+ if (!init_data) {
+ dev_err(dev, "could not find regulator platform data\n");
+ return -EINVAL;
+ }
+
+ hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+ if (!hw) {
+ dev_err(dev, "cannot allocate regulator private data\n");
+ return -ENOMEM;
+ }
+ spi_set_drvdata(spi, hw);
+
+ memset(hw, 0, sizeof(struct tps6524x));
+ hw->dev = dev;
+ hw->spi = spi_dev_get(spi);
+ mutex_init(&hw->lock);
+
+ for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
+ hw->desc[i].name = info->name;
+ hw->desc[i].id = i;
+ hw->desc[i].n_voltages = info->n_voltages;
+ hw->desc[i].ops = &regulator_ops;
+ hw->desc[i].type = REGULATOR_VOLTAGE;
+ hw->desc[i].owner = THIS_MODULE;
+
+ if (info->flags & FIXED_VOLTAGE)
+ hw->desc[i].n_voltages = 1;
+
+ hw->rdev[i] = regulator_register(&hw->desc[i], dev,
+ init_data, hw);
+ if (IS_ERR(hw->rdev[i])) {
+ ret = PTR_ERR(hw->rdev[i]);
+ hw->rdev[i] = NULL;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ pmic_remove(spi);
+ return ret;
+}
+
+static struct spi_driver pmic_driver = {
+ .probe = pmic_probe,
+ .remove = __devexit_p(pmic_remove),
+ .driver = {
+ .name = "tps6524x",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pmic_driver_init(void)
+{
+ return spi_register_driver(&pmic_driver);
+}
+module_init(pmic_driver_init);
+
+static void __exit pmic_driver_exit(void)
+{
+ spi_unregister_driver(&pmic_driver);
+}
+module_exit(pmic_driver_exit);
+
+MODULE_DESCRIPTION("TPS6524X PMIC Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tps6524x");
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 6d20b0454a1d..bb04a75a4c98 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -85,7 +85,8 @@ static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
static int __tps6586x_ldo_set_voltage(struct device *parent,
struct tps6586x_regulator *ri,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
int val, uV;
uint8_t mask;
@@ -100,6 +101,8 @@ static int __tps6586x_ldo_set_voltage(struct device *parent,
/* use the first in-range value */
if (min_uV <= uV && uV <= max_uV) {
+ *selector = val;
+
val <<= ri->volt_shift;
mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
@@ -111,12 +114,13 @@ static int __tps6586x_ldo_set_voltage(struct device *parent,
}
static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
- return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+ return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
}
static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
@@ -140,13 +144,14 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
}
static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
int ret;
- ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+ ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+ selector);
if (ret)
return ret;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a57262a4fa6c..bd332cf1cc3f 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -329,7 +329,8 @@ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -345,9 +346,11 @@ twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
/* REVISIT for VAUX2, first match may not be best/lowest */
/* use the first in-range value */
- if (min_uV <= uV && uV <= max_uV)
+ if (min_uV <= uV && uV <= max_uV) {
+ *selector = vsel;
return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
VREG_VOLTAGE, vsel);
+ }
}
return -EDOM;
@@ -389,7 +392,8 @@ static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -402,6 +406,7 @@ twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
* mV = 1000mv + 100mv * (vsel - 1)
*/
vsel = (min_uV/1000 - 1000)/100 + 1;
+ *selector = vsel;
return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index dbfaf5945e48..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
+ return -EINVAL;
}
}
@@ -302,7 +303,7 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
}
static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
@@ -314,6 +315,8 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
if (vsel < 0)
return vsel;
+ *selector = vsel;
+
/* If this value is already set then do a GPIO update if we can */
if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
return wm831x_buckv_set_dvs(rdev, 0);
@@ -375,14 +378,14 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
}
-static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
- return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+ return dcdc->dvs_vsel;
else
- return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
+ return dcdc->on_vsel;
}
/* Current limit options */
@@ -424,7 +427,7 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
static struct regulator_ops wm831x_buckv_ops = {
.set_voltage = wm831x_buckv_set_voltage,
- .get_voltage = wm831x_buckv_get_voltage,
+ .get_voltage_sel = wm831x_buckv_get_voltage_sel,
.list_voltage = wm831x_buckv_list_voltage,
.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
.set_current_limit = wm831x_buckv_set_current_limit,
@@ -636,7 +639,7 @@ static int wm831x_buckp_list_voltage(struct regulator_dev *rdev,
}
static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, int *selector)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
@@ -650,16 +653,20 @@ static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
return -EINVAL;
+ *selector = vsel;
+
return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
}
static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
- return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV);
+ return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
+ selector);
}
static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
@@ -667,11 +674,12 @@ static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+ unsigned selector;
- return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
}
-static int wm831x_buckp_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
@@ -682,12 +690,12 @@ static int wm831x_buckp_get_voltage(struct regulator_dev *rdev)
if (val < 0)
return val;
- return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK);
+ return val & WM831X_DC3_ON_VSEL_MASK;
}
static struct regulator_ops wm831x_buckp_ops = {
.set_voltage = wm831x_buckp_set_voltage,
- .get_voltage = wm831x_buckp_get_voltage,
+ .get_voltage_sel = wm831x_buckp_get_voltage_sel,
.list_voltage = wm831x_buckp_list_voltage,
.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 9edf8f692341..c94fc5b7cd5b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -113,7 +113,8 @@ static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev,
}
static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -133,16 +134,20 @@ static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
if (ret < min_uV || ret > max_uV)
return -EINVAL;
+ *selector = vsel;
+
return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
}
static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_LDO_ON_CONTROL;
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+ return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+ selector);
}
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -150,11 +155,12 @@ static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+ unsigned int selector;
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
}
-static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -167,7 +173,7 @@ static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev)
ret &= WM831X_LDO1_ON_VSEL_MASK;
- return wm831x_gp_ldo_list_voltage(rdev, ret);
+ return ret;
}
static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -287,7 +293,7 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
static struct regulator_ops wm831x_gp_ldo_ops = {
.list_voltage = wm831x_gp_ldo_list_voltage,
- .get_voltage = wm831x_gp_ldo_get_voltage,
+ .get_voltage_sel = wm831x_gp_ldo_get_voltage_sel,
.set_voltage = wm831x_gp_ldo_set_voltage,
.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
.get_mode = wm831x_gp_ldo_get_mode,
@@ -413,7 +419,8 @@ static int wm831x_aldo_list_voltage(struct regulator_dev *rdev,
}
static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -433,16 +440,19 @@ static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
if (ret < min_uV || ret > max_uV)
return -EINVAL;
+ *selector = vsel;
+
return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
}
static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_LDO_ON_CONTROL;
- return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+ return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+ selector);
}
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -450,11 +460,12 @@ static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+ unsigned int selector;
- return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
}
-static int wm831x_aldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -467,7 +478,7 @@ static int wm831x_aldo_get_voltage(struct regulator_dev *rdev)
ret &= WM831X_LDO7_ON_VSEL_MASK;
- return wm831x_aldo_list_voltage(rdev, ret);
+ return ret;
}
static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -548,7 +559,7 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_aldo_ops = {
.list_voltage = wm831x_aldo_list_voltage,
- .get_voltage = wm831x_aldo_get_voltage,
+ .get_voltage_sel = wm831x_aldo_get_voltage_sel,
.set_voltage = wm831x_aldo_set_voltage,
.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
.get_mode = wm831x_aldo_get_mode,
@@ -666,7 +677,8 @@ static int wm831x_alive_ldo_list_voltage(struct regulator_dev *rdev,
static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
int reg,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -680,16 +692,20 @@ static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
if (ret < min_uV || ret > max_uV)
return -EINVAL;
+ *selector = vsel;
+
return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
}
static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+ return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+ selector);
}
static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -697,11 +713,12 @@ static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+ unsigned selector;
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
}
-static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
@@ -714,7 +731,7 @@ static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev)
ret &= WM831X_LDO11_ON_VSEL_MASK;
- return wm831x_alive_ldo_list_voltage(rdev, ret);
+ return ret;
}
static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -736,7 +753,7 @@ static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_alive_ldo_ops = {
.list_voltage = wm831x_alive_ldo_list_voltage,
- .get_voltage = wm831x_alive_ldo_get_voltage,
+ .get_voltage_sel = wm831x_alive_ldo_get_voltage_sel,
.set_voltage = wm831x_alive_ldo_set_voltage,
.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
.get_status = wm831x_alive_ldo_get_status,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index fe4b8a8a9dfd..1bcb22c44095 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -360,7 +360,7 @@ int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV)
+ int max_uV, unsigned *selector)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
int volt_reg, dcdc = rdev_get_id(rdev), mV,
@@ -397,17 +397,18 @@ static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
return -EINVAL;
}
+ *selector = mV;
+
/* all DCDCs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
wm8350_reg_write(wm8350, volt_reg, val | mV);
return 0;
}
-static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev)
+static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
int volt_reg, dcdc = rdev_get_id(rdev);
- u16 val;
switch (dcdc) {
case WM8350_DCDC_1:
@@ -429,8 +430,7 @@ static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev)
}
/* all DCDCs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
- return wm8350_dcdc_val_to_mvolts(val) * 1000;
+ return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
}
static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
@@ -754,7 +754,7 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev)
}
static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV)
+ int max_uV, unsigned *selector)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
@@ -797,17 +797,18 @@ static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
return -EINVAL;
}
+ *selector = mV;
+
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
wm8350_reg_write(wm8350, volt_reg, val | mV);
return 0;
}
-static int wm8350_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
int volt_reg, ldo = rdev_get_id(rdev);
- u16 val;
switch (ldo) {
case WM8350_LDO_1:
@@ -827,8 +828,7 @@ static int wm8350_ldo_get_voltage(struct regulator_dev *rdev)
}
/* all LDOs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
- return wm8350_ldo_val_to_mvolts(val) * 1000;
+ return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
}
static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
@@ -1225,7 +1225,7 @@ static int wm8350_ldo_is_enabled(struct regulator_dev *rdev)
static struct regulator_ops wm8350_dcdc_ops = {
.set_voltage = wm8350_dcdc_set_voltage,
- .get_voltage = wm8350_dcdc_get_voltage,
+ .get_voltage_sel = wm8350_dcdc_get_voltage_sel,
.list_voltage = wm8350_dcdc_list_voltage,
.enable = wm8350_dcdc_enable,
.disable = wm8350_dcdc_disable,
@@ -1249,7 +1249,7 @@ static struct regulator_ops wm8350_dcdc2_5_ops = {
static struct regulator_ops wm8350_ldo_ops = {
.set_voltage = wm8350_ldo_set_voltage,
- .get_voltage = wm8350_ldo_get_voltage,
+ .get_voltage_sel = wm8350_ldo_get_voltage_sel,
.list_voltage = wm8350_ldo_list_voltage,
.enable = wm8350_ldo_enable,
.disable = wm8350_ldo_disable,
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 924c7eb29ee9..b42d01cef35a 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -67,7 +67,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
}
static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -93,6 +93,8 @@ static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
val += 0xf;
}
+ *selector = val;
+
return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
WM8400_LDO1_VSEL_MASK, val);
}
@@ -156,7 +158,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
}
static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *selector)
{
struct wm8400 *wm8400 = rdev_get_drvdata(dev);
u16 val;
@@ -171,6 +173,8 @@ static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
return -EINVAL;
BUG_ON(850000 + (25000 * val) < min_uV);
+ *selector = val;
+
return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
WM8400_DC1_VSEL_MASK, val);
}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 03713bc66e4a..35b2958d5106 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -86,7 +86,7 @@ static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
return (selector * 100000) + 2400000;
}
-static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
int val;
@@ -95,13 +95,11 @@ static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
if (val < 0)
return val;
- val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
-
- return wm8994_ldo1_list_voltage(rdev, val);
+ return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
}
static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *s)
{
struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
int selector, v;
@@ -111,6 +109,7 @@ static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
if (v < 0 || v > max_uV)
return -EINVAL;
+ *s = selector;
selector <<= WM8994_LDO1_VSEL_SHIFT;
return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
@@ -124,20 +123,29 @@ static struct regulator_ops wm8994_ldo1_ops = {
.enable_time = wm8994_ldo_enable_time,
.list_voltage = wm8994_ldo1_list_voltage,
- .get_voltage = wm8994_ldo1_get_voltage,
+ .get_voltage_sel = wm8994_ldo1_get_voltage_sel,
.set_voltage = wm8994_ldo1_set_voltage,
};
static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
unsigned int selector)
{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
if (selector > WM8994_LDO2_MAX_SELECTOR)
return -EINVAL;
- return (selector * 100000) + 900000;
+ switch (ldo->wm8994->type) {
+ case WM8994:
+ return (selector * 100000) + 900000;
+ case WM8958:
+ return (selector * 100000) + 1000000;
+ default:
+ return -EINVAL;
+ }
}
-static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
int val;
@@ -146,22 +154,31 @@ static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
if (val < 0)
return val;
- val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
-
- return wm8994_ldo2_list_voltage(rdev, val);
+ return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
}
static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV, unsigned *s)
{
struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
int selector, v;
- selector = (min_uV - 900000) / 100000;
+ switch (ldo->wm8994->type) {
+ case WM8994:
+ selector = (min_uV - 900000) / 100000;
+ break;
+ case WM8958:
+ selector = (min_uV - 1000000) / 100000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
v = wm8994_ldo2_list_voltage(rdev, selector);
if (v < 0 || v > max_uV)
return -EINVAL;
+ *s = selector;
selector <<= WM8994_LDO2_VSEL_SHIFT;
return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
@@ -175,7 +192,7 @@ static struct regulator_ops wm8994_ldo2_ops = {
.enable_time = wm8994_ldo_enable_time,
.list_voltage = wm8994_ldo2_list_voltage,
- .get_voltage = wm8994_ldo2_get_voltage,
+ .get_voltage_sel = wm8994_ldo2_get_voltage_sel,
.set_voltage = wm8994_ldo2_set_voltage,
};
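
The regulator driver hunks above all apply one mechanical change: set_voltage() gains an unsigned *selector out-parameter that must report the table index actually programmed, and several drivers trade .get_voltage for .get_voltage_sel, returning the raw selector and letting the core translate it through list_voltage(). A minimal sketch of the resulting driver shape, with hypothetical chip_read_vsel()/chip_write_vsel() register accessors:

#include <linux/kernel.h>
#include <linux/regulator/driver.h>

static const int chip_voltages[] = { 900000, 1000000, 1100000, 1200000 };

/* Hypothetical register accessors for the selector field. */
int chip_read_vsel(struct regulator_dev *rdev);
int chip_write_vsel(struct regulator_dev *rdev, unsigned vsel);

static int chip_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= ARRAY_SIZE(chip_voltages))
		return -EINVAL;
	return chip_voltages[selector];
}

static int chip_set_voltage(struct regulator_dev *rdev,
			    int min_uV, int max_uV, unsigned *selector)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(chip_voltages); i++)
		if (chip_voltages[i] >= min_uV && chip_voltages[i] <= max_uV) {
			*selector = i;	/* report what was programmed */
			return chip_write_vsel(rdev, i);
		}
	return -EINVAL;
}

static int chip_get_voltage_sel(struct regulator_dev *rdev)
{
	return chip_read_vsel(rdev);	/* raw selector; core maps it back */
}

static struct regulator_ops chip_ops = {
	.list_voltage	 = chip_list_voltage,
	.set_voltage	 = chip_set_voltage,
	.get_voltage_sel = chip_get_voltage_sel,
};
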
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cbabb35..c404b61386bf 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "rtc-core.h"
@@ -142,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->id = id;
rtc->ops = ops;
rtc->owner = owner;
+ rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.parent = dev;
rtc->dev.class = rtc_class;
@@ -152,6 +154,18 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
spin_lock_init(&rtc->irq_task_lock);
init_waitqueue_head(&rtc->irq_queue);
+ /* Init timerqueue */
+ timerqueue_init_head(&rtc->timerqueue);
+ INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+ /* Init aie timer */
+ rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
+ /* Init uie timer */
+ rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
+ /* Init pie timer */
+ hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rtc->pie_timer.function = rtc_pie_update_irq;
+ rtc->pie_enabled = 0;
+
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa9..cb2f0728fd70 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,14 @@
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/log2.h>
+#include <linux/workqueue.h>
-int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
int err;
-
- err = mutex_lock_interruptible(&rtc->ops_lock);
- if (err)
- return err;
-
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->read_time)
@@ -31,7 +30,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
memset(tm, 0, sizeof(struct rtc_time));
err = rtc->ops->read_time(rtc->dev.parent, tm);
}
+ return err;
+}
+int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
+ if (err)
+ return err;
+
+ err = __rtc_read_time(rtc, tm);
mutex_unlock(&rtc->ops_lock);
return err;
}
@@ -106,188 +116,60 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
int err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
-
if (rtc->ops == NULL)
err = -ENODEV;
else if (!rtc->ops->read_alarm)
err = -EINVAL;
else {
memset(alarm, 0, sizeof(struct rtc_wkalrm));
- err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+ alarm->enabled = rtc->aie_timer.enabled;
+ alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
}
-
mutex_unlock(&rtc->ops_lock);
+
return err;
}
+EXPORT_SYMBOL_GPL(rtc_read_alarm);
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+ struct rtc_time tm;
+ long now, scheduled;
int err;
- struct rtc_time before, now;
- int first_time = 1;
- unsigned long t_now, t_alm;
- enum { none, day, month, year } missing = none;
- unsigned days;
-
- /* The lower level RTC driver may return -1 in some fields,
- * creating invalid alarm->time values, for reasons like:
- *
- * - The hardware may not be capable of filling them in;
- * many alarms match only on time-of-day fields, not
- * day/month/year calendar data.
- *
- * - Some hardware uses illegal values as "wildcard" match
- * values, which non-Linux firmware (like a BIOS) may try
- * to set up as e.g. "alarm 15 minutes after each hour".
- * Linux uses only oneshot alarms.
- *
- * When we see that here, we deal with it by using values from
- * a current RTC timestamp for any missing (-1) values. The
- * RTC driver prevents "periodic alarm" modes.
- *
- * But this can be racey, because some fields of the RTC timestamp
- * may have wrapped in the interval since we read the RTC alarm,
- * which would lead to us inserting inconsistent values in place
- * of the -1 fields.
- *
- * Reading the alarm and timestamp in the reverse sequence
- * would have the same race condition, and not solve the issue.
- *
- * So, we must first read the RTC timestamp,
- * then read the RTC alarm value,
- * and then read a second RTC timestamp.
- *
- * If any fields of the second timestamp have changed
- * when compared with the first timestamp, then we know
- * our timestamp may be inconsistent with that used by
- * the low-level rtc_read_alarm_internal() function.
- *
- * So, when the two timestamps disagree, we just loop and do
- * the process again to get a fully consistent set of values.
- *
- * This could all instead be done in the lower level driver,
- * but since more than one lower level RTC implementation needs it,
- * then it's probably best best to do it here instead of there..
- */
- /* Get the "before" timestamp */
- err = rtc_read_time(rtc, &before);
- if (err < 0)
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
return err;
- do {
- if (!first_time)
- memcpy(&before, &now, sizeof(struct rtc_time));
- first_time = 0;
-
- /* get the RTC alarm values, which may be incomplete */
- err = rtc_read_alarm_internal(rtc, alarm);
- if (err)
- return err;
- if (!alarm->enabled)
- return 0;
-
- /* full-function RTCs won't have such missing fields */
- if (rtc_valid_tm(&alarm->time) == 0)
- return 0;
-
- /* get the "after" timestamp, to detect wrapped fields */
- err = rtc_read_time(rtc, &now);
- if (err < 0)
- return err;
-
- /* note that tm_sec is a "don't care" value here: */
- } while ( before.tm_min != now.tm_min
- || before.tm_hour != now.tm_hour
- || before.tm_mon != now.tm_mon
- || before.tm_year != now.tm_year);
+ rtc_tm_to_time(&alarm->time, &scheduled);
- /* Fill in the missing alarm fields using the timestamp; we
- * know there's at least one since alarm->time is invalid.
+ /* Make sure we're not setting alarms in the past */
+ err = __rtc_read_time(rtc, &tm);
+ rtc_tm_to_time(&tm, &now);
+ if (scheduled <= now)
+ return -ETIME;
+ /*
+ * XXX - We just checked to make sure the alarm time is not
+ * in the past, but there is still a race window: if the
+ * alarm is set for the next second and that second ticks
+ * over right here, we may set the alarm after it has passed.
*/
- if (alarm->time.tm_sec == -1)
- alarm->time.tm_sec = now.tm_sec;
- if (alarm->time.tm_min == -1)
- alarm->time.tm_min = now.tm_min;
- if (alarm->time.tm_hour == -1)
- alarm->time.tm_hour = now.tm_hour;
-
- /* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
- alarm->time.tm_mday = now.tm_mday;
- missing = day;
- }
- if (alarm->time.tm_mon == -1) {
- alarm->time.tm_mon = now.tm_mon;
- if (missing == none)
- missing = month;
- }
- if (alarm->time.tm_year == -1) {
- alarm->time.tm_year = now.tm_year;
- if (missing == none)
- missing = year;
- }
- /* with luck, no rollover is needed */
- rtc_tm_to_time(&now, &t_now);
- rtc_tm_to_time(&alarm->time, &t_alm);
- if (t_now < t_alm)
- goto done;
-
- switch (missing) {
-
- /* 24 hour rollover ... if it's now 10am Monday, an alarm that
- * that will trigger at 5am will do so at 5am Tuesday, which
- * could also be in the next month or year. This is a common
- * case, especially for PCs.
- */
- case day:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
- t_alm += 24 * 60 * 60;
- rtc_time_to_tm(t_alm, &alarm->time);
- break;
-
- /* Month rollover ... if it's the 31th, an alarm on the 3rd will
- * be next month. An alarm matching on the 30th, 29th, or 28th
- * may end up in the month after that! Many newer PCs support
- * this type of alarm.
- */
- case month:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
- do {
- if (alarm->time.tm_mon < 11)
- alarm->time.tm_mon++;
- else {
- alarm->time.tm_mon = 0;
- alarm->time.tm_year++;
- }
- days = rtc_month_days(alarm->time.tm_mon,
- alarm->time.tm_year);
- } while (days < alarm->time.tm_mday);
- break;
-
- /* Year rollover ... easy except for leap years! */
- case year:
- dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
- do {
- alarm->time.tm_year++;
- } while (rtc_valid_tm(&alarm->time) != 0);
- break;
-
- default:
- dev_warn(&rtc->dev, "alarm rollover not handled\n");
- }
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-done:
- return 0;
+ return err;
}
-EXPORT_SYMBOL_GPL(rtc_read_alarm);
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
@@ -300,14 +182,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
-
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+ if (rtc->aie_timer.enabled) {
+ rtc_timer_remove(rtc, &rtc->aie_timer);
+ }
+ rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+ rtc->aie_timer.period = ktime_set(0, 0);
+ if (alarm->enabled) {
+ err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ }
mutex_unlock(&rtc->ops_lock);
return err;
}
@@ -319,7 +201,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (err)
return err;
- if (!rtc->ops)
+ if (rtc->aie_timer.enabled != enabled) {
+ if (enabled)
+ err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+ else
+ rtc_timer_remove(rtc, &rtc->aie_timer);
+ }
+
+ if (err)
+ /* nothing */;
+ else if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->alarm_irq_enable)
err = -EINVAL;
@@ -340,19 +231,28 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
if (enabled == 0 && rtc->uie_irq_active) {
mutex_unlock(&rtc->ops_lock);
- return rtc_dev_update_irq_enable_emul(rtc, enabled);
+ return rtc_dev_update_irq_enable_emul(rtc, 0);
}
#endif
+ /* make sure we're changing state */
+ if (rtc->uie_rtctimer.enabled == enabled)
+ goto out;
+
+ if (enabled) {
+ struct rtc_time tm;
+ ktime_t now, onesec;
+
+ __rtc_read_time(rtc, &tm);
+ onesec = ktime_set(1, 0);
+ now = rtc_tm_to_ktime(tm);
+ rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+ rtc->uie_rtctimer.period = ktime_set(1, 0);
+ err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+ } else
+ rtc_timer_remove(rtc, &rtc->uie_rtctimer);
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->update_irq_enable)
- err = -EINVAL;
- else
- err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
-
+out:
mutex_unlock(&rtc->ops_lock);
-
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
* Enable emulation if the driver did not provide
@@ -364,25 +264,30 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
return err;
+
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
+
/**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
- * @rtc: the rtc device
- * @num: how many irqs are being reported (usually one)
- * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
- * Context: any
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occurred (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
*/
-void rtc_update_irq(struct rtc_device *rtc,
- unsigned long num, unsigned long events)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
unsigned long flags;
+ /* mark one irq of the appropriate mode */
spin_lock_irqsave(&rtc->irq_lock, flags);
- rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
+ rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
spin_unlock_irqrestore(&rtc->irq_lock, flags);
+ /* call the task func */
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task)
rtc->irq_task->func(rtc->irq_task->private_data);
@@ -391,6 +296,69 @@ void rtc_update_irq(struct rtc_device *rtc,
wake_up_interruptible(&rtc->irq_queue);
kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+ struct rtc_device *rtc = (struct rtc_device *)private;
+ rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+ struct rtc_device *rtc = (struct rtc_device *)private;
+ rtc_handle_legacy_irq(rtc, 1, RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+ struct rtc_device *rtc;
+ ktime_t period;
+ int count;
+ rtc = container_of(timer, struct rtc_device, pie_timer);
+
+ period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ count = hrtimer_forward_now(timer, period);
+
+ rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when a RTC interrupt occurs.
+ * @rtc: the rtc device
+ * @num: how many irqs are being reported (usually one)
+ * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
+ * Context: any
+ */
+void rtc_update_irq(struct rtc_device *rtc,
+ unsigned long num, unsigned long events)
+{
+ schedule_work(&rtc->irqwork);
+}
EXPORT_SYMBOL_GPL(rtc_update_irq);
static int __rtc_match(struct device *dev, void *data)
@@ -477,18 +445,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
int err = 0;
unsigned long flags;
- if (rtc->ops->irq_set_state == NULL)
- return -ENXIO;
-
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
- if (err == 0)
- err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+ if (enabled) {
+ ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&rtc->pie_timer);
+ }
+ rtc->pie_enabled = enabled;
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
return err;
}
@@ -509,21 +479,206 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
int err = 0;
unsigned long flags;
- if (rtc->ops->irq_set_freq == NULL)
- return -ENXIO;
+ if (freq <= 0)
+ return -EINVAL;
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
if (err == 0) {
- err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
- if (err == 0)
- rtc->irq_freq = freq;
+ rtc->irq_freq = freq;
+ if (rtc->pie_enabled) {
+ ktime_t period;
+ hrtimer_cancel(&rtc->pie_timer);
+ period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+ hrtimer_start(&rtc->pie_timer, period,
+ HRTIMER_MODE_REL);
+ }
}
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added.
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Sets the enabled bit on the added timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+ timer->enabled = 1;
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+ if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(timer->node.expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ schedule_work(&rtc->irqwork);
+ else if (err) {
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+ timer->enabled = 0;
+ return err;
+ }
+ }
+ return 0;
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed.
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Clears the enabled bit on the removed timer.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+ timer->enabled = 0;
+ if (next == &timer->node) {
+ struct rtc_wkalrm alarm;
+ int err;
+ next = timerqueue_getnext(&rtc->timerqueue);
+ if (!next)
+ return;
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ schedule_work(&rtc->irqwork);
+ }
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: the irqwork work_struct embedded in the rtc device
+ *
+ * Expires rtc timers and reprograms the next alarm event
+ * if needed.
+ * Called via the workqueue.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+ struct rtc_timer *timer;
+ struct timerqueue_node *next;
+ ktime_t now;
+ struct rtc_time tm;
+
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, irqwork);
+
+ mutex_lock(&rtc->ops_lock);
+again:
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+ while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+ if (next->expires.tv64 > now.tv64)
+ break;
+
+ /* expire timer */
+ timer = container_of(next, struct rtc_timer, node);
+ timerqueue_del(&rtc->timerqueue, &timer->node);
+ timer->enabled = 0;
+ if (timer->task.func)
+ timer->task.func(timer->task.private_data);
+
+ /* Re-add/fwd periodic timers */
+ if (ktime_to_ns(timer->period)) {
+ timer->node.expires = ktime_add(timer->node.expires,
+ timer->period);
+ timer->enabled = 1;
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+ }
+ }
+
+ /* Set next alarm */
+ if (next) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ goto again;
+ }
+
+ mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
+{
+ timerqueue_init(&timer->node);
+ timer->enabled = 0;
+ timer->task.func = f;
+ timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer will recur
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+ ktime_t expires, ktime_t period)
+{
+ int ret = 0;
+ mutex_lock(&rtc->ops_lock);
+ if (timer->enabled)
+ rtc_timer_remove(rtc, timer);
+
+ timer->node.expires = expires;
+ timer->period = period;
+
+ ret = rtc_timer_enqueue(rtc, timer);
+
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
+{
+ int ret = 0;
+ mutex_lock(&rtc->ops_lock);
+ if (timer->enabled)
+ rtc_timer_remove(rtc, timer);
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
+
+
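The interface.c rework above also exposes a small in-kernel timer API (rtc_timer_init/start/cancel) layered on the timerqueue. A hedged usage sketch, assuming the matching declarations land in <linux/rtc.h> and using illustrative demo_ names and a 5-second one-shot delay:

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/rtc.h>

static void demo_rtc_timer_fired(void *private)
{
	pr_info("rtc_timer expired\n");
}

static int demo_arm_oneshot(struct rtc_device *rtc)
{
	static struct rtc_timer demo_timer;
	struct rtc_time tm;
	ktime_t expires;
	int err;

	rtc_timer_init(&demo_timer, demo_rtc_timer_fired, NULL);

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	expires = ktime_add(rtc_tm_to_ktime(tm), ktime_set(5, 0));
	/* a period of 0 makes this a one-shot; non-zero would re-arm it */
	return rtc_timer_start(rtc, &demo_timer, expires, ktime_set(0, 0));
}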
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6e7a2f..e725d51e773d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
int ret = 0;
spin_lock_irq(&rtc->lock);
- switch (cmd) {
- case RTC_AIE_ON:
+ if (enabled) {
if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
ret = -EINVAL;
- break;
+ goto out;
}
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
| RTC_BIT(CTRL_TOPEN));
rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
- break;
- case RTC_AIE_OFF:
+ } else {
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
& ~RTC_BIT(CTRL_TOPEN));
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
- break;
- default:
- ret = -ENOIOCTLCMD;
- break;
}
-
+out:
spin_unlock_irq(&rtc->lock);
return ret;
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
}
static struct rtc_class_ops at32_rtc_ops = {
- .ioctl = at32_rtc_ioctl,
.read_time = at32_rtc_readtime,
.set_time = at32_rtc_settime,
.read_alarm = at32_rtc_readalarm,
.set_alarm = at32_rtc_setalarm,
+ .alarm_irq_enable = at32_rtc_alarm_irq_enable,
};
static int __init at32_rtc_probe(struct platform_device *pdev)
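The same conversion recurs in most of the driver patches that follow: the RTC_AIE_ON/RTC_AIE_OFF ioctl cases collapse into a single rtc_class_ops.alarm_irq_enable callback. The shape of that callback, reduced to a self-contained sketch (demo_ names and the boolean flag stand in for a real device's interrupt-enable register):

#include <linux/device.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_rtc {
	spinlock_t lock;
	bool alarm_irq_on;	/* real drivers poke an IER/IDR register here */
};

static int demo_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct demo_rtc *priv = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->alarm_irq_on = enabled;
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

static const struct rtc_class_ops demo_rtc_ops = {
	/* .read_time, .set_time, .read_alarm, .set_alarm, ... */
	.alarm_irq_enable = demo_alarm_irq_enable,
};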
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca9a2e2..26d1cf5d19ae 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
/* important: scrub old status before enabling IRQs */
switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
- break;
- case RTC_AIE_ON: /* alarm on */
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
- break;
case RTC_UIE_OFF: /* update off */
at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
break;
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
return ret;
}
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ pr_debug("%s(): enabled=%08x\n", __func__, enabled);
+
+ if (enabled) {
+ at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ } else
+ at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+
+ return 0;
+}
/*
* Provide additional RTC information in /proc/driver/rtc
*/
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e0710ca1..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
- break;
- case RTC_AIE_ON: /* alarm on */
- rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
- break;
case RTC_UIE_OFF: /* update off */
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
break;
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
return ret;
}
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
+ if (enabled)
+ rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+ else
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+ return 0;
+}
+
/*
* Provide additional RTC information in /proc/driver/rtc
*/
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087f2234..17971d93354d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
bfin_rtc_int_clear(~RTC_ISTAT_SEC);
break;
- case RTC_AIE_ON:
- dev_dbg_stamp(dev);
- bfin_rtc_int_set_alarm(rtc);
- break;
- case RTC_AIE_OFF:
- dev_dbg_stamp(dev);
- bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
- break;
-
default:
dev_dbg_stamp(dev);
ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
return ret;
}
+static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct bfin_rtc *rtc = dev_get_drvdata(dev);
+
+ dev_dbg_stamp(dev);
+ if (enabled)
+ bfin_rtc_int_set_alarm(rtc);
+ else
+ bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+ return 0;
+}
+
static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
.read_alarm = bfin_rtc_read_alarm,
.set_alarm = bfin_rtc_set_alarm,
.proc = bfin_rtc_proc,
+ .alarm_irq_enable = bfin_rtc_alarm_irq_enable,
};
static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167a0c90..c7ff8df347e7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
+#include <linux/pm.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -687,7 +688,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
#if defined(CONFIG_ATARI)
address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
- || defined(__sparc__) || defined(__mips__)
+ || defined(__sparc__) || defined(__mips__) \
+ || defined(__powerpc__)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -850,7 +852,7 @@ static void __exit cmos_do_remove(struct device *dev)
#ifdef CONFIG_PM
-static int cmos_suspend(struct device *dev, pm_message_t mesg)
+static int cmos_suspend(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
@@ -898,7 +900,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
*/
static inline int cmos_poweroff(struct device *dev)
{
- return cmos_suspend(dev, PMSG_HIBERNATE);
+ return cmos_suspend(dev);
}
static int cmos_resume(struct device *dev)
@@ -945,9 +947,9 @@ static int cmos_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
#else
-#define cmos_suspend NULL
-#define cmos_resume NULL
static inline int cmos_poweroff(struct device *dev)
{
@@ -1077,7 +1079,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
{
- return cmos_suspend(&pnp->dev, mesg);
+ return cmos_suspend(&pnp->dev);
}
static int cmos_pnp_resume(struct pnp_dev *pnp)
@@ -1157,8 +1159,9 @@ static struct platform_driver cmos_platform_driver = {
.shutdown = cmos_platform_shutdown,
.driver = {
.name = (char *) driver_name,
- .suspend = cmos_suspend,
- .resume = cmos_resume,
+#ifdef CONFIG_PM
+ .pm = &cmos_pm_ops,
+#endif
}
};
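The cmos change above is the standard migration from the legacy bus-level .suspend/.resume pointers to dev_pm_ops. A minimal sketch of the pattern, with illustrative demo_ names:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	return 0;	/* save state, mask interrupts, ... */
}

static int demo_resume(struct device *dev)
{
	return 0;	/* restore state */
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo-rtc",
		.pm	= &demo_pm_ops,
	},
};

Because the pm_message_t argument is gone, helpers such as cmos_poweroff() and the PNP wrappers now simply call cmos_suspend(dev) directly, as the hunks above show.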
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984d155b..d0e06edb14c5 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -76,7 +76,7 @@ static void rtc_uie_task(struct work_struct *work)
}
spin_unlock_irq(&rtc->irq_lock);
if (num)
- rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
+ rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
static void rtc_uie_timer(unsigned long data)
{
@@ -104,7 +104,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_work_sync(&rtc->uie_task);
+ flush_scheduled_work();
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -253,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file,
if (err)
goto done;
- /* try the driver's ioctl interface */
- if (ops->ioctl) {
- err = ops->ioctl(rtc->dev.parent, cmd, arg);
- if (err != -ENOIOCTLCMD) {
- mutex_unlock(&rtc->ops_lock);
- return err;
- }
- }
-
- /* if the driver does not provide the ioctl interface
- * or if that particular ioctl was not implemented
- * (-ENOIOCTLCMD), we will try to emulate here.
- *
+ /*
* Drivers *SHOULD NOT* provide ioctl implementations
* for these requests. Instead, provide methods to
* support the following code, so that the RTC's main
@@ -428,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file,
return err;
default:
- err = -ENOTTY;
+ /* Finally try the driver's ioctl interface */
+ if (ops->ioctl) {
+ err = ops->ioctl(rtc->dev.parent, cmd, arg);
+ if (err == -ENOIOCTLCMD)
+ err = -ENOTTY;
+ }
break;
}
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9091ed..60ce69600828 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
__raw_writel(data, &priv->rtcregs[reg]);
}
+
+static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ds1286_priv *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned char val;
+
+ /* Allow or mask alarm interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ val = ds1286_rtc_read(priv, RTC_CMD);
+ if (enabled)
+ val &= ~RTC_TDM;
+ else
+ val |= RTC_TDM;
+ ds1286_rtc_write(priv, val, RTC_CMD);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
#ifdef CONFIG_RTC_INTF_DEV
static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
unsigned char val;
switch (cmd) {
- case RTC_AIE_OFF:
- /* Mask alarm int. enab. bit */
- spin_lock_irqsave(&priv->lock, flags);
- val = ds1286_rtc_read(priv, RTC_CMD);
- val |= RTC_TDM;
- ds1286_rtc_write(priv, val, RTC_CMD);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- case RTC_AIE_ON:
- /* Allow alarm interrupts. */
- spin_lock_irqsave(&priv->lock, flags);
- val = ds1286_rtc_read(priv, RTC_CMD);
- val &= ~RTC_TDM;
- ds1286_rtc_write(priv, val, RTC_CMD);
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
case RTC_WIE_OFF:
/* Mask watchdog int. enab. bit */
spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
}
static const struct rtc_class_ops ds1286_ops = {
- .ioctl = ds1286_ioctl,
- .proc = ds1286_proc,
+ .ioctl = ds1286_ioctl,
+ .proc = ds1286_proc,
.read_time = ds1286_read_time,
.set_time = ds1286_set_time,
.read_alarm = ds1286_read_alarm,
.set_alarm = ds1286_set_alarm,
+ .alarm_irq_enable = ds1286_alarm_irq_enable,
};
static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d7b9e4..57fbcc149ba7 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour)
* Interface to RTC framework
*/
-#ifdef CONFIG_RTC_INTF_DEV
-
-/*
- * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
- */
-static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1305 *ds1305 = dev_get_drvdata(dev);
u8 buf[2];
- int status = -ENOIOCTLCMD;
+ long err = -EINVAL;
buf[0] = DS1305_WRITE | DS1305_CONTROL;
buf[1] = ds1305->ctrl[0];
- switch (cmd) {
- case RTC_AIE_OFF:
- status = 0;
- if (!(buf[1] & DS1305_AEI0))
- goto done;
- buf[1] &= ~DS1305_AEI0;
- break;
-
- case RTC_AIE_ON:
- status = 0;
+ if (enabled) {
if (ds1305->ctrl[0] & DS1305_AEI0)
goto done;
buf[1] |= DS1305_AEI0;
- break;
- }
- if (status == 0) {
- status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
- NULL, 0);
- if (status >= 0)
- ds1305->ctrl[0] = buf[1];
+ } else {
+ if (!(buf[1] & DS1305_AEI0))
+ goto done;
+ buf[1] &= ~DS1305_AEI0;
}
-
+ err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
+ if (err >= 0)
+ ds1305->ctrl[0] = buf[1];
done:
- return status;
+ return err;
+
}
-#else
-#define ds1305_ioctl NULL
-#endif
/*
* Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@ done:
#endif
static const struct rtc_class_ops ds1305_ops = {
- .ioctl = ds1305_ioctl,
.read_time = ds1305_get_time,
.set_time = ds1305_set_time,
.read_alarm = ds1305_get_alarm,
.set_alarm = ds1305_set_alarm,
.proc = ds1305_proc,
+ .alarm_irq_enable = ds1305_alarm_irq_enable,
};
static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index d827ce570a8c..4724ba3acf1a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -106,9 +106,9 @@ struct ds1307 {
struct i2c_client *client;
struct rtc_device *rtc;
struct work_struct work;
- s32 (*read_block_data)(struct i2c_client *client, u8 command,
+ s32 (*read_block_data)(const struct i2c_client *client, u8 command,
u8 length, u8 *values);
- s32 (*write_block_data)(struct i2c_client *client, u8 command,
+ s32 (*write_block_data)(const struct i2c_client *client, u8 command,
u8 length, const u8 *values);
};
@@ -158,8 +158,8 @@ MODULE_DEVICE_TABLE(i2c, ds1307_id);
#define BLOCK_DATA_MAX_TRIES 10
-static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
- u8 length, u8 *values)
+static s32 ds1307_read_block_data_once(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values)
{
s32 i, data;
@@ -172,7 +172,7 @@ static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
return i;
}
-static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
u8 length, u8 *values)
{
u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
@@ -198,7 +198,7 @@ static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
return length;
}
-static s32 ds1307_write_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
u8 length, const u8 *values)
{
u8 currvalues[I2C_SMBUS_BLOCK_MAX];
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
struct ds1307 *ds1307 = i2c_get_clientdata(client);
int ret;
- switch (cmd) {
- case RTC_AIE_OFF:
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -ENOTTY;
-
- ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
- if (ret < 0)
- return ret;
-
- ret &= ~DS1337_BIT_A1IE;
-
- ret = i2c_smbus_write_byte_data(client,
- DS1337_REG_CONTROL, ret);
- if (ret < 0)
- return ret;
-
- break;
-
- case RTC_AIE_ON:
- if (!test_bit(HAS_ALARM, &ds1307->flags))
- return -ENOTTY;
+ if (!test_bit(HAS_ALARM, &ds1307->flags))
+ return -ENOTTY;
- ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
- if (ret < 0)
- return ret;
+ ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+ if (ret < 0)
+ return ret;
+ if (enabled)
ret |= DS1337_BIT_A1IE;
+ else
+ ret &= ~DS1337_BIT_A1IE;
- ret = i2c_smbus_write_byte_data(client,
- DS1337_REG_CONTROL, ret);
- if (ret < 0)
- return ret;
-
- break;
-
- default:
- return -ENOIOCTLCMD;
- }
+ ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
.set_time = ds1307_set_time,
.read_alarm = ds1337_read_alarm,
.set_alarm = ds1337_set_alarm,
- .ioctl = ds1307_ioctl,
+ .alarm_irq_enable = ds1307_alarm_irq_enable,
};
/*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb6357c346..d834a63ec4b0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@ unlock:
mutex_unlock(&ds1374->mutex);
}
-static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
struct ds1374 *ds1374 = i2c_get_clientdata(client);
- int ret = -ENOIOCTLCMD;
+ int ret;
mutex_lock(&ds1374->mutex);
- switch (cmd) {
- case RTC_AIE_OFF:
- ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
- if (ret < 0)
- goto out;
-
- ret &= ~DS1374_REG_CR_WACE;
-
- ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
- if (ret < 0)
- goto out;
-
- break;
-
- case RTC_AIE_ON:
- ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
- if (ret < 0)
- goto out;
+ ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+ if (ret < 0)
+ goto out;
+ if (enabled) {
ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
ret &= ~DS1374_REG_CR_WDALM;
-
- ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
- if (ret < 0)
- goto out;
-
- break;
+ } else {
+ ret &= ~DS1374_REG_CR_WACE;
}
+ ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
out:
mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
.set_time = ds1374_set_time,
.read_alarm = ds1374_read_alarm,
.set_alarm = ds1374_set_alarm,
- .ioctl = ds1374_ioctl,
+ .alarm_irq_enable = ds1374_alarm_irq_enable,
};
static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
- time->tm_wday = bcd2bin(week);
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
- time->tm_mon = bcd2bin(month & 0x7F);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+ /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+ buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
- buf[5] = bin2bcd(time->tm_mon);
+ /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+ buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);
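The ds3232 fix above corrects the off-by-one between struct rtc_time (tm_mon 0-11, tm_wday 0-6) and the chip's BCD registers (month 1-12, weekday 1-7). The conversion in isolation, as a hedged sketch:

#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/types.h>

static inline u8 demo_month_to_reg(const struct rtc_time *tm)
{
	return bin2bcd(tm->tm_mon + 1);		/* 0-11 -> BCD 1-12 */
}

static inline int demo_reg_to_month(u8 reg)
{
	return bcd2bin(reg & 0x7f) - 1;		/* BCD 1-12 -> 0-11 */
}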
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f338b8..075f1708deae 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
}
EXPORT_SYMBOL(rtc_tm_to_time);
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+ time_t time;
+ rtc_tm_to_time(&tm, &time);
+ return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+ struct timespec ts;
+ struct rtc_time ret;
+
+ ts = ktime_to_timespec(kt);
+ /* Round up any ns */
+ if (ts.tv_nsec)
+ ts.tv_sec++;
+ rtc_time_to_tm(ts.tv_sec, &ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
+
MODULE_LICENSE("GPL");
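The two helpers added to rtc-lib.c convert with whole-second resolution, and rtc_ktime_to_tm() rounds any leftover nanoseconds up to the next second. A small round-trip sketch (the demo_ name is illustrative, declarations assumed to be in <linux/rtc.h>):

#include <linux/ktime.h>
#include <linux/rtc.h>

static void demo_roundtrip(struct rtc_time *tm)
{
	ktime_t kt = rtc_tm_to_ktime(*tm);	/* whole seconds only */
	struct rtc_time later;

	/* one extra nanosecond is enough to round up to the next second */
	later = rtc_ktime_to_tm(ktime_add_ns(kt, 1));
	(void)later;
}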
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa358066..69fe664a2228 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
return m41t80_set_datetime(to_i2c_client(dev), tm);
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-static int
-m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
int rc;
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
if (rc < 0)
goto err;
- switch (cmd) {
- case RTC_AIE_OFF:
- rc &= ~M41T80_ALMON_AFE;
- break;
- case RTC_AIE_ON:
+
+ if (enabled)
rc |= M41T80_ALMON_AFE;
- break;
- }
+ else
+ rc &= ~M41T80_ALMON_AFE;
+
if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
goto err;
+
return 0;
err:
return -EIO;
}
-#else
-#define m41t80_rtc_ioctl NULL
-#endif
static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = {
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
.proc = m41t80_rtc_proc,
- .ioctl = m41t80_rtc_ioctl,
+ .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b554eb8..3978f4caf724 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
/*
* Handle commands from user-space
*/
-static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct m48t59_plat_data *pdata = pdev->dev.platform_data;
struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&m48t59->lock, flags);
- switch (cmd) {
- case RTC_AIE_OFF: /* alarm interrupt off */
- M48T59_WRITE(0x00, M48T59_INTR);
- break;
- case RTC_AIE_ON: /* alarm interrupt on */
+ if (enabled)
M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
- break;
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
+ else
+ M48T59_WRITE(0x00, M48T59_INTR);
spin_unlock_irqrestore(&m48t59->lock, flags);
- return ret;
+ return 0;
}
static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
}
static const struct rtc_class_ops m48t59_rtc_ops = {
- .ioctl = m48t59_rtc_ioctl,
.read_time = m48t59_rtc_read_time,
.set_time = m48t59_rtc_set_time,
.read_alarm = m48t59_rtc_readalarm,
.set_alarm = m48t59_rtc_setalarm,
.proc = m48t59_rtc_proc,
+ .alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
};
static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 657403ebd54a..0ec3f588a255 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -139,12 +139,13 @@ static int __devinit max6902_probe(struct spi_device *spi)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ dev_set_drvdata(&spi->dev, rtc);
return 0;
}
static int __devexit max6902_remove(struct spi_device *spi)
{
- struct rtc_device *rtc = platform_get_drvdata(spi);
+ struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
rtc_device_unregister(rtc);
return 0;
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index f22dee35f330..3f7bc6b9fefa 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/max8998.h>
#include <linux/mfd/max8998-private.h>
+#include <linux/delay.h>
#define MAX8998_RTC_SEC 0x00
#define MAX8998_RTC_MIN 0x01
@@ -73,6 +74,7 @@ struct max8998_rtc_info {
struct i2c_client *rtc;
struct rtc_device *rtc_dev;
int irq;
+ bool lp3974_bug_workaround;
};
static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
@@ -124,10 +126,16 @@ static int max8998_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct max8998_rtc_info *info = dev_get_drvdata(dev);
u8 data[8];
+ int ret;
max8998_tm_to_data(tm, data);
- return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+ ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+
+ if (info->lp3974_bug_workaround)
+ msleep(2000);
+
+ return ret;
}
static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -163,12 +171,29 @@ static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
{
- return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+ int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+
+ if (info->lp3974_bug_workaround)
+ msleep(2000);
+
+ return ret;
}
static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
{
- return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77);
+ int ret;
+ u8 alarm0_conf = 0x77;
+
+ /* LP3974 chips with the delay bug also have an alarm bug in the "MONTH" field */
+ if (info->lp3974_bug_workaround)
+ alarm0_conf = 0x57;
+
+ ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf);
+
+ if (info->lp3974_bug_workaround)
+ msleep(2000);
+
+ return ret;
}
static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -187,10 +212,13 @@ static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
return ret;
+ if (info->lp3974_bug_workaround)
+ msleep(2000);
+
if (alrm->enabled)
- return max8998_rtc_start_alarm(info);
+ ret = max8998_rtc_start_alarm(info);
- return 0;
+ return ret;
}
static int max8998_rtc_alarm_irq_enable(struct device *dev,
@@ -224,6 +252,7 @@ static const struct rtc_class_ops max8998_rtc_ops = {
static int __devinit max8998_rtc_probe(struct platform_device *pdev)
{
struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev);
struct max8998_rtc_info *info;
int ret;
@@ -249,10 +278,18 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
"rtc-alarm0", info);
+
if (ret < 0)
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->irq, ret);
+ dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
+ if (pdata->rtc_delay) {
+ info->lp3974_bug_workaround = true;
+ dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
+ " RTC updates will be extremely slow.\n");
+ }
+
return 0;
out_rtc:
@@ -273,6 +310,12 @@ static int __devexit max8998_rtc_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id max8998_rtc_id[] = {
+ { "max8998-rtc", TYPE_MAX8998 },
+ { "lp3974-rtc", TYPE_LP3974 },
+ { }
+};
+
static struct platform_driver max8998_rtc_driver = {
.driver = {
.name = "max8998-rtc",
@@ -280,6 +323,7 @@ static struct platform_driver max8998_rtc_driver = {
},
.probe = max8998_rtc_probe,
.remove = __devexit_p(max8998_rtc_remove),
+ .id_table = max8998_rtc_id,
};
static int __init max8998_rtc_init(void)
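The new id_table lets the one driver bind to both "max8998-rtc" and "lp3974-rtc" platform devices; the matched entry is then available in probe as pdev->id_entry (the probe hunk above prints its name). A hedged sketch of that lookup, with a demo_ probe name:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* driver_data carries the TYPE_MAX8998/TYPE_LP3974 value from the table */
	dev_info(&pdev->dev, "bound as %s (type %lu)\n",
		 pdev->id_entry->name,
		 (unsigned long)pdev->id_entry->driver_data);
	return 0;
}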
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf63eb16..1db62db8469d 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled)
return 0;
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
/* Currently, the vRTC doesn't support UIE ON/OFF */
-static int
-mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned long flags;
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- if (!mrst->irq)
- return -EINVAL;
- break;
- default:
- /* PIE ON/OFF is handled by mrst_irq_set_state() */
- return -ENOIOCTLCMD;
- }
-
spin_lock_irqsave(&rtc_lock, flags);
- switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- mrst_irq_disable(mrst, RTC_AIE);
- break;
- case RTC_AIE_ON: /* alarm on */
+ if (enabled)
mrst_irq_enable(mrst, RTC_AIE);
- break;
- }
+ else
+ mrst_irq_disable(mrst, RTC_AIE);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
-#else
-#define mrst_rtc_ioctl NULL
-#endif
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
#endif
static const struct rtc_class_ops mrst_rtc_ops = {
- .ioctl = mrst_rtc_ioctl,
.read_time = mrst_read_time,
.set_time = mrst_set_time,
.read_alarm = mrst_read_alarm,
.set_alarm = mrst_set_alarm,
.proc = mrst_procfs,
.irq_set_state = mrst_irq_set_state,
+ .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
};
static struct mrst_rtc mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0ca49f8..67820626e18f 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv,
static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
unsigned int reg)
{
- return __raw_writel(val, &priv->regs[reg]);
+ __raw_writel(val, &priv->regs[reg]);
}
static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca47298554..60627a764514 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
if (pdata->irq < 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
- break;
- case RTC_AIE_ON:
+ return -EINVAL; /* fall back into rtc-dev's emulation */
+
+ if (enabled)
writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
return 0;
}
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
.set_time = mv_rtc_set_time,
.read_alarm = mv_rtc_read_alarm,
.set_alarm = mv_rtc_set_alarm,
- .ioctl = mv_rtc_ioctl,
+ .alarm_irq_enable = mv_rtc_alarm_irq_enable,
};
static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 73377b0d65da..b4dbf3a319b3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
u8 reg;
switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
case RTC_UIE_OFF:
case RTC_UIE_ON:
break;
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
rtc_wait_not_busy();
reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
switch (cmd) {
- /* AIE = Alarm Interrupt Enable */
- case RTC_AIE_OFF:
- reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
- break;
- case RTC_AIE_ON:
- reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
- break;
/* UIE = Update Interrupt Enable (1/second) */
case RTC_UIE_OFF:
reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
#define omap_rtc_ioctl NULL
#endif
+static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ u8 reg;
+
+ local_irq_disable();
+ rtc_wait_not_busy();
+ reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+ if (enabled)
+ reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+ else
+ reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+ rtc_wait_not_busy();
+ rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+ local_irq_enable();
+
+ return 0;
+}
+
/* this hardware doesn't support "don't care" alarm fields */
static int tm2bcd(struct rtc_time *tm)
{
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = {
.set_time = omap_rtc_set_time,
.read_alarm = omap_rtc_read_alarm,
.set_alarm = omap_rtc_set_alarm,
+ .alarm_irq_enable = omap_rtc_alarm_irq_enable,
};
static int omap_rtc_alarm;
@@ -429,13 +439,14 @@ fail1:
fail0:
iounmap(rtc_base);
fail:
- release_resource(mem);
+ release_mem_region(mem->start, resource_size(mem));
return -EIO;
}
static int __exit omap_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct resource *mem = dev_get_drvdata(&rtc->dev);
device_init_wakeup(&pdev->dev, 0);
@@ -447,8 +458,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
if (omap_rtc_timer != omap_rtc_alarm)
free_irq(omap_rtc_alarm, rtc);
- release_resource(dev_get_drvdata(&rtc->dev));
rtc_device_unregister(rtc);
+ iounmap(rtc_base);
+ release_mem_region(mem->start, resource_size(mem));
return 0;
}
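The omap error and removal paths above restore the usual teardown symmetry: a region claimed with request_mem_region() is dropped with release_mem_region() rather than release_resource(), and the ioremap() mapping is unmapped in remove. A reduced sketch with illustrative demo_ names:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *demo_base;		/* mapped in probe via ioremap() */

static int demo_remove(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	iounmap(demo_base);			/* undo ioremap() */
	if (mem)				/* undo request_mem_region() */
		release_mem_region(mem->start, resource_size(mem));
	return 0;
}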
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc30a84c..242bbf86c74a 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
static int rtc_proc_open(struct inode *inode, struct file *file)
{
+ int ret;
struct rtc_device *rtc = PDE(inode)->data;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- return single_open(file, rtc_proc_show, rtc);
+ ret = single_open(file, rtc_proc_show, rtc);
+ if (ret)
+ module_put(THIS_MODULE);
+ return ret;
}
static int rtc_proc_release(struct inode *inode, struct file *file)
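The rtc-proc change balances the module reference when single_open() fails. The pattern, as a self-contained sketch (demo_ names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *seq, void *offset)
{
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	ret = single_open(file, demo_show, NULL);
	if (ret)
		module_put(THIS_MODULE);	/* drop the reference on failure */
	return ret;
}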
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb66184461..694da39b6dd2 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
unsigned int reg)
{
- return __raw_writel(val, &priv->regs[reg]);
+ __raw_writel(val, &priv->regs[reg]);
}
static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e202c2c8..6aaa1550e3b1 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
if (rs5c->type == rtc_rs5c372a
&& (buf & RS5C372A_CTRL1_SL1))
return -ENOIOCTLCMD;
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- /* these irq management calls only make sense for chips
- * which are wired up to an IRQ.
- */
- if (!rs5c->has_irq)
- return -ENOIOCTLCMD;
- break;
default:
return -ENOIOCTLCMD;
}
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
addr = RS5C_ADDR(RS5C_REG_CTRL1);
switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- buf &= ~RS5C_CTRL1_AALE;
- break;
- case RTC_AIE_ON: /* alarm on */
- buf |= RS5C_CTRL1_AALE;
- break;
case RTC_UIE_OFF: /* update off */
buf &= ~RS5C_CTRL1_CT_MASK;
break;
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
#endif
+static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rs5c372 *rs5c = i2c_get_clientdata(client);
+ unsigned char buf;
+ int status, addr;
+
+ buf = rs5c->regs[RS5C_REG_CTRL1];
+
+ if (!rs5c->has_irq)
+ return -EINVAL;
+
+ status = rs5c_get_regs(rs5c);
+ if (status < 0)
+ return status;
+
+ addr = RS5C_ADDR(RS5C_REG_CTRL1);
+ if (enabled)
+ buf |= RS5C_CTRL1_AALE;
+ else
+ buf &= ~RS5C_CTRL1_AALE;
+
+ if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
+ printk(KERN_WARNING "%s: can't update alarm\n",
+ rs5c->rtc->name);
+ status = -EIO;
+ } else
+ rs5c->regs[RS5C_REG_CTRL1] = buf;
+
+ return status;
+}
+
+
/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI,
* which only exposes a polled programming interface; and since
* these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
.set_time = rs5c372_rtc_set_time,
.read_alarm = rs5c_read_alarm,
.set_alarm = rs5c_set_alarm,
+ .alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ecbfca9..b80fa2882408 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
}
/* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, to);
+ pr_debug("%s: aie=%d\n", __func__, enabled);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
- if (to)
+ if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
+
+ return 0;
}
static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(alrm_en, base + S3C2410_RTCALM);
- s3c_rtc_setaie(alrm->enabled);
+ s3c_rtc_setaie(dev, alrm->enabled);
return 0;
}
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
rtc_device_unregister(rtc);
s3c_rtc_setpie(&dev->dev, 0);
- s3c_rtc_setaie(0);
+ s3c_rtc_setaie(&dev->dev, 0);
clk_disable(rtc_clk);
clk_put(rtc_clk);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b8647a..5dfe5ffcb0d3 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
- case RTC_AIE_OFF:
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR &= ~RTSR_ALE;
- spin_unlock_irq(&sa1100_rtc_lock);
- return 0;
- case RTC_AIE_ON:
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR |= RTSR_ALE;
- spin_unlock_irq(&sa1100_rtc_lock);
- return 0;
case RTC_UIE_OFF:
spin_lock_irq(&sa1100_rtc_lock);
RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
return -ENOIOCTLCMD;
}
+static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ spin_lock_irq(&sa1100_rtc_lock);
+ if (enabled)
+ RTSR |= RTSR_ALE;
+ else
+ RTSR &= ~RTSR_ALE;
+ spin_unlock_irq(&sa1100_rtc_lock);
+ return 0;
+}
+
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
.proc = sa1100_rtc_proc,
.irq_set_freq = sa1100_irq_set_freq,
.irq_set_state = sa1100_irq_set_state,
+ .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
};
static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed93230..93314a9e7fa9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
unsigned int ret = 0;
switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
- break;
case RTC_UIE_OFF:
rtc->periodic_freq &= ~PF_OXS;
sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
return ret;
}
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ sh_rtc_setaie(dev, enabled);
+ return 0;
+}
+
static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = {
.irq_set_state = sh_rtc_irq_set_state,
.irq_set_freq = sh_rtc_irq_set_freq,
.proc = sh_rtc_proc,
+ .alarm_irq_enable = sh_rtc_alarm_irq_enable,
};
static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7755b0..a82d6fe97076 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq)
return 0;
}
-static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
- /* We do support interrupts, they're generated
- * using the sysfs interface.
- */
- switch (cmd) {
- case RTC_PIE_ON:
- case RTC_PIE_OFF:
- case RTC_UIE_ON:
- case RTC_UIE_OFF:
- case RTC_AIE_ON:
- case RTC_AIE_OFF:
- return 0;
-
- default:
- return -ENOIOCTLCMD;
- }
+ return 0;
}
static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = {
.read_alarm = test_rtc_read_alarm,
.set_alarm = test_rtc_set_alarm,
.set_mmss = test_rtc_set_mmss,
- .ioctl = test_rtc_ioctl,
+ .alarm_irq_enable = test_rtc_alarm_irq_enable,
};
static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3244244e8cf..769190ac6d11 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
- case RTC_AIE_ON:
- spin_lock_irq(&rtc_lock);
-
- if (!alarm_enabled) {
- enable_irq(aie_irq);
- alarm_enabled = 1;
- }
-
- spin_unlock_irq(&rtc_lock);
- break;
- case RTC_AIE_OFF:
- spin_lock_irq(&rtc_lock);
-
- if (alarm_enabled) {
- disable_irq(aie_irq);
- alarm_enabled = 0;
- }
-
- spin_unlock_irq(&rtc_lock);
- break;
case RTC_EPOCH_READ:
return put_user(epoch, (unsigned long __user *)arg);
case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
return 0;
}
+static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ spin_lock_irq(&rtc_lock);
+ if (enabled) {
+ if (!alarm_enabled) {
+ enable_irq(aie_irq);
+ alarm_enabled = 1;
+ }
+ } else {
+ if (alarm_enabled) {
+ disable_irq(aie_irq);
+ alarm_enabled = 0;
+ }
+ }
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805dcdff..2b771f18d1ad 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already disconnected */
+ if (!lcu)
+ return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already removed */
+ if (!lcu)
+ return 0;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..a9fe23d5bd0f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
- { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 30a1ca3d08b7..5505bc07e1e7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -103,7 +103,7 @@ int dasd_scan_partitions(struct dasd_block *block)
struct block_device *bdev;
bdev = bdget_disk(block->gdp, 0);
- if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
+ if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
return -ENODEV;
/*
* See fs/partition/check.c:register_disk,rescan_partitions
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14fa5dd..1f6a4d894e73 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
/*
* Parameter parsing functions.
*/
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e412b5e..5ad44daef73b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp;
- int ct, perm;
+ unsigned int ct;
+ int perm;
argp = (void __user *)arg;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f073632..267b54e8ff5a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
return rc;
}
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+ request->callback = (void *) tape_free_request;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b6136a..c26511171ffe 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
* Medium sense for 34xx tapes. There is no 'real' medium sense call.
* So we just do a normal sense.
*/
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
{
- struct tape_request *request;
- unsigned char *sense;
- int rc;
-
- request = tape_alloc_request(1, 32);
- if (IS_ERR(request)) {
- DBF_EXCEPTION(6, "MSEN fail\n");
- return PTR_ERR(request);
- }
-
- request->op = TO_MSEN;
- tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ struct tape_device *device = request->device;
+ unsigned char *sense;
- rc = tape_do_io_interruptible(device, request);
if (request->rc == 0) {
sense = request->cpdata;
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
device->tape_generic_status |= GMT_WR_PROT(~0);
else
device->tape_generic_status &= ~GMT_WR_PROT(~0);
- } else {
+ } else
DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
request->rc);
- }
tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+ struct tape_request *request;
+ int rc;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return PTR_ERR(request);
+ }
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ rc = tape_do_io_interruptible(device, request);
+ __tape_34xx_medium_sense(request);
return rc;
}
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return;
+ }
+
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ request->callback = (void *) __tape_34xx_medium_sense;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
struct tape_34xx_work {
struct tape_device *device;
enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
* is inserted but cannot call tape_do_io* from an interrupt context.
* Maybe that's useful for other actions we want to start from the
* interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
static void
tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
switch(p->op) {
case TO_MSEN:
- tape_34xx_medium_sense(device);
+ tape_34xx_medium_sense_async(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361fcd2c0..de2e99e0a71b 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@ out:
/*
* Enable encryption
*/
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_enable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
request->op = TO_CRYPT_ON;
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* Disable encryption
*/
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_disable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* IOCTL: Set encryption status
*/
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
/*
* SENSE Medium: Get Sense data about medium state
*/
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
{
struct tape_request *request;
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
return tape_do_io_free(device, request);
}
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 128);
+ if (IS_ERR(request))
+ return;
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+ tape_do_io_async_free(device, request);
+}
+
/*
* MTTELL: Tell block. Return the number of block relative to current file.
*/
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
* 2. The attention msg is written to the "read subsystem data" buffer.
* In this case we probably should print it to the console.
*/
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
{
struct tape_request *request;
char *buf;
request = tape_alloc_request(3, 4096);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return;
request->op = TO_READ_ATTMSG;
buf = request->cpdata;
buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
- return tape_do_io_free(device, request);
+ tape_do_io_async_free(device, request);
}
/*
* These functions are used to schedule follow-up actions from within an
* interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
struct work_handler_data {
struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
switch (p->op) {
case TO_MSEN:
- tape_3590_sense_medium(p->device);
+ tape_3590_sense_medium_async(p->device);
break;
case TO_READ_ATTMSG:
- tape_3590_read_attmsg(p->device);
+ tape_3590_read_attmsg_async(p->device);
break;
case TO_CRYPT_ON:
- tape_3592_enable_crypt(p->device);
+ tape_3592_enable_crypt_async(p->device);
break;
case TO_CRYPT_OFF:
- tape_3592_disable_crypt(p->device);
+ tape_3592_disable_crypt_async(p->device);
break;
default:
DBF_EVENT(3, "T3590: work handler undefined for "
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 707b7f48c232..9e32780c317f 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
-#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e8391b89eff4..b7eaff9ca19e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1835,6 +1835,7 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
* available again. Kick re-detection.
*/
cdev->private->flags.resuming = 1;
+ cdev->private->path_new_mask = LPM_ANYPATH;
css_schedule_eval(sch->schid);
spin_unlock_irq(sch->lock);
css_complete_work();
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b9bce2..5640c89cd9de 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data)
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 09e7a053c844..30b2a820e670 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -841,7 +841,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
}
/**
- * Emit buffer of a lan comand.
+ * Emit buffer of a lan command.
*/
static void
lcs_lancmd_timeout(unsigned long data)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0a3266..b6a6356d09b3 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
struct iucv_event ev;
int rc;
- if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
+ if (memcmp(iucvMagic, ipuser, 16))
/* ipuser must match iucvMagic. */
return -EINVAL;
rc = -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f848bfc12f..019ae58ab913 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -988,16 +988,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
/* CHPP field bit 6 == 1 -> single queue */
- if ((chp_dsc->chpp & 0x02) == 0x02)
+ if ((chp_dsc->chpp & 0x02) == 0x02) {
+ if ((atomic_read(&card->qdio.state) !=
+ QETH_QDIO_UNINITIALIZED) &&
+ (card->qdio.no_out_queues == 4))
+ /* change from 4 to 1 outbound queues */
+ qeth_free_qdio_buffers(card);
card->qdio.no_out_queues = 1;
+ if (card->qdio.default_out_queue != 0)
+ dev_info(&card->gdev->dev,
+ "Priority Queueing not supported\n");
+ card->qdio.default_out_queue = 0;
+ } else {
+ if ((atomic_read(&card->qdio.state) !=
+ QETH_QDIO_UNINITIALIZED) &&
+ (card->qdio.no_out_queues == 1)) {
+ /* change from 1 to 4 outbound queues */
+ qeth_free_qdio_buffers(card);
+ card->qdio.default_out_queue = 2;
+ }
+ card->qdio.no_out_queues = 4;
+ }
card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
}
- if (card->qdio.no_out_queues == 1) {
- card->qdio.default_out_queue = 0;
- dev_info(&card->gdev->dev,
- "Priority Queueing not supported\n");
- }
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
return;
@@ -1832,33 +1846,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
}
}
-static inline int qeth_get_max_mtu_for_card(int cardtype)
-{
- switch (cardtype) {
-
- case QETH_CARD_TYPE_UNKNOWN:
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSN:
- case QETH_CARD_TYPE_OSM:
- case QETH_CARD_TYPE_OSX:
- return 61440;
- case QETH_CARD_TYPE_IQD:
- return 57344;
- default:
- return 1500;
- }
-}
-
-static inline int qeth_get_mtu_out_of_mpc(int cardtype)
-{
- switch (cardtype) {
- case QETH_CARD_TYPE_IQD:
- return 1;
- default:
- return 0;
- }
-}
-
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
@@ -1881,10 +1868,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
- return ((mtu >= 576) && (mtu <= 61440));
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
- (mtu <= card->info.max_mtu + 4096 - 32));
+ (mtu <= card->info.max_mtu));
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_UNKNOWN:
default:
@@ -1907,7 +1893,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
- if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
if (!mtu) {
@@ -1915,12 +1901,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
- card->info.max_mtu = mtu;
+ if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+ /* frame size has changed */
+ if (card->dev &&
+ ((card->dev->mtu == card->info.initial_mtu) ||
+ (card->dev->mtu > mtu)))
+ card->dev->mtu = mtu;
+ qeth_free_qdio_buffers(card);
+ }
card->info.initial_mtu = mtu;
+ card->info.max_mtu = mtu;
card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
} else {
card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
- card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+ card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+ iob->data);
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
}
@@ -3775,6 +3770,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
}
}
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+ int rc;
+ int length;
+ char *prcd;
+ struct ccw_device *ddev;
+ int ddev_offline = 0;
+
+ QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ ddev = CARD_DDEV(card);
+ if (!ddev->online) {
+ ddev_offline = 1;
+ rc = ccw_device_set_online(ddev);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ goto out;
+ }
+ }
+
+ rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+ dev_name(&card->gdev->dev), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ goto out_offline;
+ }
+ qeth_configure_unitaddr(card, prcd);
+ qeth_configure_blkt_default(card, prcd);
+ kfree(prcd);
+
+ rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+ if (ddev_offline == 1)
+ ccw_device_set_offline(ddev);
+out:
+ return;
+}
+
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_initialize init_data;
@@ -3905,6 +3941,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
+ qeth_get_channel_path_desc(card);
retry:
if (retries)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3970,7 @@ retriable:
else
goto retry;
}
+ qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4240,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline.ccwgdriver = NULL;
}
-static void qeth_determine_capabilities(struct qeth_card *card)
-{
- int rc;
- int length;
- char *prcd;
-
- QETH_DBF_TEXT(SETUP, 2, "detcapab");
- rc = ccw_device_set_online(CARD_DDEV(card));
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto out;
- }
-
-
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
- goto out_offline;
- }
- qeth_configure_unitaddr(card, prcd);
- qeth_configure_blkt_default(card, prcd);
- kfree(prcd);
-
- rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-
-out_offline:
- ccw_device_set_offline(CARD_DDEV(card));
-out:
- return;
-}
-
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b664781..ada0fe782373 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -573,13 +573,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
"MAC address %pM already exists\n",
- card->dev->dev_addr);
+ cmd->data.setdelmac.mac);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
"MAC address %pM is not authorized\n",
- card->dev->dev_addr);
+ cmd->data.setdelmac.mac);
break;
default:
break;
@@ -831,12 +831,14 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int qeth_l2_open(struct net_device *dev)
+static int __qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
+ if (card->state == CARD_STATE_UP)
+ return rc;
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
@@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev)
return rc;
}
+static int qeth_l2_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 5, "qethope_");
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_CARD_TEXT(card, 3, "openREC");
+ return -ERESTARTSYS;
+ }
+ return __qeth_l2_open(dev);
+}
+
static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@ contin:
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
- qeth_l2_open(card->dev);
+ __qeth_l2_open(card->dev);
} else {
rtnl_lock();
dev_open(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e465bfc4..d09b0c44fc3d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card,
*/
if (iph->protocol == IPPROTO_UDP)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+ QETH_HDR_EXT_CSUM_HDR_REQ;
+ iph->check = 0;
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
}
@@ -3240,12 +3242,14 @@ tx_drop:
return NETDEV_TX_OK;
}
-static int qeth_l3_open(struct net_device *dev)
+static int __qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
int rc = 0;
QETH_CARD_TEXT(card, 4, "qethopen");
+ if (card->state == CARD_STATE_UP)
+ return rc;
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev)
return rc;
}
+static int qeth_l3_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 5, "qethope_");
+ if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+ QETH_CARD_TEXT(card, 3, "openREC");
+ return -ERESTARTSYS;
+ }
+ return __qeth_l3_open(dev);
+}
+
static int qeth_l3_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@ contin:
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode)
- qeth_l3_open(card->dev);
+ __qeth_l3_open(card->dev);
else {
rtnl_lock();
dev_open(card->dev);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf104943..207b7d742443 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
u8 ipuser[16])
{
- if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
+ if (strncmp(ipvmid, "*MSG ", 8) != 0)
return -EINVAL;
/* Path pending from *MSG. */
return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 46342fee394d..303dde09d294 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -317,7 +317,7 @@ static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
/**
* zfcp_cfdc_port_denied - Process "access denied" for port
- * @port: The port where the acces has been denied
+ * @port: The port where the access has been denied
* @qual: The FSF status qualifier for the access denied FSF status
*/
void zfcp_cfdc_port_denied(struct zfcp_port *port,
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a624f5af4320..e8566224fe4b 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -467,7 +467,7 @@ static int jsflash_init(void)
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(node, "flash-memory");
- if (node != 0 && node != -1) {
+ if (node != 0 && (s32)node != -1) {
if (prom_getproperty(node, "reg",
(char *)&reg0, sizeof(reg0)) == -1) {
printk("jsflash: no \"reg\" property\n");
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index dc5ac6e528c4..a391090a17c5 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -416,7 +416,7 @@ static u8 orc_load_firmware(struct orc_host * host)
/* Go back and check they match */
outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
- bios_addr -= 0x1000; /* Reset the BIOS adddress */
+ bios_addr -= 0x1000; /* Reset the BIOS address */
for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
i < 0x1000; /* Firmware code size = 4K */
i++, bios_addr++) {
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index afc9aeba5edb..060ac4bd5a14 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -91,7 +91,7 @@ void aac_fib_map_free(struct aac_dev *dev)
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
*
- * Allocate the PCI space for the fibs, map it and then intialise the
+ * Allocate the PCI space for the fibs, map it and then initialise the
* fib area, the unmapped fib data and also the free list
*/
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq
index 5997e7c3a191..1565be9ebd49 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx_old/aic7xxx.seq
@@ -1178,7 +1178,7 @@ notFound:
/*
* Retrieve an SCB by SCBID first searching the disconnected list falling
* back to DMA'ing the SCB down from the host. This routine assumes that
- * ARG_1 is the SCBID of interrest and that SINDEX is the position in the
+ * ARG_1 is the SCBID of interest and that SINDEX is the position in the
* disconnected list to start the search from. If SINDEX is SCB_LIST_NULL,
* we go directly to the host for the SCB.
*/
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index 28aaf349c111..40273a747d29 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1689,7 +1689,7 @@
#define PHY_START_CAL 0x01
/*
- * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC)
+ * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
*/
#define PCIX_REG_BASE_ADR 0xB8040000
@@ -1802,7 +1802,7 @@
#define PCIC_TP_CTRL 0xFC
/*
- * EXSI Registers, Addresss Range: (0x00-0xFC)
+ * EXSI Registers, Address Range: (0x00-0xFC)
*/
#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index c43698b1cb64..29593275201a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -867,7 +867,7 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
* resources they have with this SCB, and then call this one at the
* end of their timeout function. To do this, one should initialize
* the ascb->timer.{function, data, expires} prior to calling the post
- * funcion. The timer is started by the post function.
+ * function. The timer is started by the post function.
*/
void asd_ascb_timedout(unsigned long data)
{
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 74374618010c..390168f62a13 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -797,7 +797,7 @@ static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
int j;
/* Start from Page 1 of Mode 0 and 1. */
moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
- /* All the fields of page 1 can be intialized to 0. */
+ /* All the fields of page 1 can be initialized to 0. */
for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
}
@@ -938,7 +938,7 @@ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
asd_write_reg_dword(asd_ha, SCBPRO, 0);
asd_write_reg_dword(asd_ha, CSEQCON, 0);
- /* Intialize CSEQ Mode 11 Interrupt Vectors.
+ /* Initialize CSEQ Mode 11 Interrupt Vectors.
* The addresses are 16 bit wide and in dword units.
* The values of their macros are in byte units.
* Thus we have to divide by 4. */
@@ -961,7 +961,7 @@ static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
for (i = 0; i < 8; i++) {
- /* Intialize Mode n Link m Interrupt Enable. */
+ /* Initialize Mode n Link m Interrupt Enable. */
asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
/* Initialize Mode n Request Mailbox. */
asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31ae985c..77b26f5b9c33 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr.h
-** BY : Erich Chen
+** BY : Nick Cheng
** Description: SCSI RAID Device Driver for
** ARECA RAID Host adapter
*******************************************************************************
@@ -46,8 +46,12 @@
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
-#define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+ #define ARCMSR_MAX_FREECCB_NUM 160
+#else
+ #define ARCMSR_MAX_FREECCB_NUM 320
+#endif
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -60,7 +64,6 @@ struct device_attribute;
#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
#define ARCMSR_CDB_SG_PAGE_LENGTH 256
-#define SCSI_CMD_ARECA_SPECIFIC 0xE1
#ifndef PCI_DEVICE_ID_ARECA_1880
#define PCI_DEVICE_ID_ARECA_1880 0x1880
#endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..acdae33de521 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_attr.c
-** BY : Erich Chen
+** BY : Nick Cheng
** Description: attributes exported to sysfs and device host
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6b7da6..984bd527c6c9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_hba.c
-** BY : Erich Chen
+** BY : Nick Cheng
** Description: SCSI RAID Device Driver for
** ARECA RAID Host adapter
*******************************************************************************
@@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
if (isleep > 0) {
msleep(isleep*1000);
}
- printk(KERN_NOTICE "wake-up\n");
return 0;
}
@@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
}
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
{
int id, lun;
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
, pCCB->startdone
, atomic_read(&acb->ccboutstandingcount));
return;
- }
+ }
arcmsr_report_ccb_state(acb, pCCB, error);
}
@@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/*clear all outbound posted Q*/
- writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
arcmsr_drain_donequeue(acb, pCCB, error);
}
}
-
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t index;
@@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (atomic_read(&acb->ccboutstandingcount) >=
ARCMSR_MAX_OUTSTANDING_CMD)
return SCSI_MLQUEUE_HOST_BUSY;
- if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
- printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
- return 0;
- }
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
int index, rtn;
bool error;
polling_hbb_ccb_retry:
+
poll_count++;
/* clear doorbell interrupt */
writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
} else {
acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
atomic_set(&acb->rq_map_token, 16);
}
atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token))
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
+ }
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
@@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
{
struct MessageUnit_B __iomem *reg = acb->pmuB;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
} else {
acb->fw_flag = FW_NORMAL;
if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token,16);
+ atomic_set(&acb->rq_map_token, 16);
}
atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if(atomic_dec_and_test(&acb->rq_map_token))
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
+ }
writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
@@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
{
struct MessageUnit_C __iomem *reg = acb->pmuC;
if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
} else {
acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
atomic_set(&acb->rq_map_token, 16);
}
atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token))
+ if (atomic_dec_and_test(&acb->rq_map_token)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
return;
+ }
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
uint32_t intmask_org;
uint8_t rtnval = 0x00;
int i = 0;
+ unsigned long flags;
+
if (atomic_read(&acb->ccboutstandingcount) != 0) {
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
- arcmsr_ccb_complete(ccb);
+ scsi_dma_unmap(ccb->pcmd);
+ ccb->startdone = ARCMSR_CCB_DONE;
+ ccb->ccb_flags = 0;
+ spin_lock_irqsave(&acb->ccblist_lock, flags);
+ list_add_tail(&ccb->list, &acb->ccb_free_list);
+ spin_unlock_irqrestore(&acb->ccblist_lock, flags);
}
}
atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
- struct AdapterControlBlock *acb =
- (struct AdapterControlBlock *)cmd->device->host->hostdata;
+ struct AdapterControlBlock *acb;
uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
@@ -2971,31 +2980,16 @@ sleep_again:
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
- add_timer(&acb->eternal_timer);
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
acb->acb_flags &= ~ACB_F_BUS_RESET;
rtn = SUCCESS;
printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
- if (atomic_read(&acb->rq_map_token) == 0) {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
- add_timer(&acb->eternal_timer);
- } else {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- }
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
rtn = SUCCESS;
}
break;
@@ -3007,21 +3001,10 @@ sleep_again:
rtn = FAILED;
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
- if (atomic_read(&acb->rq_map_token) == 0) {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
- add_timer(&acb->eternal_timer);
- } else {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- }
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
rtn = SUCCESS;
}
break;
@@ -3067,31 +3050,16 @@ sleep:
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
- add_timer(&acb->eternal_timer);
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
acb->acb_flags &= ~ACB_F_BUS_RESET;
rtn = SUCCESS;
printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
- if (atomic_read(&acb->rq_map_token) == 0) {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
- add_timer(&acb->eternal_timer);
- } else {
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- }
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
rtn = SUCCESS;
}
break;
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 9c410b21db6d..c0353cdca929 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1838,7 +1838,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
case BFA_IOIM_SM_ABORT:
/*
- * IO is alraedy being cleaned up implicitly
+ * IO is already being cleaned up implicitly
*/
ioim->io_cbfn = __bfa_cb_ioim_abort;
break;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 4e2eb92ba028..43fa986bb586 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5646,7 +5646,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
switch (status) {
case BFA_STATUS_OK:
/*
- * Initialiaze the V-Port fields
+ * Initialize the V-Port fields
*/
__vport_fcid(vport) = vport->lps->lp_pid;
vport->vport_stats.fdisc_accepts++;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8f1b5c8bf903..b0f8523e665f 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3796,7 +3796,7 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
* adapter_add_device - Adds the device instance to the adaptor instance.
*
* @acb: The adapter device to be updated
- * @dcb: A newly created and intialised device instance to add.
+ * @dcb: A newly created and initialised device instance to add.
**/
static void adapter_add_device(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb)
@@ -4498,7 +4498,7 @@ static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
* init_adapter - Grab the resource for the card, setup the adapter
* information, set the card into a known state, create the various
* tables etc etc. This basically gets all adapter information all up
- * to date, intialised and gets the chip in sync with it.
+ * to date, initialised and gets the chip in sync with it.
*
* @host: This hosts adapter structure
* @io_port: The base I/O port
@@ -4789,7 +4789,7 @@ static void banner_display(void)
* that it finds in the system. The pci_dev strcuture indicates which
* instance we are being called from.
*
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
* @id: Looks like a pointer to the entry in our pci device table
* that was actually matched by the PCI subsystem.
*
@@ -4860,7 +4860,7 @@ fail:
* dc395x_remove_one - Called to remove a single instance of the
* adapter.
*
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
**/
static void __devexit dc395x_remove_one(struct pci_dev *dev)
{
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index de2e09e49a3e..9c5c8be72231 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5748,7 +5748,7 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
}
if (ipr_is_gata(res) && res->sata_port)
- return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
+ return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
@@ -7515,16 +7515,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
- int rc;
ENTER;
ioa_cfg->pdev->state_saved = true;
- rc = pci_restore_state(ioa_cfg->pdev);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
- return IPR_RC_JOB_CONTINUE;
- }
+ pci_restore_state(ioa_cfg->pdev);
if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index cdc06cda76e5..5962d1a5a674 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1250,7 +1250,7 @@ static void fc_lun_reset_send(unsigned long data)
/**
* fc_lun_reset() - Send a LUN RESET command to a device
* and wait for the reply
- * @lport: The local port to sent the comand on
+ * @lport: The local port to send the command on
* @fsp: The FCP packet that identifies the LUN to be reset
* @id: The SCSI command ID
* @lun: The LUN ID to be reset
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 29251fabecc6..9a7aaf5f1311 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -211,8 +211,7 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
unsigned long flags;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- res = ata_sas_queuecmd(cmd, scsi_done,
- dev->sata_dev.ap);
+ res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
goto out;
}
@@ -647,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+ shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c06491b5862f..3512abb8a587 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1335,7 +1335,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
}
/**
- * lpfc_param_init - Intializes a cfg attribute
+ * lpfc_param_init - Initializes a cfg attribute
*
* Description:
* Macro that given an attr e.g. hba_queue_depth expands
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f9f160ab2ee9..bb015960dbc9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2852,7 +2852,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
if (unlikely(!fcf_record)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_SLI,
- "2554 Could not allocate memmory for "
+ "2554 Could not allocate memory for "
"fcf record\n");
rc = -ENODEV;
goto out;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 462242dcdd0a..6d0b36aa3389 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8071,7 +8071,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
* the HBA.
*/
- /* HBA interrupt will be diabled after this call */
+ /* HBA interrupt will be disabled after this call */
lpfc_sli_hba_down(phba);
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 634b2fea9c4d..a359d2b873ce 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10172,7 +10172,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
*
- * This function frees a queue structure and the DMAable memeory used for
+ * This function frees a queue structure and the DMAable memory used for
* the host resident queue. This function must be called after destroying the
* queue on the HBA.
**/
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index f5644745e24e..853411911b2e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -13,7 +13,7 @@
*/
/*
- * Comand coalescing - This feature allows the driver to be able to combine
+ * Command coalescing - This feature allows the driver to be able to combine
* two or more commands and issue as one command in order to boost I/O
* performance. Useful if the nature of the I/O is sequential. It is not very
* useful for random natured I/Os.
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index a7008c0c24f9..25506c777381 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -224,7 +224,7 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
{
int err;
- /* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */
+ /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
mutex_lock(&mraid_mm_mutex);
err = mraid_mm_ioctl(filep, cmd, arg);
mutex_unlock(&mraid_mm_mutex);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a817055b8b..9ead0399808a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
/* adjust hba_queue_depth, reply_free_queue_depth,
* and queue_size
*/
- ioc->hba_queue_depth -= queue_diff;
- ioc->reply_free_queue_depth -= queue_diff;
- queue_size -= queue_diff;
+ ioc->hba_queue_depth -= (queue_diff / 2);
+ ioc->reply_free_queue_depth -= (queue_diff / 2);
+ queue_size = facts->MaxReplyDescriptorPostQueueDepth;
}
ioc->reply_post_queue_depth = queue_size;
@@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
static void
_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
+ mpt2sas_scsih_reset_handler(ioc, reset_phase);
+ mpt2sas_ctl_reset_handler(ioc, reset_phase);
switch (reset_phase) {
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
break;
}
- mpt2sas_scsih_reset_handler(ioc, reset_phase);
- mpt2sas_ctl_reset_handler(ioc, reset_phase);
}
/**
@@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
{
int r;
unsigned long flags;
+ u8 pe_complete = ioc->wait_for_port_enable_to_complete;
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
@@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
if (r)
goto out;
_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (pe_complete) {
+ r = -EFAULT;
+ goto out;
+ }
r = _base_make_ioc_operational(ioc, sleep_flag);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c57979..5ded3db6e316 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
}
/**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
@@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then will dereference the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
* _scsih_scsi_lookup_find_by_scmd - scmd lookup
* @ioc: per adapter object
* @smid: system request message index
@@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
u16 handle;
for (i = 0 ; i < event_data->NumEntries; i++) {
- if (event_data->PHY[i].PhyStatus &
- MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
- continue;
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
if (!handle)
continue;
@@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
u16 count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
if (!scmd)
continue;
count++;
@@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 response_code = 0;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
event_data);
#endif
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
if (event_data->ReasonCode !=
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
u16 smid, handle;
u32 lun;
struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
#endif
u16 ioc_status;
+ unsigned long flags;
+ int r;
+
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
"phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
event_data->PortWidth));
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
__func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->broadcast_aen_busy = 0;
termination_count = 0;
query_count = 0;
mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
scmd = _scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
- sas_device_priv_data = scmd->device->hostdata;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
/* skip hidden raid components */
@@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
lun = sas_device_priv_data->lun;
query_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
(mpi_reply->ResponseCode ==
MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
mpi_reply->ResponseCode ==
- MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
continue;
-
- mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+ }
+ r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd);
+ if (r == FAILED)
+ sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+ "scmd(%p)\n", scmd);
termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
- ioc->broadcast_aen_busy = 0;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
"%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev)
destroy_workqueue(wq);
/* release all the volumes */
+ _scsih_ir_shutdown(ioc);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
if (raid_device->starget) {
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c86b28f03f..b95285f3383f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -603,7 +603,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
#endif
intx:
- /* intialize the INT-X interrupt */
+ /* initialize the INT-X interrupt */
rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 300d59f389da..321cf3ae8630 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2228,12 +2228,7 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
/* Once either bist or pci reset is done, restore PCI config
* space. If this fails, proceed with hard reset again
*/
- if (pci_restore_state(pinstance->pdev)) {
- pmcraid_info("config-space error resetting again\n");
- pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
- pmcraid_reset_alert(cmd);
- break;
- }
+ pci_restore_state(pinstance->pdev);
/* fail all pending commands */
pmcraid_fail_outstanding_cmds(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct Scsi_Host *host = rport_to_shost(rport);
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ unsigned long flags;
if (!fcport)
return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
- spin_lock_irq(host->host_lock);
+ spin_lock_irqsave(host->host_lock, flags);
fcport->rport = fcport->drport = NULL;
*((fc_port_t **)rport->dd_data) = NULL;
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ unsigned long flags;
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
}
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
qla2x00_rport_del(fcport);
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
rport->supported_classes = fcport->supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
{
struct fc_rport *rport;
scsi_qla_host_t *base_vha;
+ unsigned long flags;
if (!fcport->rport)
return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
rport = fcport->rport;
if (defer) {
base_vha = pci_get_drvdata(vha->hw->pdev);
- spin_lock_irq(vha->host->host_lock);
+ spin_lock_irqsave(vha->host->host_lock, flags);
fcport->drport = rport;
- spin_unlock_irq(vha->host->host_lock);
+ spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+ set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
+ __set_current_state(TASK_RUNNING);
DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
unsigned long long lba, unsigned int num, int write)
{
int ret;
- unsigned int block, rest = 0;
+ unsigned long long block, rest = 0;
int (*func)(struct scsi_cmnd *, unsigned char *, int);
func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 501f67bef719..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
&sdev->request_queue->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue);
+ __blk_run_queue(sdev->request_queue, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
spin_unlock(sdev->request_queue->queue_lock);
@@ -1977,8 +1977,7 @@ EXPORT_SYMBOL(scsi_mode_sense);
* in.
*
* Returns zero if unsuccessful or an error if TUR failed. For
- * removable media, a return of NOT_READY or UNIT_ATTENTION is
- * translated to success, with the ->changed flag updated.
+ * removable media, UNIT_ATTENTION sets the ->changed flag.
**/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
@@ -2005,16 +2004,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
} while (scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION && --retries);
- if (!sshdr)
- /* could not allocate sense buffer, so can't process it */
- return result;
-
- if (sdev->removable && scsi_sense_valid(sshdr) &&
- (sshdr->sense_key == UNIT_ATTENTION ||
- sshdr->sense_key == NOT_READY)) {
- sdev->changed = 1;
- result = 0;
- }
if (!sshdr_external)
kfree(sshdr);
return result;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index d53e6503c6d5..a2ed201885ae 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -477,7 +477,7 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
/**
- * scsi_netlink_init - Called by SCSI subsystem to intialize
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
* the SCSI transport netlink interface
*
**/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4c68d36f9ac2..490ce213204e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -864,13 +864,15 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
error = device_add(&sdev->sdev_gendev);
if (error) {
- printk(KERN_INFO "error 1\n");
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add device: %d\n", error);
return error;
}
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
if (error) {
- printk(KERN_INFO "error 2\n");
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add class device: %d\n", error);
device_del(&sdev->sdev_gendev);
return error;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q);
+ __blk_run_queue(rport->rqst_q, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 365024b0c407..e56730214c05 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -990,30 +990,51 @@ out:
static void set_media_not_present(struct scsi_disk *sdkp)
{
- sdkp->media_present = 0;
- sdkp->capacity = 0;
- sdkp->device->changed = 1;
+ if (sdkp->media_present)
+ sdkp->device->changed = 1;
+
+ if (sdkp->device->removable) {
+ sdkp->media_present = 0;
+ sdkp->capacity = 0;
+ }
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+ struct scsi_sense_hdr *sshdr)
+{
+ if (!scsi_sense_valid(sshdr))
+ return 0;
+
+ /* not invoked for commands that could return deferred errors */
+ switch (sshdr->sense_key) {
+ case UNIT_ATTENTION:
+ case NOT_READY:
+ /* medium not present */
+ if (sshdr->asc == 0x3A) {
+ set_media_not_present(sdkp);
+ return 1;
+ }
+ }
+ return 0;
}
/**
- * sd_media_changed - check if our medium changed
- * @disk: kernel device descriptor
+ * sd_check_events - check media events
+ * @disk: kernel device descriptor
+ * @clearing: disk events currently being cleared
*
- * Returns 0 if not applicable or no change; 1 if change
+ * Returns mask of DISK_EVENT_*.
*
* Note: this function is invoked from the block subsystem.
**/
-static int sd_media_changed(struct gendisk *disk)
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
- SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
-
- if (!sdp->removable)
- return 0;
+ SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
* If the device is offline, don't send any commands - just pretend as
@@ -1043,48 +1064,32 @@ static int sd_media_changed(struct gendisk *disk)
sshdr);
}
- /*
- * Unable to test, unit probably not ready. This usually
- * means there is no disc in the drive. Mark as changed,
- * and we will figure it out later once the drive is
- * available again.
- */
- if (retval || (scsi_sense_valid(sshdr) &&
- /* 0x3a is medium not present */
- sshdr->asc == 0x3a)) {
+ /* failed to execute TUR, assume media not present */
+ if (host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
+ if (media_not_present(sdkp, sshdr))
+ goto out;
+
/*
* For removable scsi disk we have to recognise the presence
- * of a disk in the drive. This is kept in the struct scsi_disk
- * struct and tested at open ! Daniel Roche (dan@lectra.fr)
+ * of a disk in the drive.
*/
+ if (!sdkp->media_present)
+ sdp->changed = 1;
sdkp->media_present = 1;
-
out:
/*
- * Report a media change under the following conditions:
+ * sdp->changed is set under the following conditions:
*
- * Medium is present now and wasn't present before.
- * Medium wasn't present before and is present now.
- * Medium was present at all times, but it changed while
- * we weren't looking (sdp->changed is set).
- *
- * If there was no medium before and there is no medium now then
- * don't report a change, even if a medium was inserted and removed
- * while we weren't looking.
+ * Medium present state has changed in either direction.
+ * Device has indicated UNIT_ATTENTION.
*/
- retval = (sdkp->media_present != sdkp->previous_state ||
- (sdkp->media_present && sdp->changed));
- if (retval)
- sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
- sdkp->previous_state = sdkp->media_present;
-
- /* sdp->changed indicates medium was changed or is not present */
- sdp->changed = !sdkp->media_present;
kfree(sshdr);
+ retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ sdp->changed = 0;
return retval;
}
@@ -1177,7 +1182,7 @@ static const struct block_device_operations sd_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
#endif
- .media_changed = sd_media_changed,
+ .check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
};
@@ -1320,23 +1325,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
return good_bytes;
}
-static int media_not_present(struct scsi_disk *sdkp,
- struct scsi_sense_hdr *sshdr)
-{
-
- if (!scsi_sense_valid(sshdr))
- return 0;
- /* not invoked for commands that could return deferred errors */
- if (sshdr->sense_key != NOT_READY &&
- sshdr->sense_key != UNIT_ATTENTION)
- return 0;
- if (sshdr->asc != 0x3A) /* medium not present */
- return 0;
-
- set_media_not_present(sdkp);
- return 1;
-}
-
/*
* spinup disk - called only in sd_revalidate_disk()
*/
@@ -1511,7 +1499,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
*/
if (sdp->removable &&
sense_valid && sshdr->sense_key == NOT_READY)
- sdp->changed = 1;
+ set_media_not_present(sdkp);
/*
* We used to set media_present to 0 here to indicate no media
@@ -2397,8 +2385,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
- if (sdp->removable)
+ if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
+ gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ }
add_disk(gd);
sd_dif_config_host(sdkp);
@@ -2480,7 +2470,6 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
- sdkp->previous_state = 1;
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488faf0815..c9d8f6ca49e2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@ struct scsi_disk {
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
- unsigned previous_state : 1;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d7b383c96d5d..aefadc6a1607 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -104,14 +104,15 @@ static void sr_release(struct cdrom_device_info *);
static void get_sectorsize(struct scsi_cd *);
static void get_capabilities(struct scsi_cd *);
-static int sr_media_change(struct cdrom_device_info *, int);
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
static struct cdrom_device_ops sr_dops = {
.open = sr_open,
.release = sr_release,
.drive_status = sr_drive_status,
- .media_changed = sr_media_change,
+ .check_events = sr_check_events,
.tray_move = sr_tray_move,
.lock_door = sr_lock_door,
.select_speed = sr_select_speed,
@@ -165,90 +166,92 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_unlock(&sr_ref_mutex);
}
-/* identical to scsi_test_unit_ready except that it doesn't
- * eat the NOT_READY returns for removable media */
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
+static unsigned int sr_get_events(struct scsi_device *sdev)
{
- int retries = MAX_RETRIES;
- int the_result;
- u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+ u8 buf[8];
+ u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION,
+ 1, /* polled */
+ 0, 0, /* reserved */
+ 1 << 4, /* notification class: media */
+ 0, 0, /* reserved */
+ 0, sizeof(buf), /* allocation length */
+ 0, /* control */
+ };
+ struct event_header *eh = (void *)buf;
+ struct media_event_desc *med = (void *)(buf + 4);
+ struct scsi_sense_hdr sshdr;
+ int result;
- /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
- * conditions are gone, or a timeout happens
- */
- do {
- the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
- 0, sshdr, SR_TIMEOUT,
- retries--, NULL);
- if (scsi_sense_valid(sshdr) &&
- sshdr->sense_key == UNIT_ATTENTION)
- sdev->changed = 1;
-
- } while (retries > 0 &&
- (!scsi_status_is_good(the_result) ||
- (scsi_sense_valid(sshdr) &&
- sshdr->sense_key == UNIT_ATTENTION)));
- return the_result;
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
+ &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+ return DISK_EVENT_MEDIA_CHANGE;
+
+ if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
+ return 0;
+
+ if (eh->nea || eh->notification_class != 0x4)
+ return 0;
+
+ if (med->media_event_code == 1)
+ return DISK_EVENT_EJECT_REQUEST;
+ else if (med->media_event_code == 2)
+ return DISK_EVENT_MEDIA_CHANGE;
+ return 0;
}
/*
- * This function checks to see if the media has been changed in the
- * CDROM drive. It is possible that we have already sensed a change,
- * or the drive may have sensed one and not yet reported it. We must
- * be ready for either case. This function always reports the current
- * value of the changed bit. If flag is 0, then the changed bit is reset.
- * This function could be done as an ioctl, but we would need to have
- * an inode for that to work, and we do not always have one.
+ * This function checks to see if the media has been changed or eject
+ * button has been pressed. It is possible that we have already
+ * sensed a change, or the drive may have sensed one and not yet
+ * reported it. The past events are accumulated in sdev->changed and
+ * returned together with the current state.
*/
-
-static int sr_media_change(struct cdrom_device_info *cdi, int slot)
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing, int slot)
{
struct scsi_cd *cd = cdi->handle;
- int retval;
- struct scsi_sense_hdr *sshdr;
+ bool last_present;
+ struct scsi_sense_hdr sshdr;
+ unsigned int events;
+ int ret;
- if (CDSL_CURRENT != slot) {
- /* no changer support */
- return -EINVAL;
- }
+ /* no changer support */
+ if (CDSL_CURRENT != slot)
+ return 0;
- sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
- retval = sr_test_unit_ready(cd->device, sshdr);
- if (retval || (scsi_sense_valid(sshdr) &&
- /* 0x3a is medium not present */
- sshdr->asc == 0x3a)) {
- /* Media not present or unable to test, unit probably not
- * ready. This usually means there is no disc in the drive.
- * Mark as changed, and we will figure it out later once
- * the drive is available again.
- */
- cd->device->changed = 1;
- /* This will force a flush, if called from check_disk_change */
- retval = 1;
- goto out;
- };
+ events = sr_get_events(cd->device);
+ /*
+ * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
+ * is being cleared. Note that there are devices which hang
+ * if asked to execute TUR repeatedly.
+ */
+ if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
+ goto skip_tur;
+
+ /* let's see whether the media is there with TUR */
+ last_present = cd->media_present;
+ ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+ /*
+ * Media is considered to be present if TUR succeeds or fails with
+ * sense data indicating something other than media-not-present
+ * (ASC 0x3a).
+ */
+ cd->media_present = scsi_status_is_good(ret) ||
+ (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
- retval = cd->device->changed;
- cd->device->changed = 0;
- /* If the disk changed, the capacity will now be different,
- * so we force a re-read of this information */
- if (retval) {
- /* check multisession offset etc */
- sr_cd_check(cdi);
- get_sectorsize(cd);
+ if (last_present != cd->media_present)
+ events |= DISK_EVENT_MEDIA_CHANGE;
+skip_tur:
+ if (cd->device->changed) {
+ events |= DISK_EVENT_MEDIA_CHANGE;
+ cd->device->changed = 0;
}
-out:
- /* Notify userspace, that media has changed. */
- if (retval != cd->previous_state)
- sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
- GFP_KERNEL);
- cd->previous_state = retval;
- kfree(sshdr);
-
- return retval;
+ return events;
}
-
+
/*
* sr_done is the interrupt routine for the device driver.
*
@@ -533,10 +536,25 @@ out:
return ret;
}
-static int sr_block_media_changed(struct gendisk *disk)
+static unsigned int sr_block_check_events(struct gendisk *disk,
+ unsigned int clearing)
{
struct scsi_cd *cd = scsi_cd(disk);
- return cdrom_media_changed(&cd->cdi);
+ return cdrom_check_events(&cd->cdi, clearing);
+}
+
+static int sr_block_revalidate_disk(struct gendisk *disk)
+{
+ struct scsi_cd *cd = scsi_cd(disk);
+ struct scsi_sense_hdr sshdr;
+
+ /* if the unit is not ready, nothing more to do */
+ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ return 0;
+
+ sr_cd_check(&cd->cdi);
+ get_sectorsize(cd);
+ return 0;
}
static const struct block_device_operations sr_bdops =
@@ -545,7 +563,8 @@ static const struct block_device_operations sr_bdops =
.open = sr_block_open,
.release = sr_block_release,
.ioctl = sr_block_ioctl,
- .media_changed = sr_block_media_changed,
+ .check_events = sr_block_check_events,
+ .revalidate_disk = sr_block_revalidate_disk,
/*
* No compat_ioctl for now because sr_block_ioctl never
* seems to pass arbitary ioctls down to host drivers.
@@ -618,6 +637,7 @@ static int sr_probe(struct device *dev)
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;
+ disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
@@ -627,7 +647,7 @@ static int sr_probe(struct device *dev)
cd->disk = disk;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
- cd->previous_state = 1;
+ cd->media_present = 1;
cd->use = 1;
cd->readcd_known = 0;
cd->readcd_cdda = 0;
@@ -780,7 +800,7 @@ static void get_capabilities(struct scsi_cd *cd)
}
/* eat unit attentions */
- sr_test_unit_ready(cd->device, &sshdr);
+ scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1e144dfdbd4b..e036f1dc83c8 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -40,7 +40,7 @@ typedef struct scsi_cd {
unsigned xa_flag:1; /* CD has XA sectors ? */
unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
unsigned readcd_cdda:1; /* reading audio data using READ_CD */
- unsigned previous_state:1; /* media has changed */
+ unsigned media_present:1; /* media is present */
struct cdrom_device_info cdi;
/* We hold gendisk and scsi_device references on probe and use
* the refs on this kref to decide when to release them */
@@ -61,7 +61,6 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed);
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
int sr_is_xa(Scsi_CD *);
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
/* sr_vendor.c */
void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 3cd8ffbad577..8be30554119b 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -307,7 +307,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
/* we have no changer support */
return -EINVAL;
}
- if (0 == sr_test_unit_ready(cd->device, &sshdr))
+ if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
return CDS_DISC_OK;
/* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6b97ded9d45d..b4543f575f46 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1866,7 +1866,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
*
* This routine is similar to sym_set_workarounds(), except
* that, at this point, we already know that the device was
- * successfully intialized at least once before, and so most
+ * successfully initialized at least once before, and so most
* of the steps taken there are un-needed here.
*/
static void sym2_reset_workarounds(struct pci_dev *pdev)
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index ceba593dc84f..04113e5304a0 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -101,7 +101,7 @@ static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
return NULL;
if (sfi_use_ioremap)
- return ioremap(phys, size);
+ return ioremap_cache(phys, size);
else
return early_ioremap(phys, size);
}
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 3f5e387ed564..5f63c3b83828 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index de885a0f917a..f33e2dd97934 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -173,7 +173,8 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
return 0;
}
-#define VALID(x) (x | 0x80)
+#define SENSE_VALID_FLAG 0x80
+#define VALID(x) (x | SENSE_VALID_FLAG)
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
[IRQ_TYPE_EDGE_FALLING] = VALID(0),
@@ -201,7 +202,8 @@ static int intc_set_type(struct irq_data *data, unsigned int type)
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
if (ihp) {
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
- intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
+ intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
+ value & ~SENSE_VALID_FLAG);
}
return 0;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1906840c1113..bb233a9cbad2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,14 @@ if SPI_MASTER
comment "SPI Master Controller Drivers"
+config SPI_ATH79
+ tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
+ depends on ATH79 && GENERIC_GPIO
+ select SPI_BITBANG
+ help
+ This enables support for the SPI controller present on the
+ Atheros AR71XX/AR724X/AR913X SoCs.
+
config SPI_ATMEL
tristate "Atmel SPI Controller"
depends on (ARCH_AT91 || AVR32)
@@ -156,10 +164,10 @@ config SPI_IMX_VER_0_4
def_bool y if ARCH_MX31
config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51
+ def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
config SPI_IMX_VER_2_3
- def_bool y if ARCH_MX51
+ def_bool y if ARCH_MX51 || ARCH_MX53
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
@@ -310,8 +318,8 @@ config SPI_S3C24XX_GPIO
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on ARCH_S3C64XX && EXPERIMENTAL
- select S3C64XX_DMA
+ depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+ select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3a42463c92a4..86d1b5f9bbd9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
+obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index a2a5921c730a..71a1219a995d 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1795,7 +1795,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq;
+ struct ssp_clock_params clk_freq = {0, };
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/ath79_spi.c
new file mode 100644
index 000000000000..fcff810ea3b0
--- /dev/null
+++ b/drivers/spi/ath79_spi.c
@@ -0,0 +1,292 @@
+/*
+ * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
+ *
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This driver is based on spi-gpio.c:
+ * Copyright (C) 2006,2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79_spi_platform.h>
+
+#define DRV_NAME "ath79-spi"
+
+struct ath79_spi {
+ struct spi_bitbang bitbang;
+ u32 ioc_base;
+ u32 reg_ctrl;
+ void __iomem *base;
+};
+
+static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
+{
+ return ioread32(sp->base + reg);
+}
+
+static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val)
+{
+ iowrite32(val, sp->base + reg);
+}
+
+static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
+{
+ return spi_master_get_devdata(spi->master);
+}
+
+static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+
+ if (is_active) {
+ /* set initial clock polarity */
+ if (spi->mode & SPI_CPOL)
+ sp->ioc_base |= AR71XX_SPI_IOC_CLK;
+ else
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ }
+
+ if (spi->chip_select) {
+ struct ath79_spi_controller_data *cdata = spi->controller_data;
+
+ /* SPI is normally active-low */
+ gpio_set_value(cdata->gpio, cs_high);
+ } else {
+ if (cs_high)
+ sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ else
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ }
+
+}
+
+static int ath79_spi_setup_cs(struct spi_device *spi)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ struct ath79_spi_controller_data *cdata;
+
+ cdata = spi->controller_data;
+ if (spi->chip_select && !cdata)
+ return -EINVAL;
+
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* save CTRL register */
+ sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
+ sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+
+ /* TODO: setup speed? */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+
+ if (spi->chip_select) {
+ int status = 0;
+
+ status = gpio_request(cdata->gpio, dev_name(&spi->dev));
+ if (status)
+ return status;
+
+ status = gpio_direction_output(cdata->gpio,
+ spi->mode & SPI_CS_HIGH);
+ if (status) {
+ gpio_free(cdata->gpio);
+ return status;
+ }
+ } else {
+ if (spi->mode & SPI_CS_HIGH)
+ sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ else
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ }
+
+ return 0;
+}
+
+static void ath79_spi_cleanup_cs(struct spi_device *spi)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+
+ if (spi->chip_select) {
+ struct ath79_spi_controller_data *cdata = spi->controller_data;
+ gpio_free(cdata->gpio);
+ }
+
+ /* restore CTRL register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
+
+static int ath79_spi_setup(struct spi_device *spi)
+{
+ int status = 0;
+
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+
+ if (!spi->controller_state) {
+ status = ath79_spi_setup_cs(spi);
+ if (status)
+ return status;
+ }
+
+ status = spi_bitbang_setup(spi);
+ if (status && !spi->controller_state)
+ ath79_spi_cleanup_cs(spi);
+
+ return status;
+}
+
+static void ath79_spi_cleanup(struct spi_device *spi)
+{
+ ath79_spi_cleanup_cs(spi);
+ spi_bitbang_cleanup(spi);
+}
+
+static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ u32 ioc = sp->ioc_base;
+
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+ u32 out;
+
+ if (word & (1 << 31))
+ out = ioc | AR71XX_SPI_IOC_DO;
+ else
+ out = ioc & ~AR71XX_SPI_IOC_DO;
+
+ /* setup MSB (to slave) on trailing edge */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+
+ word <<= 1;
+ }
+
+ return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
+}
+
+static __devinit int ath79_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ath79_spi *sp;
+ struct ath79_spi_platform_data *pdata;
+ struct resource *r;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ return -ENOMEM;
+ }
+
+ sp = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, sp);
+
+ pdata = pdev->dev.platform_data;
+
+ master->setup = ath79_spi_setup;
+ master->cleanup = ath79_spi_cleanup;
+ if (pdata) {
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ } else {
+ master->bus_num = -1;
+ master->num_chipselect = 1;
+ }
+
+ sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.chipselect = ath79_spi_chipselect;
+ sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
+ sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+ sp->bitbang.flags = SPI_CS_HIGH;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err_put_master;
+ }
+
+ sp->base = ioremap(r->start, r->end - r->start + 1);
+ if (!sp->base) {
+ ret = -ENXIO;
+ goto err_put_master;
+ }
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ iounmap(sp->base);
+err_put_master:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(sp->bitbang.master);
+
+ return ret;
+}
+
+static __devexit int ath79_spi_remove(struct platform_device *pdev)
+{
+ struct ath79_spi *sp = platform_get_drvdata(pdev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ iounmap(sp->base);
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(sp->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver ath79_spi_driver = {
+ .probe = ath79_spi_probe,
+ .remove = __devexit_p(ath79_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int ath79_spi_init(void)
+{
+ return platform_driver_register(&ath79_spi_driver);
+}
+module_init(ath79_spi_init);
+
+static __exit void ath79_spi_exit(void)
+{
+ platform_driver_unregister(&ath79_spi_driver);
+}
+module_exit(ath79_spi_exit);
+
+MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index a067046c9da2..1a478bf88c9d 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -341,9 +341,9 @@ static void atmel_spi_next_message(struct spi_master *master)
/*
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
- * - If the buffer is valid, so is its DMA addresss
+ * - If the buffer is valid, so is its DMA address
*
- * This driver manages the dma addresss unless message->is_dma_mapped.
+ * This driver manages the dma address unless message->is_dma_mapped.
*/
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9c1b24..2fa012c109bc 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -68,8 +69,8 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
}
dwsmmio->clk = clk_get(&pdev->dev, NULL);
- if (!dwsmmio->clk) {
- ret = -ENODEV;
+ if (IS_ERR(dwsmmio->clk)) {
+ ret = PTR_ERR(dwsmmio->clk);
goto err_irq;
}
clk_enable(dwsmmio->clk);
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a375b57..19752b09e155 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
#include <linux/of_device.h>
#include <linux/spi/pxa2xx_spi.h>
-struct awesome_struct {
+struct ce4100_info {
struct ssp_device ssp;
- struct platform_device spi_pdev;
- struct pxa2xx_spi_master spi_pdata;
+ struct platform_device *spi_pdev;
};
static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
}
EXPORT_SYMBOL_GPL(pxa_ssp_free);
-static void plat_dev_release(struct device *dev)
-{
- struct awesome_struct *as = container_of(dev,
- struct awesome_struct, spi_pdev.dev);
-
- of_device_node_put(&as->spi_pdev.dev);
-}
-
static int __devinit ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
resource_size_t phys_beg;
resource_size_t phys_len;
- struct awesome_struct *spi_info;
+ struct ce4100_info *spi_info;
struct platform_device *pdev;
- struct pxa2xx_spi_master *spi_pdata;
+ struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
return ret;
}
+ pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
- if (!spi_info) {
+ if (!pdev || !spi_info) {
ret = -ENOMEM;
- goto err_kz;
+ goto err_nomem;
}
- ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
- spi_pdata = &spi_info->spi_pdata;
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = dev->devfn;
- pdev->name = "pxa2xx-spi";
- pdev->id = dev->devfn;
- pdev->dev.parent = &dev->dev;
- pdev->dev.platform_data = &spi_info->spi_pdata;
+ ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+ if (ret)
+ goto err_nomem;
+ pdev->dev.parent = &dev->dev;
#ifdef CONFIG_OF
pdev->dev.of_node = dev->dev.of_node;
#endif
- pdev->dev.release = plat_dev_release;
-
- spi_pdata->num_chipselect = dev->devfn;
-
+ ssp = &spi_info->ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -EIO;
- goto err_remap;
+ goto err_nomem;
}
ssp->irq = dev->irq;
ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
pci_set_drvdata(dev, spi_info);
- ret = platform_device_register(pdev);
+ ret = platform_device_add(pdev);
if (ret)
goto err_dev_add;
@@ -135,27 +123,21 @@ err_dev_add:
mutex_unlock(&ssp_lock);
iounmap(ssp->mmio_base);
-err_remap:
- kfree(spi_info);
-
-err_kz:
+err_nomem:
release_mem_region(phys_beg, phys_len);
-
+ platform_device_put(pdev);
+ kfree(spi_info);
return ret;
}
static void __devexit ce4100_spi_remove(struct pci_dev *dev)
{
- struct awesome_struct *spi_info;
- struct platform_device *pdev;
+ struct ce4100_info *spi_info;
struct ssp_device *ssp;
spi_info = pci_get_drvdata(dev);
-
ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
-
- platform_device_unregister(pdev);
+ platform_device_unregister(spi_info->spi_pdev);
iounmap(ssp->mmio_base);
release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
}
static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b02d0cbce890..34bb17f03019 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -28,6 +28,7 @@
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
+#include <linux/pm_runtime.h>
static void spidev_release(struct device *dev)
{
@@ -100,9 +101,8 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#ifdef CONFIG_PM
-
-static int spi_suspend(struct device *dev, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
@@ -117,7 +117,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
return value;
}
-static int spi_resume(struct device *dev)
+static int spi_legacy_resume(struct device *dev)
{
int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
@@ -132,18 +132,94 @@ static int spi_resume(struct device *dev)
return value;
}
+static int spi_pm_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_suspend(dev);
+ else
+ return spi_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int spi_pm_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_resume(dev);
+ else
+ return spi_legacy_resume(dev);
+}
+
+static int spi_pm_freeze(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_freeze(dev);
+ else
+ return spi_legacy_suspend(dev, PMSG_FREEZE);
+}
+
+static int spi_pm_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_thaw(dev);
+ else
+ return spi_legacy_resume(dev);
+}
+
+static int spi_pm_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_poweroff(dev);
+ else
+ return spi_legacy_suspend(dev, PMSG_HIBERNATE);
+}
+
+static int spi_pm_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm)
+ return pm_generic_restore(dev);
+ else
+ return spi_legacy_resume(dev);
+}
#else
-#define spi_suspend NULL
-#define spi_resume NULL
+#define spi_pm_suspend NULL
+#define spi_pm_resume NULL
+#define spi_pm_freeze NULL
+#define spi_pm_thaw NULL
+#define spi_pm_poweroff NULL
+#define spi_pm_restore NULL
#endif
+static const struct dev_pm_ops spi_pm = {
+ .suspend = spi_pm_suspend,
+ .resume = spi_pm_resume,
+ .freeze = spi_pm_freeze,
+ .thaw = spi_pm_thaw,
+ .poweroff = spi_pm_poweroff,
+ .restore = spi_pm_restore,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+};
+
struct bus_type spi_bus_type = {
.name = "spi",
.dev_attrs = spi_dev_attrs,
.match = spi_match_device,
.uevent = spi_uevent,
- .suspend = spi_suspend,
- .resume = spi_resume,
+ .pm = &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 9469564e6888..1cf9d5faabf4 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -743,6 +743,12 @@ static struct platform_device_id spi_imx_devtype[] = {
.name = "imx51-ecspi",
.driver_data = SPI_IMX_VER_2_3,
}, {
+ .name = "imx53-cspi",
+ .driver_data = SPI_IMX_VER_0_7,
+ }, {
+ .name = "imx53-ecspi",
+ .driver_data = SPI_IMX_VER_2_3,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index d93b66743ba7..2c665fceaac7 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
bytes_done = 0;
while (bytes_done < t->len) {
+ void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
+ const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
- t->tx_buf + bytes_done,
- t->rx_buf + bytes_done,
+ tx_buf,
+ rx_buf,
words, bits);
if (n < 0)
break;
@@ -635,7 +637,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
ret = spi_bitbang_stop(&p->bitbang);
if (!ret) {
pm_runtime_disable(&pdev->dev);
- free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq);
+ free_irq(platform_get_irq(pdev, 0), p);
iounmap(p->mapbase);
clk_put(p->clk);
spi_master_put(p->bitbang.master);
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02a5472..891e5909038c 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
}
tspi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR_OR_NULL(tspi->clk)) {
+ if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
goto err2;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4e6245e67995..603428213d21 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -38,7 +38,7 @@
/*
- * This supports acccess to SPI devices using normal userspace I/O calls.
+ * This supports access to SPI devices using normal userspace I/O calls.
* Note that while traditional UNIX/POSIX I/O semantics are half duplex,
* and often mask message boundaries, full SPI support requires full duplex
* transfers. There are several kinds of internal message boundaries to
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2d8cc455dbc7..42cdaa9a4d8a 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -82,7 +82,7 @@ config SSB_SDIOHOST
config SSB_SILENT
bool "No SSB kernel messages"
- depends on SSB && EMBEDDED
+ depends on SSB && EXPERT
help
This option turns off all Sonics Silicon Backplane printks.
Note that you won't be able to identify problems, once
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c7345dbf43fa..f8533795ee7f 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
/* Fetch the vendor specific tuples. */
res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
- ssb_pcmcia_do_get_invariants, sprom);
+ ssb_pcmcia_do_get_invariants, iv);
if ((res == 0) || (res == -ENOSPC))
return 0;
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d4ce15..29884c00c4d5 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@ int ssb_bus_scan(struct ssb_bus *bus,
bus->pcicore.dev = dev;
#endif /* CONFIG_SSB_DRIVER_PCICORE */
break;
+ case SSB_DEV_ETHERNET:
+ if (bus->bustype == SSB_BUSTYPE_PCI) {
+ if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (bus->host_pci->device & 0xFF00) == 0x4300) {
+ /* This is a dangling ethernet core on a
+ * wireless device. Ignore it. */
+ continue;
+ }
+ }
+ break;
default:
break;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index bdc632b6b586..5c8fcfc42c3e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -119,16 +119,18 @@ source "drivers/staging/vme/Kconfig"
source "drivers/staging/memrar/Kconfig"
+source "drivers/staging/sep/Kconfig"
+
source "drivers/staging/iio/Kconfig"
+source "drivers/staging/cs5535_gpio/Kconfig"
+
source "drivers/staging/zram/Kconfig"
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
-source "drivers/staging/batman-adv/Kconfig"
-
source "drivers/staging/samsung-laptop/Kconfig"
source "drivers/staging/sm7xx/Kconfig"
@@ -141,8 +143,6 @@ source "drivers/staging/cxt1e1/Kconfig"
source "drivers/staging/ti-st/Kconfig"
-source "drivers/staging/adis16255/Kconfig"
-
source "drivers/staging/xgifb/Kconfig"
source "drivers/staging/msm/Kconfig"
@@ -175,5 +175,9 @@ source "drivers/staging/intel_sst/Kconfig"
source "drivers/staging/speakup/Kconfig"
+source "drivers/staging/cptm1217/Kconfig"
+
+source "drivers/staging/ste_rmi4/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 3eda5c73a50a..d53886317826 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -42,18 +42,18 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
+obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
+obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
-obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_TI_ST) += ti-st/
-obj-$(CONFIG_ADIS16255) += adis16255/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_MSM_STAGING) += msm/
obj-$(CONFIG_EASYCAP) += easycap/
@@ -68,3 +68,5 @@ obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
+obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
diff --git a/drivers/staging/adis16255/Kconfig b/drivers/staging/adis16255/Kconfig
deleted file mode 100644
index a883c1f4478b..000000000000
--- a/drivers/staging/adis16255/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config ADIS16255
- tristate "Analog Devices ADIS16250/16255"
- depends on SPI && SYSFS
- ---help---
- If you say yes here you get support for the Analog Devices
- ADIS16250/16255 Low Power Gyroscope. The driver exposes
- orientation and gyroscope value, as well as sample rate
- to the sysfs.
-
- This driver can also be built as a module. If so, the module
- will be called adis16255.
diff --git a/drivers/staging/adis16255/Makefile b/drivers/staging/adis16255/Makefile
deleted file mode 100644
index 8c3908106bfa..000000000000
--- a/drivers/staging/adis16255/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ADIS16255) += adis16255.o
diff --git a/drivers/staging/adis16255/adis16255.c b/drivers/staging/adis16255/adis16255.c
deleted file mode 100644
index 8d4d7cbab979..000000000000
--- a/drivers/staging/adis16255/adis16255.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Analog Devices ADIS16250/ADIS16255 Low Power Gyroscope
- *
- * Written by: Matthias Brugger <m_brugger@web.de>
- *
- * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * The driver just has a bare sysfs interface (sample rate in Hz,
- * orientation (x, y, z) and gyroscope data in °/sec).
- *
- * It should be added to iio subsystem when this has left staging.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/stat.h>
-#include <linux/delay.h>
-
-#include <linux/gpio.h>
-
-#include <linux/spi/spi.h>
-#include <linux/workqueue.h>
-
-#include "adis16255.h"
-
-#define ADIS_STATUS 0x3d
-#define ADIS_SMPL_PRD_MSB 0x37
-#define ADIS_SMPL_PRD_LSB 0x36
-#define ADIS_MSC_CTRL_MSB 0x35
-#define ADIS_MSC_CTRL_LSB 0x34
-#define ADIS_GPIO_CTRL 0x33
-#define ADIS_ALM_SMPL1 0x25
-#define ADIS_ALM_MAG1 0x21
-#define ADIS_GYRO_SCALE 0x17
-#define ADIS_GYRO_OUT 0x05
-#define ADIS_SUPPLY_OUT 0x03
-#define ADIS_ENDURANCE 0x01
-
-/*
- * data structure for every sensor
- *
- * @dev: Driver model representation of the device.
- * @spi: Pointer to the spi device which will manage i/o to spi bus.
- * @data: Last read data from device.
- * @irq_adis: GPIO Number of IRQ signal
- * @irq: IRQ line managed by the kernel
- * @negative: indicates if the sensor is upside down (negative == 1)
- * @direction: indicates the axis (x, y, z) the sensor is measuring
- */
-struct spi_adis16255_data {
- struct device dev;
- struct spi_device *spi;
- s16 data;
- int irq;
- u8 negative;
- char direction;
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int spi_adis16255_read_data(struct spi_adis16255_data *spiadis,
- u8 adr,
- u8 *rbuf)
-{
- struct spi_device *spi = spiadis->spi;
- struct spi_message msg;
- struct spi_transfer xfer1, xfer2;
- u8 *buf, *rx;
- int ret;
-
- buf = kzalloc(4, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- rx = kzalloc(4, GFP_KERNEL);
- if (rx == NULL) {
- ret = -ENOMEM;
- goto err_buf;
- }
-
- buf[0] = adr;
-
- spi_message_init(&msg);
- memset(&xfer1, 0, sizeof(xfer1));
- memset(&xfer2, 0, sizeof(xfer2));
-
- xfer1.tx_buf = buf;
- xfer1.rx_buf = buf + 2;
- xfer1.len = 2;
- xfer1.delay_usecs = 9;
-
- xfer2.tx_buf = rx + 2;
- xfer2.rx_buf = rx;
- xfer2.len = 2;
-
- spi_message_add_tail(&xfer1, &msg);
- spi_message_add_tail(&xfer2, &msg);
-
- ret = spi_sync(spi, &msg);
- if (ret == 0) {
- rbuf[0] = rx[0];
- rbuf[1] = rx[1];
- }
-
- kfree(rx);
-err_buf:
- kfree(buf);
-
- return ret;
-}
-
-static int spi_adis16255_write_data(struct spi_adis16255_data *spiadis,
- u8 adr1,
- u8 adr2,
- u8 *wbuf)
-{
- struct spi_device *spi = spiadis->spi;
- struct spi_message msg;
- struct spi_transfer xfer1, xfer2;
- u8 *buf, *rx;
- int ret;
-
- buf = kmalloc(4, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- rx = kzalloc(4, GFP_KERNEL);
- if (rx == NULL) {
- ret = -ENOMEM;
- goto err_buf;
- }
-
- spi_message_init(&msg);
- memset(&xfer1, 0, sizeof(xfer1));
- memset(&xfer2, 0, sizeof(xfer2));
-
- buf[0] = adr1 | 0x80;
- buf[1] = *wbuf;
-
- buf[2] = adr2 | 0x80;
- buf[3] = *(wbuf + 1);
-
- xfer1.tx_buf = buf;
- xfer1.rx_buf = rx;
- xfer1.len = 2;
- xfer1.delay_usecs = 9;
-
- xfer2.tx_buf = buf+2;
- xfer2.rx_buf = rx+2;
- xfer2.len = 2;
-
- spi_message_add_tail(&xfer1, &msg);
- spi_message_add_tail(&xfer2, &msg);
-
- ret = spi_sync(spi, &msg);
- if (ret != 0)
- dev_warn(&spi->dev, "write data to %#x %#x failed\n",
- buf[0], buf[2]);
-
- kfree(rx);
-err_buf:
- kfree(buf);
- return ret;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static irqreturn_t adis_irq_thread(int irq, void *dev_id)
-{
- struct spi_adis16255_data *spiadis = dev_id;
- int status;
- u16 value = 0;
-
- status = spi_adis16255_read_data(spiadis, ADIS_GYRO_OUT, (u8 *)&value);
- if (status != 0) {
- dev_warn(&spiadis->spi->dev, "SPI FAILED\n");
- goto exit;
- }
-
- /* perform on new data only... */
- if (value & 0x8000) {
- /* delete error and new data bit */
- value = value & 0x3fff;
- /* set negative value */
- if (value & 0x2000)
- value = value | 0xe000;
-
- if (likely(spiadis->negative))
- value = -value;
-
- spiadis->data = (s16) value;
- }
-
-exit:
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-
-ssize_t adis16255_show_data(struct device *device,
- struct device_attribute *da,
- char *buf)
-{
- struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", spiadis->data);
-}
-DEVICE_ATTR(data, S_IRUGO , adis16255_show_data, NULL);
-
-ssize_t adis16255_show_direction(struct device *device,
- struct device_attribute *da,
- char *buf)
-{
- struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
- return snprintf(buf, PAGE_SIZE, "%c\n", spiadis->direction);
-}
-DEVICE_ATTR(direction, S_IRUGO , adis16255_show_direction, NULL);
-
-ssize_t adis16255_show_sample_rate(struct device *device,
- struct device_attribute *da,
- char *buf)
-{
- struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
- int status = 0;
- u16 value = 0;
- int ts = 0;
-
- status = spi_adis16255_read_data(spiadis, ADIS_SMPL_PRD_MSB,
- (u8 *)&value);
- if (status != 0)
- return -EINVAL;
-
- if (value & 0x80) {
- /* timebase = 60.54 ms */
- ts = 60540 * ((0x7f & value) + 1);
- } else {
- /* timebase = 1.953 ms */
- ts = 1953 * ((0x7f & value) + 1);
- }
-
- return snprintf(buf, PAGE_SIZE, "%d\n", (1000*1000)/ts);
-}
-DEVICE_ATTR(sample_rate, S_IRUGO , adis16255_show_sample_rate, NULL);
-
-static struct attribute *adis16255_attributes[] = {
- &dev_attr_data.attr,
- &dev_attr_direction.attr,
- &dev_attr_sample_rate.attr,
- NULL
-};
-
-static const struct attribute_group adis16255_attr_group = {
- .attrs = adis16255_attributes,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int spi_adis16255_shutdown(struct spi_adis16255_data *spiadis)
-{
- u16 value = 0;
- /* turn sensor off */
- spi_adis16255_write_data(spiadis,
- ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
- (u8 *)&value);
- spi_adis16255_write_data(spiadis,
- ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
- (u8 *)&value);
- return 0;
-}
-
-static int spi_adis16255_bringup(struct spi_adis16255_data *spiadis)
-{
- int status = 0;
- u16 value = 0;
-
- status = spi_adis16255_read_data(spiadis, ADIS_GYRO_SCALE,
- (u8 *)&value);
- if (status != 0)
- goto err;
- if (value != 0x0800) {
- dev_warn(&spiadis->spi->dev, "Scale factor is none default "
- "value (%.4x)\n", value);
- }
-
- /* timebase = 1.953 ms, Ns = 0 -> 512 Hz sample rate */
- value = 0x0001;
- status = spi_adis16255_write_data(spiadis,
- ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
- (u8 *)&value);
- if (status != 0)
- goto err;
-
- /* start internal self-test */
- value = 0x0400;
- status = spi_adis16255_write_data(spiadis,
- ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
- (u8 *)&value);
- if (status != 0)
- goto err;
-
- /* wait 35 ms to finish self-test */
- msleep(35);
-
- value = 0x0000;
- status = spi_adis16255_read_data(spiadis, ADIS_STATUS,
- (u8 *)&value);
- if (status != 0)
- goto err;
-
- if (value & 0x23) {
- if (value & 0x20) {
- dev_warn(&spiadis->spi->dev, "self-test error\n");
- status = -ENODEV;
- goto err;
- } else if (value & 0x3) {
- dev_warn(&spiadis->spi->dev, "Sensor voltage "
- "out of range.\n");
- status = -ENODEV;
- goto err;
- }
- }
-
- /* set interrupt to active high on DIO0 when data ready */
- value = 0x0006;
- status = spi_adis16255_write_data(spiadis,
- ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
- (u8 *)&value);
- if (status != 0)
- goto err;
- return status;
-
-err:
- spi_adis16255_shutdown(spiadis);
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit spi_adis16255_probe(struct spi_device *spi)
-{
-
- struct adis16255_init_data *init_data = spi->dev.platform_data;
- struct spi_adis16255_data *spiadis;
- int status = 0;
-
- spiadis = kzalloc(sizeof(*spiadis), GFP_KERNEL);
- if (!spiadis)
- return -ENOMEM;
-
- spiadis->spi = spi;
- spiadis->direction = init_data->direction;
-
- if (init_data->negative)
- spiadis->negative = 1;
-
- status = gpio_request(init_data->irq, "adis16255");
- if (status != 0)
- goto err;
-
- status = gpio_direction_input(init_data->irq);
- if (status != 0)
- goto gpio_err;
-
- spiadis->irq = gpio_to_irq(init_data->irq);
-
- status = request_threaded_irq(spiadis->irq,
- NULL, adis_irq_thread,
- IRQF_DISABLED, "adis-driver", spiadis);
-
- if (status != 0) {
- dev_err(&spi->dev, "IRQ request failed\n");
- goto gpio_err;
- }
-
- dev_dbg(&spi->dev, "GPIO %d IRQ %d\n", init_data->irq, spiadis->irq);
-
- dev_set_drvdata(&spi->dev, spiadis);
- status = sysfs_create_group(&spi->dev.kobj, &adis16255_attr_group);
- if (status != 0)
- goto irq_err;
-
- status = spi_adis16255_bringup(spiadis);
- if (status != 0)
- goto sysfs_err;
-
- dev_info(&spi->dev, "spi_adis16255 driver added!\n");
-
- return status;
-
-sysfs_err:
- sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
-irq_err:
- free_irq(spiadis->irq, spiadis);
-gpio_err:
- gpio_free(init_data->irq);
-err:
- kfree(spiadis);
- return status;
-}
-
-static int __devexit spi_adis16255_remove(struct spi_device *spi)
-{
- struct spi_adis16255_data *spiadis = dev_get_drvdata(&spi->dev);
-
- spi_adis16255_shutdown(spiadis);
-
- free_irq(spiadis->irq, spiadis);
- gpio_free(irq_to_gpio(spiadis->irq));
-
- sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
-
- kfree(spiadis);
-
- dev_info(&spi->dev, "spi_adis16255 driver removed!\n");
- return 0;
-}
-
-static struct spi_driver spi_adis16255_drv = {
- .driver = {
- .name = "spi_adis16255",
- .owner = THIS_MODULE,
- },
- .probe = spi_adis16255_probe,
- .remove = __devexit_p(spi_adis16255_remove),
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int __init spi_adis16255_init(void)
-{
- return spi_register_driver(&spi_adis16255_drv);
-}
-module_init(spi_adis16255_init);
-
-static void __exit spi_adis16255_exit(void)
-{
- spi_unregister_driver(&spi_adis16255_drv);
-}
-module_exit(spi_adis16255_exit);
-
-MODULE_AUTHOR("Matthias Brugger");
-MODULE_DESCRIPTION("SPI device driver for ADIS16255 sensor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/adis16255/adis16255.h b/drivers/staging/adis16255/adis16255.h
deleted file mode 100644
index 03e07001bab2..000000000000
--- a/drivers/staging/adis16255/adis16255.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef ADIS16255_H
-#define ADIS16255_H
-
-#include <linux/types.h>
-
-struct adis16255_init_data {
- char direction;
- u8 negative;
- int irq;
-};
-
-#endif
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 016c6f7f8630..7bb7da7959a2 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -70,7 +70,7 @@ module_param(start_off, uint, 0644);
MODULE_PARM_DESC(start_off,
"Set to 1 to switch off OLED display after it is attached");
-enum oled_pack_mode{
+enum oled_pack_mode {
PACK_MODE_G1,
PACK_MODE_G50,
PACK_MODE_LAST
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
index c307a5559362..e96662b84ed9 100644
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
@@ -876,7 +876,7 @@ HIFAckInterrupt(HIF_DEVICE *device)
void
HIFUnMaskInterrupt(HIF_DEVICE *device)
{
- int ret;;
+ int ret;
AR_DEBUG_ASSERT(device != NULL);
AR_DEBUG_ASSERT(device->func != NULL);
@@ -1188,7 +1188,7 @@ addHifDevice(struct sdio_func *func)
HIF_DEVICE *hifdevice;
AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: addHifDevice\n"));
AR_DEBUG_ASSERT(func != NULL);
- hifdevice = (HIF_DEVICE *)kzalloc(sizeof(HIF_DEVICE), GFP_KERNEL);
+ hifdevice = kzalloc(sizeof(HIF_DEVICE), GFP_KERNEL);
AR_DEBUG_ASSERT(hifdevice != NULL);
#if HIF_USE_DMA_BOUNCE_BUFFER
hifdevice->dma_buffer = kmalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
index 0e298dba9fc8..29b8ab44ea47 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
@@ -360,8 +360,8 @@ int PSSendOps(void *arg)
status = 1;
goto complete;
}
- len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size;
- memcpy(config_bdaddr, firmware->data,len);
+ len = min(firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
+ memcpy(config_bdaddr, firmware->data, len);
config_bdaddr[len] = '\0';
write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
A_RELEASE_FIRMWARE(firmware);
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index a659f7047373..126a36a2daa6 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -4439,7 +4439,7 @@ skip_key:
for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) {
AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
sprintf(pos, "%2.2x", assocInfo[i]);
- pos += 2;;
+ pos += 2;
}
AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index 8f3e2b816129..a08bd7355035 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -30,7 +30,7 @@ void autofs_update_usage(struct autofs_dirhash *dh,
struct autofs_dir_ent *ent)
{
autofs_delete_usage(ent); /* Unlink from current position */
- autofs_init_usage(dh,ent); /* Relink at queue tail */
+ autofs_init_usage(dh, ent); /* Relink at queue tail */
}
struct autofs_dir_ent *autofs_expire(struct super_block *sb,
@@ -45,17 +45,18 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
struct path path;
int umount_ok;
- if ( list_empty(&dh->expiry_head) || sbi->catatonic )
+ if (list_empty(&dh->expiry_head) || sbi->catatonic)
return NULL; /* No entries */
/* We keep the list sorted by last_usage and want old stuff */
- ent = list_entry(dh->expiry_head.next, struct autofs_dir_ent, exp);
+ ent = list_entry(dh->expiry_head.next,
+ struct autofs_dir_ent, exp);
if (jiffies - ent->last_usage < timeout)
break;
/* Move to end of list in case expiry isn't desirable */
autofs_update_usage(dh, ent);
/* Check to see that entry is expirable */
- if ( ent->ino < AUTOFS_FIRST_DIR_INO )
+ if (ent->ino < AUTOFS_FIRST_DIR_INO)
return ent; /* Symlinks are always expirable */
/* Get the dentry for the autofs subdirectory */
@@ -63,14 +64,15 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
if (!path.dentry) {
/* Should only happen in catatonic mode */
- printk("autofs: dentry == NULL but inode range is directory, entry %s\n", ent->name);
+ printk(KERN_DEBUG "autofs: dentry == NULL but inode \
+ range is directory, entry %s\n", ent->name);
autofs_delete_usage(ent);
continue;
}
if (!path.dentry->d_inode) {
dput(path.dentry);
- printk("autofs: negative dentry on expiry queue: %s\n",
+ printk(KERN_DEBUG "autofs: negative dentry on expiry queue: %s\n",
ent->name);
autofs_delete_usage(ent);
continue;
@@ -80,46 +82,54 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
point to the mounted-on-top root. */
if (!S_ISDIR(path.dentry->d_inode->i_mode) ||
!d_mountpoint(path.dentry)) {
- DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+ DPRINTK(("autofs: not expirable \
+ (not a mounted directory): %s\n", ent->name));
continue;
}
path.mnt = mnt;
path_get(&path);
- if (!follow_down(&path)) {
+ if (!follow_down_one(&path)) {
path_put(&path);
- DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+ DPRINTK(("autofs: not expirable\
+ (not a mounted directory): %s\n", ent->name));
continue;
}
- while (d_mountpoint(path.dentry) && follow_down(&path))
- ;
+ follow_down(&path, false); /* TODO: need to check error */
umount_ok = may_umount(path.mnt);
path_put(&path);
if (umount_ok) {
- DPRINTK(("autofs: signaling expire on %s\n", ent->name));
+ DPRINTK(("autofs: signaling expire on %s\n",
+ ent->name));
return ent; /* Expirable! */
}
- DPRINTK(("autofs: didn't expire due to may_umount: %s\n", ent->name));
+
+ DPRINTK(("autofs: didn't expire due to may_umount: %s\n",
+ ent->name));
}
return NULL; /* No expirable entries */
}
-void autofs_initialize_hash(struct autofs_dirhash *dh) {
+void autofs_initialize_hash(struct autofs_dirhash *dh)
+{
memset(&dh->h, 0, AUTOFS_HASH_SIZE*sizeof(struct autofs_dir_ent *));
INIT_LIST_HEAD(&dh->expiry_head);
}
-struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *dh, struct qstr *name)
+struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *dh,
+ struct qstr *name)
{
struct autofs_dir_ent *dhn;
DPRINTK(("autofs_hash_lookup: hash = 0x%08x, name = ", name->hash));
- autofs_say(name->name,name->len);
+ autofs_say(name->name, name->len);
- for ( dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE] ; dhn ; dhn = dhn->next ) {
- if ( name->hash == dhn->hash &&
+ for (dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE];
+ dhn;
+ dhn = dhn->next) {
+ if (name->hash == dhn->hash &&
name->len == dhn->len &&
- !memcmp(name->name, dhn->name, name->len) )
+ !memcmp(name->name, dhn->name, name->len))
break;
}
@@ -131,9 +141,9 @@ void autofs_hash_insert(struct autofs_dirhash *dh, struct autofs_dir_ent *ent)
struct autofs_dir_ent **dhnp;
DPRINTK(("autofs_hash_insert: hash = 0x%08x, name = ", ent->hash));
- autofs_say(ent->name,ent->len);
+ autofs_say(ent->name, ent->len);
- autofs_init_usage(dh,ent);
+ autofs_init_usage(dh, ent);
if (ent->dentry)
dget(ent->dentry);
@@ -141,19 +151,19 @@ void autofs_hash_insert(struct autofs_dirhash *dh, struct autofs_dir_ent *ent)
ent->next = *dhnp;
ent->back = dhnp;
*dhnp = ent;
- if ( ent->next )
+ if (ent->next)
ent->next->back = &(ent->next);
}
void autofs_hash_delete(struct autofs_dir_ent *ent)
{
*(ent->back) = ent->next;
- if ( ent->next )
+ if (ent->next)
ent->next->back = ent->back;
autofs_delete_usage(ent);
- if ( ent->dentry )
+ if (ent->dentry)
dput(ent->dentry);
kfree(ent->name);
kfree(ent);
@@ -176,37 +186,37 @@ struct autofs_dir_ent *autofs_hash_enum(const struct autofs_dirhash *dh,
bucket = (*ptr >> 16) - 1;
ecount = *ptr & 0xffff;
- if ( bucket < 0 ) {
+ if (bucket < 0)
bucket = ecount = 0;
- }
DPRINTK(("autofs_hash_enum: bucket %d, entry %d\n", bucket, ecount));
ent = last ? last->next : NULL;
- if ( ent ) {
+ if (ent) {
ecount++;
} else {
- while ( bucket < AUTOFS_HASH_SIZE ) {
+ while (bucket < AUTOFS_HASH_SIZE) {
ent = dh->h[bucket];
- for ( i = ecount ; ent && i ; i-- )
+ for (i = ecount ; ent && i ; i--)
ent = ent->next;
-
+
if (ent) {
ecount++; /* Point to *next* entry */
break;
}
-
+
bucket++; ecount = 0;
}
}
#ifdef DEBUG
- if ( !ent )
- printk("autofs_hash_enum: nothing found\n");
+ if (!ent)
+ printk(KERN_DEBUG "autofs_hash_enum: nothing found\n");
else {
- printk("autofs_hash_enum: found hash %08x, name", ent->hash);
- autofs_say(ent->name,ent->len);
+ printk(KERN_DEBUG "autofs_hash_enum: found hash %08x, name",
+ ent->hash);
+ autofs_say(ent->name, ent->len);
}
#endif
@@ -221,9 +231,9 @@ void autofs_hash_dputall(struct autofs_dirhash *dh)
int i;
struct autofs_dir_ent *ent;
- for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
- for ( ent = dh->h[i] ; ent ; ent = ent->next ) {
- if ( ent->dentry ) {
+ for (i = 0 ; i < AUTOFS_HASH_SIZE ; i++) {
+ for (ent = dh->h[i] ; ent ; ent = ent->next) {
+ if (ent->dentry) {
dput(ent->dentry);
ent->dentry = NULL;
}
@@ -238,10 +248,10 @@ void autofs_hash_nuke(struct autofs_sb_info *sbi)
int i;
struct autofs_dir_ent *ent, *nent;
- for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
- for ( ent = sbi->dirhash.h[i] ; ent ; ent = nent ) {
+ for (i = 0 ; i < AUTOFS_HASH_SIZE ; i++) {
+ for (ent = sbi->dirhash.h[i] ; ent ; ent = nent) {
nent = ent->next;
- if ( ent->dentry )
+ if (ent->dentry)
dput(ent->dentry);
kfree(ent->name);
kfree(ent);
diff --git a/drivers/staging/batman-adv/Kconfig b/drivers/staging/batman-adv/Kconfig
deleted file mode 100644
index 8553f3517454..000000000000
--- a/drivers/staging/batman-adv/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# B.A.T.M.A.N meshing protocol
-#
-
-config BATMAN_ADV
- tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
- depends on NET
- default n
- ---help---
-
- B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
- a routing protocol for multi-hop ad-hoc mesh networks. The
- networks may be wired or wireless. See
- http://www.open-mesh.org/ for more information and user space
- tools.
-
-config BATMAN_ADV_DEBUG
- bool "B.A.T.M.A.N. debugging"
- depends on BATMAN_ADV != n
- ---help---
-
- This is an option for use by developers; most people should
- say N here. This enables compilation of support for
- outputting debugging information to the kernel log. The
- output is controlled via the module parameter debug.
-
diff --git a/drivers/staging/batman-adv/Makefile b/drivers/staging/batman-adv/Makefile
deleted file mode 100644
index 78924283ea74..000000000000
--- a/drivers/staging/batman-adv/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
-#
-# Marek Lindner, Simon Wunderlich
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of version 2 of the GNU General Public
-# License as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA
-#
-
-obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
diff --git a/drivers/staging/batman-adv/README b/drivers/staging/batman-adv/README
deleted file mode 100644
index 7c878bb07f3a..000000000000
--- a/drivers/staging/batman-adv/README
+++ /dev/null
@@ -1,240 +0,0 @@
-[state: 04-09-2010]
-
-BATMAN-ADV
-----------
-
-Batman advanced is a new approach to wireless networking which
-no longer operates on an IP basis. Unlike the batman daemon,
-which exchanges information using UDP packets and sets routing
-tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
-and routes (or better: bridges) Ethernet Frames. It emulates a
-virtual network switch of all nodes participating. Therefore all
-nodes appear to be link local, thus all higher operating proto-
-cols won't be affected by any changes within the network. You can
-run almost any protocol above batman advanced, prominent examples
-are: IPv4, IPv6, DHCP, IPX.
-
-Batman advanced was implemented as a Linux kernel driver to re-
-duce the overhead to a minimum. It does not depend on any (other)
-network driver, and can be used on wifi as well as ethernet lan,
-vpn, etc ... (anything with ethernet-style layer 2).
-
-CONFIGURATION
--------------
-
-Load the batman-adv module into your kernel:
-
-# insmod batman-adv.ko
-
-The module is now waiting for activation. You must add some in-
-terfaces on which batman can operate. After loading the module
-batman advanced will scan your system's interfaces to search for
-compatible interfaces. Once found, it will create subfolders in
-the /sys directories of each supported interface, e.g.
-
-# ls /sys/class/net/eth0/batman_adv/
-# iface_status mesh_iface
-
-If an interface does not have the "batman_adv" subfolder it prob-
-ably is not supported. Unsupported interfaces are: loopback,
-non-ethernet and batman's own interfaces.
-
-Note: After the module was loaded it will continuously watch for
-new interfaces to verify the compatibility. There is no need to
-reload the module if you plug your USB wifi adapter into your ma-
-chine after batman advanced was initially loaded.
-
-To activate a given interface simply write "bat0" into its
-"mesh_iface" file inside the batman_adv subfolder:
-
-# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
-
-Repeat this step for all interfaces you wish to add. Now batman
-starts using/broadcasting on this/these interface(s).
-
-By reading the "iface_status" file you can check its status:
-
-# cat /sys/class/net/eth0/batman_adv/iface_status
-# active
-
-To deactivate an interface you have to write "none" into its
-"mesh_iface" file:
-
-# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
-
-
-All mesh wide settings can be found in batman's own interface
-folder:
-
-# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms bonding orig_interval vis_mode
-
-
-There is a special folder for debugging information:
-
-# ls /sys/kernel/debug/batman_adv/bat0/
-# originators socket transtable_global transtable_local
-# vis_data
-
-
-Some of the files contain all sorts of status information regard-
-ing the mesh network. For example, you can view the table of
-originators (mesh participants) with:
-
-# cat /sys/kernel/debug/batman_adv/bat0/originators
-
-Other files allow you to change batman's behaviour to better fit your
-requirements. For instance, you can check the current originator
-interval (value in milliseconds which determines how often batman
-sends its broadcast packets):
-
-# cat /sys/class/net/bat0/mesh/orig_interval
-# 1000
-
-and also change its value:
-
-# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
-
-In very mobile scenarios, you might want to adjust the originator
-interval to a lower value. This will make the mesh more respon-
-sive to topology changes, but will also increase the overhead.
-
-
-USAGE
------
-
-To make use of your newly created mesh, batman advanced provides
-a new interface "bat0" which you should use from this point on.
-All interfaces added to batman advanced are not relevant any
-longer because batman handles them for you. Basically, one "hands
-over" the data by using the batman interface and batman will make
-sure it reaches its destination.
-
-The "bat0" interface can be used like any other regular inter-
-face. It needs an IP address which can be either statically con-
-figured or dynamically (by using DHCP or similar services):
-
-# NodeA: ifconfig bat0 192.168.0.1
-# NodeB: ifconfig bat0 192.168.0.2
-# NodeB: ping 192.168.0.1
-
-Note: In order to avoid problems remove all IP addresses previ-
-ously assigned to interfaces now used by batman advanced, e.g.
-
-# ifconfig eth0 0.0.0.0
-
-
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh node must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each node is either configured as "server" or as "client" (de-
-fault: "client"). Clients send their topology data to the server
-next to them, and servers synchronize with other servers. If there
-is no server configured (default) within the mesh, no topology
-information will be transmitted. With these "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When configured as server, you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertible
-with other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those out-
-puts could then be visualised in an image.
-
-The raw format consists of comma-separated values per entry where
-each entry gives information about a certain source inter-
-face. Each entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
- (each line begins with it)
--> "TQ mac value" - src mac's link quality towards mac address
- of a neighbor originator's interface which
- is being used for routing
--> "HNA mac" - HNA announced by source mac
--> "PRIMARY" - this is a primary interface
--> "SEC mac" - secondary mac address of source
- (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being the best.
-The HNA entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network. The PRIMARY/SEC
-values are only applied on primary interfaces.
-
-
-LOGGING/DEBUGGING
------------------
-
-All error messages, warnings and information messages are sent to
-the kernel log. Depending on your operating system distribution
-this can be read in one of a number of ways. Try using the com-
-mands: dmesg, logread, or looking in the files /var/log/kern.log
-or /var/log/syslog. All batman-adv messages are prefixed with
-"batman-adv:" So to see just these messages try
-
-# dmesg | grep batman-adv
-
-When investigating problems with your mesh network it is some-
-times necessary to see more detailed debug messages. This must be
-enabled when compiling the batman-adv module. When building bat-
-man-adv as part of kernel, use "make menuconfig" and enable the
-option "B.A.T.M.A.N. debugging".
-
-Those additional debug messages can be accessed using a special
-file in debugfs
-
-# cat /sys/kernel/debug/batman_adv/bat0/log
-
-The additional debug output is by default disabled. It can be en-
-abled during run time. The following log levels are defined:
-
-0 - All debug output disabled
-1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or hna added / changed / deleted
-3 - Enable all messages
-
-The debug output can be changed at runtime using the file
-/sys/class/net/bat0/mesh/log_level. e.g.
-
-# echo 2 > /sys/class/net/bat0/mesh/log_level
-
-will enable debug messages when routes or HNAs change.
-
-
-BATCTL
-------
-
-As batman advanced operates on layer 2, all hosts participating in
-the virtual switch are completely transparent to all protocols
-above layer 2. Therefore the common diagnosis tools do not work
-as expected. To overcome these problems batctl was created. At
-the moment batctl contains ping, traceroute, tcpdump and
-interfaces to the kernel module settings.
-
-For more information, please see the manpage (man batctl).
-
-batctl is available on http://www.open-mesh.org/
-
-
-CONTACT
--------
-
-Please send us comments, experiences, questions, anything :)
-
-IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@lists.open-mesh.org
- (optional subscription at
- https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
-
-You can also contact the Authors:
-
-Marek Lindner <lindner_marek@yahoo.de>
-Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-
diff --git a/drivers/staging/batman-adv/TODO b/drivers/staging/batman-adv/TODO
deleted file mode 100644
index 11c384f8b063..000000000000
--- a/drivers/staging/batman-adv/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
- * remove own list functionality from hash
- * use hlist_head, hlist_node in hash
- * don't use callbacks for compare+choose in hash
- * think about more efficient ways instead of abstraction of hash
- * Request a new review
- * Process the comments from the review
- * Move into mainline proper
-
-Please send all patches to:
- Marek Lindner <lindner_marek@yahoo.de>
- Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
- Andrew Lunn <andrew@lunn.ch>
- b.a.t.m.a.n@lists.open-mesh.org
- Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
deleted file mode 100644
index 08624d44e231..000000000000
--- a/drivers/staging/batman-adv/aggregation.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "aggregation.h"
-#include "send.h"
-#include "routing.h"
-
-/* calculate the size of the hna information for a given packet */
-static int hna_len(struct batman_packet *batman_packet)
-{
- return batman_packet->num_hna * ETH_ALEN;
-}
-
-/* return true if new_packet can be aggregated with forw_packet */
-static bool can_aggregate_with(struct batman_packet *new_batman_packet,
- int packet_len,
- unsigned long send_time,
- bool directlink,
- struct batman_if *if_incoming,
- struct forw_packet *forw_packet)
-{
- struct batman_packet *batman_packet =
- (struct batman_packet *)forw_packet->skb->data;
- int aggregated_bytes = forw_packet->packet_len + packet_len;
-
- /**
- * we can aggregate the current packet to this aggregated packet
- * if:
- *
- * - the send time is within our MAX_AGGREGATION_MS time
- * - the resulting packet won't be bigger than
- * MAX_AGGREGATION_BYTES
- */
-
- if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
- forw_packet->send_time) &&
- (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
-
- /**
- * check aggregation compatibility
- * -> direct link packets are broadcasted on
- * their interface only
- * -> aggregate packet if the current packet is
- * a "global" packet as well as the base
- * packet
- */
-
- /* packets without direct link flag and high TTL
- * are flooded through the net */
- if ((!directlink) &&
- (!(batman_packet->flags & DIRECTLINK)) &&
- (batman_packet->ttl != 1) &&
-
- /* own packets originating non-primary
- * interfaces leave only that interface */
- ((!forw_packet->own) ||
- (forw_packet->if_incoming->if_num == 0)))
- return true;
-
- /* if the incoming packet is sent via this one
- * interface only - we still can aggregate */
- if ((directlink) &&
- (new_batman_packet->ttl == 1) &&
- (forw_packet->if_incoming == if_incoming) &&
-
- /* packets from direct neighbors or
- * own secondary interface packets
- * (= secondary interface packets in general) */
- (batman_packet->flags & DIRECTLINK ||
- (forw_packet->own &&
- forw_packet->if_incoming->if_num != 0)))
- return true;
- }
-
- return false;
-}
-
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-/* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
- unsigned long send_time, bool direct_link,
- struct batman_if *if_incoming,
- int own_packet)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct forw_packet *forw_packet_aggr;
- unsigned long flags;
- unsigned char *skb_buff;
-
- /* own packet should always be scheduled */
- if (!own_packet) {
- if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
- return;
- }
- }
-
- forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
- if (!forw_packet_aggr) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- return;
- }
-
- if ((atomic_read(&bat_priv->aggregation_enabled)) &&
- (packet_len < MAX_AGGREGATION_BYTES))
- forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- sizeof(struct ethhdr));
- else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len +
- sizeof(struct ethhdr));
-
- if (!forw_packet_aggr->skb) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- kfree(forw_packet_aggr);
- return;
- }
- skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
-
- INIT_HLIST_NODE(&forw_packet_aggr->list);
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- forw_packet_aggr->packet_len = packet_len;
- memcpy(skb_buff, packet_buff, packet_len);
-
- forw_packet_aggr->own = own_packet;
- forw_packet_aggr->if_incoming = if_incoming;
- forw_packet_aggr->num_packets = 0;
- forw_packet_aggr->direct_link_flags = 0;
- forw_packet_aggr->send_time = send_time;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |= 1;
-
- /* add new packet to packet list */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
- hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
- /* start timer for this packet */
- INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_packet);
- queue_delayed_work(bat_event_workqueue,
- &forw_packet_aggr->delayed_work,
- send_time - jiffies);
-}
-
-/* aggregate a new packet into the existing aggregation */
-static void aggregate(struct forw_packet *forw_packet_aggr,
- unsigned char *packet_buff,
- int packet_len,
- bool direct_link)
-{
- unsigned char *skb_buff;
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- memcpy(skb_buff, packet_buff, packet_len);
- forw_packet_aggr->packet_len += packet_len;
- forw_packet_aggr->num_packets++;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |=
- (1 << forw_packet_aggr->num_packets);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct batman_if *if_incoming, char own_packet,
- unsigned long send_time)
-{
- /**
- * _aggr -> pointer to the packet we want to aggregate with
- * _pos -> pointer to the position in the queue
- */
- struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
- struct hlist_node *tmp_node;
- struct batman_packet *batman_packet =
- (struct batman_packet *)packet_buff;
- bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
- unsigned long flags;
-
- /* find position for the packet in the forward queue */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
- /* own packets are not to be aggregated */
- if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node,
- &bat_priv->forw_bat_list, list) {
- if (can_aggregate_with(batman_packet,
- packet_len,
- send_time,
- direct_link,
- if_incoming,
- forw_packet_pos)) {
- forw_packet_aggr = forw_packet_pos;
- break;
- }
- }
- }
-
- /* nothing to aggregate with - either aggregation disabled or no
- * suitable aggregation packet found */
- if (forw_packet_aggr == NULL) {
- /* the following section can run without the lock */
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
- /**
- * if we could not aggregate this packet with one of the others
- * we hold it back for a while, so that it might be aggregated
- * later on
- */
- if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregation_enabled)))
- send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
-
- new_aggregated_packet(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
- } else {
- aggregate(forw_packet_aggr,
- packet_buff, packet_len,
- direct_link);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
- }
-}
-
-/* unpack the aggregated packets and process them one by one */
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct batman_if *if_incoming)
-{
- struct batman_packet *batman_packet;
- int buff_pos = 0;
- unsigned char *hna_buff;
-
- batman_packet = (struct batman_packet *)packet_buff;
-
- do {
- /* network to host order for our 32bit seqno, and the
- orig_interval. */
- batman_packet->seqno = ntohl(batman_packet->seqno);
-
- hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
- receive_bat_packet(ethhdr, batman_packet,
- hna_buff, hna_len(batman_packet),
- if_incoming);
-
- buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
- batman_packet = (struct batman_packet *)
- (packet_buff + buff_pos);
- } while (aggregated_packet(buff_pos, packet_len,
- batman_packet->num_hna));
-}
diff --git a/drivers/staging/batman-adv/aggregation.h b/drivers/staging/batman-adv/aggregation.h
deleted file mode 100644
index 71a91b3da913..000000000000
--- a/drivers/staging/batman-adv/aggregation.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
-#define _NET_BATMAN_ADV_AGGREGATION_H_
-
-#include "main.h"
-
-/* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
-{
- int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
-
- return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= MAX_AGGREGATION_BYTES);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct batman_if *if_incoming, char own_packet,
- unsigned long send_time);
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
- int packet_len, struct batman_if *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/drivers/staging/batman-adv/bat_debugfs.c b/drivers/staging/batman-adv/bat_debugfs.c
deleted file mode 100644
index 57f84a9f8234..000000000000
--- a/drivers/staging/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-
-#include <linux/debugfs.h>
-
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "vis.h"
-#include "icmp_socket.h"
-
-static struct dentry *bat_debugfs;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-#define LOG_BUFF_MASK (log_buff_len-1)
-#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
-
-static int log_buff_len = LOG_BUF_LEN;
-
-static void emit_log_char(struct debug_log *debug_log, char c)
-{
- LOG_BUFF(debug_log->log_end) = c;
- debug_log->log_end++;
-
- if (debug_log->log_end - debug_log->log_start > log_buff_len)
- debug_log->log_start = debug_log->log_end - log_buff_len;
-}
-
-static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
-{
- int printed_len;
- va_list args;
- static char debug_log_buf[256];
- char *p;
- unsigned long flags;
-
- if (!debug_log)
- return 0;
-
- spin_lock_irqsave(&debug_log->lock, flags);
- va_start(args, fmt);
- printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
- fmt, args);
- va_end(args);
-
- for (p = debug_log_buf; *p != 0; p++)
- emit_log_char(debug_log, *p);
-
- spin_unlock_irqrestore(&debug_log->lock, flags);
-
- wake_up(&debug_log->queue_wait);
-
- return 0;
-}
-
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
-{
- va_list args;
- char tmp_log_buf[256];
-
- va_start(args, fmt);
- vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- fdebug_log(bat_priv->debug_log, "[%10u] %s",
- (jiffies / HZ), tmp_log_buf);
- va_end(args);
-
- return 0;
-}
-
-static int log_open(struct inode *inode, struct file *file)
-{
- nonseekable_open(inode, file);
- file->private_data = inode->i_private;
- inc_module_count();
- return 0;
-}
-
-static int log_release(struct inode *inode, struct file *file)
-{
- dec_module_count();
- return 0;
-}
-
-static ssize_t log_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
- int error, i = 0;
- char c;
- unsigned long flags;
-
- if ((file->f_flags & O_NONBLOCK) &&
- !(debug_log->log_end - debug_log->log_start))
- return -EAGAIN;
-
- if ((!buf) || (count < 0))
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(debug_log->queue_wait,
- (debug_log->log_start - debug_log->log_end));
-
- if (error)
- return error;
-
- spin_lock_irqsave(&debug_log->lock, flags);
-
- while ((!error) && (i < count) &&
- (debug_log->log_start != debug_log->log_end)) {
- c = LOG_BUFF(debug_log->log_start);
-
- debug_log->log_start++;
-
- spin_unlock_irqrestore(&debug_log->lock, flags);
-
- error = __put_user(c, buf);
-
- spin_lock_irqsave(&debug_log->lock, flags);
-
- buf++;
- i++;
-
- }
-
- spin_unlock_irqrestore(&debug_log->lock, flags);
-
- if (!error)
- return i;
-
- return error;
-}
-
-static unsigned int log_poll(struct file *file, poll_table *wait)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
-
- poll_wait(file, &debug_log->queue_wait, wait);
-
- if (debug_log->log_end - debug_log->log_start)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations log_fops = {
- .open = log_open,
- .release = log_release,
- .read = log_read,
- .poll = log_poll,
- .llseek = no_llseek,
-};
-
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- struct dentry *d;
-
- if (!bat_priv->debug_dir)
- goto err;
-
- bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC);
- if (!bat_priv->debug_log)
- goto err;
-
- spin_lock_init(&bat_priv->debug_log->lock);
- init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
- d = debugfs_create_file("log", S_IFREG | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &log_fops);
- if (d)
- goto err;
-
- return 0;
-
-err:
- return 1;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- kfree(bat_priv->debug_log);
- bat_priv->debug_log = NULL;
-}
-#else /* CONFIG_BATMAN_ADV_DEBUG */
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- bat_priv->debug_log = NULL;
- return 0;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- return;
-}
-#endif
-
-static int originators_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, orig_seq_print_text, net_dev);
-}
-
-static int transtable_global_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, hna_global_seq_print_text, net_dev);
-}
-
-static int transtable_local_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, hna_local_seq_print_text, net_dev);
-}
-
-static int vis_data_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, vis_seq_print_text, net_dev);
-}
-
-struct bat_debuginfo {
- struct attribute attr;
- const struct file_operations fops;
-};
-
-#define BAT_DEBUGINFO(_name, _mode, _open) \
-struct bat_debuginfo bat_debuginfo_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = _mode, }, \
- .fops = { .owner = THIS_MODULE, \
- .open = _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- } \
-};
-
-static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
-static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
-static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
-static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
-
-static struct bat_debuginfo *mesh_debuginfos[] = {
- &bat_debuginfo_originators,
- &bat_debuginfo_transtable_global,
- &bat_debuginfo_transtable_local,
- &bat_debuginfo_vis_data,
- NULL,
-};
-
-void debugfs_init(void)
-{
- bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
- if (bat_debugfs == ERR_PTR(-ENODEV))
- bat_debugfs = NULL;
-}
-
-void debugfs_destroy(void)
-{
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_debugfs);
- bat_debugfs = NULL;
- }
-}
-
-int debugfs_add_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_debuginfo **bat_debug;
- struct dentry *file;
-
- if (!bat_debugfs)
- goto out;
-
- bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
- if (!bat_priv->debug_dir)
- goto out;
-
- bat_socket_setup(bat_priv);
- debug_log_setup(bat_priv);
-
- for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
- file = debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- bat_priv->debug_dir,
- dev, &(*bat_debug)->fops);
- if (!file) {
- bat_err(dev, "Can't add debugfs file: %s/%s\n",
- dev->name, ((*bat_debug)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-rem_attr:
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
-out:
-#ifdef CONFIG_DEBUG_FS
- return -ENOMEM;
-#else
- return 0;
-#endif /* CONFIG_DEBUG_FS */
-}
-
-void debugfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
-
- debug_log_cleanup(bat_priv);
-
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
- }
-}
diff --git a/drivers/staging/batman-adv/bat_debugfs.h b/drivers/staging/batman-adv/bat_debugfs.h
deleted file mode 100644
index 72df532b7d5f..000000000000
--- a/drivers/staging/batman-adv/bat_debugfs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
-#define _NET_BATMAN_ADV_DEBUGFS_H_
-
-#define DEBUGFS_BAT_SUBDIR "batman_adv"
-
-void debugfs_init(void);
-void debugfs_destroy(void);
-int debugfs_add_meshif(struct net_device *dev);
-void debugfs_del_meshif(struct net_device *dev);
-
-#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
deleted file mode 100644
index bc17fb816300..000000000000
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "vis.h"
-
-#define to_dev(obj) container_of(obj, struct device, kobj)
-
-#define BAT_ATTR(_name, _mode, _show, _store) \
-struct bat_attribute bat_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-static ssize_t show_aggr_ogms(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
-
- return sprintf(buff, "%s\n",
- aggr_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_aggr_ogms(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- int aggr_tmp = -1;
-
- if (((count == 2) && (buff[0] == '1')) ||
- (strncmp(buff, "enable", 6) == 0))
- aggr_tmp = 1;
-
- if (((count == 2) && (buff[0] == '0')) ||
- (strncmp(buff, "disable", 7) == 0))
- aggr_tmp = 0;
-
- if (aggr_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_info(net_dev,
- "Invalid parameter for 'aggregate OGM' setting"
- "received: %s\n", buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
- return count;
-
- bat_info(net_dev, "Changing aggregation from: %s to: %s\n",
- atomic_read(&bat_priv->aggregation_enabled) == 1 ?
- "enabled" : "disabled", aggr_tmp == 1 ? "enabled" :
- "disabled");
-
- atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
- return count;
-}
-
-static ssize_t show_bond(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int bond_status = atomic_read(&bat_priv->bonding_enabled);
-
- return sprintf(buff, "%s\n",
- bond_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- int bonding_enabled_tmp = -1;
-
- if (((count == 2) && (buff[0] == '1')) ||
- (strncmp(buff, "enable", 6) == 0))
- bonding_enabled_tmp = 1;
-
- if (((count == 2) && (buff[0] == '0')) ||
- (strncmp(buff, "disable", 7) == 0))
- bonding_enabled_tmp = 0;
-
- if (bonding_enabled_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_err(net_dev,
- "Invalid parameter for 'bonding' setting received: "
- "%s\n", buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->bonding_enabled) == bonding_enabled_tmp)
- return count;
-
- bat_info(net_dev, "Changing bonding from: %s to: %s\n",
- atomic_read(&bat_priv->bonding_enabled) == 1 ?
- "enabled" : "disabled",
- bonding_enabled_tmp == 1 ? "enabled" : "disabled");
-
- atomic_set(&bat_priv->bonding_enabled, (unsigned)bonding_enabled_tmp);
- return count;
-}
-
-static ssize_t show_frag(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int frag_status = atomic_read(&bat_priv->frag_enabled);
-
- return sprintf(buff, "%s\n",
- frag_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_frag(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- int frag_enabled_tmp = -1;
-
- if (((count == 2) && (buff[0] == '1')) ||
- (strncmp(buff, "enable", 6) == 0))
- frag_enabled_tmp = 1;
-
- if (((count == 2) && (buff[0] == '0')) ||
- (strncmp(buff, "disable", 7) == 0))
- frag_enabled_tmp = 0;
-
- if (frag_enabled_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_err(net_dev,
-			"Invalid parameter for 'fragmentation' setting on mesh "
-			"received: %s\n", buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
- return count;
-
- bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
- atomic_read(&bat_priv->frag_enabled) == 1 ?
- "enabled" : "disabled",
- frag_enabled_tmp == 1 ? "enabled" : "disabled");
-
- atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
- update_min_mtu(net_dev);
- return count;
-}
-
-static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int vis_mode = atomic_read(&bat_priv->vis_mode);
-
- return sprintf(buff, "%s\n",
- vis_mode == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-}
-
-static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- unsigned long val;
- int ret, vis_mode_tmp = -1;
-
- ret = strict_strtoul(buff, 10, &val);
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
- (strncmp(buff, "client", 6) == 0) ||
- (strncmp(buff, "off", 3) == 0))
- vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
- (strncmp(buff, "server", 6) == 0))
- vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
-
- if (vis_mode_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_info(net_dev,
- "Invalid parameter for 'vis mode' setting received: "
- "%s\n", buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
- return count;
-
- bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
- atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-
- atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
- return count;
-}
-
-static ssize_t show_orig_interval(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buff, "%i\n",
- atomic_read(&bat_priv->orig_interval));
-}
-
-static ssize_t store_orig_interval(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- unsigned long orig_interval_tmp;
- int ret;
-
- ret = strict_strtoul(buff, 10, &orig_interval_tmp);
- if (ret) {
- bat_info(net_dev, "Invalid parameter for 'orig_interval' "
- "setting received: %s\n", buff);
- return -EINVAL;
- }
-
- if (orig_interval_tmp < JITTER * 2) {
- bat_info(net_dev, "New originator interval too small: %li "
- "(min: %i)\n", orig_interval_tmp, JITTER * 2);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->orig_interval) == orig_interval_tmp)
- return count;
-
- bat_info(net_dev, "Changing originator interval from: %i to: %li\n",
- atomic_read(&bat_priv->orig_interval),
- orig_interval_tmp);
-
- atomic_set(&bat_priv->orig_interval, orig_interval_tmp);
- return count;
-}
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-static ssize_t show_log_level(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int log_level = atomic_read(&bat_priv->log_level);
-
- return sprintf(buff, "%d\n", log_level);
-}
-
-static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- unsigned long log_level_tmp;
- int ret;
-
- ret = strict_strtoul(buff, 10, &log_level_tmp);
- if (ret) {
- bat_info(net_dev, "Invalid parameter for 'log_level' "
- "setting received: %s\n", buff);
- return -EINVAL;
- }
-
- if (log_level_tmp > 3) {
- bat_info(net_dev, "New log level too big: %li "
- "(max: %i)\n", log_level_tmp, 3);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->log_level) == log_level_tmp)
- return count;
-
- bat_info(net_dev, "Changing log level from: %i to: %li\n",
- atomic_read(&bat_priv->log_level),
- log_level_tmp);
-
- atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
- return count;
-}
-#endif
-
-static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
- show_aggr_ogms, store_aggr_ogms);
-static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
-static BAT_ATTR(fragmentation, S_IRUGO | S_IWUSR, show_frag, store_frag);
-static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
-static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
- show_orig_interval, store_orig_interval);
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
-#endif
-
-static struct bat_attribute *mesh_attrs[] = {
- &bat_attr_aggregated_ogms,
- &bat_attr_bonding,
- &bat_attr_fragmentation,
- &bat_attr_vis_mode,
- &bat_attr_orig_interval,
-#ifdef CONFIG_BATMAN_ADV_DEBUG
- &bat_attr_log_level,
-#endif
- NULL,
-};
-
-int sysfs_add_meshif(struct net_device *dev)
-{
- struct kobject *batif_kobject = &dev->dev.kobj;
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
- int err;
-
- bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
- batif_kobject);
- if (!bat_priv->mesh_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_MESH_SUBDIR);
- goto out;
- }
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(bat_priv->mesh_obj,
- &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-out:
- return -ENOMEM;
-}
-
-void sysfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-}
-
-static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- ssize_t length;
-
- if (!batman_if)
- return 0;
-
- length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
- "none" : batman_if->soft_iface->name);
-
- hardif_put(batman_if);
-
- return length;
-}
-
-static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- int status_tmp = -1;
- int ret;
-
- if (!batman_if)
- return count;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: "
- "interface name too long '%s'\n", buff);
- hardif_put(batman_if);
- return -EINVAL;
- }
-
- if (strncmp(buff, "none", 4) == 0)
- status_tmp = IF_NOT_IN_USE;
- else
- status_tmp = IF_I_WANT_YOU;
-
- if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
- (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
- hardif_put(batman_if);
- return count;
- }
-
- if (status_tmp == IF_NOT_IN_USE) {
- rtnl_lock();
- hardif_disable_interface(batman_if);
- rtnl_unlock();
- hardif_put(batman_if);
- return count;
- }
-
-	/* if the interface is already in use */
- if (batman_if->if_status != IF_NOT_IN_USE) {
- rtnl_lock();
- hardif_disable_interface(batman_if);
- rtnl_unlock();
- }
-
- ret = hardif_enable_interface(batman_if, buff);
- hardif_put(batman_if);
-
-	return ret == 0 ? count : ret;
-}
-
-static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct device *dev = to_dev(kobj->parent);
- struct net_device *net_dev = to_net_dev(dev);
- struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- ssize_t length;
-
- if (!batman_if)
- return 0;
-
- switch (batman_if->if_status) {
- case IF_TO_BE_REMOVED:
- length = sprintf(buff, "disabling\n");
- break;
- case IF_INACTIVE:
- length = sprintf(buff, "inactive\n");
- break;
- case IF_ACTIVE:
- length = sprintf(buff, "active\n");
- break;
- case IF_TO_BE_ACTIVATED:
- length = sprintf(buff, "enabling\n");
- break;
- case IF_NOT_IN_USE:
- default:
- length = sprintf(buff, "not in use\n");
- break;
- }
-
- hardif_put(batman_if);
-
- return length;
-}
-
-static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
- show_mesh_iface, store_mesh_iface);
-static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
-
-static struct bat_attribute *batman_attrs[] = {
- &bat_attr_mesh_iface,
- &bat_attr_iface_status,
- NULL,
-};
-
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
-{
- struct kobject *hardif_kobject = &dev->dev.kobj;
- struct bat_attribute **bat_attr;
- int err;
-
- *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
- hardif_kobject);
-
- if (!*hardif_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_BAT_SUBDIR);
- goto out;
- }
-
- for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_BAT_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
-	for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
-		sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
-
-	kobject_put(*hardif_obj);
-	*hardif_obj = NULL;
-out:
-	return -ENOMEM;
-}
-
-void sysfs_del_hardif(struct kobject **hardif_obj)
-{
- kobject_put(*hardif_obj);
- *hardif_obj = NULL;
-}
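
For reference, a minimal sketch of how a further boolean mesh attribute could be wired up with the same show/store pattern used above. The attribute name example_setting and the bat_priv->example_enabled counter are hypothetical and only illustrate the BAT_ATTR plumbing; they are not part of the driver.

	/* hypothetical sketch only: follows the show/store pattern above */
	static ssize_t show_example(struct kobject *kobj, struct attribute *attr,
				    char *buff)
	{
		struct device *dev = to_dev(kobj->parent);
		struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));

		return sprintf(buff, "%s\n",
			       atomic_read(&bat_priv->example_enabled) == 0 ?
			       "disabled" : "enabled");
	}

	static ssize_t store_example(struct kobject *kobj, struct attribute *attr,
				     char *buff, size_t count)
	{
		struct device *dev = to_dev(kobj->parent);
		struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
		int tmp = -1;

		/* accept "1"/"enable" and "0"/"disable", as the handlers above do */
		if (((count == 2) && (buff[0] == '1')) ||
		    (strncmp(buff, "enable", 6) == 0))
			tmp = 1;

		if (((count == 2) && (buff[0] == '0')) ||
		    (strncmp(buff, "disable", 7) == 0))
			tmp = 0;

		if (tmp < 0)
			return -EINVAL;

		atomic_set(&bat_priv->example_enabled, (unsigned)tmp);
		return count;
	}

	static BAT_ATTR(example_setting, S_IRUGO | S_IWUSR,
			show_example, store_example);

The resulting bat_attr_example_setting would then be listed in mesh_attrs[] so that sysfs_add_meshif() creates the file.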
diff --git a/drivers/staging/batman-adv/bat_sysfs.h b/drivers/staging/batman-adv/bat_sysfs.h
deleted file mode 100644
index 7f186c007b4f..000000000000
--- a/drivers/staging/batman-adv/bat_sysfs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-#ifndef _NET_BATMAN_ADV_SYSFS_H_
-#define _NET_BATMAN_ADV_SYSFS_H_
-
-#define SYSFS_IF_MESH_SUBDIR "mesh"
-#define SYSFS_IF_BAT_SUBDIR "batman_adv"
-
-struct bat_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
- char *buf);
- ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
- char *buf, size_t count);
-};
-
-int sysfs_add_meshif(struct net_device *dev);
-void sysfs_del_meshif(struct net_device *dev);
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
-void sysfs_del_hardif(struct kobject **hardif_obj);
-
-#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/drivers/staging/batman-adv/bitarray.c b/drivers/staging/batman-adv/bitarray.c
deleted file mode 100644
index 814274fbaa2f..000000000000
--- a/drivers/staging/batman-adv/bitarray.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bitarray.h"
-
-#include <linux/bitops.h>
-
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
- uint32_t curr_seqno)
-{
- int32_t diff, word_offset, word_num;
-
- diff = last_seqno - curr_seqno;
- if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
- return 0;
- } else {
- /* which word */
- word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
- /* which position in the selected word */
- word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
- if (seq_bits[word_num] & 1 << word_offset)
- return 1;
- else
- return 0;
- }
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n)
-{
- int32_t word_offset, word_num;
-
- /* if too old, just drop it */
- if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
- return;
-
- /* which word */
- word_num = n / WORD_BIT_SIZE;
- /* which position in the selected word */
- word_offset = n % WORD_BIT_SIZE;
-
- seq_bits[word_num] |= 1 << word_offset; /* turn the position on */
-}
-
-/* shift the packet array by n places. */
-static void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
-{
- int32_t word_offset, word_num;
- int32_t i;
-
- if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
- return;
-
- word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
- word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */
-
- for (i = NUM_WORDS - 1; i > word_num; i--) {
- /* going from old to new, so we don't overwrite the data we copy
- * from.
- *
- * left is high, right is low: FEDC BA98 7654 3210
- * ^^ ^^
- * vvvv
-		 * ^^^^ = from, vvvv = to; in this example we'd have
-		 * word_num==1 and word_offset==WORD_BIT_SIZE/2, i.e. a
-		 * total shift of 24 bits.
- *
- * our desired output would be: 9876 5432 1000 0000
- * */
-
- seq_bits[i] =
- (seq_bits[i - word_num] << word_offset) +
-			/* take the lower part from the left half, shift it left
-			 * to its final position */
- (seq_bits[i - word_num - 1] >>
- (WORD_BIT_SIZE-word_offset));
-			/* and take the upper part of the right half and shift it
-			 * down to its new position */
- /* for our example that would be: word[0] = 9800 + 0076 =
- * 9876 */
- }
-	/* now for our last word, i==word_num, we only have its "left"
-	 * half. that's the 1000 word in our example. */
-
- seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
- /* pad the rest with 0, if there is anything */
- i--;
-
- for (; i >= 0; i--)
- seq_bits[i] = 0;
-}
-
-static void bit_reset_window(TYPE_OF_WORD *seq_bits)
-{
- int i;
- for (i = 0; i < NUM_WORDS; i++)
- seq_bits[i] = 0;
-}
-
-
-/* receive and process one packet within the sequence number window.
- *
- * returns:
- * 1 if the window was moved (either new or very old)
- * 0 if the window was not moved/shifted.
- */
-char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
- int32_t seq_num_diff, int8_t set_mark)
-{
- struct bat_priv *bat_priv = (struct bat_priv *)priv;
-
- /* sequence number is slightly older. We already got a sequence number
- * higher than this one, so we just mark it. */
-
- if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
- if (set_mark)
- bit_mark(seq_bits, -seq_num_diff);
- return 0;
- }
-
- /* sequence number is slightly newer, so we shift the window and
- * set the mark if required */
-
- if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
- bit_shift(seq_bits, seq_num_diff);
-
- if (set_mark)
- bit_mark(seq_bits, 0);
- return 1;
- }
-
- /* sequence number is much newer, probably missed a lot of packets */
-
-	if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
-	    && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "We missed a lot of packets (%i) !\n",
- seq_num_diff - 1);
- bit_reset_window(seq_bits);
- if (set_mark)
- bit_mark(seq_bits, 0);
- return 1;
- }
-
- /* received a much older packet. The other host either restarted
- * or the old packet got delayed somewhere in the network. The
- * packet should be dropped without calling this function if the
- * seqno window is protected. */
-
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
- || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Other host probably restarted!\n");
-
- bit_reset_window(seq_bits);
- if (set_mark)
- bit_mark(seq_bits, 0);
-
- return 1;
- }
-
- /* never reached */
- return 0;
-}
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(TYPE_OF_WORD *seq_bits)
-{
- int i, hamming = 0;
-
- for (i = 0; i < NUM_WORDS; i++)
- hamming += hweight_long(seq_bits[i]);
-
- return hamming;
-}
diff --git a/drivers/staging/batman-adv/bitarray.h b/drivers/staging/batman-adv/bitarray.h
deleted file mode 100644
index 77b1e61847e8..000000000000
--- a/drivers/staging/batman-adv/bitarray.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_BITARRAY_H_
-#define _NET_BATMAN_ADV_BITARRAY_H_
-
-/* choose something big if you don't want to waste cpu, and keep the
- * type in sync with bit_packet_count() */
-#define TYPE_OF_WORD unsigned long
-#define WORD_BIT_SIZE (sizeof(TYPE_OF_WORD) * 8)
-
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
- uint32_t curr_seqno);
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n);
-
-
-/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old */
-char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
- int32_t seq_num_diff, int8_t set_mark);
-
-/* count the hamming weight, how many good packets did we receive? */
-int bit_packet_count(TYPE_OF_WORD *seq_bits);
-
-#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
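
As a rough illustration of the API declared above, the sketch below shows the usual call sequence for a sliding sequence-number window: check whether a sequence number was already counted, let bit_get_packet() shift and mark the window, then read the hamming weight. It assumes the usual main.h/types.h includes; the function itself is made up for illustration.

	/* illustrative only: account a freshly received sequence number */
	static void example_seqno_update(struct bat_priv *bat_priv,
					 TYPE_OF_WORD *seq_bits,
					 uint32_t *last_seqno, uint32_t curr_seqno)
	{
		int32_t seq_diff = curr_seqno - *last_seqno;

		/* already marked inside the sliding window? */
		if (get_bit_status(seq_bits, *last_seqno, curr_seqno))
			return;

		/* bit_get_packet() shifts the window for newer seqnos and sets
		 * the mark; it returns 1 whenever the window was moved */
		if (bit_get_packet(bat_priv, seq_bits, seq_diff, 1))
			*last_seqno = curr_seqno;

		/* hamming weight == packets seen within the current window */
		bat_dbg(DBG_BATMAN, bat_priv, "packets in window: %i\n",
			bit_packet_count(seq_bits));
	}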
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
deleted file mode 100644
index d85de82f941a..000000000000
--- a/drivers/staging/batman-adv/hard-interface.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "hard-interface.h"
-#include "soft-interface.h"
-#include "send.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "bat_sysfs.h"
-#include "originator.h"
-#include "hash.h"
-
-#include <linux/if_arp.h>
-
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
-
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->net_dev == net_dev)
- goto out;
- }
-
- batman_if = NULL;
-
-out:
- if (batman_if)
- hardif_hold(batman_if);
-
- rcu_read_unlock();
- return batman_if;
-}
-
-static int is_valid_iface(struct net_device *net_dev)
-{
- if (net_dev->flags & IFF_LOOPBACK)
- return 0;
-
- if (net_dev->type != ARPHRD_ETHER)
- return 0;
-
- if (net_dev->addr_len != ETH_ALEN)
- return 0;
-
- /* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
- if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
- return 0;
-#else
- if (net_dev->hard_start_xmit == interface_tx)
- return 0;
-#endif
-
- /* Device is being bridged */
- /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
- return 0; */
-
- return 1;
-}
-
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->soft_iface != soft_iface)
- continue;
-
- if (batman_if->if_status == IF_ACTIVE)
- goto out;
- }
-
- batman_if = NULL;
-
-out:
- if (batman_if)
- hardif_hold(batman_if);
-
- rcu_read_unlock();
- return batman_if;
-}
-
-static void update_primary_addr(struct bat_priv *bat_priv)
-{
- struct vis_packet *vis_packet;
-
- vis_packet = (struct vis_packet *)
- bat_priv->my_vis_info->skb_packet->data;
- memcpy(vis_packet->vis_orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(vis_packet->sender_orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-}
-
-static void set_primary_if(struct bat_priv *bat_priv,
- struct batman_if *batman_if)
-{
- struct batman_packet *batman_packet;
- struct batman_if *old_if;
-
- if (batman_if)
- hardif_hold(batman_if);
-
- old_if = bat_priv->primary_if;
- bat_priv->primary_if = batman_if;
-
- if (old_if)
- hardif_put(old_if);
-
- if (!bat_priv->primary_if)
- return;
-
- batman_packet = (struct batman_packet *)(batman_if->packet_buff);
- batman_packet->flags = PRIMARIES_FIRST_HOP;
- batman_packet->ttl = TTL;
-
- update_primary_addr(bat_priv);
-
- /***
- * hacky trick to make sure that we send the HNA information via
- * our new primary interface
- */
- atomic_set(&bat_priv->hna_local_changed, 1);
-}
-
-static bool hardif_is_iface_up(struct batman_if *batman_if)
-{
- if (batman_if->net_dev->flags & IFF_UP)
- return true;
-
- return false;
-}
-
-static void update_mac_addresses(struct batman_if *batman_if)
-{
- memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
- batman_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
- batman_if->net_dev->dev_addr, ETH_ALEN);
-}
-
-static void check_known_mac_addr(struct net_device *net_dev)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->if_status != IF_ACTIVE) &&
- (batman_if->if_status != IF_TO_BE_ACTIVATED))
- continue;
-
- if (batman_if->net_dev == net_dev)
- continue;
-
- if (!compare_orig(batman_if->net_dev->dev_addr,
- net_dev->dev_addr))
- continue;
-
- pr_warning("The newly added mac address (%pM) already exists "
- "on: %s\n", net_dev->dev_addr,
- batman_if->net_dev->name);
- pr_warning("It is strongly recommended to keep mac addresses "
- "unique to avoid problems!\n");
- }
- rcu_read_unlock();
-}
-
-int hardif_min_mtu(struct net_device *soft_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct batman_if *batman_if;
-	/* allow big frames if all devices are capable of doing so
-	 * (have MTU > 1500 + BAT_HEADER_LEN) */
- int min_mtu = ETH_DATA_LEN;
-
- if (atomic_read(&bat_priv->frag_enabled))
- goto out;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->if_status != IF_ACTIVE) &&
- (batman_if->if_status != IF_TO_BE_ACTIVATED))
- continue;
-
- if (batman_if->soft_iface != soft_iface)
- continue;
-
- min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
- min_mtu);
- }
- rcu_read_unlock();
-out:
- return min_mtu;
-}
-
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
-void update_min_mtu(struct net_device *soft_iface)
-{
- int min_mtu;
-
- min_mtu = hardif_min_mtu(soft_iface);
- if (soft_iface->mtu != min_mtu)
- soft_iface->mtu = min_mtu;
-}
-
-static void hardif_activate_interface(struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv;
-
- if (batman_if->if_status != IF_INACTIVE)
- return;
-
- bat_priv = netdev_priv(batman_if->soft_iface);
-
- update_mac_addresses(batman_if);
- batman_if->if_status = IF_TO_BE_ACTIVATED;
-
- /**
- * the first active interface becomes our primary interface or
-	 * the next active interface after the old primary interface was removed
- */
- if (!bat_priv->primary_if)
- set_primary_if(bat_priv, batman_if);
-
- bat_info(batman_if->soft_iface, "Interface activated: %s\n",
- batman_if->net_dev->name);
-
- update_min_mtu(batman_if->soft_iface);
- return;
-}
-
-static void hardif_deactivate_interface(struct batman_if *batman_if)
-{
- if ((batman_if->if_status != IF_ACTIVE) &&
- (batman_if->if_status != IF_TO_BE_ACTIVATED))
- return;
-
- batman_if->if_status = IF_INACTIVE;
-
- bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
- batman_if->net_dev->name);
-
- update_min_mtu(batman_if->soft_iface);
-}
-
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
-{
- struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
-
- if (batman_if->if_status != IF_NOT_IN_USE)
- goto out;
-
- batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
-
- if (!batman_if->soft_iface) {
- batman_if->soft_iface = softif_create(iface_name);
-
- if (!batman_if->soft_iface)
- goto err;
-
- /* dev_get_by_name() increases the reference counter for us */
- dev_hold(batman_if->soft_iface);
- }
-
- bat_priv = netdev_priv(batman_if->soft_iface);
- batman_if->packet_len = BAT_PACKET_LEN;
- batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
-
- if (!batman_if->packet_buff) {
- bat_err(batman_if->soft_iface, "Can't add interface packet "
- "(%s): out of memory\n", batman_if->net_dev->name);
- goto err;
- }
-
- batman_packet = (struct batman_packet *)(batman_if->packet_buff);
- batman_packet->packet_type = BAT_PACKET;
- batman_packet->version = COMPAT_VERSION;
- batman_packet->flags = 0;
- batman_packet->ttl = 2;
- batman_packet->tq = TQ_MAX_VALUE;
- batman_packet->num_hna = 0;
-
- batman_if->if_num = bat_priv->num_ifaces;
- bat_priv->num_ifaces++;
- batman_if->if_status = IF_INACTIVE;
- orig_hash_add_if(batman_if, bat_priv->num_ifaces);
-
- batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
- batman_if->batman_adv_ptype.func = batman_skb_recv;
- batman_if->batman_adv_ptype.dev = batman_if->net_dev;
- hardif_hold(batman_if);
- dev_add_pack(&batman_if->batman_adv_ptype);
-
- atomic_set(&batman_if->seqno, 1);
- atomic_set(&batman_if->frag_seqno, 1);
- bat_info(batman_if->soft_iface, "Adding interface: %s\n",
- batman_if->net_dev->name);
-
- if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(batman_if->soft_iface,
- "The MTU of interface %s is too small (%i) to handle "
- "the transport of batman-adv packets. Packets going "
- "over this interface will be fragmented on layer2 "
- "which could impact the performance. Setting the MTU "
- "to %zi would solve the problem.\n",
- batman_if->net_dev->name, batman_if->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(batman_if->soft_iface,
- "The MTU of interface %s is too small (%i) to handle "
- "the transport of batman-adv packets. If you experience"
- " problems getting traffic through try increasing the "
- "MTU to %zi.\n",
- batman_if->net_dev->name, batman_if->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (hardif_is_iface_up(batman_if))
- hardif_activate_interface(batman_if);
- else
- bat_err(batman_if->soft_iface, "Not using interface %s "
- "(retrying later): interface not active\n",
- batman_if->net_dev->name);
-
- /* begin scheduling originator messages on that interface */
- schedule_own_packet(batman_if);
-
-out:
- return 0;
-
-err:
- return -ENOMEM;
-}
-
-void hardif_disable_interface(struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-
- if (batman_if->if_status == IF_ACTIVE)
- hardif_deactivate_interface(batman_if);
-
- if (batman_if->if_status != IF_INACTIVE)
- return;
-
- bat_info(batman_if->soft_iface, "Removing interface: %s\n",
- batman_if->net_dev->name);
- dev_remove_pack(&batman_if->batman_adv_ptype);
- hardif_put(batman_if);
-
- bat_priv->num_ifaces--;
- orig_hash_del_if(batman_if, bat_priv->num_ifaces);
-
- if (batman_if == bat_priv->primary_if) {
- struct batman_if *new_if;
-
- new_if = get_active_batman_if(batman_if->soft_iface);
- set_primary_if(bat_priv, new_if);
-
- if (new_if)
- hardif_put(new_if);
- }
-
- kfree(batman_if->packet_buff);
- batman_if->packet_buff = NULL;
- batman_if->if_status = IF_NOT_IN_USE;
-
- /* delete all references to this batman_if */
- purge_orig_ref(bat_priv);
- purge_outstanding_packets(bat_priv, batman_if);
- dev_put(batman_if->soft_iface);
-
- /* nobody uses this interface anymore */
- if (!bat_priv->num_ifaces)
- softif_destroy(batman_if->soft_iface);
-
- batman_if->soft_iface = NULL;
-}
-
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
-{
- struct batman_if *batman_if;
- int ret;
-
- ret = is_valid_iface(net_dev);
- if (ret != 1)
- goto out;
-
- dev_hold(net_dev);
-
- batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
- if (!batman_if) {
- pr_err("Can't add interface (%s): out of memory\n",
- net_dev->name);
- goto release_dev;
- }
-
- ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
- if (ret)
- goto free_if;
-
- batman_if->if_num = -1;
- batman_if->net_dev = net_dev;
- batman_if->soft_iface = NULL;
- batman_if->if_status = IF_NOT_IN_USE;
- INIT_LIST_HEAD(&batman_if->list);
- atomic_set(&batman_if->refcnt, 0);
- hardif_hold(batman_if);
-
- check_known_mac_addr(batman_if->net_dev);
-
- spin_lock(&if_list_lock);
- list_add_tail_rcu(&batman_if->list, &if_list);
- spin_unlock(&if_list_lock);
-
- /* extra reference for return */
- hardif_hold(batman_if);
- return batman_if;
-
-free_if:
- kfree(batman_if);
-release_dev:
- dev_put(net_dev);
-out:
- return NULL;
-}
-
-static void hardif_remove_interface(struct batman_if *batman_if)
-{
- /* first deactivate interface */
- if (batman_if->if_status != IF_NOT_IN_USE)
- hardif_disable_interface(batman_if);
-
- if (batman_if->if_status != IF_NOT_IN_USE)
- return;
-
- batman_if->if_status = IF_TO_BE_REMOVED;
- synchronize_rcu();
- sysfs_del_hardif(&batman_if->hardif_obj);
- hardif_put(batman_if);
-}
-
-void hardif_remove_interfaces(void)
-{
- struct batman_if *batman_if, *batman_if_tmp;
- struct list_head if_queue;
-
- INIT_LIST_HEAD(&if_queue);
-
- spin_lock(&if_list_lock);
- list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
- list_del_rcu(&batman_if->list);
- list_add_tail(&batman_if->list, &if_queue);
- }
- spin_unlock(&if_list_lock);
-
- rtnl_lock();
- list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
- hardif_remove_interface(batman_if);
- }
- rtnl_unlock();
-}
-
-static int hard_if_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct net_device *net_dev = (struct net_device *)ptr;
- struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- struct bat_priv *bat_priv;
-
- if (!batman_if && event == NETDEV_REGISTER)
- batman_if = hardif_add_interface(net_dev);
-
- if (!batman_if)
- goto out;
-
- switch (event) {
- case NETDEV_UP:
- hardif_activate_interface(batman_if);
- break;
- case NETDEV_GOING_DOWN:
- case NETDEV_DOWN:
- hardif_deactivate_interface(batman_if);
- break;
- case NETDEV_UNREGISTER:
- spin_lock(&if_list_lock);
- list_del_rcu(&batman_if->list);
- spin_unlock(&if_list_lock);
-
- hardif_remove_interface(batman_if);
- break;
- case NETDEV_CHANGEMTU:
- if (batman_if->soft_iface)
- update_min_mtu(batman_if->soft_iface);
- break;
- case NETDEV_CHANGEADDR:
- if (batman_if->if_status == IF_NOT_IN_USE) {
- hardif_put(batman_if);
- goto out;
- }
-
- check_known_mac_addr(batman_if->net_dev);
- update_mac_addresses(batman_if);
-
- bat_priv = netdev_priv(batman_if->soft_iface);
- if (batman_if == bat_priv->primary_if)
- update_primary_addr(bat_priv);
- break;
- default:
- break;
-	}
- hardif_put(batman_if);
-
-out:
- return NOTIFY_DONE;
-}
-
-/* receive a packet with the batman ethertype coming on a hard
- * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
-{
- struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
- struct batman_if *batman_if;
- int ret;
-
- batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
- skb = skb_share_check(skb, GFP_ATOMIC);
-
- /* skb was released by skb_share_check() */
- if (!skb)
- goto err_out;
-
- /* packet should hold at least type and version */
- if (unlikely(!pskb_may_pull(skb, 2)))
- goto err_free;
-
- /* expect a valid ethernet header here. */
- if (unlikely(skb->mac_len != sizeof(struct ethhdr)
- || !skb_mac_header(skb)))
- goto err_free;
-
- if (!batman_if->soft_iface)
- goto err_free;
-
- bat_priv = netdev_priv(batman_if->soft_iface);
-
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
- goto err_free;
-
- /* discard frames on not active interfaces */
- if (batman_if->if_status != IF_ACTIVE)
- goto err_free;
-
- batman_packet = (struct batman_packet *)skb->data;
-
- if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
- goto err_free;
- }
-
- /* all receive handlers return whether they received or reused
- * the supplied skb. if not, we have to free the skb. */
-
- switch (batman_packet->packet_type) {
- /* batman originator packet */
- case BAT_PACKET:
- ret = recv_bat_packet(skb, batman_if);
- break;
-
- /* batman icmp packet */
- case BAT_ICMP:
- ret = recv_icmp_packet(skb, batman_if);
- break;
-
- /* unicast packet */
- case BAT_UNICAST:
- ret = recv_unicast_packet(skb, batman_if);
- break;
-
- /* fragmented unicast packet */
- case BAT_UNICAST_FRAG:
- ret = recv_ucast_frag_packet(skb, batman_if);
- break;
-
- /* broadcast packet */
- case BAT_BCAST:
- ret = recv_bcast_packet(skb, batman_if);
- break;
-
- /* vis packet */
- case BAT_VIS:
- ret = recv_vis_packet(skb, batman_if);
- break;
- default:
- ret = NET_RX_DROP;
- }
-
- if (ret == NET_RX_DROP)
- kfree_skb(skb);
-
- /* return NET_RX_SUCCESS in any case as we
- * most probably dropped the packet for
- * routing-logical reasons. */
-
- return NET_RX_SUCCESS;
-
-err_free:
- kfree_skb(skb);
-err_out:
- return NET_RX_DROP;
-}
-
-struct notifier_block hard_if_notifier = {
- .notifier_call = hard_if_event,
-};
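
For context, hard_if_notifier is hooked into the networking core from the module init/exit path; main.c in this driver does essentially the following. The function names below are illustrative, not the driver's actual init/exit symbols.

	/* sketch: how hard_if_notifier is registered and torn down */
	static int __init example_init(void)
	{
		/* from now on hard_if_event() sees NETDEV_REGISTER/UP/DOWN/... */
		return register_netdevice_notifier(&hard_if_notifier);
	}

	static void __exit example_exit(void)
	{
		unregister_netdevice_notifier(&hard_if_notifier);
		hardif_remove_interfaces();
	}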
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
deleted file mode 100644
index d5508899065b..000000000000
--- a/drivers/staging/batman-adv/hard-interface.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
-#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
-
-#define IF_NOT_IN_USE 0
-#define IF_TO_BE_REMOVED 1
-#define IF_INACTIVE 2
-#define IF_ACTIVE 3
-#define IF_TO_BE_ACTIVATED 4
-#define IF_I_WANT_YOU 5
-
-extern struct notifier_block hard_if_notifier;
-
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
-void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
-int hardif_min_mtu(struct net_device *soft_iface);
-void update_min_mtu(struct net_device *soft_iface);
-
-static inline void hardif_hold(struct batman_if *batman_if)
-{
- atomic_inc(&batman_if->refcnt);
-}
-
-static inline void hardif_put(struct batman_if *batman_if)
-{
- if (atomic_dec_and_test(&batman_if->refcnt)) {
- dev_put(batman_if->net_dev);
- kfree(batman_if);
- }
-}
-
-#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
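
A minimal sketch of the reference-counting convention defined above: get_batman_if_by_netdev() returns its result with an extra reference held, so every caller has to balance it with hardif_put(). The helper name is made up for illustration.

	/* illustrative only: look up a batman_if and query its status */
	static int example_if_is_active(struct net_device *net_dev)
	{
		struct batman_if *batman_if;
		int active;

		batman_if = get_batman_if_by_netdev(net_dev);
		if (!batman_if)
			return 0;

		active = (batman_if->if_status == IF_ACTIVE);

		/* drop the reference taken by get_batman_if_by_netdev() */
		hardif_put(batman_if);
		return active;
	}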
diff --git a/drivers/staging/batman-adv/hash.c b/drivers/staging/batman-adv/hash.c
deleted file mode 100644
index 8ef26eb4949d..000000000000
--- a/drivers/staging/batman-adv/hash.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "hash.h"
-
-/* clears the hash */
-static void hash_init(struct hashtable_t *hash)
-{
- int i;
-
- hash->elements = 0;
-
- for (i = 0 ; i < hash->size; i++)
- hash->table[i] = NULL;
-}
-
-/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
- * called to remove the elements inside of the hash. if you don't remove the
- * elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg)
-{
- struct element_t *bucket, *last_bucket;
- int i;
-
- for (i = 0; i < hash->size; i++) {
- bucket = hash->table[i];
-
- while (bucket != NULL) {
- if (free_cb != NULL)
- free_cb(bucket->data, arg);
-
- last_bucket = bucket;
- bucket = bucket->next;
- kfree(last_bucket);
- }
- }
-
- hash_destroy(hash);
-}
-
-/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash)
-{
- kfree(hash->table);
- kfree(hash);
-}
-
-/* iterate through the hash. First element is selected if an iterator
- * initialized with HASHIT() is supplied as iter. Use the returned
- * (or supplied) iterator to access the elements until hash_iterate returns
- * NULL. */
-
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
- struct hash_it_t *iter)
-{
- if (!hash)
- return NULL;
- if (!iter)
- return NULL;
-
- /* sanity checks first (if our bucket got deleted in the last
- * iteration): */
- if (iter->bucket != NULL) {
- if (iter->first_bucket != NULL) {
- /* we're on the first element and it got removed after
- * the last iteration. */
- if ((*iter->first_bucket) != iter->bucket) {
- /* there are still other elements in the list */
- if ((*iter->first_bucket) != NULL) {
- iter->prev_bucket = NULL;
- iter->bucket = (*iter->first_bucket);
- iter->first_bucket =
- &hash->table[iter->index];
- return iter;
- } else {
- iter->bucket = NULL;
- }
- }
- } else if (iter->prev_bucket != NULL) {
- /*
- * we're not on the first element, and the bucket got
- * removed after the last iteration. the last bucket's
- * next pointer is not pointing to our actual bucket
- * anymore. select the next.
- */
- if (iter->prev_bucket->next != iter->bucket)
- iter->bucket = iter->prev_bucket;
- }
- }
-
- /* now as we are sane, select the next one if there is some */
- if (iter->bucket != NULL) {
- if (iter->bucket->next != NULL) {
- iter->prev_bucket = iter->bucket;
- iter->bucket = iter->bucket->next;
- iter->first_bucket = NULL;
- return iter;
- }
- }
-
- /* if not returned yet, we've reached the last one on the index and have
- * to search forward */
- iter->index++;
- /* go through the entries of the hash table */
- while (iter->index < hash->size) {
- if ((hash->table[iter->index]) != NULL) {
- iter->prev_bucket = NULL;
- iter->bucket = hash->table[iter->index];
- iter->first_bucket = &hash->table[iter->index];
- return iter;
- } else {
- iter->index++;
- }
- }
-
- /* nothing to iterate over anymore */
- return NULL;
-}
-
-/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
- hashdata_choose_cb choose)
-{
- struct hashtable_t *hash;
-
- hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
-
- if (hash == NULL)
- return NULL;
-
- hash->size = size;
- hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
-
- if (hash->table == NULL) {
- kfree(hash);
- return NULL;
- }
-
- hash_init(hash);
-
- hash->compare = compare;
- hash->choose = choose;
-
- return hash;
-}
-
-/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, void *data)
-{
- int index;
- struct element_t *bucket, *prev_bucket = NULL;
-
- if (!hash)
- return -1;
-
- index = hash->choose(data, hash->size);
- bucket = hash->table[index];
-
- while (bucket != NULL) {
- if (hash->compare(bucket->data, data))
- return -1;
-
- prev_bucket = bucket;
- bucket = bucket->next;
- }
-
- /* found the tail of the list, add new element */
- bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
- if (bucket == NULL)
- return -1;
-
- bucket->data = data;
- bucket->next = NULL;
-
- /* and link it */
- if (prev_bucket == NULL)
- hash->table[index] = bucket;
- else
- prev_bucket->next = bucket;
-
- hash->elements++;
- return 0;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-void *hash_find(struct hashtable_t *hash, void *keydata)
-{
- int index;
- struct element_t *bucket;
-
- if (!hash)
- return NULL;
-
- index = hash->choose(keydata , hash->size);
- bucket = hash->table[index];
-
- while (bucket != NULL) {
- if (hash->compare(bucket->data, keydata))
- return bucket->data;
-
- bucket = bucket->next;
- }
-
- return NULL;
-}
-
-/* remove bucket (this might be used in hash_iterate() if you already found the
- * bucket you want to delete and don't need the overhead to find it again with
- * hash_remove()). But usually, you don't want to use this function, as it
- * fiddles with hash-internals. */
-void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t)
-{
- void *data_save;
-
- data_save = hash_it_t->bucket->data;
-
- if (hash_it_t->prev_bucket != NULL)
- hash_it_t->prev_bucket->next = hash_it_t->bucket->next;
- else if (hash_it_t->first_bucket != NULL)
- (*hash_it_t->first_bucket) = hash_it_t->bucket->next;
-
- kfree(hash_it_t->bucket);
- hash->elements--;
-
- return data_save;
-}
-
-/* removes data from hash, if found. returns pointer to data on success, so you
- * can remove the used structure yourself, or NULL on error. data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing. */
-void *hash_remove(struct hashtable_t *hash, void *data)
-{
- struct hash_it_t hash_it_t;
-
- hash_it_t.index = hash->choose(data, hash->size);
- hash_it_t.bucket = hash->table[hash_it_t.index];
- hash_it_t.prev_bucket = NULL;
-
- while (hash_it_t.bucket != NULL) {
- if (hash->compare(hash_it_t.bucket->data, data)) {
- hash_it_t.first_bucket =
- (hash_it_t.bucket ==
- hash->table[hash_it_t.index] ?
- &hash->table[hash_it_t.index] : NULL);
- return hash_remove_bucket(hash, &hash_it_t);
- }
-
- hash_it_t.prev_bucket = hash_it_t.bucket;
- hash_it_t.bucket = hash_it_t.bucket->next;
- }
-
- return NULL;
-}
-
-/* resize the hash, returns the pointer to the new hash or NULL on
- * error. removes the old hash on success. */
-struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
-{
- struct hashtable_t *new_hash;
- struct element_t *bucket;
- int i;
-
- /* initialize a new hash with the new size */
- new_hash = hash_new(size, hash->compare, hash->choose);
-
- if (new_hash == NULL)
- return NULL;
-
- /* copy the elements */
- for (i = 0; i < hash->size; i++) {
- bucket = hash->table[i];
-
- while (bucket != NULL) {
- hash_add(new_hash, bucket->data);
- bucket = bucket->next;
- }
- }
-
- /* remove hash and eventual overflow buckets but not the content
- * itself. */
- hash_delete(hash, NULL, NULL);
-
- return new_hash;
-}
diff --git a/drivers/staging/batman-adv/hash.h b/drivers/staging/batman-adv/hash.h
deleted file mode 100644
index 2c8e1762389f..000000000000
--- a/drivers/staging/batman-adv/hash.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_HASH_H_
-#define _NET_BATMAN_ADV_HASH_H_
-
-#define HASHIT(name) struct hash_it_t name = { \
- .index = -1, .bucket = NULL, \
- .prev_bucket = NULL, \
- .first_bucket = NULL }
-
-
-typedef int (*hashdata_compare_cb)(void *, void *);
-typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
- void *data; /* pointer to the data */
- struct element_t *next; /* overflow bucket pointer */
-};
-
-struct hash_it_t {
- int index;
- struct element_t *bucket;
- struct element_t *prev_bucket;
- struct element_t **first_bucket;
-};
-
-struct hashtable_t {
- struct element_t **table; /* the hashtable itself, with the buckets */
- int elements; /* number of elements registered */
- int size; /* size of hashtable */
- hashdata_compare_cb compare;/* callback to a compare function. should
- * compare 2 element datas for their keys,
- * return 0 if same and not 0 if not
- * same */
- hashdata_choose_cb choose; /* the hashfunction, should return an index
- * based on the key in the data of the first
- * argument and the size the second */
-};
-
-/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
- hashdata_choose_cb choose);
-
-/* remove bucket (this might be used in hash_iterate() if you already found the
- * bucket you want to delete and don't need the overhead to find it again with
- * hash_remove()). But usually, you don't want to use this function, as it
- * fiddles with hash-internals. */
-void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t);
-
-/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
- * called to remove the elements inside of the hash. if you don't remove the
- * elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg);
-
-/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash);
-
-/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, void *data);
-
-/* removes data from hash, if found. returns pointer to data on success, so you
- * can remove the used structure yourself, or NULL on error. data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing. */
-void *hash_remove(struct hashtable_t *hash, void *data);
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-void *hash_find(struct hashtable_t *hash, void *keydata);
-
-/* resize the hash, returns the pointer to the new hash or NULL on
- * error. removes the old hash on success */
-struct hashtable_t *hash_resize(struct hashtable_t *hash, int size);
-
-/* iterate through the hash. the first element is selected if an iterator
- * initialized with HASHIT() is supplied as iter_in. use the returned
- * iterator to access the elements until hash_iterate() returns NULL. */
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
- struct hash_it_t *iter_in);
-
-#endif /* _NET_BATMAN_ADV_HASH_H_ */
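
To show how the pieces above fit together, here is a hedged usage sketch: a caller supplies compare/choose callbacks, adds an element, walks the table with an iterator initialized by HASHIT(), and finally frees the table. struct my_entry and both callbacks are placeholders; the hash stores pointers, not copies, so real callers keep their elements alive themselves.

	/* placeholder element and callbacks, for illustration only */
	struct my_entry {
		uint8_t addr[6];
	};

	static int my_compare(void *data1, void *data2)
	{
		return (memcmp(data1, data2, 6) == 0 ? 1 : 0);
	}

	static int my_choose(void *data, int size)
	{
		/* trivial hash over the first key byte; a real choose callback
		 * should mix all key bytes */
		return ((uint8_t *)data)[0] % size;
	}

	static void example_hash_usage(void)
	{
		struct hashtable_t *hash;
		struct hash_it_t *it;
		HASHIT(hashit);
		struct my_entry entry = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 } };

		hash = hash_new(128, my_compare, my_choose);
		if (!hash)
			return;

		hash_add(hash, &entry);

		/* walk every bucket; it->bucket->data points at the element */
		for (it = hash_iterate(hash, &hashit); it;
		     it = hash_iterate(hash, it))
			;

		/* no free callback: the elements are not owned by the hash */
		hash_delete(hash, NULL, NULL);
	}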
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
deleted file mode 100644
index 48856ca73b6a..000000000000
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include "icmp_socket.h"
-#include "send.h"
-#include "types.h"
-#include "hash.h"
-#include "hard-interface.h"
-
-
-static struct socket_client *socket_client_hash[256];
-
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
-
-void bat_socket_init(void)
-{
- memset(socket_client_hash, 0, sizeof(socket_client_hash));
-}
-
-static int bat_socket_open(struct inode *inode, struct file *file)
-{
- unsigned int i;
- struct socket_client *socket_client;
-
- nonseekable_open(inode, file);
-
- socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
-
- if (!socket_client)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
- if (!socket_client_hash[i]) {
- socket_client_hash[i] = socket_client;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(socket_client_hash)) {
- pr_err("Error - can't add another packet client: "
- "maximum number of clients reached\n");
- kfree(socket_client);
- return -EXFULL;
- }
-
- INIT_LIST_HEAD(&socket_client->queue_list);
- socket_client->queue_len = 0;
- socket_client->index = i;
- socket_client->bat_priv = inode->i_private;
- spin_lock_init(&socket_client->lock);
- init_waitqueue_head(&socket_client->queue_wait);
-
- file->private_data = socket_client;
-
- inc_module_count();
- return 0;
-}
-
-static int bat_socket_release(struct inode *inode, struct file *file)
-{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
- struct list_head *list_pos, *list_pos_tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&socket_client->lock, flags);
-
- /* for all packets in the queue ... */
- list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
- socket_packet = list_entry(list_pos,
- struct socket_packet, list);
-
- list_del(list_pos);
- kfree(socket_packet);
- }
-
- socket_client_hash[socket_client->index] = NULL;
- spin_unlock_irqrestore(&socket_client->lock, flags);
-
- kfree(socket_client);
- dec_module_count();
-
- return 0;
-}
-
-static ssize_t bat_socket_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
- size_t packet_len;
- int error;
- unsigned long flags;
-
- if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
- return -EAGAIN;
-
- if ((!buf) || (count < sizeof(struct icmp_packet)))
- return -EINVAL;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(socket_client->queue_wait,
- socket_client->queue_len);
-
- if (error)
- return error;
-
- spin_lock_irqsave(&socket_client->lock, flags);
-
- socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
- list_del(&socket_packet->list);
- socket_client->queue_len--;
-
- spin_unlock_irqrestore(&socket_client->lock, flags);
-
- error = __copy_to_user(buf, &socket_packet->icmp_packet,
- socket_packet->icmp_len);
-
- packet_len = socket_packet->icmp_len;
- kfree(socket_packet);
-
- if (error)
- return -EFAULT;
-
- return packet_len;
-}
-
-static ssize_t bat_socket_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off)
-{
- struct socket_client *socket_client = file->private_data;
- struct bat_priv *bat_priv = socket_client->bat_priv;
- struct sk_buff *skb;
- struct icmp_packet_rr *icmp_packet;
-
- struct orig_node *orig_node;
- struct batman_if *batman_if;
- size_t packet_len = sizeof(struct icmp_packet);
- uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
-
- if (len < sizeof(struct icmp_packet)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "invalid packet size\n");
- return -EINVAL;
- }
-
- if (!bat_priv->primary_if)
- return -EFAULT;
-
- if (len >= sizeof(struct icmp_packet_rr))
- packet_len = sizeof(struct icmp_packet_rr);
-
- skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
- if (!skb)
- return -ENOMEM;
-
- skb_reserve(skb, sizeof(struct ethhdr));
- icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
-
- if (!access_ok(VERIFY_READ, buff, packet_len)) {
- len = -EFAULT;
- goto free_skb;
- }
-
- if (__copy_from_user(icmp_packet, buff, packet_len)) {
- len = -EFAULT;
- goto free_skb;
- }
-
- if (icmp_packet->packet_type != BAT_ICMP) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "got bogus packet type (expected: BAT_ICMP)\n");
- len = -EINVAL;
- goto free_skb;
- }
-
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: "
- "got bogus message type (expected: ECHO_REQUEST)\n");
- len = -EINVAL;
- goto free_skb;
- }
-
- icmp_packet->uid = socket_client->index;
-
- if (icmp_packet->version != COMPAT_VERSION) {
- icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->ttl = COMPAT_VERSION;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
- goto free_skb;
- }
-
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
- goto dst_unreach;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- icmp_packet->dst));
-
- if (!orig_node)
- goto unlock;
-
- if (!orig_node->router)
- goto unlock;
-
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- if (!batman_if)
- goto dst_unreach;
-
- if (batman_if->if_status != IF_ACTIVE)
- goto dst_unreach;
-
- memcpy(icmp_packet->orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-
- if (packet_len == sizeof(struct icmp_packet_rr))
- memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-
- send_skb_packet(skb, batman_if, dstaddr);
-
- goto out;
-
-unlock:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-dst_unreach:
- icmp_packet->msg_type = DESTINATION_UNREACHABLE;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
-free_skb:
- kfree_skb(skb);
-out:
- return len;
-}
-
-static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
-{
- struct socket_client *socket_client = file->private_data;
-
- poll_wait(file, &socket_client->queue_wait, wait);
-
- if (socket_client->queue_len > 0)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = bat_socket_open,
- .release = bat_socket_release,
- .read = bat_socket_read,
- .write = bat_socket_write,
- .poll = bat_socket_poll,
- .llseek = no_llseek,
-};
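
The file operations above form a small request/response interface on debugfs: userspace writes a struct icmp_packet describing an echo request and later reads the queued replies back. Below is a minimal, hypothetical userspace sketch of that flow; the debugfs path, the destination MAC and the timeout are assumptions, and the struct layout mirrors the packet.h definitions further down in this patch.

/* Hypothetical userspace sketch: send one batman-adv echo request via the
 * per-mesh debugfs socket and wait for a reply. Path, MAC and constants
 * are placeholders; error handling is deliberately minimal. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

struct icmp_packet {		/* mirrors packet.h below */
	uint8_t packet_type;	/* BAT_ICMP (0x02) */
	uint8_t version;	/* COMPAT_VERSION (13) */
	uint8_t msg_type;	/* ECHO_REQUEST (8) */
	uint8_t ttl;
	uint8_t dst[6];
	uint8_t orig[6];
	uint16_t seqno;
	uint8_t uid;		/* overwritten by the kernel */
} __attribute__((packed));

int main(void)
{
	struct icmp_packet req = {
		.packet_type = 0x02, .version = 13, .msg_type = 8, .ttl = 50,
		.dst = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }, .seqno = 1,
	};
	struct icmp_packet reply;
	struct pollfd pfd;
	int fd = open("/sys/kernel/debug/batman_adv/bat0/socket", O_RDWR);

	if (fd < 0)
		return 1;

	if (write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req))
		goto out;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* wait up to 5 seconds for a queued reply, then read it back */
	if (poll(&pfd, 1, 5000) == 1 &&
	    read(fd, &reply, sizeof(reply)) == (ssize_t)sizeof(reply))
		printf("got msg_type %u, seqno %u\n",
		       reply.msg_type, reply.seqno);
out:
	close(fd);
	return 0;
}
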
-
-int bat_socket_setup(struct bat_priv *bat_priv)
-{
- struct dentry *d;
-
- if (!bat_priv->debug_dir)
- goto err;
-
- d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &fops);
-	if (!d)
- goto err;
-
- return 0;
-
-err:
- return 1;
-}
-
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
-{
- struct socket_packet *socket_packet;
- unsigned long flags;
-
- socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
-
- if (!socket_packet)
- return;
-
- INIT_LIST_HEAD(&socket_packet->list);
- memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
- socket_packet->icmp_len = icmp_len;
-
- spin_lock_irqsave(&socket_client->lock, flags);
-
- /* while waiting for the lock the socket_client could have been
- * deleted */
- if (!socket_client_hash[icmp_packet->uid]) {
- spin_unlock_irqrestore(&socket_client->lock, flags);
- kfree(socket_packet);
- return;
- }
-
- list_add_tail(&socket_packet->list, &socket_client->queue_list);
- socket_client->queue_len++;
-
- if (socket_client->queue_len > 100) {
- socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
-
- list_del(&socket_packet->list);
- kfree(socket_packet);
- socket_client->queue_len--;
- }
-
- spin_unlock_irqrestore(&socket_client->lock, flags);
-
- wake_up(&socket_client->queue_wait);
-}
-
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
-{
- struct socket_client *hash = socket_client_hash[icmp_packet->uid];
-
- if (hash)
- bat_socket_add_packet(hash, icmp_packet, icmp_len);
-}
diff --git a/drivers/staging/batman-adv/icmp_socket.h b/drivers/staging/batman-adv/icmp_socket.h
deleted file mode 100644
index bf9b348cde27..000000000000
--- a/drivers/staging/batman-adv/icmp_socket.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-
-#include "types.h"
-
-#define ICMP_SOCKET "socket"
-
-void bat_socket_init(void);
-int bat_socket_setup(struct bat_priv *bat_priv);
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
-
-#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
deleted file mode 100644
index 0587940d2723..000000000000
--- a/drivers/staging/batman-adv/main.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "bat_debugfs.h"
-#include "routing.h"
-#include "send.h"
-#include "originator.h"
-#include "soft-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "hard-interface.h"
-#include "types.h"
-#include "vis.h"
-#include "hash.h"
-
-struct list_head if_list;
-
-unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-struct workqueue_struct *bat_event_workqueue;
-
-static int __init batman_init(void)
-{
- INIT_LIST_HEAD(&if_list);
-
- /* the name should not be longer than 10 chars - see
- * http://lwn.net/Articles/23634/ */
- bat_event_workqueue = create_singlethread_workqueue("bat_events");
-
- if (!bat_event_workqueue)
- return -ENOMEM;
-
- bat_socket_init();
- debugfs_init();
-
- register_netdevice_notifier(&hard_if_notifier);
-
- pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
- "loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
- COMPAT_VERSION);
-
- return 0;
-}
-
-static void __exit batman_exit(void)
-{
- debugfs_destroy();
- unregister_netdevice_notifier(&hard_if_notifier);
- hardif_remove_interfaces();
-
- flush_workqueue(bat_event_workqueue);
- destroy_workqueue(bat_event_workqueue);
- bat_event_workqueue = NULL;
-
- rcu_barrier();
-}
-
-int mesh_init(struct net_device *soft_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
- spin_lock_init(&bat_priv->orig_hash_lock);
- spin_lock_init(&bat_priv->forw_bat_list_lock);
- spin_lock_init(&bat_priv->forw_bcast_list_lock);
- spin_lock_init(&bat_priv->hna_lhash_lock);
- spin_lock_init(&bat_priv->hna_ghash_lock);
- spin_lock_init(&bat_priv->vis_hash_lock);
- spin_lock_init(&bat_priv->vis_list_lock);
-
- INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
- INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-
- if (originator_init(bat_priv) < 1)
- goto err;
-
- if (hna_local_init(bat_priv) < 1)
- goto err;
-
- if (hna_global_init(bat_priv) < 1)
- goto err;
-
- hna_local_add(soft_iface, soft_iface->dev_addr);
-
- if (vis_init(bat_priv) < 1)
- goto err;
-
- atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
- goto end;
-
-err:
- pr_err("Unable to allocate memory for mesh information structures: "
- "out of mem ?\n");
- mesh_free(soft_iface);
- return -1;
-
-end:
- return 0;
-}
-
-void mesh_free(struct net_device *soft_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
- atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
-
- purge_outstanding_packets(bat_priv, NULL);
-
- vis_quit(bat_priv);
-
- originator_free(bat_priv);
-
- hna_local_free(bat_priv);
- hna_global_free(bat_priv);
-
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
-}
-
-void inc_module_count(void)
-{
- try_module_get(THIS_MODULE);
-}
-
-void dec_module_count(void)
-{
- module_put(THIS_MODULE);
-}
-
-/* returns 1 if they are the same originator */
-
-int compare_orig(void *data1, void *data2)
-{
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* hashfunction to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-int choose_orig(void *data, int32_t size)
-{
- unsigned char *key = data;
- uint32_t hash = 0;
- size_t i;
-
- for (i = 0; i < 6; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
-
- return hash % size;
-}
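
For illustration, here is a standalone sketch of the same one-at-a-time hash used by choose_orig() above, mapping a MAC address to a bucket index. The sample address is arbitrary; the table size of 128 matches the initial size used by originator_init() later in this patch.

/* Standalone sketch of the one-at-a-time hash used by choose_orig();
 * the sample MAC is arbitrary, 6 is ETH_ALEN. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int choose_bucket(const unsigned char *key, int32_t size)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < 6; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0xba, 0x7d, 0x00, 0x00, 0x01 };

	/* with the initial table size of 128 used by originator_init() */
	printf("bucket = %d\n", choose_bucket(mac, 128));
	return 0;
}
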
-
-int is_my_mac(uint8_t *addr)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->if_status != IF_ACTIVE)
- continue;
-
- if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
- rcu_read_unlock();
- return 1;
- }
- }
- rcu_read_unlock();
- return 0;
-
-}
-
-int is_bcast(uint8_t *addr)
-{
- return (addr[0] == (uint8_t)0xff) && (addr[1] == (uint8_t)0xff);
-}
-
-int is_mcast(uint8_t *addr)
-{
- return *addr & 0x01;
-}
-
-module_init(batman_init);
-module_exit(batman_exit);
-
-MODULE_LICENSE("GPL");
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
-#ifdef REVISION_VERSION
-MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION);
-#else
-MODULE_VERSION(SOURCE_VERSION);
-#endif
diff --git a/drivers/staging/batman-adv/main.h b/drivers/staging/batman-adv/main.h
deleted file mode 100644
index 5e3f51681f5e..000000000000
--- a/drivers/staging/batman-adv/main.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_MAIN_H_
-#define _NET_BATMAN_ADV_MAIN_H_
-
-/* Kernel Programming */
-#define LINUX
-
-#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
- "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
-#define DRIVER_DESC "B.A.T.M.A.N. advanced"
-#define DRIVER_DEVICE "batman-adv"
-
-#define SOURCE_VERSION "next"
-
-
-/* B.A.T.M.A.N. parameters */
-
-#define TQ_MAX_VALUE 255
-#define JITTER 20
-#define TTL 50 /* Time To Live of broadcast messages */
-
-#define PURGE_TIMEOUT 200 /* purge originators after time in seconds if no
- * valid packet comes in -> TODO: check
- * influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600 /* in seconds */
-
-#define TQ_LOCAL_WINDOW_SIZE 64 /* sliding packet range of received originator
-					 * messages in sequence numbers (should be a
- * multiple of our word size) */
-#define TQ_GLOBAL_WINDOW_SIZE 5
-#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
-#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
-#define TQ_TOTAL_BIDRECT_LIMIT 1
-
-#define TQ_HOP_PENALTY 10
-
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
-
-#define PACKBUFF_SIZE 2000
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-
-#define VIS_INTERVAL 5000 /* 5 seconds */
-
-/* how much worse secondary interfaces may be
- * to be considered as bonding candidates */
-
-#define BONDING_TQ_THRESHOLD 50
-
-#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
- * change the size of
- * forw_packet->direct_link_flags */
-#define MAX_AGGREGATION_MS 100
-
-#define RESET_PROTECTION_MS 30000
-/* don't reset again within 30 seconds */
-#define EXPECTED_SEQNO_RANGE 65536
-
-#define MESH_INACTIVE 0
-#define MESH_ACTIVE 1
-#define MESH_DEACTIVATING 2
-
-#define BCAST_QUEUE_LEN 256
-#define BATMAN_QUEUE_LEN 256
-
-/*
- * Debug Messages
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before
- * kernel messages */
-
-#define DBG_BATMAN 1 /* all messages related to routing / flooding /
- * broadcasting / etc */
-#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
-#define DBG_ALL 3
-
-
-/*
- * Vis
- */
-
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
-/*
- * Kernel headers
- */
-
-#include <linux/mutex.h> /* mutex */
-#include <linux/module.h> /* needed by all modules */
-#include <linux/netdevice.h> /* netdevice */
-#include <linux/if_ether.h> /* ethernet header */
-#include <linux/poll.h> /* poll_table */
-#include <linux/kthread.h> /* kernel threads */
-#include <linux/pkt_sched.h> /* schedule types */
-#include <linux/workqueue.h> /* workqueue */
-#include <linux/slab.h>
-#include <net/sock.h> /* struct sock */
-#include <linux/jiffies.h>
-#include <linux/seq_file.h>
-#include "types.h"
-
-#ifndef REVISION_VERSION
-#define REVISION_VERSION_STR ""
-#else
-#define REVISION_VERSION_STR " "REVISION_VERSION
-#endif
-
-extern struct list_head if_list;
-
-extern unsigned char broadcast_addr[];
-extern struct workqueue_struct *bat_event_workqueue;
-
-int mesh_init(struct net_device *soft_iface);
-void mesh_free(struct net_device *soft_iface);
-void inc_module_count(void);
-void dec_module_count(void);
-int compare_orig(void *data1, void *data2);
-int choose_orig(void *data, int32_t size);
-int is_my_mac(uint8_t *addr);
-int is_bcast(uint8_t *addr);
-int is_mcast(uint8_t *addr);
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
-
-#define bat_dbg(type, bat_priv, fmt, arg...) \
- do { \
- if (atomic_read(&bat_priv->log_level) & type) \
- debug_log(bat_priv, fmt, ## arg); \
- } \
- while (0)
-#else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
- struct bat_priv *bat_priv __attribute__((unused)),
- char *fmt __attribute__((unused)), ...)
-{
-}
-#endif
-
-#define bat_warning(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_warning("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
-#define bat_info(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_info("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
-#define bat_err(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_err("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
-
-#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
deleted file mode 100644
index 55270080a44b..000000000000
--- a/drivers/staging/batman-adv/originator.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-/* increase the reference counter for this originator */
-
-#include "main.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-#include "unicast.h"
-
-static void purge_orig(struct work_struct *work);
-
-static void start_purge_timer(struct bat_priv *bat_priv)
-{
- INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
- queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
-}
-
-int originator_init(struct bat_priv *bat_priv)
-{
- unsigned long flags;
- if (bat_priv->orig_hash)
- return 1;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
-
- if (!bat_priv->orig_hash)
- goto err;
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- start_purge_timer(bat_priv);
- return 1;
-
-err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return 0;
-}
-
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
- uint8_t *neigh, struct batman_if *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node;
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new last-hop neighbor of originator\n");
-
- neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
- if (!neigh_node)
- return NULL;
-
- INIT_LIST_HEAD(&neigh_node->list);
-
- memcpy(neigh_node->addr, neigh, ETH_ALEN);
- neigh_node->orig_node = orig_neigh_node;
- neigh_node->if_incoming = if_incoming;
-
- list_add_tail(&neigh_node->list, &orig_node->neigh_list);
- return neigh_node;
-}
-
-static void free_orig_node(void *data, void *arg)
-{
- struct list_head *list_pos, *list_pos_tmp;
- struct neigh_node *neigh_node;
- struct orig_node *orig_node = (struct orig_node *)data;
- struct bat_priv *bat_priv = (struct bat_priv *)arg;
-
- /* for all neighbors towards this originator ... */
- list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
- neigh_node = list_entry(list_pos, struct neigh_node, list);
-
- list_del(list_pos);
- kfree(neigh_node);
- }
-
- frag_list_free(&orig_node->frag_list);
- hna_global_del_orig(bat_priv, orig_node, "originator timed out");
-
- kfree(orig_node->bcast_own);
- kfree(orig_node->bcast_own_sum);
- kfree(orig_node);
-}
-
-void originator_free(struct bat_priv *bat_priv)
-{
- unsigned long flags;
-
- if (!bat_priv->orig_hash)
- return;
-
- cancel_delayed_work_sync(&bat_priv->orig_work);
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
- bat_priv->orig_hash = NULL;
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-/* this function finds or creates an originator entry for the given
- * address if it does not exist */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
-{
- struct orig_node *orig_node;
- struct hashtable_t *swaphash;
- int size;
-
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
-
- if (orig_node)
- return orig_node;
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new originator: %pM\n", addr);
-
- orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
- if (!orig_node)
- return NULL;
-
- INIT_LIST_HEAD(&orig_node->neigh_list);
-
- memcpy(orig_node->orig, addr, ETH_ALEN);
- orig_node->router = NULL;
- orig_node->hna_buff = NULL;
- orig_node->bcast_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
- orig_node->batman_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
-
- size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS;
-
- orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
- if (!orig_node->bcast_own)
- goto free_orig_node;
-
- size = bat_priv->num_ifaces * sizeof(uint8_t);
- orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
- INIT_LIST_HEAD(&orig_node->frag_list);
- orig_node->last_frag_packet = 0;
-
- if (!orig_node->bcast_own_sum)
- goto free_bcast_own;
-
- if (hash_add(bat_priv->orig_hash, orig_node) < 0)
- goto free_bcast_own_sum;
-
- if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
- swaphash = hash_resize(bat_priv->orig_hash,
- bat_priv->orig_hash->size * 2);
-
- if (!swaphash)
- bat_dbg(DBG_BATMAN, bat_priv,
- "Couldn't resize orig hash table\n");
- else
- bat_priv->orig_hash = swaphash;
- }
-
- return orig_node;
-free_bcast_own_sum:
- kfree(orig_node->bcast_own_sum);
-free_bcast_own:
- kfree(orig_node->bcast_own);
-free_orig_node:
- kfree(orig_node);
- return NULL;
-}
-
-static bool purge_orig_neighbors(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node **best_neigh_node)
-{
- struct list_head *list_pos, *list_pos_tmp;
- struct neigh_node *neigh_node;
- bool neigh_purged = false;
-
- *best_neigh_node = NULL;
-
- /* for all neighbors towards this originator ... */
- list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
- neigh_node = list_entry(list_pos, struct neigh_node, list);
-
- if ((time_after(jiffies,
- neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
- (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
- (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
-
- if (neigh_node->if_incoming->if_status ==
- IF_TO_BE_REMOVED)
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor purge: originator %pM, "
- "neighbor: %pM, iface: %s\n",
- orig_node->orig, neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
- else
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, "
- "neighbor: %pM, last_valid: %lu\n",
- orig_node->orig, neigh_node->addr,
- (neigh_node->last_valid / HZ));
-
- neigh_purged = true;
- list_del(list_pos);
- kfree(neigh_node);
- } else {
- if ((*best_neigh_node == NULL) ||
- (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
- *best_neigh_node = neigh_node;
- }
- }
- return neigh_purged;
-}
-
-static bool purge_orig_node(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
-{
- struct neigh_node *best_neigh_node;
-
- if (time_after(jiffies,
- orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Originator timeout: originator %pM, last_valid %lu\n",
- orig_node->orig, (orig_node->last_valid / HZ));
- return true;
- } else {
- if (purge_orig_neighbors(bat_priv, orig_node,
- &best_neigh_node)) {
- update_routes(bat_priv, orig_node,
- best_neigh_node,
- orig_node->hna_buff,
- orig_node->hna_buff_len);
- /* update bonding candidates, we could have lost
- * some candidates. */
- update_bonding_candidates(bat_priv, orig_node);
- }
- }
-
- return false;
-}
-
-static void _purge_orig(struct bat_priv *bat_priv)
-{
- HASHIT(hashit);
- struct orig_node *orig_node;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- /* for all origins... */
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
-
- if (purge_orig_node(bat_priv, orig_node)) {
- hash_remove_bucket(bat_priv->orig_hash, &hashit);
- free_orig_node(orig_node, bat_priv);
- }
-
- if (time_after(jiffies, (orig_node->last_frag_packet +
- msecs_to_jiffies(FRAG_TIMEOUT))))
- frag_list_free(&orig_node->frag_list);
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-}
-
-static void purge_orig(struct work_struct *work)
-{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, orig_work);
-
- _purge_orig(bat_priv);
- start_purge_timer(bat_priv);
-}
-
-void purge_orig_ref(struct bat_priv *bat_priv)
-{
- _purge_orig(bat_priv);
-}
-
-int orig_seq_print_text(struct seq_file *seq, void *offset)
-{
- HASHIT(hashit);
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct orig_node *orig_node;
- struct neigh_node *neigh_node;
- int batman_count = 0;
- int last_seen_secs;
- int last_seen_msecs;
- unsigned long flags;
-
- if ((!bat_priv->primary_if) ||
- (bat_priv->primary_if->if_status != IF_ACTIVE)) {
- if (!bat_priv->primary_if)
- return seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
-
- return seq_printf(seq, "BATMAN mesh %s "
- "disabled - primary interface not active\n",
- net_dev->name);
- }
-
- seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
- SOURCE_VERSION, REVISION_VERSION_STR,
- bat_priv->primary_if->net_dev->name,
- bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
- seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
- "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
- "outgoingIF", "Potential nexthops");
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-
- orig_node = hashit.bucket->data;
-
- if (!orig_node->router)
- continue;
-
- if (orig_node->router->tq_avg == 0)
- continue;
-
- last_seen_secs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) / 1000;
- last_seen_msecs = jiffies_to_msecs(jiffies -
- orig_node->last_valid) % 1000;
-
- seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
- orig_node->orig, last_seen_secs, last_seen_msecs,
- orig_node->router->tq_avg, orig_node->router->addr,
- orig_node->router->if_incoming->net_dev->name);
-
- list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
- seq_printf(seq, " %pM (%3i)", neigh_node->addr,
- neigh_node->tq_avg);
- }
-
- seq_printf(seq, "\n");
- batman_count++;
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	if (batman_count == 0)
- seq_printf(seq, "No batman nodes in range ...\n");
-
- return 0;
-}
-
-static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
-{
- void *data_ptr;
-
- data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
- GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
- return -1;
- }
-
- memcpy(data_ptr, orig_node->bcast_own,
- (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS);
- kfree(orig_node->bcast_own);
- orig_node->bcast_own = data_ptr;
-
- data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
- return -1;
- }
-
- memcpy(data_ptr, orig_node->bcast_own_sum,
- (max_if_num - 1) * sizeof(uint8_t));
- kfree(orig_node->bcast_own_sum);
- orig_node->bcast_own_sum = data_ptr;
-
- return 0;
-}
-
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- struct orig_node *orig_node;
- unsigned long flags;
- HASHIT(hashit);
-
- /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
-
- if (orig_node_add_if(orig_node, max_if_num) == -1)
- goto err;
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return 0;
-
-err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return -ENOMEM;
-}
-
-static int orig_node_del_if(struct orig_node *orig_node,
- int max_if_num, int del_if_num)
-{
- void *data_ptr = NULL;
- int chunk_size;
-
- /* last interface was removed */
- if (max_if_num == 0)
- goto free_bcast_own;
-
- chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
- data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
- return -1;
- }
-
- /* copy first part */
- memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
- /* copy second part */
- memcpy(data_ptr + del_if_num * chunk_size,
- orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
- (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
- kfree(orig_node->bcast_own);
- orig_node->bcast_own = data_ptr;
-
- if (max_if_num == 0)
- goto free_own_sum;
-
- data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
- return -1;
- }
-
- memcpy(data_ptr, orig_node->bcast_own_sum,
- del_if_num * sizeof(uint8_t));
-
- memcpy(data_ptr + del_if_num * sizeof(uint8_t),
- orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
- (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
- kfree(orig_node->bcast_own_sum);
- orig_node->bcast_own_sum = data_ptr;
-
- return 0;
-}
-
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- struct batman_if *batman_if_tmp;
- struct orig_node *orig_node;
- unsigned long flags;
- HASHIT(hashit);
- int ret;
-
- /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
-
- ret = orig_node_del_if(orig_node, max_if_num,
- batman_if->if_num);
-
- if (ret == -1)
- goto err;
- }
-
- /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
- if (batman_if_tmp->if_status == IF_NOT_IN_USE)
- continue;
-
- if (batman_if == batman_if_tmp)
- continue;
-
- if (batman_if->soft_iface != batman_if_tmp->soft_iface)
- continue;
-
- if (batman_if_tmp->if_num > batman_if->if_num)
- batman_if_tmp->if_num--;
- }
- rcu_read_unlock();
-
- batman_if->if_num = -1;
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return 0;
-
-err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return -ENOMEM;
-}
diff --git a/drivers/staging/batman-adv/originator.h b/drivers/staging/batman-adv/originator.h
deleted file mode 100644
index a97c4004776a..000000000000
--- a/drivers/staging/batman-adv/originator.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
-#define _NET_BATMAN_ADV_ORIGINATOR_H_
-
-int originator_init(struct bat_priv *bat_priv);
-void originator_free(struct bat_priv *bat_priv);
-void purge_orig_ref(struct bat_priv *bat_priv);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
- uint8_t *neigh, struct batman_if *if_incoming);
-int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
-
-#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
deleted file mode 100644
index 2693383889a4..000000000000
--- a/drivers/staging/batman-adv/packet.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
-
-#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
-
-#define BAT_PACKET 0x01
-#define BAT_ICMP 0x02
-#define BAT_UNICAST 0x03
-#define BAT_BCAST 0x04
-#define BAT_VIS 0x05
-#define BAT_UNICAST_FRAG 0x06
-
-/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 13
-#define DIRECTLINK 0x40
-#define VIS_SERVER 0x20
-#define PRIMARIES_FIRST_HOP 0x10
-
-/* ICMP message types */
-#define ECHO_REPLY 0
-#define DESTINATION_UNREACHABLE 3
-#define ECHO_REQUEST 8
-#define TTL_EXCEEDED 11
-#define PARAMETER_PROBLEM 12
-
-/* vis defines */
-#define VIS_TYPE_SERVER_SYNC 0
-#define VIS_TYPE_CLIENT_UPDATE 1
-
-/* fragmentation defines */
-#define UNI_FRAG_HEAD 0x01
-
-struct batman_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
- uint8_t tq;
- uint32_t seqno;
- uint8_t orig[6];
- uint8_t prev_sender[6];
- uint8_t ttl;
- uint8_t num_hna;
-} __attribute__((packed));
-
-#define BAT_PACKET_LEN sizeof(struct batman_packet)
-
-struct icmp_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t ttl;
- uint8_t dst[6];
- uint8_t orig[6];
- uint16_t seqno;
- uint8_t uid;
-} __attribute__((packed));
-
-#define BAT_RR_LEN 16
-
-/* icmp_packet_rr must start with all fields from icmp_packet
- * as this is assumed by code that handles ICMP packets */
-struct icmp_packet_rr {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t ttl;
- uint8_t dst[6];
- uint8_t orig[6];
- uint16_t seqno;
- uint8_t uid;
- uint8_t rr_cur;
- uint8_t rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
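
The layout assumption spelled out in the comment above (icmp_packet_rr must begin with the exact fields of icmp_packet) can be checked mechanically. The following is a hypothetical host-side sketch, not part of the removed driver: it duplicates the two wire structs from this header and asserts the shared prefix with offsetof(); it needs a C11 compiler for static_assert.

/* Hypothetical host-side layout check for the two ICMP wire structs;
 * the struct bodies are copied from packet.h above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_ALEN 6
#define BAT_RR_LEN 16

struct icmp_packet {
	uint8_t packet_type;
	uint8_t version;
	uint8_t msg_type;
	uint8_t ttl;
	uint8_t dst[6];
	uint8_t orig[6];
	uint16_t seqno;
	uint8_t uid;
} __attribute__((packed));

struct icmp_packet_rr {
	uint8_t packet_type;
	uint8_t version;
	uint8_t msg_type;
	uint8_t ttl;
	uint8_t dst[6];
	uint8_t orig[6];
	uint16_t seqno;
	uint8_t uid;
	uint8_t rr_cur;
	uint8_t rr[BAT_RR_LEN][ETH_ALEN];
} __attribute__((packed));

/* fails to compile if any shared field sits at a different offset */
#define SAME(f) static_assert(offsetof(struct icmp_packet, f) == \
			      offsetof(struct icmp_packet_rr, f), #f)

SAME(packet_type); SAME(version); SAME(msg_type); SAME(ttl);
SAME(dst); SAME(orig); SAME(seqno); SAME(uid);

int main(void) { return 0; }
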
-
-struct unicast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t dest[6];
- uint8_t ttl;
-} __attribute__((packed));
-
-struct unicast_frag_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t dest[6];
- uint8_t ttl;
- uint8_t flags;
- uint8_t orig[6];
- uint16_t seqno;
-} __attribute__((packed));
-
-struct bcast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t orig[6];
- uint8_t ttl;
- uint32_t seqno;
-} __attribute__((packed));
-
-struct vis_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t vis_type; /* which type of vis-participant sent this? */
- uint8_t entries; /* number of entries behind this struct */
- uint32_t seqno; /* sequence number */
- uint8_t ttl; /* TTL */
- uint8_t vis_orig[6]; /* originator that informs about its
- * neighbors */
- uint8_t target_orig[6]; /* who should receive this packet */
- uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
-
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/drivers/staging/batman-adv/ring_buffer.c b/drivers/staging/batman-adv/ring_buffer.c
deleted file mode 100644
index defd37c9be1f..000000000000
--- a/drivers/staging/batman-adv/ring_buffer.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "ring_buffer.h"
-
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
-{
- lq_recv[*lq_index] = value;
- *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
-}
-
-uint8_t ring_buffer_avg(uint8_t lq_recv[])
-{
- uint8_t *ptr;
- uint16_t count = 0, i = 0, sum = 0;
-
- ptr = lq_recv;
-
- while (i < TQ_GLOBAL_WINDOW_SIZE) {
- if (*ptr != 0) {
- count++;
- sum += *ptr;
- }
-
- i++;
- ptr++;
- }
-
- if (count == 0)
- return 0;
-
- return (uint8_t)(sum / count);
-}
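
For a quick feel of how the two helpers above cooperate, here is a standalone re-creation with a small driver in main(), assuming TQ_GLOBAL_WINDOW_SIZE is 5 as defined in main.h; the sample TQ values are arbitrary.

/* Host-side sketch of ring_buffer_set()/ring_buffer_avg();
 * TQ_GLOBAL_WINDOW_SIZE is 5 as in main.h above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define TQ_GLOBAL_WINDOW_SIZE 5

static void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
			    uint8_t value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
}

static uint8_t ring_buffer_avg(const uint8_t lq_recv[])
{
	uint16_t count = 0, i, sum = 0;

	for (i = 0; i < TQ_GLOBAL_WINDOW_SIZE; i++) {
		if (lq_recv[i] != 0) {
			count++;
			sum += lq_recv[i];
		}
	}

	return count ? (uint8_t)(sum / count) : 0;
}

int main(void)
{
	uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE] = { 0 };
	uint8_t tq_index = 0;
	uint8_t samples[] = { 255, 200, 180 };	/* arbitrary TQ values */
	size_t i;

	for (i = 0; i < sizeof(samples); i++)
		ring_buffer_set(tq_recv, &tq_index, samples[i]);

	/* empty slots are skipped, so the average is (255+200+180)/3 = 211 */
	printf("avg tq = %u\n", ring_buffer_avg(tq_recv));
	return 0;
}
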
diff --git a/drivers/staging/batman-adv/ring_buffer.h b/drivers/staging/batman-adv/ring_buffer.h
deleted file mode 100644
index 6b0cb9aaeba5..000000000000
--- a/drivers/staging/batman-adv/ring_buffer.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
-#define _NET_BATMAN_ADV_RING_BUFFER_H_
-
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(uint8_t lq_recv[]);
-
-#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
deleted file mode 100644
index 657b69e6b957..000000000000
--- a/drivers/staging/batman-adv/routing.c
+++ /dev/null
@@ -1,1389 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "routing.h"
-#include "send.h"
-#include "hash.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "types.h"
-#include "ring_buffer.h"
-#include "vis.h"
-#include "aggregation.h"
-#include "unicast.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- HASHIT(hashit);
- struct orig_node *orig_node;
- TYPE_OF_WORD *word;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
- word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]);
-
- bit_get_packet(bat_priv, word, 1, 0);
- orig_node->bcast_own_sum[batman_if->if_num] =
- bit_packet_count(word);
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
- unsigned char *hna_buff, int hna_buff_len)
-{
- if ((hna_buff_len != orig_node->hna_buff_len) ||
- ((hna_buff_len > 0) &&
- (orig_node->hna_buff_len > 0) &&
- (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
-
- if (orig_node->hna_buff_len > 0)
- hna_global_del_orig(bat_priv, orig_node,
- "originator changed hna");
-
- if ((hna_buff_len > 0) && (hna_buff != NULL))
- hna_global_add_orig(bat_priv, orig_node,
- hna_buff, hna_buff_len);
- }
-}
-
-static void update_route(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node *neigh_node,
- unsigned char *hna_buff, int hna_buff_len)
-{
- /* route deleted */
- if ((orig_node->router != NULL) && (neigh_node == NULL)) {
-
- bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
- orig_node->orig);
- hna_global_del_orig(bat_priv, orig_node,
- "originator timed out");
-
- /* route added */
- } else if ((orig_node->router == NULL) && (neigh_node != NULL)) {
-
- bat_dbg(DBG_ROUTES, bat_priv,
- "Adding route towards: %pM (via %pM)\n",
- orig_node->orig, neigh_node->addr);
- hna_global_add_orig(bat_priv, orig_node,
- hna_buff, hna_buff_len);
-
- /* route changed */
- } else {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Changing route towards: %pM "
- "(now via %pM - was via %pM)\n",
- orig_node->orig, neigh_node->addr,
- orig_node->router->addr);
- }
-
- orig_node->router = neigh_node;
-}
-
-
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node, unsigned char *hna_buff,
- int hna_buff_len)
-{
-
- if (orig_node == NULL)
- return;
-
- if (orig_node->router != neigh_node)
- update_route(bat_priv, orig_node, neigh_node,
- hna_buff, hna_buff_len);
-	/* maybe only the HNA changed */
- else
- update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
-}
-
-static int is_bidirectional_neigh(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_packet *batman_packet,
- struct batman_if *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- unsigned char total_count;
-
- if (orig_node == orig_neigh_node) {
- list_for_each_entry(tmp_neigh_node,
- &orig_node->neigh_list,
- list) {
-
- if (compare_orig(tmp_neigh_node->addr,
- orig_neigh_node->orig) &&
- (tmp_neigh_node->if_incoming == if_incoming))
- neigh_node = tmp_neigh_node;
- }
-
- if (!neigh_node)
- neigh_node = create_neighbor(orig_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
- /* create_neighbor failed, return 0 */
- if (!neigh_node)
- return 0;
-
- neigh_node->last_valid = jiffies;
- } else {
- /* find packet count of corresponding one hop neighbor */
- list_for_each_entry(tmp_neigh_node,
- &orig_neigh_node->neigh_list, list) {
-
- if (compare_orig(tmp_neigh_node->addr,
- orig_neigh_node->orig) &&
- (tmp_neigh_node->if_incoming == if_incoming))
- neigh_node = tmp_neigh_node;
- }
-
- if (!neigh_node)
- neigh_node = create_neighbor(orig_neigh_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
- /* create_neighbor failed, return 0 */
- if (!neigh_node)
- return 0;
- }
-
- orig_node->last_valid = jiffies;
-
-	/* make sure the value cannot exceed 100% */
- total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
- neigh_node->real_packet_count ?
- neigh_node->real_packet_count :
- orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
-
-	/* if we have too few packets (too little data) we set tq_own to zero */
- /* if we receive too few packets it is not considered bidirectional */
- if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
- (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
- orig_neigh_node->tq_own = 0;
- else
- /* neigh_node->real_packet_count is never zero as we
- * only purge old information when getting new
- * information */
- orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
- neigh_node->real_packet_count;
-
- /*
-	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This affects
-	 * nearly-symmetric links only a little, but punishes
-	 * asymmetric links more. The result is a value between
-	 * 0 and TQ_MAX_VALUE.
- */
- orig_neigh_node->tq_asym_penalty =
- TQ_MAX_VALUE -
- (TQ_MAX_VALUE *
- (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE);
-
- batman_packet->tq = ((batman_packet->tq *
- orig_neigh_node->tq_own *
- orig_neigh_node->tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: "
- "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
- "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
- "total tq: %3i\n",
- orig_node->orig, orig_neigh_node->orig, total_count,
- neigh_node->real_packet_count, orig_neigh_node->tq_own,
- orig_neigh_node->tq_asym_penalty, batman_packet->tq);
-
- /* if link has the minimum required transmission quality
- * consider it bidirectional */
- if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
- return 1;
-
- return 0;
-}
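
To make the asymmetric-link penalty above concrete, the hypothetical host-side sketch below evaluates the same cubic expression for a few sample receive counts; the constants mirror main.h and the sample counts are arbitrary.

/* Host-side sketch of the asymmetric-link penalty computed in
 * is_bidirectional_neigh(); constants mirror main.h. */
#include <stdio.h>
#include <stddef.h>

#define TQ_MAX_VALUE 255
#define TQ_LOCAL_WINDOW_SIZE 64

static int tq_asym_penalty(int real_packet_count)
{
	int w = TQ_LOCAL_WINDOW_SIZE;
	int missing = w - real_packet_count;

	/* 1 - ((1 - x)^3), scaled to TQ_MAX_VALUE, with
	 * x = real_packet_count / TQ_LOCAL_WINDOW_SIZE */
	return TQ_MAX_VALUE -
	       (TQ_MAX_VALUE * missing * missing * missing) / (w * w * w);
}

int main(void)
{
	int counts[] = { 64, 48, 32, 8 };	/* arbitrary samples */
	size_t i;

	/* a nearly full window is barely punished (255, 252, ...),
	 * a mostly empty one loses most of its quality (85) */
	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("recv %2d/64 -> penalty factor %3d/255\n",
		       counts[i], tq_asym_penalty(counts[i]));
	return 0;
}
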
-
-static void update_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- struct batman_if *if_incoming,
- unsigned char *hna_buff, int hna_buff_len,
- char is_duplicate)
-{
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- int tmp_hna_buff_len;
-
- bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
- "Searching and updating originator entry of received packet\n");
-
- list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
- if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming)) {
- neigh_node = tmp_neigh_node;
- continue;
- }
-
- if (is_duplicate)
- continue;
-
- ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
- tmp_neigh_node->tq_avg =
- ring_buffer_avg(tmp_neigh_node->tq_recv);
- }
-
- if (!neigh_node) {
- struct orig_node *orig_tmp;
-
- orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
- if (!orig_tmp)
- return;
-
- neigh_node = create_neighbor(orig_node, orig_tmp,
- ethhdr->h_source, if_incoming);
- if (!neigh_node)
- return;
- } else
- bat_dbg(DBG_BATMAN, bat_priv,
- "Updating existing last-hop neighbor of originator\n");
-
- orig_node->flags = batman_packet->flags;
- neigh_node->last_valid = jiffies;
-
- ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
- batman_packet->tq);
- neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
-
- if (!is_duplicate) {
- orig_node->last_ttl = batman_packet->ttl;
- neigh_node->last_ttl = batman_packet->ttl;
- }
-
- tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
- batman_packet->num_hna * ETH_ALEN : hna_buff_len);
-
- /* if this neighbor already is our next hop there is nothing
- * to change */
- if (orig_node->router == neigh_node)
- goto update_hna;
-
- /* if this neighbor does not offer a better TQ we won't consider it */
- if ((orig_node->router) &&
- (orig_node->router->tq_avg > neigh_node->tq_avg))
- goto update_hna;
-
-	/* if the TQ is the same and the link is not more symmetric
-	 * we won't consider it either */
- if ((orig_node->router) &&
- ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
- (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
- >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
- goto update_hna;
-
- update_routes(bat_priv, orig_node, neigh_node,
- hna_buff, tmp_hna_buff_len);
- return;
-
-update_hna:
- update_routes(bat_priv, orig_node, orig_node->router,
- hna_buff, tmp_hna_buff_len);
-}
-
-/* checks whether the host restarted and is in the protection time.
- * returns:
- * 0 if the packet is to be accepted
- * 1 if the packet is to be ignored.
- */
-static int window_protected(struct bat_priv *bat_priv,
- int32_t seq_num_diff,
- unsigned long *last_reset)
-{
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
- || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (time_after(jiffies, *last_reset +
- msecs_to_jiffies(RESET_PROTECTION_MS))) {
-
- *last_reset = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "old packet received, start protection\n");
-
- return 0;
- } else
- return 1;
- }
- return 0;
-}
-
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- * 1 the packet is a duplicate
- * 0 the packet has not yet been received
- * -1 the packet is old and has been received while the seqno window
- * was protected. Caller should drop it.
- */
-static char count_real_packets(struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- struct batman_if *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *tmp_neigh_node;
- char is_duplicate = 0;
- int32_t seq_diff;
- int need_update = 0;
- int set_mark;
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (orig_node == NULL)
- return 0;
-
- seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
-
-	/* signal to the caller that the packet is to be dropped */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
- return -1;
-
- list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
- is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_packet->seqno);
-
- if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming))
- set_mark = 1;
- else
- set_mark = 0;
-
- /* if the window moved, set the update flag. */
- need_update |= bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
- seq_diff, set_mark);
-
- tmp_neigh_node->real_packet_count =
- bit_packet_count(tmp_neigh_node->real_bits);
- }
-
- if (need_update) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %d, new %d\n",
- orig_node->last_real_seqno, batman_packet->seqno);
- orig_node->last_real_seqno = batman_packet->seqno;
- }
-
- return is_duplicate;
-}
-
-/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_packet *batman_packet)
-{
- if (batman_packet->flags & PRIMARIES_FIRST_HOP)
- memcpy(orig_neigh_node->primary_addr,
- orig_node->orig, ETH_ALEN);
-
- return;
-}
-
-/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
-{
- int candidates;
- int interference_candidate;
- int best_tq;
- struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
- struct neigh_node *first_candidate, *last_candidate;
-
- /* update the candidates for this originator */
- if (!orig_node->router) {
- orig_node->bond.candidates = 0;
- return;
- }
-
- best_tq = orig_node->router->tq_avg;
-
- /* update bond.candidates */
-
- candidates = 0;
-
- /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
- * as "bonding partner" */
-
- /* first, zero the list */
- list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
- tmp_neigh_node->next_bond_candidate = NULL;
- }
-
- first_candidate = NULL;
- last_candidate = NULL;
- list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
- /* only consider if it has the same primary address ... */
- if (memcmp(orig_node->orig,
- tmp_neigh_node->orig_node->primary_addr,
- ETH_ALEN) != 0)
- continue;
-
- /* ... and is good enough to be considered */
- if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
- continue;
-
- /* check if we have another candidate with the same
- * mac address or interface. If we do, we won't
- * select this candidate because of possible interference. */
-
- interference_candidate = 0;
- list_for_each_entry(tmp_neigh_node2,
- &orig_node->neigh_list, list) {
-
- if (tmp_neigh_node2 == tmp_neigh_node)
- continue;
-
- /* we only care if the other candidate is even
- * considered as candidate. */
- if (tmp_neigh_node2->next_bond_candidate == NULL)
- continue;
-
-
- if ((tmp_neigh_node->if_incoming ==
- tmp_neigh_node2->if_incoming)
- || (memcmp(tmp_neigh_node->addr,
- tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
-
- interference_candidate = 1;
- break;
- }
- }
- /* don't care further if it is an interference candidate */
- if (interference_candidate)
- continue;
-
- if (first_candidate == NULL) {
- first_candidate = tmp_neigh_node;
- tmp_neigh_node->next_bond_candidate = first_candidate;
- } else
- tmp_neigh_node->next_bond_candidate = last_candidate;
-
- last_candidate = tmp_neigh_node;
-
- candidates++;
- }
-
- if (candidates > 0) {
- first_candidate->next_bond_candidate = last_candidate;
- orig_node->bond.selected = first_candidate;
- }
-
- orig_node->bond.candidates = candidates;
-}
-
-void receive_bat_packet(struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- unsigned char *hna_buff, int hna_buff_len,
- struct batman_if *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct batman_if *batman_if;
- struct orig_node *orig_neigh_node, *orig_node;
- char has_directlink_flag;
- char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
- char is_duplicate;
- uint32_t if_incoming_seqno;
-
-	/* Silently drop the packet when it is not a valid batman
-	 * packet.
- *
- * This might happen if a packet is padded (e.g. Ethernet has a
- * minimum frame length of 64 byte) and the aggregation interprets
- * it as an additional length.
- *
- * TODO: A more sane solution would be to have a bit in the
- * batman_packet to detect whether the packet is the last
- * packet in an aggregation. Here we expect that the padding
- * is always zero (or not 0x01)
- */
- if (batman_packet->packet_type != BAT_PACKET)
- return;
-
- /* could be changed by schedule_own_packet() */
- if_incoming_seqno = atomic_read(&if_incoming->seqno);
-
- has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- is_single_hop_neigh = (compare_orig(ethhdr->h_source,
- batman_packet->orig) ? 1 : 0);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
- "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
- "TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->net_dev->name,
- if_incoming->net_dev->dev_addr, batman_packet->orig,
- batman_packet->prev_sender, batman_packet->seqno,
- batman_packet->tq, batman_packet->ttl, batman_packet->version,
- has_directlink_flag);
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->if_status != IF_ACTIVE)
- continue;
-
- if (batman_if->soft_iface != if_incoming->soft_iface)
- continue;
-
- if (compare_orig(ethhdr->h_source,
- batman_if->net_dev->dev_addr))
- is_my_addr = 1;
-
- if (compare_orig(batman_packet->orig,
- batman_if->net_dev->dev_addr))
- is_my_orig = 1;
-
- if (compare_orig(batman_packet->prev_sender,
- batman_if->net_dev->dev_addr))
- is_my_oldorig = 1;
-
- if (compare_orig(ethhdr->h_source, broadcast_addr))
- is_broadcast = 1;
- }
- rcu_read_unlock();
-
- if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
- return;
- }
-
- if (is_my_addr) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM"
- ")\n",
- ethhdr->h_source);
- return;
- }
-
- if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "ignoring all packets with broadcast source addr (sender: %pM"
- ")\n", ethhdr->h_source);
- return;
- }
-
- if (is_my_orig) {
- TYPE_OF_WORD *word;
- int offset;
-
- orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
-
- if (!orig_neigh_node)
- return;
-
- /* neighbor has to indicate direct link and it has to
- * come via the corresponding interface */
-		/* if the received seqno equals the last sent seqno, save
-		 * the new seqno for the bidirectional check */
- if (has_directlink_flag &&
- compare_orig(if_incoming->net_dev->dev_addr,
- batman_packet->orig) &&
- (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
- offset = if_incoming->if_num * NUM_WORDS;
- word = &(orig_neigh_node->bcast_own[offset]);
- bit_mark(word, 0);
- orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bit_packet_count(word);
- }
-
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "originator packet from myself (via neighbor)\n");
- return;
- }
-
- if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: "
- "%pM)\n", ethhdr->h_source);
- return;
- }
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (orig_node == NULL)
- return;
-
- is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
-
- if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time "
- "(sender: %pM)\n", ethhdr->h_source);
- return;
- }
-
- if (batman_packet->tq == 0) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet with tq equal 0\n");
- return;
- }
-
- /* avoid temporary routing loops */
- if ((orig_node->router) &&
- (orig_node->router->orig_node->router) &&
- (compare_orig(orig_node->router->addr,
- batman_packet->prev_sender)) &&
- !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
- (compare_orig(orig_node->router->addr,
- orig_node->router->orig_node->router->addr))) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that "
- "may make me loop (sender: %pM)\n", ethhdr->h_source);
- return;
- }
-
- /* if sender is a direct neighbor the sender mac equals
- * originator mac */
- orig_neigh_node = (is_single_hop_neigh ?
- orig_node :
- get_orig_node(bat_priv, ethhdr->h_source));
- if (orig_neigh_node == NULL)
- return;
-
- /* drop packet if sender is not a direct neighbor and if we
- * don't route towards it */
- if (!is_single_hop_neigh &&
- (orig_neigh_node->router == NULL)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: OGM via unknown neighbor!\n");
- return;
- }
-
- is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
- batman_packet, if_incoming);
-
- /* update ranking if it is not a duplicate or has the same
- * seqno and similar ttl as the non-duplicate */
- if (is_bidirectional &&
- (!is_duplicate ||
- ((orig_node->last_real_seqno == batman_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_packet->ttl))))
- update_orig(bat_priv, orig_node, ethhdr, batman_packet,
- if_incoming, hna_buff, hna_buff_len, is_duplicate);
-
- mark_bonding_address(bat_priv, orig_node,
- orig_neigh_node, batman_packet);
- update_bonding_candidates(bat_priv, orig_node);
-
- /* is single hop (direct) neighbor */
- if (is_single_hop_neigh) {
-
- /* mark direct link on incoming interface */
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 1, hna_buff_len, if_incoming);
-
- bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
- "rebroadcast neighbor packet with direct link flag\n");
- return;
- }
-
- /* multihop originator */
- if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: not received via bidirectional link\n");
- return;
- }
-
- if (is_duplicate) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: duplicate packet received\n");
- return;
- }
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast originator packet\n");
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 0, hna_buff_len, if_incoming);
-}
-
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- struct ethhdr *ethhdr;
- unsigned long flags;
-
- /* drop packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
- return NET_RX_DROP;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* packet with broadcast indication but unicast recipient */
- if (!is_bcast(ethhdr->h_dest))
- return NET_RX_DROP;
-
- /* packet with broadcast sender address */
- if (is_bcast(ethhdr->h_source))
- return NET_RX_DROP;
-
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, 0) < 0)
- return NET_RX_DROP;
-
- /* keep skb linear */
- if (skb_linearize(skb) < 0)
- return NET_RX_DROP;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- receive_aggr_bat_packet(ethhdr,
- skb->data,
- skb_headlen(skb),
- batman_if);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- kfree_skb(skb);
- return NET_RX_SUCCESS;
-}
-
-static int recv_my_icmp_packet(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
-{
- struct orig_node *orig_node;
- struct icmp_packet_rr *icmp_packet;
- struct ethhdr *ethhdr;
- struct batman_if *batman_if;
- int ret;
- unsigned long flags;
- uint8_t dstaddr[ETH_ALEN];
-
- icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* add data to device queue */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_socket_receive_packet(icmp_packet, icmp_len);
- return NET_RX_DROP;
- }
-
- if (!bat_priv->primary_if)
- return NET_RX_DROP;
-
- /* answer echo request (ping) */
- /* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- icmp_packet->orig));
- ret = NET_RX_DROP;
-
- if ((orig_node != NULL) &&
- (orig_node->router != NULL)) {
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
- return NET_RX_DROP;
-
- icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = ECHO_REPLY;
- icmp_packet->ttl = TTL;
-
- send_skb_packet(skb, batman_if, dstaddr);
- ret = NET_RX_SUCCESS;
-
- } else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- return ret;
-}
-
-static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
-{
- struct orig_node *orig_node;
- struct icmp_packet *icmp_packet;
- struct ethhdr *ethhdr;
- struct batman_if *batman_if;
- int ret;
- unsigned long flags;
- uint8_t dstaddr[ETH_ALEN];
-
- icmp_packet = (struct icmp_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- pr_debug("Warning - can't forward icmp packet from %pM to "
- "%pM: ttl exceeded\n", icmp_packet->orig,
- icmp_packet->dst);
- return NET_RX_DROP;
- }
-
- if (!bat_priv->primary_if)
- return NET_RX_DROP;
-
- /* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(bat_priv->orig_hash, icmp_packet->orig));
- ret = NET_RX_DROP;
-
- if ((orig_node != NULL) &&
- (orig_node->router != NULL)) {
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
- return NET_RX_DROP;
-
- icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = TTL_EXCEEDED;
- icmp_packet->ttl = TTL;
-
- send_skb_packet(skb, batman_if, dstaddr);
- ret = NET_RX_SUCCESS;
-
- } else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- return ret;
-}
-
-
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct icmp_packet_rr *icmp_packet;
- struct ethhdr *ethhdr;
- struct orig_node *orig_node;
- struct batman_if *batman_if;
- int hdr_size = sizeof(struct icmp_packet);
- int ret;
- unsigned long flags;
- uint8_t dstaddr[ETH_ALEN];
-
- /**
- * packets that are big enough to carry record route information
- * are handled with the larger icmp_packet_rr header; everything
- * else uses the basic icmp_packet size
- */
- if (skb->len >= sizeof(struct icmp_packet_rr))
- hdr_size = sizeof(struct icmp_packet_rr);
-
- /* drop packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return NET_RX_DROP;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* packet with unicast indication but broadcast recipient */
- if (is_bcast(ethhdr->h_dest))
- return NET_RX_DROP;
-
- /* packet with broadcast sender address */
- if (is_bcast(ethhdr->h_source))
- return NET_RX_DROP;
-
- /* not for me */
- if (!is_my_mac(ethhdr->h_dest))
- return NET_RX_DROP;
-
- icmp_packet = (struct icmp_packet_rr *)skb->data;
-
- /* add record route information if not full */
- if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
- (icmp_packet->rr_cur < BAT_RR_LEN)) {
- memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
- ethhdr->h_dest, ETH_ALEN);
- icmp_packet->rr_cur++;
- }
-
- /* packet for me */
- if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(bat_priv, skb, hdr_size);
-
- /* TTL exceeded */
- if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
-
- ret = NET_RX_DROP;
-
- /* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(bat_priv->orig_hash, icmp_packet->dst));
-
- if ((orig_node != NULL) &&
- (orig_node->router != NULL)) {
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
- return NET_RX_DROP;
-
- icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* decrement ttl */
- icmp_packet->ttl--;
-
- /* route it */
- send_skb_packet(skb, batman_if, dstaddr);
- ret = NET_RX_SUCCESS;
-
- } else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- return ret;
-}
-
-/* find a suitable router for this originator, and use
- * bonding if possible. */
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct batman_if *recv_if)
-{
- struct orig_node *primary_orig_node;
- struct orig_node *router_orig;
- struct neigh_node *router, *first_candidate, *best_router;
- static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- int bonding_enabled;
-
- if (!orig_node)
- return NULL;
-
- if (!orig_node->router)
- return NULL;
-
- /* without bonding, the first node should
- * always choose the default router. */
-
- bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
-
- if ((!recv_if) && (!bonding_enabled))
- return orig_node->router;
-
- router_orig = orig_node->router->orig_node;
-
- /* if we have something in the primary_addr, we can search
- * for a potential bonding candidate. */
- if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
- return orig_node->router;
-
- /* find the orig_node which has the primary interface. might
- * even be the same as our router_orig in many cases */
-
- if (memcmp(router_orig->primary_addr,
- router_orig->orig, ETH_ALEN) == 0) {
- primary_orig_node = router_orig;
- } else {
- primary_orig_node = hash_find(bat_priv->orig_hash,
- router_orig->primary_addr);
-
- if (!primary_orig_node)
- return orig_node->router;
- }
-
- /* with less than 2 candidates, we can't do any
- * bonding and prefer the original router. */
-
- if (primary_orig_node->bond.candidates < 2)
- return orig_node->router;
-
-
- /* all nodes in between should choose a candidate which
- * is not on the interface where the packet came
- * in. */
- first_candidate = primary_orig_node->bond.selected;
- router = first_candidate;
-
- if (bonding_enabled) {
- /* in the bonding case, send the packets in a round
- * robin fashion over the remaining interfaces. */
- do {
- /* recv_if == NULL on the first node. */
- if (router->if_incoming != recv_if)
- break;
-
- router = router->next_bond_candidate;
- } while (router != first_candidate);
-
- primary_orig_node->bond.selected = router->next_bond_candidate;
-
- } else {
- /* if bonding is disabled, use the best of the
- * remaining candidates which are not using
- * this interface. */
- best_router = first_candidate;
-
- do {
- /* recv_if == NULL on the first node. */
- if ((router->if_incoming != recv_if) &&
- (router->tq_avg > best_router->tq_avg))
- best_router = router;
-
- router = router->next_bond_candidate;
- } while (router != first_candidate);
-
- router = best_router;
- }
-
- return router;
-}
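The bonding branch above walks a circular next_bond_candidate list and skips candidates that were learned on the interface the packet arrived on. A minimal standalone sketch of that round-robin selection, using a simplified candidate struct instead of the real neigh_node (all names below are illustrative only):

struct candidate {
	struct candidate *next_bond_candidate;	/* circular list of candidates */
	const void *if_incoming;		/* interface the candidate was learned on */
};

/* pick the next candidate not using recv_if; if every candidate uses
 * recv_if, the starting candidate is returned unchanged */
static struct candidate *round_robin_pick(struct candidate *first,
					  const void *recv_if)
{
	struct candidate *cand = first;

	do {
		if (cand->if_incoming != recv_if)
			break;

		cand = cand->next_bond_candidate;
	} while (cand != first);

	return cand;
}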
-
-static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
- struct ethhdr *ethhdr;
-
- /* drop packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return -1;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* packet with unicast indication but broadcast recipient */
- if (is_bcast(ethhdr->h_dest))
- return -1;
-
- /* packet with broadcast sender address */
- if (is_bcast(ethhdr->h_source))
- return -1;
-
- /* not for me */
- if (!is_my_mac(ethhdr->h_dest))
- return -1;
-
- return 0;
-}
-
-static int route_unicast_packet(struct sk_buff *skb,
- struct batman_if *recv_if, int hdr_size)
-{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *router;
- struct batman_if *batman_if;
- uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
- struct unicast_packet *unicast_packet;
- struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- unicast_packet = (struct unicast_packet *)skb->data;
-
- /* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
- interface_rx(recv_if->soft_iface, skb, hdr_size);
- return NET_RX_SUCCESS;
- }
-
- /* TTL exceeded */
- if (unicast_packet->ttl < 2) {
- pr_debug("Warning - can't forward unicast packet from %pM to "
- "%pM: ttl exceeded\n", ethhdr->h_source,
- unicast_packet->dest);
- return NET_RX_DROP;
- }
-
- /* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(bat_priv->orig_hash, unicast_packet->dest));
-
- router = find_router(bat_priv, orig_node, recv_if);
-
- if (!router) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return NET_RX_DROP;
- }
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
-
- batman_if = router->if_incoming;
- memcpy(dstaddr, router->addr, ETH_ALEN);
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- /* create a copy of the skb, if needed, to modify it. */
- if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
- return NET_RX_DROP;
-
- unicast_packet = (struct unicast_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* decrement ttl */
- unicast_packet->ttl--;
-
- /* route it */
- send_skb_packet(skb, batman_if, dstaddr);
-
- return NET_RX_SUCCESS;
-}
-
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
- struct unicast_packet *unicast_packet;
- int hdr_size = sizeof(struct unicast_packet);
-
- if (check_unicast_packet(skb, hdr_size) < 0)
- return NET_RX_DROP;
-
- unicast_packet = (struct unicast_packet *)skb->data;
-
- /* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
- interface_rx(recv_if->soft_iface, skb, hdr_size);
- return NET_RX_SUCCESS;
- }
-
- return route_unicast_packet(skb, recv_if, hdr_size);
-}
-
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct unicast_frag_packet *unicast_packet;
- struct orig_node *orig_node;
- struct frag_packet_list_entry *tmp_frag_entry;
- int hdr_size = sizeof(struct unicast_frag_packet);
- unsigned long flags;
-
- if (check_unicast_packet(skb, hdr_size) < 0)
- return NET_RX_DROP;
-
- unicast_packet = (struct unicast_frag_packet *)skb->data;
-
- /* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(bat_priv->orig_hash, unicast_packet->orig));
-
- if (!orig_node) {
- pr_debug("couldn't find orig node for fragmentation\n");
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
- flags);
- return NET_RX_DROP;
- }
-
- orig_node->last_frag_packet = jiffies;
-
- if (list_empty(&orig_node->frag_list) &&
- create_frag_buffer(&orig_node->frag_list)) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
- flags);
- return NET_RX_DROP;
- }
-
- tmp_frag_entry =
- search_frag_packet(&orig_node->frag_list,
- unicast_packet);
-
- if (!tmp_frag_entry) {
- create_frag_entry(&orig_node->frag_list, skb);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
- flags);
- return NET_RX_SUCCESS;
- }
-
- skb = merge_frag_packet(&orig_node->frag_list,
- tmp_frag_entry, skb);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- if (!skb)
- return NET_RX_DROP;
-
- interface_rx(recv_if->soft_iface, skb, hdr_size);
- return NET_RX_SUCCESS;
- }
-
- return route_unicast_packet(skb, recv_if, hdr_size);
-}
-
-
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node;
- struct bcast_packet *bcast_packet;
- struct ethhdr *ethhdr;
- int hdr_size = sizeof(struct bcast_packet);
- int32_t seq_diff;
- unsigned long flags;
-
- /* drop packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return NET_RX_DROP;
-
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* packet with broadcast indication but unicast recipient */
- if (!is_bcast(ethhdr->h_dest))
- return NET_RX_DROP;
-
- /* packet with broadcast sender address */
- if (is_bcast(ethhdr->h_source))
- return NET_RX_DROP;
-
- /* ignore broadcasts sent by myself */
- if (is_my_mac(ethhdr->h_source))
- return NET_RX_DROP;
-
- bcast_packet = (struct bcast_packet *)skb->data;
-
- /* ignore broadcasts originated by myself */
- if (is_my_mac(bcast_packet->orig))
- return NET_RX_DROP;
-
- if (bcast_packet->ttl < 2)
- return NET_RX_DROP;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(bat_priv->orig_hash, bcast_packet->orig));
-
- if (orig_node == NULL) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return NET_RX_DROP;
- }
-
- /* check whether the packet is a duplicate */
- if (get_bit_status(orig_node->bcast_bits,
- orig_node->last_bcast_seqno,
- ntohl(bcast_packet->seqno))) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return NET_RX_DROP;
- }
-
- seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
-
- /* check whether the packet is old and the host just restarted. */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->bcast_seqno_reset)) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- return NET_RX_DROP;
- }
-
- /* mark broadcast in flood history, update window position
- * if required. */
- if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
- orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- /* rebroadcast packet */
- add_bcast_packet_to_list(bat_priv, skb);
-
- /* broadcast for me */
- interface_rx(recv_if->soft_iface, skb, hdr_size);
-
- return NET_RX_SUCCESS;
-}
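The duplicate check above keeps a per-originator bitmap of recently seen broadcast sequence numbers (get_bit_status / bit_get_packet) plus a seqno-reset guard (window_protected). A rough, hedged sketch of the underlying idea using a single 64-bit window and without the reset handling; the window size and names are assumptions, not the driver's real bitfield code:

#include <stdbool.h>
#include <stdint.h>

#define SEQ_WINDOW 64

/* returns true if seqno was already seen inside the sliding window;
 * otherwise marks it and, for newer seqnos, slides the window forward */
static bool bcast_seen_before(uint64_t *bits, uint32_t *last_seqno,
			      uint32_t seqno)
{
	int32_t diff = (int32_t)(seqno - *last_seqno);

	if (diff > 0) {
		/* newer sequence number: slide the window and mark bit 0 */
		*bits = (diff >= SEQ_WINDOW) ? 0 : *bits << diff;
		*bits |= 1ULL;
		*last_seqno = seqno;
		return false;
	}

	if (diff <= -SEQ_WINDOW)
		/* too old to be tracked by the window */
		return true;

	if (*bits & (1ULL << -diff))
		return true;

	*bits |= 1ULL << -diff;
	return false;
}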
-
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
- struct vis_packet *vis_packet;
- struct ethhdr *ethhdr;
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- int hdr_size = sizeof(struct vis_packet);
-
- /* keep skb linear */
- if (skb_linearize(skb) < 0)
- return NET_RX_DROP;
-
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- return NET_RX_DROP;
-
- vis_packet = (struct vis_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
- /* not for me */
- if (!is_my_mac(ethhdr->h_dest))
- return NET_RX_DROP;
-
- /* ignore own packets */
- if (is_my_mac(vis_packet->vis_orig))
- return NET_RX_DROP;
-
- if (is_my_mac(vis_packet->sender_orig))
- return NET_RX_DROP;
-
- switch (vis_packet->vis_type) {
- case VIS_TYPE_SERVER_SYNC:
- receive_server_sync_packet(bat_priv, vis_packet,
- skb_headlen(skb));
- break;
-
- case VIS_TYPE_CLIENT_UPDATE:
- receive_client_update_packet(bat_priv, vis_packet,
- skb_headlen(skb));
- break;
-
- default: /* ignore unknown packet */
- break;
- }
-
- /* We take a copy of the data in the packet, so we should
- always free the skb. */
- return NET_RX_DROP;
-}
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
deleted file mode 100644
index 92674c8d9c03..000000000000
--- a/drivers/staging/batman-adv/routing.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ROUTING_H_
-#define _NET_BATMAN_ADV_ROUTING_H_
-
-#include "types.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if);
-void receive_bat_packet(struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- unsigned char *hna_buff, int hna_buff_len,
- struct batman_if *if_incoming);
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node, unsigned char *hna_buff,
- int hna_buff_len);
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node);
-
-#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
deleted file mode 100644
index 7adf76ddd0ba..000000000000
--- a/drivers/staging/batman-adv/send.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "send.h"
-#include "routing.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "types.h"
-#include "vis.h"
-#include "aggregation.h"
-
-
-static void send_outstanding_bcast_packet(struct work_struct *work);
-
-/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq)
-{
- return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
-}
-
-/* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(struct bat_priv *bat_priv)
-{
- return jiffies + msecs_to_jiffies(
- atomic_read(&bat_priv->orig_interval) -
- JITTER + (random32() % 2*JITTER));
-}
-
-/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
-{
- return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
-}
-
-/* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb,
- struct batman_if *batman_if,
- uint8_t *dst_addr)
-{
- struct ethhdr *ethhdr;
-
- if (batman_if->if_status != IF_ACTIVE)
- goto send_skb_err;
-
- if (unlikely(!batman_if->net_dev))
- goto send_skb_err;
-
- if (!(batman_if->net_dev->flags & IFF_UP)) {
- pr_warning("Interface %s is not up - can't send packet via "
- "that interface!\n", batman_if->net_dev->name);
- goto send_skb_err;
- }
-
- /* push to the ethernet header. */
- if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
- goto send_skb_err;
-
- skb_reset_mac_header(skb);
-
- ethhdr = (struct ethhdr *) skb_mac_header(skb);
- memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
- ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
-
- skb_set_network_header(skb, ETH_HLEN);
- skb->priority = TC_PRIO_CONTROL;
- skb->protocol = __constant_htons(ETH_P_BATMAN);
-
- skb->dev = batman_if->net_dev;
-
- /* dev_queue_xmit() returns a negative result on error. However on
- * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- * (which is > 0). This will not be treated as an error. */
-
- return dev_queue_xmit(skb);
-send_skb_err:
- kfree_skb(skb);
- return NET_XMIT_DROP;
-}
-
-/* Send a packet to a given interface */
-static void send_packet_to_if(struct forw_packet *forw_packet,
- struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- char *fwd_str;
- uint8_t packet_num;
- int16_t buff_pos;
- struct batman_packet *batman_packet;
- struct sk_buff *skb;
-
- if (batman_if->if_status != IF_ACTIVE)
- return;
-
- packet_num = 0;
- buff_pos = 0;
- batman_packet = (struct batman_packet *)forw_packet->skb->data;
-
- /* adjust all flags and log packets */
- while (aggregated_packet(buff_pos,
- forw_packet->packet_len,
- batman_packet->num_hna)) {
-
- /* we might have aggregated direct link packets with an
- * ordinary base packet */
- if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
- (forw_packet->if_incoming == batman_if))
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
-
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
- "Sending own" :
- "Forwarding"));
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
- " IDF %s) on interface %s [%pM]\n",
- fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->tq, batman_packet->ttl,
- (batman_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_if->net_dev->name, batman_if->net_dev->dev_addr);
-
- buff_pos += sizeof(struct batman_packet) +
- (batman_packet->num_hna * ETH_ALEN);
- packet_num++;
- batman_packet = (struct batman_packet *)
- (forw_packet->skb->data + buff_pos);
- }
-
- /* create clone because function is called more than once */
- skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, batman_if, broadcast_addr);
-}
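The while loop above steps through an aggregation buffer in which each OGM consists of a batman_packet header followed by num_hna announced MAC addresses. A hedged sketch of that walk with a simplified stand-in header (the struct below is illustrative, not the real packet layout):

#include <stddef.h>
#include <stdint.h>

#define MAC_LEN 6

struct ogm_stub {			/* illustrative stand-in for batman_packet */
	uint8_t num_hna;		/* announced MAC addresses after the header */
	uint8_t flags;
} __attribute__((packed));

/* count how many OGMs are packed back to back in an aggregation buffer */
static int count_aggregated_ogms(const uint8_t *buff, size_t buff_len)
{
	size_t pos = 0;
	int count = 0;

	while (pos + sizeof(struct ogm_stub) <= buff_len) {
		const struct ogm_stub *ogm =
			(const struct ogm_stub *)(buff + pos);
		size_t ogm_len = sizeof(*ogm) +
				 (size_t)ogm->num_hna * MAC_LEN;

		if (pos + ogm_len > buff_len)
			break;

		count++;
		pos += ogm_len;
	}

	return count;
}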
-
-/* send a batman packet */
-static void send_packet(struct forw_packet *forw_packet)
-{
- struct batman_if *batman_if;
- struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- struct batman_packet *batman_packet =
- (struct batman_packet *)(forw_packet->skb->data);
- unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- if (!forw_packet->if_incoming) {
- pr_err("Error - can't forward packet: incoming iface not "
- "specified\n");
- return;
- }
-
- soft_iface = forw_packet->if_incoming->soft_iface;
- bat_priv = netdev_priv(soft_iface);
-
- if (forw_packet->if_incoming->if_status != IF_ACTIVE)
- return;
-
- /* multihomed peer assumed */
- /* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_packet->ttl == 1)) ||
- (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
-
- /* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %d, TTL %d) "
- "on interface %s [%pM]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->ttl,
- forw_packet->if_incoming->net_dev->name,
- forw_packet->if_incoming->net_dev->dev_addr);
-
- /* skb is only used once and then forw_packet is freed */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
- forw_packet->skb = NULL;
-
- return;
- }
-
- /* broadcast on every interface */
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->soft_iface != soft_iface)
- continue;
-
- send_packet_to_if(forw_packet, batman_if);
- }
- rcu_read_unlock();
-}
-
-static void rebuild_batman_packet(struct bat_priv *bat_priv,
- struct batman_if *batman_if)
-{
- int new_len;
- unsigned char *new_buff;
- struct batman_packet *batman_packet;
-
- new_len = sizeof(struct batman_packet) +
- (bat_priv->num_local_hna * ETH_ALEN);
- new_buff = kmalloc(new_len, GFP_ATOMIC);
-
- /* keep old buffer if kmalloc should fail */
- if (new_buff) {
- memcpy(new_buff, batman_if->packet_buff,
- sizeof(struct batman_packet));
- batman_packet = (struct batman_packet *)new_buff;
-
- batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
- new_buff + sizeof(struct batman_packet),
- new_len - sizeof(struct batman_packet));
-
- kfree(batman_if->packet_buff);
- batman_if->packet_buff = new_buff;
- batman_if->packet_len = new_len;
- }
-}
-
-void schedule_own_packet(struct batman_if *batman_if)
-{
- struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
- unsigned long send_time;
- struct batman_packet *batman_packet;
- int vis_server;
-
- if ((batman_if->if_status == IF_NOT_IN_USE) ||
- (batman_if->if_status == IF_TO_BE_REMOVED))
- return;
-
- vis_server = atomic_read(&bat_priv->vis_mode);
-
- /**
- * the interface gets activated here to avoid a race between the
- * moment hardif_activate_interface() sets the originator mac and
- * outdated packets (especially ones with uninitialized mac
- * addresses) still sitting in the packet queue
- */
- if (batman_if->if_status == IF_TO_BE_ACTIVATED)
- batman_if->if_status = IF_ACTIVE;
-
- /* if local hna has changed and interface is a primary interface */
- if ((atomic_read(&bat_priv->hna_local_changed)) &&
- (batman_if == bat_priv->primary_if))
- rebuild_batman_packet(bat_priv, batman_if);
-
- /**
- * NOTE: packet_buff might just have been re-allocated in
- * rebuild_batman_packet()
- */
- batman_packet = (struct batman_packet *)batman_if->packet_buff;
-
- /* change sequence number to network order */
- batman_packet->seqno =
- htonl((uint32_t)atomic_read(&batman_if->seqno));
-
- if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_packet->flags |= VIS_SERVER;
- else
- batman_packet->flags &= ~VIS_SERVER;
-
- atomic_inc(&batman_if->seqno);
-
- slide_own_bcast_window(batman_if);
- send_time = own_send_time(bat_priv);
- add_bat_packet_to_list(bat_priv,
- batman_if->packet_buff,
- batman_if->packet_len,
- batman_if, 1, send_time);
-}
-
-void schedule_forward_packet(struct orig_node *orig_node,
- struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- uint8_t directlink, int hna_buff_len,
- struct batman_if *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- unsigned char in_tq, in_ttl, tq_avg = 0;
- unsigned long send_time;
-
- if (batman_packet->ttl <= 1) {
- bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
- return;
- }
-
- in_tq = batman_packet->tq;
- in_ttl = batman_packet->ttl;
-
- batman_packet->ttl--;
- memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
-
- /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
- * of our best tq value */
- if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
-
- /* rebroadcast ogm of best ranking neighbor as is */
- if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
- batman_packet->tq = orig_node->router->tq_avg;
-
- if (orig_node->router->last_ttl)
- batman_packet->ttl = orig_node->router->last_ttl
- - 1;
- }
-
- tq_avg = orig_node->router->tq_avg;
- }
-
- /* apply hop penalty */
- batman_packet->tq = hop_penalty(batman_packet->tq);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, "
- "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
- in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
- batman_packet->ttl);
-
- batman_packet->seqno = htonl(batman_packet->seqno);
-
- /* switch off the primaries first hop flag when forwarding */
- batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
- if (directlink)
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
-
- send_time = forward_send_time(bat_priv);
- add_bat_packet_to_list(bat_priv,
- (unsigned char *)batman_packet,
- sizeof(struct batman_packet) + hna_buff_len,
- if_incoming, 0, send_time);
-}
-
-static void forw_packet_free(struct forw_packet *forw_packet)
-{
- if (forw_packet->skb)
- kfree_skb(forw_packet->skb);
- kfree(forw_packet);
-}
-
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
- struct forw_packet *forw_packet,
- unsigned long send_time)
-{
- unsigned long flags;
- INIT_HLIST_NODE(&forw_packet->list);
-
- /* add new packet to packet list */
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
- hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
- /* start timer for this packet */
- INIT_DELAYED_WORK(&forw_packet->delayed_work,
- send_outstanding_bcast_packet);
- queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
- send_time);
-}
-
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-/* add a broadcast packet to the queue and setup timers. broadcast packets
- * are sent multiple times to increase the probability of being received.
- *
- * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
- * errors.
- *
- * The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
-{
- struct forw_packet *forw_packet;
- struct bcast_packet *bcast_packet;
-
- if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
- goto out;
- }
-
- if (!bat_priv->primary_if)
- goto out;
-
- forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
-
- if (!forw_packet)
- goto out_and_inc;
-
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- goto packet_free;
-
- /* as we have a copy now, it is safe to decrease the TTL */
- bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->ttl--;
-
- skb_reset_mac_header(skb);
-
- forw_packet->skb = skb;
- forw_packet->if_incoming = bat_priv->primary_if;
-
- /* how often did we send the bcast packet ? */
- forw_packet->num_packets = 0;
-
- _add_bcast_packet_to_list(bat_priv, forw_packet, 1);
- return NETDEV_TX_OK;
-
-packet_free:
- kfree(forw_packet);
-out_and_inc:
- atomic_inc(&bat_priv->bcast_queue_left);
-out:
- return NETDEV_TX_BUSY;
-}
-
-static void send_outstanding_bcast_packet(struct work_struct *work)
-{
- struct batman_if *batman_if;
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
- unsigned long flags;
- struct sk_buff *skb1;
- struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
- hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
- goto out;
-
- /* rebroadcast packet */
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->soft_iface != soft_iface)
- continue;
-
- /* send a copy of the saved skb */
- skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb1)
- send_skb_packet(skb1, batman_if, broadcast_addr);
- }
- rcu_read_unlock();
-
- forw_packet->num_packets++;
-
- /* if we still have some more bcasts to send */
- if (forw_packet->num_packets < 3) {
- _add_bcast_packet_to_list(bat_priv, forw_packet,
- ((5 * HZ) / 1000));
- return;
- }
-
-out:
- forw_packet_free(forw_packet);
- atomic_inc(&bat_priv->bcast_queue_left);
-}
-
-void send_outstanding_bat_packet(struct work_struct *work)
-{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
- unsigned long flags;
- struct bat_priv *bat_priv;
-
- bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
- hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
- goto out;
-
- send_packet(forw_packet);
-
- /**
- * we have to have at least one packet in the queue
- * to determine the queue's wake-up time unless we are
- * shutting down
- */
- if (forw_packet->own)
- schedule_own_packet(forw_packet->if_incoming);
-
-out:
- /* don't count own packet */
- if (!forw_packet->own)
- atomic_inc(&bat_priv->batman_queue_left);
-
- forw_packet_free(forw_packet);
-}
-
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- struct batman_if *batman_if)
-{
- struct forw_packet *forw_packet;
- struct hlist_node *tmp_node, *safe_tmp_node;
- unsigned long flags;
-
- if (batman_if)
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets(): %s\n",
- batman_if->net_dev->name);
- else
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets()\n");
-
- /* free bcast list */
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &bat_priv->forw_bcast_list, list) {
-
- /**
- * if purge_outstanding_packets() was called with an argument
- * we delete only packets belonging to the given interface
- */
- if ((batman_if) &&
- (forw_packet->if_incoming != batman_if))
- continue;
-
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
- /**
- * send_outstanding_bcast_packet() will lock the list to
- * delete the item from the list
- */
- cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
- }
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
- /* free batman packet list */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &bat_priv->forw_bat_list, list) {
-
- /**
- * if purge_outstanding_packets() was called with an argument
- * we delete only packets belonging to the given interface
- */
- if ((batman_if) &&
- (forw_packet->if_incoming != batman_if))
- continue;
-
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
- /**
- * send_outstanding_bat_packet() will lock the list to
- * delete the item from the list
- */
- cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
- }
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-}
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
deleted file mode 100644
index c4cefa8e4f85..000000000000
--- a/drivers/staging/batman-adv/send.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_SEND_H_
-#define _NET_BATMAN_ADV_SEND_H_
-
-#include "types.h"
-
-int send_skb_packet(struct sk_buff *skb,
- struct batman_if *batman_if,
- uint8_t *dst_addr);
-void schedule_own_packet(struct batman_if *batman_if);
-void schedule_forward_packet(struct orig_node *orig_node,
- struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- uint8_t directlink, int hna_buff_len,
- struct batman_if *if_outgoing);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
-void send_outstanding_bat_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- struct batman_if *batman_if);
-
-#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
deleted file mode 100644
index 0e996181daf7..000000000000
--- a/drivers/staging/batman-adv/soft-interface.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "routing.h"
-#include "send.h"
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "types.h"
-#include "hash.h"
-#include "send.h"
-#include "bat_sysfs.h"
-#include <linux/slab.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include "unicast.h"
-
-
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info);
-static u32 bat_get_msglevel(struct net_device *dev);
-static void bat_set_msglevel(struct net_device *dev, u32 value);
-static u32 bat_get_link(struct net_device *dev);
-static u32 bat_get_rx_csum(struct net_device *dev);
-static int bat_set_rx_csum(struct net_device *dev, u32 data);
-
-static const struct ethtool_ops bat_ethtool_ops = {
- .get_settings = bat_get_settings,
- .get_drvinfo = bat_get_drvinfo,
- .get_msglevel = bat_get_msglevel,
- .set_msglevel = bat_set_msglevel,
- .get_link = bat_get_link,
- .get_rx_csum = bat_get_rx_csum,
- .set_rx_csum = bat_set_rx_csum
-};
-
-int my_skb_head_push(struct sk_buff *skb, unsigned int len)
-{
- int result;
-
- /**
- * TODO: We must check if we can release all references to non-payload
- * data using skb_header_release in our skbs to allow skb_cow_header to
- * work optimally. This means that those skbs are not allowed to read
- * or write any data which is before the current position of skb->data
- * after that call and thus allow other skbs with the same data buffer
- * to write freely in that area.
- */
- result = skb_cow_head(skb, len);
- if (result < 0)
- return result;
-
- skb_push(skb, len);
- return 0;
-}
-
-static int interface_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int interface_release(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static struct net_device_stats *interface_stats(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- return &bat_priv->stats;
-}
-
-static int interface_set_mac_addr(struct net_device *dev, void *p)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- /* only modify hna-table if it has been initialised before */
- if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
- hna_local_remove(bat_priv, dev->dev_addr,
- "mac address changed");
- hna_local_add(dev, addr->sa_data);
- }
-
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-
- return 0;
-}
-
-static int interface_change_mtu(struct net_device *dev, int new_mtu)
-{
- /* check ranges */
- if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
- return -EINVAL;
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
-{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct bcast_packet *bcast_packet;
- int data_len = skb->len, ret;
-
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
- goto dropped;
-
- soft_iface->trans_start = jiffies;
-
- /* TODO: check this for locks */
- hna_local_add(soft_iface, ethhdr->h_source);
-
- /* ethernet packet should be broadcasted */
- if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
- if (!bat_priv->primary_if)
- goto dropped;
-
- if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
- goto dropped;
-
- bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->version = COMPAT_VERSION;
- bcast_packet->ttl = TTL;
-
- /* batman packet type: broadcast */
- bcast_packet->packet_type = BAT_BCAST;
-
- /* hw address of first interface is the orig mac because only
- * this mac is known throughout the mesh */
- memcpy(bcast_packet->orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-
- /* set broadcast sequence number */
- bcast_packet->seqno =
- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
-
- add_bcast_packet_to_list(bat_priv, skb);
-
- /* a copy is stored in the bcast list, so the original
- * skb can be freed */
- kfree_skb(skb);
-
- /* unicast packet */
- } else {
- ret = unicast_send_skb(skb, bat_priv);
- if (ret != 0)
- goto dropped_freed;
- }
-
- bat_priv->stats.tx_packets++;
- bat_priv->stats.tx_bytes += data_len;
- goto end;
-
-dropped:
- kfree_skb(skb);
-dropped_freed:
- bat_priv->stats.tx_dropped++;
-end:
- return NETDEV_TX_OK;
-}
-
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, int hdr_size)
-{
- struct bat_priv *priv = netdev_priv(soft_iface);
-
- /* check if enough space is available for pulling, and pull */
- if (!pskb_may_pull(skb, hdr_size))
- goto dropped;
-
- skb_pull_rcsum(skb, hdr_size);
-/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
-
- /* skb->dev & skb->pkt_type are set here */
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- goto dropped;
- skb->protocol = eth_type_trans(skb, soft_iface);
-
- /* should not be necessary anymore as we use skb_pull_rcsum()
- * TODO: please verify this and remove this TODO
- * -- Dec 21st 2009, Simon Wunderlich */
-
-/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
-
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
-
- soft_iface->last_rx = jiffies;
-
- netif_rx(skb);
- return;
-
-dropped:
- kfree_skb(skb);
- return;
-}
-
-#ifdef HAVE_NET_DEVICE_OPS
-static const struct net_device_ops bat_netdev_ops = {
- .ndo_open = interface_open,
- .ndo_stop = interface_release,
- .ndo_get_stats = interface_stats,
- .ndo_set_mac_address = interface_set_mac_addr,
- .ndo_change_mtu = interface_change_mtu,
- .ndo_start_xmit = interface_tx,
- .ndo_validate_addr = eth_validate_addr
-};
-#endif
-
-static void interface_setup(struct net_device *dev)
-{
- struct bat_priv *priv = netdev_priv(dev);
- char dev_addr[ETH_ALEN];
-
- ether_setup(dev);
-
-#ifdef HAVE_NET_DEVICE_OPS
- dev->netdev_ops = &bat_netdev_ops;
-#else
- dev->open = interface_open;
- dev->stop = interface_release;
- dev->get_stats = interface_stats;
- dev->set_mac_address = interface_set_mac_addr;
- dev->change_mtu = interface_change_mtu;
- dev->hard_start_xmit = interface_tx;
-#endif
- dev->destructor = free_netdev;
-
- /**
- * can't call min_mtu, because the needed variables
- * have not been initialized yet
- */
- dev->mtu = ETH_DATA_LEN;
- dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
- * skbuff for our header */
-
- /* generate random address */
- random_ether_addr(dev_addr);
- memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
-
- SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
-
- memset(priv, 0, sizeof(struct bat_priv));
-}
-
-struct net_device *softif_create(char *name)
-{
- struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- int ret;
-
- soft_iface = alloc_netdev(sizeof(struct bat_priv), name,
- interface_setup);
-
- if (!soft_iface) {
- pr_err("Unable to allocate the batman interface: %s\n", name);
- goto out;
- }
-
- ret = register_netdev(soft_iface);
- if (ret < 0) {
- pr_err("Unable to register the batman interface '%s': %i\n",
- name, ret);
- goto free_soft_iface;
- }
-
- bat_priv = netdev_priv(soft_iface);
-
- atomic_set(&bat_priv->aggregation_enabled, 1);
- atomic_set(&bat_priv->bonding_enabled, 0);
- atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
- atomic_set(&bat_priv->orig_interval, 1000);
- atomic_set(&bat_priv->log_level, 0);
- atomic_set(&bat_priv->frag_enabled, 1);
- atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
- atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
-
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
- atomic_set(&bat_priv->bcast_seqno, 1);
- atomic_set(&bat_priv->hna_local_changed, 0);
-
- bat_priv->primary_if = NULL;
- bat_priv->num_ifaces = 0;
-
- ret = sysfs_add_meshif(soft_iface);
- if (ret < 0)
- goto unreg_soft_iface;
-
- ret = debugfs_add_meshif(soft_iface);
- if (ret < 0)
- goto unreg_sysfs;
-
- ret = mesh_init(soft_iface);
- if (ret < 0)
- goto unreg_debugfs;
-
- return soft_iface;
-
-unreg_debugfs:
- debugfs_del_meshif(soft_iface);
-unreg_sysfs:
- sysfs_del_meshif(soft_iface);
-unreg_soft_iface:
- unregister_netdev(soft_iface);
- return NULL;
-
-free_soft_iface:
- free_netdev(soft_iface);
-out:
- return NULL;
-}
-
-void softif_destroy(struct net_device *soft_iface)
-{
- debugfs_del_meshif(soft_iface);
- sysfs_del_meshif(soft_iface);
- mesh_free(soft_iface);
- unregister_netdevice(soft_iface);
-}
-
-/* ethtool */
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- cmd->supported = 0;
- cmd->advertising = 0;
- cmd->speed = SPEED_10;
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
-
- return 0;
-}
-
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "B.A.T.M.A.N. advanced");
- strcpy(info->version, SOURCE_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, "batman");
-}
-
-static u32 bat_get_msglevel(struct net_device *dev)
-{
- return -EOPNOTSUPP;
-}
-
-static void bat_set_msglevel(struct net_device *dev, u32 value)
-{
-}
-
-static u32 bat_get_link(struct net_device *dev)
-{
- return 1;
-}
-
-static u32 bat_get_rx_csum(struct net_device *dev)
-{
- return 0;
-}
-
-static int bat_set_rx_csum(struct net_device *dev, u32 data)
-{
- return -EOPNOTSUPP;
-}
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
deleted file mode 100644
index 843a7ec082fe..000000000000
--- a/drivers/staging/batman-adv/soft-interface.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-
-int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, int hdr_size);
-struct net_device *softif_create(char *name);
-void softif_destroy(struct net_device *soft_iface);
-
-#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/drivers/staging/batman-adv/sysfs-class-net-batman-adv b/drivers/staging/batman-adv/sysfs-class-net-batman-adv
deleted file mode 100644
index 38dd762def4b..000000000000
--- a/drivers/staging/batman-adv/sysfs-class-net-batman-adv
+++ /dev/null
@@ -1,14 +0,0 @@
-
-What: /sys/class/net/<iface>/batman-adv/mesh_iface
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- The /sys/class/net/<iface>/batman-adv/mesh_iface file
- displays the batman mesh interface this <iface>
- is currently associated with.
-
-What: /sys/class/net/<iface>/batman-adv/iface_status
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- Indicates the status of <iface> as it is seen by batman.
diff --git a/drivers/staging/batman-adv/sysfs-class-net-mesh b/drivers/staging/batman-adv/sysfs-class-net-mesh
deleted file mode 100644
index b4cdb6038bf1..000000000000
--- a/drivers/staging/batman-adv/sysfs-class-net-mesh
+++ /dev/null
@@ -1,41 +0,0 @@
-
-What: /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- Indicates whether the batman protocol messages of the
- mesh <mesh_iface> shall be aggregated or not.
-
-What: /sys/class/net/<mesh_iface>/mesh/bonding
-Date: June 2010
-Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-Description:
- Indicates whether the data traffic going through the
- mesh will be sent using multiple interfaces at the
- same time (if available).
-
-What: /sys/class/net/<mesh_iface>/mesh/fragmentation
-Date: October 2010
-Contact: Andreas Langer <an.langer@gmx.de>
-Description:
- Indicates whether the data traffic going through the
- mesh will be fragmented or silently discarded if the
- packet size exceeds the outgoing interface MTU.
-
-What: /sys/class/net/<mesh_iface>/mesh/orig_interval
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- Defines the interval in milliseconds in which batman
- sends its protocol messages.
-
-What: /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date: May 2010
-Contact: Marek Lindner <lindner_marek@yahoo.de>
-Description:
- Each batman node only maintains information about its
- own local neighborhood, therefore generating graphs
- showing the topology of the entire mesh is not easily
- feasible without having a central instance to collect
- the local topologies from all nodes. This file allows
- the collecting (server) mode to be activated.
diff --git a/drivers/staging/batman-adv/translation-table.c b/drivers/staging/batman-adv/translation-table.c
deleted file mode 100644
index 681ccbda3eac..000000000000
--- a/drivers/staging/batman-adv/translation-table.c
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "types.h"
-#include "hash.h"
-
-static void hna_local_purge(struct work_struct *work);
-static void _hna_global_del_orig(struct bat_priv *bat_priv,
- struct hna_global_entry *hna_global_entry,
- char *message);
-
-static void hna_local_start_timer(struct bat_priv *bat_priv)
-{
- INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
- queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
-}
-
-int hna_local_init(struct bat_priv *bat_priv)
-{
- if (bat_priv->hna_local_hash)
- return 1;
-
- bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
-
- if (!bat_priv->hna_local_hash)
- return 0;
-
- atomic_set(&bat_priv->hna_local_changed, 0);
- hna_local_start_timer(bat_priv);
-
- return 1;
-}
-
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
-{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct hna_local_entry *hna_local_entry;
- struct hna_global_entry *hna_global_entry;
- struct hashtable_t *swaphash;
- unsigned long flags;
- int required_bytes;
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- hna_local_entry =
- ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
- addr));
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
- if (hna_local_entry) {
- hna_local_entry->last_seen = jiffies;
- return;
- }
-
-	/* only announce as many hosts as fit in the batman packet and
-	   in batman_packet->num_hna. That should also limit the impact
-	   of MAC flooding. */
- required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
- required_bytes += BAT_PACKET_LEN;
-
- if ((required_bytes > ETH_DATA_LEN) ||
- (atomic_read(&bat_priv->aggregation_enabled) &&
- required_bytes > MAX_AGGREGATION_BYTES) ||
- (bat_priv->num_local_hna + 1 > 255)) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Can't add new local hna entry (%pM): "
- "number of local hna entries exceeds packet size\n",
- addr);
- return;
- }
-
- bat_dbg(DBG_ROUTES, bat_priv,
- "Creating new local hna entry: %pM\n", addr);
-
- hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
- if (!hna_local_entry)
- return;
-
- memcpy(hna_local_entry->addr, addr, ETH_ALEN);
- hna_local_entry->last_seen = jiffies;
-
- /* the batman interface mac address should never be purged */
- if (compare_orig(addr, soft_iface->dev_addr))
- hna_local_entry->never_purge = 1;
- else
- hna_local_entry->never_purge = 0;
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- hash_add(bat_priv->hna_local_hash, hna_local_entry);
- bat_priv->num_local_hna++;
- atomic_set(&bat_priv->hna_local_changed, 1);
-
- if (bat_priv->hna_local_hash->elements * 4 >
- bat_priv->hna_local_hash->size) {
- swaphash = hash_resize(bat_priv->hna_local_hash,
- bat_priv->hna_local_hash->size * 2);
-
- if (!swaphash)
- pr_err("Couldn't resize local hna hash table\n");
- else
- bat_priv->hna_local_hash = swaphash;
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
- /* remove address from global hash if present */
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
- hna_global_entry = ((struct hna_global_entry *)
- hash_find(bat_priv->hna_global_hash, addr));
-
- if (hna_global_entry)
- _hna_global_del_orig(bat_priv, hna_global_entry,
- "local hna received");
-
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-}
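A note on the size check in the function above: required_bytes is (num_local_hna + 1) * ETH_ALEN (6 bytes per announced MAC) plus BAT_PACKET_LEN (the batman packet header, defined outside this hunk), and a new entry is refused once that sum would exceed ETH_DATA_LEN (1500 bytes), once it would exceed MAX_AGGREGATION_BYTES while aggregation is enabled, or once more than 255 entries would be announced - which suggests num_hna is carried in a single byte of the batman packet.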
-
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len)
-{
- struct hna_local_entry *hna_local_entry;
- HASHIT(hashit);
- int i = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
-
- if (buff_len < (i + 1) * ETH_ALEN)
- break;
-
- hna_local_entry = hashit.bucket->data;
- memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
-
- i++;
- }
-
-	/* if we did not get all new local hnas, see you next time ;-) */
- if (i == bat_priv->num_local_hna)
- atomic_set(&bat_priv->hna_local_changed, 0);
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- return i;
-}
-
-int hna_local_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hna_local_entry *hna_local_entry;
- HASHIT(hashit);
- HASHIT(hashit_count);
- unsigned long flags;
- size_t buf_size, pos;
- char *buff;
-
- if (!bat_priv->primary_if) {
- return seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
- }
-
- seq_printf(seq, "Locally retrieved addresses (from %s) "
- "announced via HNA:\n",
- net_dev->name);
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
- buf_size += 21;
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- return -ENOMEM;
- }
- buff[0] = '\0';
- pos = 0;
-
- while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
- hna_local_entry = hashit.bucket->data;
-
- pos += snprintf(buff + pos, 22, " * %pM\n",
- hna_local_entry->addr);
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
- return 0;
-}
-
-static void _hna_local_del(void *data, void *arg)
-{
- struct bat_priv *bat_priv = (struct bat_priv *)arg;
-
- kfree(data);
- bat_priv->num_local_hna--;
- atomic_set(&bat_priv->hna_local_changed, 1);
-}
-
-static void hna_local_del(struct bat_priv *bat_priv,
- struct hna_local_entry *hna_local_entry,
- char *message)
-{
- bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
- hna_local_entry->addr, message);
-
- hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
- _hna_local_del(hna_local_entry, bat_priv);
-}
-
-void hna_local_remove(struct bat_priv *bat_priv,
- uint8_t *addr, char *message)
-{
- struct hna_local_entry *hna_local_entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- hna_local_entry = (struct hna_local_entry *)
- hash_find(bat_priv->hna_local_hash, addr);
- if (hna_local_entry)
- hna_local_del(bat_priv, hna_local_entry, message);
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-}
-
-static void hna_local_purge(struct work_struct *work)
-{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, hna_work);
- struct hna_local_entry *hna_local_entry;
- HASHIT(hashit);
- unsigned long flags;
- unsigned long timeout;
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
- hna_local_entry = hashit.bucket->data;
-
- timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
-
- if ((!hna_local_entry->never_purge) &&
- time_after(jiffies, timeout))
- hna_local_del(bat_priv, hna_local_entry,
- "address timed out");
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- hna_local_start_timer(bat_priv);
-}
-
-void hna_local_free(struct bat_priv *bat_priv)
-{
- if (!bat_priv->hna_local_hash)
- return;
-
- cancel_delayed_work_sync(&bat_priv->hna_work);
- hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
- bat_priv->hna_local_hash = NULL;
-}
-
-int hna_global_init(struct bat_priv *bat_priv)
-{
- if (bat_priv->hna_global_hash)
- return 1;
-
- bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
-
- if (!bat_priv->hna_global_hash)
- return 0;
-
- return 1;
-}
-
-void hna_global_add_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- unsigned char *hna_buff, int hna_buff_len)
-{
- struct hna_global_entry *hna_global_entry;
- struct hna_local_entry *hna_local_entry;
- struct hashtable_t *swaphash;
- int hna_buff_count = 0;
- unsigned long flags;
- unsigned char *hna_ptr;
-
- while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
- hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
- hna_global_entry = (struct hna_global_entry *)
- hash_find(bat_priv->hna_global_hash, hna_ptr);
-
- if (!hna_global_entry) {
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
- flags);
-
- hna_global_entry =
- kmalloc(sizeof(struct hna_global_entry),
- GFP_ATOMIC);
-
- if (!hna_global_entry)
- break;
-
- memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
-
- bat_dbg(DBG_ROUTES, bat_priv,
- "Creating new global hna entry: "
- "%pM (via %pM)\n",
- hna_global_entry->addr, orig_node->orig);
-
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- hash_add(bat_priv->hna_global_hash, hna_global_entry);
-
- }
-
- hna_global_entry->orig_node = orig_node;
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
- /* remove address from local hash if present */
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
- hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
- hna_local_entry = (struct hna_local_entry *)
- hash_find(bat_priv->hna_local_hash, hna_ptr);
-
- if (hna_local_entry)
- hna_local_del(bat_priv, hna_local_entry,
- "global hna received");
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
- hna_buff_count++;
- }
-
- /* initialize, and overwrite if malloc succeeds */
- orig_node->hna_buff = NULL;
- orig_node->hna_buff_len = 0;
-
- if (hna_buff_len > 0) {
- orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
- if (orig_node->hna_buff) {
- memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
- orig_node->hna_buff_len = hna_buff_len;
- }
- }
-
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
- if (bat_priv->hna_global_hash->elements * 4 >
- bat_priv->hna_global_hash->size) {
- swaphash = hash_resize(bat_priv->hna_global_hash,
- bat_priv->hna_global_hash->size * 2);
-
- if (!swaphash)
- pr_err("Couldn't resize global hna hash table\n");
- else
- bat_priv->hna_global_hash = swaphash;
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-}
-
-int hna_global_seq_print_text(struct seq_file *seq, void *offset)
-{
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hna_global_entry *hna_global_entry;
- HASHIT(hashit);
- HASHIT(hashit_count);
- unsigned long flags;
- size_t buf_size, pos;
- char *buff;
-
- if (!bat_priv->primary_if) {
- return seq_printf(seq, "BATMAN mesh %s disabled - "
- "please specify interfaces to enable it\n",
- net_dev->name);
- }
-
- seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
- net_dev->name);
-
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
- while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
- buf_size += 43;
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
- return -ENOMEM;
- }
- buff[0] = '\0';
- pos = 0;
-
- while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
- hna_global_entry = hashit.bucket->data;
-
- pos += snprintf(buff + pos, 44,
- " * %pM via %pM\n", hna_global_entry->addr,
- hna_global_entry->orig_node->orig);
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
- return 0;
-}
-
-static void _hna_global_del_orig(struct bat_priv *bat_priv,
- struct hna_global_entry *hna_global_entry,
- char *message)
-{
- bat_dbg(DBG_ROUTES, bat_priv,
- "Deleting global hna entry %pM (via %pM): %s\n",
- hna_global_entry->addr, hna_global_entry->orig_node->orig,
- message);
-
- hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
- kfree(hna_global_entry);
-}
-
-void hna_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, char *message)
-{
- struct hna_global_entry *hna_global_entry;
- int hna_buff_count = 0;
- unsigned long flags;
- unsigned char *hna_ptr;
-
- if (orig_node->hna_buff_len == 0)
- return;
-
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
- while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
- hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
- hna_global_entry = (struct hna_global_entry *)
- hash_find(bat_priv->hna_global_hash, hna_ptr);
-
- if ((hna_global_entry) &&
- (hna_global_entry->orig_node == orig_node))
- _hna_global_del_orig(bat_priv, hna_global_entry,
- message);
-
- hna_buff_count++;
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
- orig_node->hna_buff_len = 0;
- kfree(orig_node->hna_buff);
- orig_node->hna_buff = NULL;
-}
-
-static void hna_global_del(void *data, void *arg)
-{
- kfree(data);
-}
-
-void hna_global_free(struct bat_priv *bat_priv)
-{
- if (!bat_priv->hna_global_hash)
- return;
-
- hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
- bat_priv->hna_global_hash = NULL;
-}
-
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
-{
- struct hna_global_entry *hna_global_entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- hna_global_entry = (struct hna_global_entry *)
- hash_find(bat_priv->hna_global_hash, addr);
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
- if (!hna_global_entry)
- return NULL;
-
- return hna_global_entry->orig_node;
-}
diff --git a/drivers/staging/batman-adv/translation-table.h b/drivers/staging/batman-adv/translation-table.h
deleted file mode 100644
index 10c4c5c319b6..000000000000
--- a/drivers/staging/batman-adv/translation-table.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-
-#include "types.h"
-
-int hna_local_init(struct bat_priv *bat_priv);
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
-void hna_local_remove(struct bat_priv *bat_priv,
- uint8_t *addr, char *message);
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len);
-int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(struct bat_priv *bat_priv);
-int hna_global_init(struct bat_priv *bat_priv);
-void hna_global_add_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- unsigned char *hna_buff, int hna_buff_len);
-int hna_global_seq_print_text(struct seq_file *seq, void *offset);
-void hna_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, char *message);
-void hna_global_free(struct bat_priv *bat_priv);
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
-
-#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
deleted file mode 100644
index f3f7366231e7..000000000000
--- a/drivers/staging/batman-adv/types.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-
-#ifndef _NET_BATMAN_ADV_TYPES_H_
-#define _NET_BATMAN_ADV_TYPES_H_
-
-#include "packet.h"
-#include "bitarray.h"
-
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
- ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
- sizeof(struct unicast_packet) : \
- sizeof(struct bcast_packet))))
-
-
-struct batman_if {
- struct list_head list;
- int16_t if_num;
- char if_status;
- struct net_device *net_dev;
- atomic_t seqno;
- atomic_t frag_seqno;
- unsigned char *packet_buff;
- int packet_len;
- struct kobject *hardif_obj;
- atomic_t refcnt;
- struct packet_type batman_adv_ptype;
- struct net_device *soft_iface;
-};
-
-/**
- * orig_node - structure for orig_list maintaining nodes of mesh
- * @primary_addr: host's primary interface address
- * @last_valid: when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @flags: for now only VIS_SERVER flag
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_bcast_seqno: last broadcast sequence number received by this host
- *
- * @candidates: how many candidates are available
- * @selected: next bonding candidate
- */
-struct orig_node {
- uint8_t orig[ETH_ALEN];
- uint8_t primary_addr[ETH_ALEN];
- struct neigh_node *router;
- TYPE_OF_WORD *bcast_own;
- uint8_t *bcast_own_sum;
- uint8_t tq_own;
- int tq_asym_penalty;
- unsigned long last_valid;
- unsigned long bcast_seqno_reset;
- unsigned long batman_seqno_reset;
- uint8_t flags;
- unsigned char *hna_buff;
- int16_t hna_buff_len;
- uint32_t last_real_seqno;
- uint8_t last_ttl;
- TYPE_OF_WORD bcast_bits[NUM_WORDS];
- uint32_t last_bcast_seqno;
- struct list_head neigh_list;
- struct list_head frag_list;
- unsigned long last_frag_packet;
- struct {
- uint8_t candidates;
- struct neigh_node *selected;
- } bond;
-};
-
-/**
- * neigh_node
- * @last_valid: when last packet via this neighbor was received
- */
-struct neigh_node {
- struct list_head list;
- uint8_t addr[ETH_ALEN];
- uint8_t real_packet_count;
- uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
- uint8_t tq_index;
- uint8_t tq_avg;
- uint8_t last_ttl;
- struct neigh_node *next_bond_candidate;
- unsigned long last_valid;
- TYPE_OF_WORD real_bits[NUM_WORDS];
- struct orig_node *orig_node;
- struct batman_if *if_incoming;
-};
-
-
-struct bat_priv {
- atomic_t mesh_state;
- struct net_device_stats stats;
- atomic_t aggregation_enabled;
- atomic_t bonding_enabled;
- atomic_t frag_enabled;
- atomic_t vis_mode;
- atomic_t orig_interval;
- atomic_t log_level;
- atomic_t bcast_seqno;
- atomic_t bcast_queue_left;
- atomic_t batman_queue_left;
- char num_ifaces;
- struct debug_log *debug_log;
- struct batman_if *primary_if;
- struct kobject *mesh_obj;
- struct dentry *debug_dir;
- struct hlist_head forw_bat_list;
- struct hlist_head forw_bcast_list;
- struct list_head vis_send_list;
- struct hashtable_t *orig_hash;
- struct hashtable_t *hna_local_hash;
- struct hashtable_t *hna_global_hash;
- struct hashtable_t *vis_hash;
- spinlock_t orig_hash_lock; /* protects orig_hash */
- spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
- spinlock_t forw_bcast_list_lock; /* protects */
- spinlock_t hna_lhash_lock; /* protects hna_local_hash */
- spinlock_t hna_ghash_lock; /* protects hna_global_hash */
- spinlock_t vis_hash_lock; /* protects vis_hash */
- spinlock_t vis_list_lock; /* protects vis_info::recv_list */
- int16_t num_local_hna;
- atomic_t hna_local_changed;
- struct delayed_work hna_work;
- struct delayed_work orig_work;
- struct delayed_work vis_work;
- struct vis_info *my_vis_info;
-};
-
-struct socket_client {
- struct list_head queue_list;
- unsigned int queue_len;
- unsigned char index;
- spinlock_t lock; /* protects queue_list, queue_len, index */
- wait_queue_head_t queue_wait;
- struct bat_priv *bat_priv;
-};
-
-struct socket_packet {
- struct list_head list;
- size_t icmp_len;
- struct icmp_packet_rr icmp_packet;
-};
-
-struct hna_local_entry {
- uint8_t addr[ETH_ALEN];
- unsigned long last_seen;
- char never_purge;
-};
-
-struct hna_global_entry {
- uint8_t addr[ETH_ALEN];
- struct orig_node *orig_node;
-};
-
-/**
- * forw_packet - structure for forw_list maintaining packets to be
- * sent/forwarded
- */
-struct forw_packet {
- struct hlist_node list;
- unsigned long send_time;
- uint8_t own;
- struct sk_buff *skb;
- uint16_t packet_len;
- uint32_t direct_link_flags;
- uint8_t num_packets;
- struct delayed_work delayed_work;
- struct batman_if *if_incoming;
-};
-
-/* While scanning for vis-entries of a particular vis-originator,
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later.
- */
-struct if_list_entry {
- uint8_t addr[ETH_ALEN];
- bool primary;
- struct hlist_node list;
-};
-
-struct debug_log {
- char log_buff[LOG_BUF_LEN];
- unsigned long log_start;
- unsigned long log_end;
- spinlock_t lock; /* protects log_buff, log_start and log_end */
- wait_queue_head_t queue_wait;
-};
-
-struct frag_packet_list_entry {
- struct list_head list;
- uint16_t seqno;
- struct sk_buff *skb;
-};
-
-struct vis_info {
- unsigned long first_seen;
- struct list_head recv_list;
- /* list of server-neighbors we received a vis-packet
- * from. we should not reply to them. */
- struct list_head send_list;
- struct kref refcount;
- struct bat_priv *bat_priv;
- /* this packet might be part of the vis send queue. */
- struct sk_buff *skb_packet;
-	/* vis_info may follow here */
-} __attribute__((packed));
-
-struct vis_info_entry {
- uint8_t src[ETH_ALEN];
- uint8_t dest[ETH_ALEN];
- uint8_t quality; /* quality = 0 means HNA */
-} __attribute__((packed));
-
-struct recvlist_node {
- struct list_head list;
- uint8_t mac[ETH_ALEN];
-};
-
-#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
deleted file mode 100644
index 0459413ff67f..000000000000
--- a/drivers/staging/batman-adv/unicast.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-struct sk_buff *merge_frag_packet(struct list_head *head,
- struct frag_packet_list_entry *tfp,
- struct sk_buff *skb)
-{
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
- struct sk_buff *tmp_skb;
-
- /* set skb to the first part and tmp_skb to the second part */
- if (up->flags & UNI_FRAG_HEAD) {
- tmp_skb = tfp->skb;
- } else {
- tmp_skb = skb;
- skb = tfp->skb;
- }
-
- skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
- if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
- /* free buffered skb, skb will be freed later */
- kfree_skb(tfp->skb);
- return NULL;
- }
-
- /* move free entry to end */
- tfp->skb = NULL;
- tfp->seqno = 0;
- list_move_tail(&tfp->list, head);
-
- memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
- kfree_skb(tmp_skb);
- return skb;
-}
-
-void create_frag_entry(struct list_head *head, struct sk_buff *skb)
-{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
-
-	/* free entries and the oldest packets are kept at the end */
- tfp = list_entry((head)->prev, typeof(*tfp), list);
- kfree_skb(tfp->skb);
-
- tfp->seqno = ntohs(up->seqno);
- tfp->skb = skb;
- list_move(&tfp->list, head);
- return;
-}
-
-int create_frag_buffer(struct list_head *head)
-{
- int i;
- struct frag_packet_list_entry *tfp;
-
- for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
- tfp = kmalloc(sizeof(struct frag_packet_list_entry),
- GFP_ATOMIC);
- if (!tfp) {
- frag_list_free(head);
- return -ENOMEM;
- }
- tfp->skb = NULL;
- tfp->seqno = 0;
- INIT_LIST_HEAD(&tfp->list);
- list_add(&tfp->list, head);
- }
-
- return 0;
-}
-
-struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
- struct unicast_frag_packet *up)
-{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *tmp_up = NULL;
- uint16_t search_seqno;
-
- if (up->flags & UNI_FRAG_HEAD)
- search_seqno = ntohs(up->seqno)+1;
- else
- search_seqno = ntohs(up->seqno)-1;
-
- list_for_each_entry(tfp, head, list) {
-
- if (!tfp->skb)
- continue;
-
- if (tfp->seqno == ntohs(up->seqno))
- goto mov_tail;
-
- tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
-
- if (tfp->seqno == search_seqno) {
-
- if ((tmp_up->flags & UNI_FRAG_HEAD) !=
- (up->flags & UNI_FRAG_HEAD))
- return tfp;
- else
- goto mov_tail;
- }
- }
- return NULL;
-
-mov_tail:
- list_move_tail(&tfp->list, head);
- return NULL;
-}
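A minimal sketch of the pairing rule search_frag_packet() implements (hypothetical helper, not part of the original code; UNI_FRAG_HEAD and the 16-bit sequence numbers come from packet.h, outside this hunk): a head fragment carries sequence number N plus the UNI_FRAG_HEAD flag, its tail carries N + 1 without the flag, and a buffered fragment only matches when both conditions line up.

	static inline int frag_is_counterpart(uint16_t in_seqno, uint8_t in_flags,
					      uint16_t buf_seqno, uint8_t buf_flags)
	{
		/* the counterpart of a head (seqno N) is the tail (N + 1),
		 * and vice versa */
		uint16_t expected = (in_flags & UNI_FRAG_HEAD) ?
					in_seqno + 1 : in_seqno - 1;

		/* exactly one of the two fragments may carry the head flag */
		return (buf_seqno == expected) &&
		       ((in_flags ^ buf_flags) & UNI_FRAG_HEAD);
	}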
-
-void frag_list_free(struct list_head *head)
-{
- struct frag_packet_list_entry *pf, *tmp_pf;
-
- if (!list_empty(head)) {
-
- list_for_each_entry_safe(pf, tmp_pf, head, list) {
- kfree_skb(pf->skb);
- list_del(&pf->list);
- kfree(pf);
- }
- }
- return;
-}
-
-static int unicast_send_frag_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct batman_if *batman_if, uint8_t dstaddr[],
- struct orig_node *orig_node)
-{
- struct unicast_frag_packet *ucast_frag1, *ucast_frag2;
- int hdr_len = sizeof(struct unicast_frag_packet);
- struct sk_buff *frag_skb;
- int data_len = skb->len;
-
- if (!bat_priv->primary_if)
- goto dropped;
-
- frag_skb = dev_alloc_skb(data_len - (data_len / 2) + hdr_len);
- skb_split(skb, frag_skb, data_len / 2);
-
- if (my_skb_head_push(frag_skb, hdr_len) < 0 ||
- my_skb_head_push(skb, hdr_len) < 0)
- goto drop_frag;
-
- ucast_frag1 = (struct unicast_frag_packet *)skb->data;
- ucast_frag2 = (struct unicast_frag_packet *)frag_skb->data;
-
- ucast_frag1->version = COMPAT_VERSION;
- ucast_frag1->packet_type = BAT_UNICAST_FRAG;
- ucast_frag1->ttl = TTL;
- memcpy(ucast_frag1->orig,
- bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(ucast_frag1->dest, orig_node->orig, ETH_ALEN);
-
- memcpy(ucast_frag2, ucast_frag1, sizeof(struct unicast_frag_packet));
-
- ucast_frag1->flags |= UNI_FRAG_HEAD;
- ucast_frag2->flags &= ~UNI_FRAG_HEAD;
-
- ucast_frag1->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
-
- ucast_frag2->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
-
- send_skb_packet(skb, batman_if, dstaddr);
- send_skb_packet(frag_skb, batman_if, dstaddr);
- return 0;
-
-drop_frag:
- kfree_skb(frag_skb);
-dropped:
- kfree_skb(skb);
- return 1;
-}
-
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
-{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
- struct batman_if *batman_if;
- struct neigh_node *router;
- int data_len = skb->len;
- uint8_t dstaddr[6];
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- /* get routing information */
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- ethhdr->h_dest));
-
- /* check for hna host */
- if (!orig_node)
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-
- router = find_router(bat_priv, orig_node, NULL);
-
- if (!router)
- goto unlock;
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
-
- batman_if = router->if_incoming;
- memcpy(dstaddr, router->addr, ETH_ALEN);
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- if (batman_if->if_status != IF_ACTIVE)
- goto dropped;
-
- if (atomic_read(&bat_priv->frag_enabled) &&
- data_len + sizeof(struct unicast_packet) > batman_if->net_dev->mtu)
- return unicast_send_frag_skb(skb, bat_priv, batman_if,
- dstaddr, orig_node);
-
- if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
- goto dropped;
-
- unicast_packet = (struct unicast_packet *)skb->data;
-
- unicast_packet->version = COMPAT_VERSION;
- /* batman packet type: unicast */
- unicast_packet->packet_type = BAT_UNICAST;
- /* set unicast ttl */
- unicast_packet->ttl = TTL;
- /* copy the destination for faster routing */
- memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-
- send_skb_packet(skb, batman_if, dstaddr);
- return 0;
-
-unlock:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-dropped:
- kfree_skb(skb);
- return 1;
-}
diff --git a/drivers/staging/batman-adv/unicast.h b/drivers/staging/batman-adv/unicast.h
deleted file mode 100644
index 797369771900..000000000000
--- a/drivers/staging/batman-adv/unicast.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#define FRAG_TIMEOUT 10000 /* purge frag list entries after this time in ms */
-#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-
-struct sk_buff *merge_frag_packet(struct list_head *head,
- struct frag_packet_list_entry *tfp,
- struct sk_buff *skb);
-
-void create_frag_entry(struct list_head *head, struct sk_buff *skb);
-int create_frag_buffer(struct list_head *head);
-struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
- struct unicast_frag_packet *up);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
deleted file mode 100644
index 3d2c1bccf2e6..000000000000
--- a/drivers/staging/batman-adv/vis.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-
-#define MAX_VIS_PACKET_SIZE 1000
-
-/* Returns the smallest signed integer in two's complement with the same size as x */
-#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
-
-/* Checks if a sequence number x is a predecessor/successor of y.
- * These macros handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the sequence number has grown by
- * more than 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
- * - when adding nothing - it is neither a predecessor nor a successor
- * - before adding more than 127 to the starting value - it is a predecessor,
- * - when adding 128 - it is neither a predecessor nor a successor,
- * - after adding more than 127 to the starting value - it is a successor */
-#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
- _dummy > smallest_signed_int(_dummy); })
-#define seq_after(x, y) seq_before(y, x)
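A short usage sketch of these two macros, assuming uint8_t sequence numbers as in the comment above (illustration only, not part of the original file; accept_newer() is a hypothetical helper):

	uint8_t old_seqno = 250, new_seqno = 5;

	/* 5 is reached from 250 after only 11 increments with wraparound,
	 * which is fewer than 128, so 5 counts as coming after 250 */
	if (seq_after(new_seqno, old_seqno))
		accept_newer();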
-
-static void start_vis_timer(struct bat_priv *bat_priv);
-
-/* free the info */
-static void free_info(struct kref *ref)
-{
- struct vis_info *info = container_of(ref, struct vis_info, refcount);
- struct bat_priv *bat_priv = info->bat_priv;
- struct recvlist_node *entry, *tmp;
- unsigned long flags;
-
- list_del_init(&info->send_list);
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
- list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
- kfree_skb(info->skb_packet);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(void *data1, void *data2)
-{
- struct vis_info *d1, *d2;
- struct vis_packet *p1, *p2;
- d1 = data1;
- d2 = data2;
- p1 = (struct vis_packet *)d1->skb_packet->data;
- p2 = (struct vis_packet *)d2->skb_packet->data;
- return compare_orig(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size;
- * this is the Jenkins one-at-a-time hash over the originator MAC
- * (algorithm from http://en.wikipedia.org/wiki/Hash_table) */
-static int vis_info_choose(void *data, int size)
-{
- struct vis_info *vis_info = data;
- struct vis_packet *packet;
- unsigned char *key;
- uint32_t hash = 0;
- size_t i;
-
- packet = (struct vis_packet *)vis_info->skb_packet->data;
- key = packet->vis_orig;
- for (i = 0; i < ETH_ALEN; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
-
- return hash % size;
-}
-
-/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list */
-static void vis_data_insert_interface(const uint8_t *interface,
- struct hlist_head *if_list,
- bool primary)
-{
- struct if_list_entry *entry;
- struct hlist_node *pos;
-
- hlist_for_each_entry(entry, pos, if_list, list) {
- if (compare_orig(entry->addr, (void *)interface))
- return;
- }
-
-	/* it's a new address, add it to the list */
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return;
- memcpy(entry->addr, interface, ETH_ALEN);
- entry->primary = primary;
- hlist_add_head(&entry->list, if_list);
-}
-
-static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
-{
- struct if_list_entry *entry;
- struct hlist_node *pos;
- size_t len = 0;
-
- hlist_for_each_entry(entry, pos, if_list, list) {
- if (entry->primary)
- len += sprintf(buff + len, "PRIMARY, ");
- else {
- len += sprintf(buff + len, "SEC %pM, ", entry->addr);
- }
- }
-
- return len;
-}
-
-static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
-{
- struct if_list_entry *entry;
- struct hlist_node *pos;
- size_t count = 0;
-
- hlist_for_each_entry(entry, pos, if_list, list) {
- if (entry->primary)
- count += 9;
- else
- count += 23;
- }
-
- return count;
-}
-
-/* read an entry */
-static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
- uint8_t *src, bool primary)
-{
- /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
- if (primary && entry->quality == 0)
- return sprintf(buff, "HNA %pM, ", entry->dest);
- else if (compare_orig(entry->src, src))
- return sprintf(buff, "TQ %pM %d, ", entry->dest,
- entry->quality);
-
- return 0;
-}
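The buffer estimates used in this file follow directly from these format strings: "PRIMARY, " is 9 characters, "SEC xx:xx:xx:xx:xx:xx, " is 23, and a single entry prints as at most 26 characters (the max(4+17+2, 3+17+1+3+2) noted above), while the "xx:xx:xx:xx:xx:xx," prefix per interface line accounts for the 18 bytes added to buf_size in vis_seq_print_text() below.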
-
-int vis_seq_print_text(struct seq_file *seq, void *offset)
-{
- HASHIT(hashit);
- HASHIT(hashit_count);
- struct vis_info *info;
- struct vis_packet *packet;
- struct vis_info_entry *entries;
- struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- HLIST_HEAD(vis_if_list);
- struct if_list_entry *entry;
- struct hlist_node *pos, *n;
- int i;
- unsigned long flags;
- int vis_server = atomic_read(&bat_priv->vis_mode);
- size_t buff_pos, buf_size;
- char *buff;
-
- if ((!bat_priv->primary_if) ||
- (vis_server == VIS_TYPE_CLIENT_UPDATE))
- return 0;
-
- buf_size = 1;
- /* Estimate length */
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
- info = hashit_count.bucket->data;
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(struct vis_packet));
-
- for (i = 0; i < packet->entries; i++) {
- if (entries[i].quality == 0)
- continue;
- vis_data_insert_interface(entries[i].src, &vis_if_list,
- compare_orig(entries[i].src, packet->vis_orig));
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buf_size += 18 + 26 * packet->entries;
-
- /* add primary/secondary records */
- if (compare_orig(entry->addr, packet->vis_orig))
- buf_size +=
- vis_data_count_prim_sec(&vis_if_list);
-
- buf_size += 1;
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- return -ENOMEM;
- }
- buff[0] = '\0';
- buff_pos = 0;
-
- while (hash_iterate(bat_priv->vis_hash, &hashit)) {
- info = hashit.bucket->data;
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(struct vis_packet));
-
- for (i = 0; i < packet->entries; i++) {
- if (entries[i].quality == 0)
- continue;
- vis_data_insert_interface(entries[i].src, &vis_if_list,
- compare_orig(entries[i].src, packet->vis_orig));
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buff_pos += sprintf(buff + buff_pos, "%pM,",
- entry->addr);
-
- for (i = 0; i < packet->entries; i++)
- buff_pos += vis_data_read_entry(buff + buff_pos,
- &entries[i],
- entry->addr,
- entry->primary);
-
- /* add primary/secondary records */
- if (compare_orig(entry->addr, packet->vis_orig))
- buff_pos +=
- vis_data_read_prim_sec(buff + buff_pos,
- &vis_if_list);
-
- buff_pos += sprintf(buff + buff_pos, "\n");
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
-
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
-
- return 0;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in. */
-static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
-{
- if (list_empty(&info->send_list)) {
- kref_get(&info->refcount);
- list_add_tail(&info->send_list, &bat_priv->vis_send_list);
- }
-}
-
-/* delete the info packet from the send list, if it was
- * linked in. */
-static void send_list_del(struct vis_info *info)
-{
- if (!list_empty(&info->send_list)) {
- list_del_init(&info->send_list);
- kref_put(&info->refcount, free_info);
- }
-}
-
-/* tries to add one entry to the receive list. */
-static void recv_list_add(struct bat_priv *bat_priv,
- struct list_head *recv_list, char *mac)
-{
- struct recvlist_node *entry;
- unsigned long flags;
-
- entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
- if (!entry)
- return;
-
- memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
- list_add_tail(&entry->list, recv_list);
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct bat_priv *bat_priv,
- struct list_head *recv_list, char *mac)
-{
- struct recvlist_node *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
- list_for_each_entry(entry, recv_list, list) {
- if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
- spin_unlock_irqrestore(&bat_priv->vis_list_lock,
- flags);
- return 1;
- }
- }
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
- return 0;
-}
-
-/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old
- * or broken). The vis hash must be locked by the caller. is_new is set when
- * the packet is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len, int *is_new,
- int make_broadcast)
-{
- struct vis_info *info, *old_info;
- struct vis_packet *search_packet, *old_packet;
- struct vis_info search_elem;
- struct vis_packet *packet;
-
- *is_new = 0;
- /* sanity check */
- if (!bat_priv->vis_hash)
- return NULL;
-
- /* see if the packet is already in vis_hash */
- search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
- if (!search_elem.skb_packet)
- return NULL;
- search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
- sizeof(struct vis_packet));
-
- memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = hash_find(bat_priv->vis_hash, &search_elem);
- kfree_skb(search_elem.skb_packet);
-
- if (old_info != NULL) {
- old_packet = (struct vis_packet *)old_info->skb_packet->data;
- if (!seq_after(ntohl(vis_packet->seqno),
- ntohl(old_packet->seqno))) {
- if (old_packet->seqno == vis_packet->seqno) {
- recv_list_add(bat_priv, &old_info->recv_list,
- vis_packet->sender_orig);
- return old_info;
- } else {
- /* newer packet is already in hash. */
- return NULL;
- }
- }
- /* remove old entry */
- hash_remove(bat_priv->vis_hash, old_info);
- send_list_del(old_info);
- kref_put(&old_info->refcount, free_info);
- }
-
- info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
- if (!info)
- return NULL;
-
- info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
- vis_info_len + sizeof(struct ethhdr));
- if (!info->skb_packet) {
- kfree(info);
- return NULL;
- }
- skb_reserve(info->skb_packet, sizeof(struct ethhdr));
- packet = (struct vis_packet *)skb_put(info->skb_packet,
- sizeof(struct vis_packet) +
- vis_info_len);
-
- kref_init(&info->refcount);
- INIT_LIST_HEAD(&info->send_list);
- INIT_LIST_HEAD(&info->recv_list);
- info->first_seen = jiffies;
- info->bat_priv = bat_priv;
- memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
-
- /* initialize and add new packet. */
- *is_new = 1;
-
- /* Make it a broadcast packet, if required */
- if (make_broadcast)
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
-
-	/* repair the entry count if it claims more data than the packet holds */
- if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
- packet->entries = vis_info_len / sizeof(struct vis_info_entry);
-
- recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
- /* try to add it */
- if (hash_add(bat_priv->vis_hash, info) < 0) {
- /* did not work (for some reason) */
- kref_put(&old_info->refcount, free_info);
- info = NULL;
- }
-
- return info;
-}
-
-/* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
-{
- struct vis_info *info;
- int is_new, make_broadcast;
- unsigned long flags;
- int vis_server = atomic_read(&bat_priv->vis_mode);
-
- make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, make_broadcast);
- if (!info)
- goto end;
-
-	/* only if we are a server ourselves and the packet is newer than
-	 * the one in the hash. */
- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- send_list_add(bat_priv, info);
-end:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
-{
- struct vis_info *info;
- struct vis_packet *packet;
- int is_new;
- unsigned long flags;
- int vis_server = atomic_read(&bat_priv->vis_mode);
- int are_target = 0;
-
- /* clients shall not broadcast. */
- if (is_bcast(vis_packet->target_orig))
- return;
-
- /* Are we the target for this VIS packet? */
- if (vis_server == VIS_TYPE_SERVER_SYNC &&
- is_my_mac(vis_packet->target_orig))
- are_target = 1;
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, are_target);
-
- if (!info)
- goto end;
- /* note that outdated packets will be dropped at this point. */
-
- packet = (struct vis_packet *)info->skb_packet->data;
-
- /* send only if we're the target server or ... */
- if (are_target && is_new) {
- packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- send_list_add(bat_priv, info);
-
- /* ... we're not the recipient (and thus need to forward). */
- } else if (!is_my_mac(packet->target_orig)) {
- send_list_add(bat_priv, info);
- }
-
-end:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* Walk the originators and find the VIS server with the best tq. Set the
- * packet's target address to that server's address and return the best tq.
- *
- * Must be called with the originator hash locked */
-static int find_best_vis_server(struct bat_priv *bat_priv,
- struct vis_info *info)
-{
- HASHIT(hashit);
- struct orig_node *orig_node;
- struct vis_packet *packet;
- int best_tq = -1;
-
- packet = (struct vis_packet *)info->skb_packet->data;
-
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
- if ((orig_node) && (orig_node->router) &&
- (orig_node->flags & VIS_SERVER) &&
- (orig_node->router->tq_avg > best_tq)) {
- best_tq = orig_node->router->tq_avg;
- memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
- }
- }
- return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool vis_packet_full(struct vis_info *info)
-{
- struct vis_packet *packet;
- packet = (struct vis_packet *)info->skb_packet->data;
-
- if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
- < packet->entries + 1)
- return true;
- return false;
-}
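Worked example of this bound: with MAX_VIS_PACKET_SIZE set to 1000 above and the packed 13-byte vis_info_entry from types.h (two 6-byte MAC addresses plus one quality byte), 1000 / 13 = 76, so a vis packet is reported full once it already holds 76 entries and appending a 77th would be required.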
-
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(struct bat_priv *bat_priv)
-{
- HASHIT(hashit_local);
- HASHIT(hashit_global);
- struct orig_node *orig_node;
- struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
- struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
- struct vis_info_entry *entry;
- struct hna_local_entry *hna_local_entry;
- int best_tq = -1;
- unsigned long flags;
-
- info->first_seen = jiffies;
- packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
- packet->ttl = TTL;
- packet->seqno = htonl(ntohl(packet->seqno) + 1);
- packet->entries = 0;
- skb_trim(info->skb_packet, sizeof(struct vis_packet));
-
- if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
- best_tq = find_best_vis_server(bat_priv, info);
-
- if (best_tq < 0) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
- flags);
- return -1;
- }
- }
-
- while (hash_iterate(bat_priv->orig_hash, &hashit_global)) {
- orig_node = hashit_global.bucket->data;
-
- if (!orig_node->router)
- continue;
-
- if (!compare_orig(orig_node->router->addr, orig_node->orig))
- continue;
-
- if (orig_node->router->if_incoming->if_status != IF_ACTIVE)
- continue;
-
- if (orig_node->router->tq_avg < 1)
- continue;
-
- /* fill one entry into buffer. */
- entry = (struct vis_info_entry *)
- skb_put(info->skb_packet, sizeof(*entry));
- memcpy(entry->src,
- orig_node->router->if_incoming->net_dev->dev_addr,
- ETH_ALEN);
- memcpy(entry->dest, orig_node->orig, ETH_ALEN);
- entry->quality = orig_node->router->tq_avg;
- packet->entries++;
-
- if (vis_packet_full(info)) {
- spin_unlock_irqrestore(
- &bat_priv->orig_hash_lock, flags);
- return 0;
- }
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
- hna_local_entry = hashit_local.bucket->data;
- entry = (struct vis_info_entry *)skb_put(info->skb_packet,
- sizeof(*entry));
- memset(entry->src, 0, ETH_ALEN);
- memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
- entry->quality = 0; /* 0 means HNA */
- packet->entries++;
-
- if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
- flags);
- return 0;
- }
- }
-
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- return 0;
-}
-
-/* free old vis packets. Must be called with the vis_hash_lock
- * held */
-static void purge_vis_packets(struct bat_priv *bat_priv)
-{
- HASHIT(hashit);
- struct vis_info *info;
-
- while (hash_iterate(bat_priv->vis_hash, &hashit)) {
- info = hashit.bucket->data;
-
- /* never purge own data. */
- if (info == bat_priv->my_vis_info)
- continue;
-
- if (time_after(jiffies,
- info->first_seen + VIS_TIMEOUT * HZ)) {
- hash_remove_bucket(bat_priv->vis_hash, &hashit);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
- }
- }
-}
-
-static void broadcast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
-{
- HASHIT(hashit);
- struct orig_node *orig_node;
- struct vis_packet *packet;
- struct sk_buff *skb;
- unsigned long flags;
- struct batman_if *batman_if;
- uint8_t dstaddr[ETH_ALEN];
-
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- packet = (struct vis_packet *)info->skb_packet->data;
-
- /* send to all routers in range. */
- while (hash_iterate(bat_priv->orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
-
- /* if it's a vis server and reachable, send it. */
- if ((!orig_node) || (!orig_node->router))
- continue;
- if (!(orig_node->flags & VIS_SERVER))
- continue;
- /* don't send it if we already received the packet from
- * this node. */
- if (recv_list_is_in(bat_priv, &info->recv_list,
- orig_node->orig))
- continue;
-
- memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- skb = skb_clone(info->skb_packet, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, batman_if, dstaddr);
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
- }
-
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-static void unicast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
-{
- struct orig_node *orig_node;
- struct sk_buff *skb;
- struct vis_packet *packet;
- unsigned long flags;
- struct batman_if *batman_if;
- uint8_t dstaddr[ETH_ALEN];
-
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- packet = (struct vis_packet *)info->skb_packet->data;
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- packet->target_orig));
-
- if ((!orig_node) || (!orig_node->router))
- goto out;
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
- batman_if = orig_node->router->if_incoming;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
- skb = skb_clone(info->skb_packet, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, batman_if, dstaddr);
-
- return;
-
-out:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
-{
- struct vis_packet *packet;
-
- packet = (struct vis_packet *)info->skb_packet->data;
- if (packet->ttl < 2) {
- pr_debug("Error - can't send vis packet: ttl exceeded\n");
- return;
- }
-
- memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
- ETH_ALEN);
- packet->ttl--;
-
- if (is_bcast(packet->target_orig))
- broadcast_vis_packet(bat_priv, info);
- else
- unicast_vis_packet(bat_priv, info);
- packet->ttl++; /* restore TTL */
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void send_vis_packets(struct work_struct *work)
-{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, vis_work);
- struct vis_info *info, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- purge_vis_packets(bat_priv);
-
- if (generate_vis_packet(bat_priv) == 0) {
- /* schedule if generation was successful */
- send_list_add(bat_priv, bat_priv->my_vis_info);
- }
-
- list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
- send_list) {
-
- kref_get(&info->refcount);
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-
- if (bat_priv->primary_if)
- send_vis_packet(bat_priv, info);
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
- }
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- start_vis_timer(bat_priv);
-}
-
-/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(struct bat_priv *bat_priv)
-{
- struct vis_packet *packet;
- unsigned long flags;
-
- if (bat_priv->vis_hash)
- return 1;
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-
- bat_priv->vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
- if (!bat_priv->vis_hash) {
- pr_err("Can't initialize vis_hash\n");
- goto err;
- }
-
- bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!bat_priv->my_vis_info) {
- pr_err("Can't initialize vis packet\n");
- goto err;
- }
-
- bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
- sizeof(struct vis_packet) +
- MAX_VIS_PACKET_SIZE +
- sizeof(struct ethhdr));
- if (!bat_priv->my_vis_info->skb_packet)
- goto free_info;
-
- skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
- packet = (struct vis_packet *)skb_put(
- bat_priv->my_vis_info->skb_packet,
- sizeof(struct vis_packet));
-
- /* prefill the vis info */
- bat_priv->my_vis_info->first_seen = jiffies -
- msecs_to_jiffies(VIS_INTERVAL);
- INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
- INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
- kref_init(&bat_priv->my_vis_info->refcount);
- bat_priv->my_vis_info->bat_priv = bat_priv;
- packet->version = COMPAT_VERSION;
- packet->packet_type = BAT_VIS;
- packet->ttl = TTL;
- packet->seqno = 0;
- packet->entries = 0;
-
- INIT_LIST_HEAD(&bat_priv->vis_send_list);
-
- if (hash_add(bat_priv->vis_hash, bat_priv->my_vis_info) < 0) {
- pr_err("Can't add own vis packet into hash\n");
- /* not in hash, need to remove it manually. */
- kref_put(&bat_priv->my_vis_info->refcount, free_info);
- goto err;
- }
-
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- start_vis_timer(bat_priv);
- return 1;
-
-free_info:
- kfree(bat_priv->my_vis_info);
- bat_priv->my_vis_info = NULL;
-err:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- vis_quit(bat_priv);
- return 0;
-}
-
-/* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data, void *arg)
-{
- struct vis_info *info = data;
-
- send_list_del(info);
- kref_put(&info->refcount, free_info);
-}
-
-/* shutdown vis-server */
-void vis_quit(struct bat_priv *bat_priv)
-{
- unsigned long flags;
- if (!bat_priv->vis_hash)
- return;
-
- cancel_delayed_work_sync(&bat_priv->vis_work);
-
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- /* properly remove, kill timers ... */
- hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
- bat_priv->vis_hash = NULL;
- bat_priv->my_vis_info = NULL;
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* schedule packets for (re)transmission */
-static void start_vis_timer(struct bat_priv *bat_priv)
-{
- INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
- queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
- msecs_to_jiffies(VIS_INTERVAL));
-}
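
For reference: the vis.c being removed drives its periodic transmissions with a delayed_work that re-arms itself from its own handler — send_vis_packets() finishes by calling start_vis_timer(), which queues the work again after VIS_INTERVAL. Below is a minimal, generic sketch of that self-rescheduling pattern; it is not code from the tree, all names (my_priv, my_work_fn, MY_INTERVAL_MS) are placeholders, and schedule_delayed_work() on the system workqueue stands in for the driver's private bat_event_workqueue.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_INTERVAL_MS 1000		/* placeholder for VIS_INTERVAL */

struct my_priv {
	struct delayed_work work;
};

static void my_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_priv *priv = container_of(dwork, struct my_priv, work);

	/* ... periodic job: purge stale entries, (re)send packets ... */

	/* re-arm for the next interval, as start_vis_timer() does */
	schedule_delayed_work(&priv->work, msecs_to_jiffies(MY_INTERVAL_MS));
}

static void my_start(struct my_priv *priv)
{
	INIT_DELAYED_WORK(&priv->work, my_work_fn);
	schedule_delayed_work(&priv->work, msecs_to_jiffies(MY_INTERVAL_MS));
}

static void my_stop(struct my_priv *priv)
{
	cancel_delayed_work_sync(&priv->work);
}

The shutdown side mirrors vis_quit(): cancel_delayed_work_sync() before tearing the state down, so the handler cannot re-queue itself afterwards.
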
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
deleted file mode 100644
index 2c3b33089a9b..000000000000
--- a/drivers/staging/batman-adv/vis.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
-
-int vis_seq_print_text(struct seq_file *seq, void *offset);
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-int vis_init(struct bat_priv *bat_priv);
-void vis_quit(struct bat_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 748460e898d8..32909e2938d5 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -7,53 +7,6 @@
#define MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES 256
#include "Debug.h"
-typedef struct _LIST_ENTRY{
- struct _LIST_ENTRY *next;
- struct _LIST_ENTRY *prev;
-} LIST_ENTRY, *PLIST_ENTRY;
-
-typedef struct _BCM_LIST_ENTRY {
-
- LIST_ENTRY Link;
-
-} BCM_LIST_ENTRY, *PBCM_LIST_ENTRY;
-
-typedef enum _RCB_STATUS
-{
- DRIVER_PROCESSED=1,
- APPLICATION_PROCESSED
-} RCB_STATUS, *PRCB_STATUS;
-
-#define fFILLED 1
-#define fEMPTY 0
-
-struct _BCM_CB
-{
- // The network packet that this RCB is receiving
- PVOID pv_packet;
- // Describes the length of the packet .
- UINT ui_packet_length;
- // Pointer to the first buffer in the packet (only one buffer for Rx)
- PUCHAR buffer;
- atomic_t status;
- UINT filled;
-} __attribute__((packed));
-typedef struct _BCM_CB BCM_CB,*PBCM_CB;
-
-typedef BCM_CB BCM_RCB, *PBCM_RCB;
-typedef BCM_CB BCM_TCB, *PBCM_TCB;
-
-/* This is to be stored in the "pvOsDepData" of ADAPTER */
-typedef struct LINUX_DEP_DATA
-{
- struct net_device *virtualdev; /* Our Interface (veth0) */
- struct net_device *actualdev; /* True Interface (eth0) */
- struct net_device_stats netstats; /* Net statistics */
- struct fasync_struct *async_queue; /* For asynchronus notification */
-
-} LINUX_DEP_DATA, *PLINUX_DEP_DATA;
-
-
struct _LEADER
{
USHORT Vcid;
@@ -429,26 +382,28 @@ Driver adapter data structure
struct _MINI_ADAPTER
{
struct _MINI_ADAPTER *next;
- PVOID pvOsDepData;
+ struct net_device *dev;
+ u32 msg_enable;
+
CHAR *caDsxReqResp;
- atomic_t ApplicationRunning;
+ atomic_t ApplicationRunning;
volatile INT CtrlQueueLen;
- atomic_t AppCtrlQueueLen;
- BOOLEAN AppCtrlQueueOverFlow;
- atomic_t CurrentApplicationCount;
- atomic_t RegisteredApplicationCount;
- BOOLEAN TimerActive;
- ULONG StatisticsPointer;
+ atomic_t AppCtrlQueueLen;
+ BOOLEAN AppCtrlQueueOverFlow;
+ atomic_t CurrentApplicationCount;
+ atomic_t RegisteredApplicationCount;
+ BOOLEAN LinkUpStatus;
+ BOOLEAN TimerActive;
+ u32 StatisticsPointer;
struct sk_buff *RxControlHead;
struct sk_buff *RxControlTail;
-// spinlock_t RxControlQueuelock;
+
struct semaphore RxAppControlQueuelock;
struct semaphore fw_download_sema;
PPER_TARANG_DATA pTarangs;
spinlock_t control_queue_lock;
wait_queue_head_t process_read_wait_queue;
- ULONG bcm_jiffies; /* Store Jiffies value */
// the pointer to the first packet we have queued in send
// deserialized miniport support variables
@@ -458,24 +413,15 @@ struct _MINI_ADAPTER
// this to keep track of the Tx and Rx MailBox Registers.
atomic_t CurrNumFreeTxDesc;
// to keep track the no of byte recieved
- atomic_t RxRollOverCount;
USHORT PrevNumRecvDescs;
USHORT CurrNumRecvDescs;
- atomic_t GoodRxByteCount;
- atomic_t GoodRxPktCount;
- atomic_t BadRxByteCount;
- atomic_t RxPacketDroppedCount;
- atomic_t GoodTxByteCount;
- atomic_t TxTotalPacketCount;
- atomic_t TxDroppedPacketCount;
- ULONG LinkUpStatus;
- BOOLEAN TransferMode;
UINT u32TotalDSD;
PacketInfo PackInfo[NO_OF_QUEUES];
S_CLASSIFIER_RULE astClassifierTable[MAX_CLASSIFIERS];
+ BOOLEAN TransferMode;
/*************** qos ******************/
- UINT bETHCSEnabled;
+ BOOLEAN bETHCSEnabled;
ULONG BEBucketSize;
ULONG rtPSBucketSize;
@@ -483,7 +429,6 @@ struct _MINI_ADAPTER
BOOLEAN AutoLinkUp;
BOOLEAN AutoSyncup;
- struct net_device *dev;
int major;
int minor;
wait_queue_head_t tx_packet_wait_queue;
@@ -491,8 +436,6 @@ struct _MINI_ADAPTER
atomic_t process_waiting;
BOOLEAN fw_download_done;
- unsigned int ctrlpkt_present;
- BOOLEAN packets_given_to_all;
char *txctlpacket[MAX_CNTRL_PKTS];
atomic_t cntrlpktCnt ;
atomic_t index_app_read_cntrlpkt;
@@ -502,34 +445,30 @@ struct _MINI_ADAPTER
struct semaphore rdmwrmsync;
STTARGETDSXBUFFER astTargetDsxBuffer[MAX_TARGET_DSX_BUFFERS];
- ULONG ulFreeTargetBufferCnt;
+ ULONG ulFreeTargetBufferCnt;
ULONG ulCurrentTargetBuffer;
ULONG ulTotalTargetBuffersAvailable;
- unsigned int timeout;
- int irq;
+
unsigned long chip_id;
- unsigned int bFlashBoot;
- unsigned int if_up;
-// spinlock_t sleeper_lock;
- atomic_t rdm_wrm_access;
- atomic_t tx_rx_access;
+
wait_queue_head_t lowpower_mode_wait_queue;
- atomic_t bAbortedByHost;
- BOOLEAN bBinDownloaded;
- BOOLEAN bCfgDownloaded;
- USHORT usBestEffortQueueIndex;
- BOOLEAN bSyncUpRequestSent;
-// struct semaphore data_packet_queue_lock;
+
+ BOOLEAN bFlashBoot;
+ BOOLEAN bBinDownloaded;
+ BOOLEAN bCfgDownloaded;
+ BOOLEAN bSyncUpRequestSent;
+ USHORT usBestEffortQueueIndex;
+
wait_queue_head_t ioctl_fw_dnld_wait_queue;
BOOLEAN waiting_to_fw_download_done;
pid_t fw_download_process_pid;
PSTARGETPARAMS pstargetparams;
BOOLEAN device_removed;
BOOLEAN DeviceAccess;
- INT DDRSetting;
+ BOOLEAN bIsAutoCorrectEnabled;
BOOLEAN bDDRInitDone;
+ INT DDRSetting;
ULONG ulPowerSaveMode;
- BOOLEAN bIsAutoCorrectEnabled;
spinlock_t txtransmitlock;
B_UINT8 txtransmit_running;
/* Thread for control packet handling */
@@ -567,13 +506,13 @@ struct _MINI_ADAPTER
unsigned int usIdleModePattern;
//BOOLEAN bTriedToWakeUpFromShutdown;
BOOLEAN bLinkDownRequested;
- unsigned int check_for_hang;
+
int downloadDDR;
PHS_DEVICE_EXTENSION stBCMPhsContext;
S_HDR_SUPRESSION_CONTEXTINFO stPhsTxContextInfo;
uint8_t ucaPHSPktRestoreBuf[2048];
uint8_t bPHSEnabled;
- int AutoFirmDld;
+ BOOLEAN AutoFirmDld;
BOOLEAN bMipsConfig;
BOOLEAN bDPLLConfig;
UINT32 aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
@@ -599,10 +538,9 @@ struct _MINI_ADAPTER
struct semaphore NVMRdmWrmLock;
- BOOLEAN bNetworkInterfaceRegistered;
- BOOLEAN bNetdeviceNotifierRegistered;
+
struct device *pstCreatedClassDevice;
- BOOLEAN bUsbClassDriverRegistered;
+
// BOOLEAN InterfaceUpStatus;
PFLASH2X_CS_INFO psFlash2xCSInfo;
PFLASH_CS_INFO psFlashCSInfo ;
@@ -630,17 +568,13 @@ struct _MINI_ADAPTER
struct semaphore LowPowerModeSync;
ULONG liDrainCalculated;
UINT gpioBitMap;
+
S_BCM_DEBUG_STATE stDebugState;
};
typedef struct _MINI_ADAPTER MINI_ADAPTER, *PMINI_ADAPTER;
-
-typedef struct _DEVICE_EXTENSION
-{
- PMINI_ADAPTER pAdapt;
-}DEVICE_EXTENSION,*PDEVICE_EXTENSION;
-
+#define GET_BCM_ADAPTER(net_dev) netdev_priv(net_dev)
struct _ETH_HEADER_STRUC {
UCHAR au8DestinationAddress[6];
@@ -667,8 +601,8 @@ typedef LINK_REQUEST CONTROL_MESSAGE;
typedef struct _DDR_SETTING
{
- ULONG ulRegAddress;
- ULONG ulRegValue;
+ UINT ulRegAddress;
+ UINT ulRegValue;
}DDR_SETTING, *PDDR_SETTING;
typedef DDR_SETTING DDR_SET_NODE, *PDDR_SET_NODE;
INT
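
The Adapter.h changes above drop the pvOsDepData/DEVICE_EXTENSION indirection and fetch the adapter straight from the net_device via the new GET_BCM_ADAPTER() macro, i.e. netdev_priv(). A minimal sketch of that idiom follows, assuming the usual alloc_etherdev() allocation; the driver's actual probe path is not part of this hunk, and my_adapter / my_setup are illustrative names only.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_adapter {			/* stand-in for struct _MINI_ADAPTER */
	struct net_device *dev;
	u32 msg_enable;
};

static struct net_device *my_setup(void)
{
	/* private area is allocated together with the net_device */
	struct net_device *ndev = alloc_etherdev(sizeof(struct my_adapter));
	struct my_adapter *ad;

	if (!ndev)
		return NULL;

	ad = netdev_priv(ndev);		/* what GET_BCM_ADAPTER(ndev) expands to */
	ad->dev = ndev;

	/* ... fill in netdev_ops, then register_netdev(ndev) ... */
	return ndev;
}

Embedding the private data this way is what lets the separate LINUX_DEP_DATA and DEVICE_EXTENSION wrappers be deleted.
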
diff --git a/drivers/staging/bcm/Arp.c b/drivers/staging/bcm/Arp.c
deleted file mode 100644
index d60d8593d2ef..000000000000
--- a/drivers/staging/bcm/Arp.c
+++ /dev/null
@@ -1,94 +0,0 @@
-
-/*
- * File Name: Arp.c
- * Abstract: This file contains the routines for handling ARP PACKETS
- */
-#include "headers.h"
-#define ARP_PKT_SIZE 60
-
-/* =========================================================================
- * Function - reply_to_arp_request()
- *
- * Description - When this host tries to broadcast ARP request packet through
- * the virtual interface (veth0), reply directly to upper layer.
- * This function allocates a new skb for ARP reply packet,
- * fills in the fields of the packet and then sends it to
- * upper layer.
- *
- * Parameters - skb: Pointer to sk_buff structure of the ARP request pkt.
- *
- * Returns - None
- * =========================================================================*/
-
-VOID
-reply_to_arp_request(struct sk_buff *skb)
-{
- PMINI_ADAPTER Adapter;
- struct ArpHeader *pArpHdr = NULL;
- struct ethhdr *pethhdr = NULL;
- UCHAR uiIPHdr[4];
- /* Check for valid skb */
- if(skb == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid skb: Cannot reply to ARP request\n");
- return;
- }
-
-
- Adapter = GET_BCM_ADAPTER(skb->dev);
- /* Print the ARP Request Packet */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP Packet Dump :");
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);
-
- /*
- * Extract the Ethernet Header and Arp Payload including Header
- */
- pethhdr = (struct ethhdr *)skb->data;
- pArpHdr = (struct ArpHeader *)(skb->data+ETH_HLEN);
-
- if(Adapter->bETHCSEnabled)
- {
- if(memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
- {
- bcm_kfree_skb(skb);
- return;
- }
- }
-
- // Set the Ethernet Header First.
- memcpy(pethhdr->h_dest, pethhdr->h_source, ETH_ALEN);
- if(!memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
- {
- pethhdr->h_source[5]++;
- }
-
- /* Set the reply to ARP Reply */
- pArpHdr->arp.ar_op = ntohs(ARPOP_REPLY);
-
- /* Set the HW Address properly */
- memcpy(pArpHdr->ar_sha, pethhdr->h_source, ETH_ALEN);
- memcpy(pArpHdr->ar_tha, pethhdr->h_dest, ETH_ALEN);
-
- // Swapping the IP Adddress
- memcpy(uiIPHdr,pArpHdr->ar_sip,4);
- memcpy(pArpHdr->ar_sip,pArpHdr->ar_tip,4);
- memcpy(pArpHdr->ar_tip,uiIPHdr,4);
-
- /* Print the ARP Reply Packet */
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP REPLY PACKET: ");
-
- /* Send the Packet to upper layer */
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);
-
- skb->protocol = eth_type_trans(skb,skb->dev);
- skb->pkt_type = PACKET_HOST;
-
-// skb->mac.raw=skb->data+LEADER_SIZE;
- skb_set_mac_header (skb, LEADER_SIZE);
- netif_rx(skb);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "<=============\n");
- return;
-}
-
-
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index fead9c56162e..31674ea1cd48 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -12,7 +12,7 @@
*
* Returns - Zero(Success)
****************************************************************/
-static struct class *bcm_class = NULL;
+
static int bcm_char_open(struct inode *inode, struct file * filp)
{
PMINI_ADAPTER Adapter = NULL;
@@ -93,7 +93,7 @@ static int bcm_char_release(struct inode *inode, struct file *filp)
/*Stop Queuing the control response Packets*/
atomic_dec(&Adapter->ApplicationRunning);
- bcm_kfree(pTarang);
+ kfree(pTarang);
/* remove this filp from the asynchronously notified filp's */
filp->private_data = NULL;
@@ -102,11 +102,11 @@ static int bcm_char_release(struct inode *inode, struct file *filp)
static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
{
- PPER_TARANG_DATA pTarang = (PPER_TARANG_DATA)filp->private_data;
+ PPER_TARANG_DATA pTarang = filp->private_data;
PMINI_ADAPTER Adapter = pTarang->Adapter;
- struct sk_buff* Packet = NULL;
- UINT PktLen = 0;
- int wait_ret_val=0;
+ struct sk_buff* Packet = NULL;
+ ssize_t PktLen = 0;
+ int wait_ret_val=0;
wait_ret_val = wait_event_interruptible(Adapter->process_read_wait_queue,
(pTarang->RxAppControlHead || Adapter->device_removed));
@@ -139,14 +139,16 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size, l
if(Packet)
{
PktLen = Packet->len;
- if(copy_to_user(buf, Packet->data, PktLen))
+ if(copy_to_user(buf, Packet->data, min_t(size_t, PktLen, size)))
{
- bcm_kfree_skb(Packet);
+ dev_kfree_skb(Packet);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\nReturning from copy to user failure \n");
return -EFAULT;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read %d Bytes From Adapter packet = 0x%p by process %d!\n", PktLen, Packet, current->pid);
- bcm_kfree_skb(Packet);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+ "Read %zd Bytes From Adapter packet = %p by process %d!\n",
+ PktLen, Packet, current->pid);
+ dev_kfree_skb(Packet);
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<====\n");
@@ -155,15 +157,12 @@ static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size, l
static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
- PPER_TARANG_DATA pTarang = (PPER_TARANG_DATA)filp->private_data;
- void __user *argp = (void __user *)argp;
+ PPER_TARANG_DATA pTarang = filp->private_data;
+ void __user *argp = (void __user *)arg;
PMINI_ADAPTER Adapter = pTarang->Adapter;
INT Status = STATUS_FAILURE;
- IOCTL_BUFFER IoBuffer={};
-#ifndef BCM_SHM_INTERFACE
- int timeout = 0;
-#endif
-
+ int timeout = 0;
+ IOCTL_BUFFER IoBuffer;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
@@ -204,50 +203,41 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = vendorextnIoctl(Adapter, cmd, arg);
if(Status != CONTINUE_COMMON_PATH )
- {
return Status;
- }
switch(cmd){
// Rdms for Swin Idle...
case IOCTL_BCM_REGISTER_READ_PRIVATE:
{
RDM_BUFFER sRdmBuffer = {0};
- PCHAR temp_buff = NULL;
- UINT Bufflen = 0;
+ PCHAR temp_buff;
+ UINT Bufflen;
+
/* Copy Ioctl Buffer structure */
- if(copy_from_user((PCHAR)&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
+ if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ /* FIXME: need to restrict BuffLen */
Bufflen = IoBuffer.OutputLength + (4 - IoBuffer.OutputLength%4)%4;
- temp_buff = (PCHAR)kmalloc(Bufflen, GFP_KERNEL);
+ temp_buff = kmalloc(Bufflen, GFP_KERNEL);
if(!temp_buff)
- {
- return STATUS_FAILURE;
- }
- if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
+ return -ENOMEM;
+
Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, Bufflen);
- if(Status != STATUS_SUCCESS)
- {
- bcm_kfree(temp_buff);
- return Status;
- }
- if(copy_to_user(IoBuffer.OutputBuffer,
- (PCHAR)temp_buff, (UINT)IoBuffer.OutputLength))
+ if(Status == STATUS_SUCCESS)
{
- Status = -EFAULT;
+ if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+ Status = -EFAULT;
}
- bcm_kfree(temp_buff);
+
+ kfree(temp_buff);
break;
}
case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
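
The hunk above introduces the validation pattern that most of the remaining Bcmchar.c hunks repeat: copy the IOCTL_BUFFER descriptor first, reject an InputLength larger than the kernel-side destination, and only then copy the payload itself. A condensed, generic sketch of that pattern is below; struct io_desc and struct reg_req are stand-ins for IOCTL_BUFFER and the per-command payload (e.g. RDM_BUFFER), not the driver's own types.

#include <linux/uaccess.h>

struct io_desc {			/* stand-in for IOCTL_BUFFER */
	void __user *in;
	unsigned long in_len;
};

struct reg_req {			/* stand-in for RDM_BUFFER / WRM_BUFFER */
	unsigned long reg;
	unsigned long data;
};

static long handle_reg_ioctl(void __user *argp)
{
	struct io_desc desc;
	struct reg_req req;

	if (copy_from_user(&desc, argp, sizeof(desc)))
		return -EFAULT;

	/* never let a user-supplied length overrun the kernel buffer */
	if (desc.in_len > sizeof(req))
		return -EINVAL;

	if (copy_from_user(&req, desc.in, desc.in_len))
		return -EFAULT;

	/* ... act on req.reg / req.data ... */
	return 0;
}

The remaining FIXMEs in the patch mark the cases where the length still comes from userspace (kmalloc of IoBuffer.OutputLength and friends) and has not yet been bounded this way.
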
@@ -256,19 +246,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
UINT uiTempVar=0;
/* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
+
/* Get WrmBuffer structure */
- if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
((uiTempVar == EEPROM_REJECT_REG_1)||
@@ -277,8 +264,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_4)))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
(PUINT)sWrmBuffer.Data, sizeof(ULONG));
@@ -305,56 +291,39 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bPreparingForLowPowerMode ==TRUE))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
- Status = -EACCES;
- break;
+ return -EACCES;
}
/* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+ return -EINVAL;
- temp_buff = (PCHAR)kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
+ if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
+ /* FIXME: don't trust user supplied length */
+ temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
if(!temp_buff)
- {
return STATUS_FAILURE;
- }
- if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
- if(
-#if !defined(BCM_SHM_INTERFACE)
- (((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
-#endif
- ((ULONG)sRdmBuffer.Register & 0x3)
- )
+ if((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sRdmBuffer.Register & 0x3))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
(int)sRdmBuffer.Register);
- Status = -EINVAL;
- break;
+ return -EINVAL;
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, IoBuffer.OutputLength);
- if(Status != STATUS_SUCCESS)
- {
- bcm_kfree(temp_buff);
- return Status;
- }
- if(copy_to_user(IoBuffer.OutputBuffer,
- (PCHAR)temp_buff, (UINT)IoBuffer.OutputLength))
- {
- Status = -EFAULT;
- }
- bcm_kfree(temp_buff);
+ if(Status == STATUS_SUCCESS)
+ if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+ Status = -EFAULT;
+
+ kfree(temp_buff);
break;
}
case IOCTL_BCM_REGISTER_WRITE:
@@ -367,36 +336,28 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bPreparingForLowPowerMode ==TRUE))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
- Status = -EACCES;
- break;
+ return -EACCES;
}
+
/* Copy Ioctl Buffer structure */
- if(copy_from_user((PCHAR)&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+ return -EINVAL;
+
/* Get WrmBuffer structure */
- if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
- if(
-#if !defined(BCM_SHM_INTERFACE)
+ if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- (((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
-#endif
- ((ULONG)sWrmBuffer.Register & 0x3)
- )
+ if( (((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+ ((ULONG)sWrmBuffer.Register & 0x3) )
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n",
(int)sWrmBuffer.Register);
- Status = -EINVAL;
- break;
+ return -EINVAL;
}
+
uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
((uiTempVar == EEPROM_REJECT_REG_1)||
@@ -406,8 +367,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(cmd == IOCTL_BCM_REGISTER_WRITE))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
@@ -436,19 +396,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bPreparingForLowPowerMode ==TRUE))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"GPIO Can't be set/clear in Low power Mode");
- Status = -EACCES;
- break;
+ return -EACCES;
}
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
uiBit = gpio_info.uiGpioNumber;
uiOperation = gpio_info.uiGpioValue;
@@ -517,8 +472,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
break;
case BCM_LED_THREAD_STATE_CHANGE_REQ:
{
-
- USER_THREAD_REQ threadReq = {0};
+ USER_THREAD_REQ threadReq = { 0 };
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"User made LED thread InActive");
if((Adapter->IdleMode == TRUE) ||
@@ -529,21 +483,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = -EACCES;
break;
}
- Status =copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
- Status= copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the InputBuffer from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if (IoBuffer.InputLength > sizeof(threadReq))
+ return -EINVAL;
+
+ if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
+
//if LED thread is running(Actively or Inactively) set it state to make inactive
if(Adapter->LEDInfo.led_thread_running)
{
@@ -572,19 +521,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus ==TRUE) ||
(Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- Status = -EACCES;
- break;
- }
- if(copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
- Status = -EFAULT;
- break;
- }
- if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
- {
- Status = -EFAULT;
- break;
- }
+ return -EACCES;
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_info))
+ return -EINVAL;
+ if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
uiBit = gpio_info.uiGpioNumber;
//Set the gpio output register
Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
@@ -608,25 +551,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus ==TRUE) ||
(Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- Status = -EINVAL;
- break;
- }
- Status = copy_from_user( (PCHAR)&IoBuffer, argp, sizeof( IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
+ return -EINVAL;
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_multi_info))
+ return -EINVAL;
+ if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
- Status = copy_from_user( &gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer Contents from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
if(IsReqGpioIsLedInNVM(Adapter,pgpio_multi_info[WIMAX_IDX].uiGPIOMask)== FALSE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",pgpio_multi_info[WIMAX_IDX].uiGPIOMask,Adapter->gpioBitMap);
@@ -686,7 +618,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
- Status = -EFAULT;
break;
}
}
@@ -700,25 +631,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if((Adapter->IdleMode == TRUE) ||
(Adapter->bShutStatus ==TRUE) ||
(Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- Status = -EINVAL;
- break;
- }
- Status = copy_from_user(&IoBuffer, argp, sizeof( IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
+ return -EINVAL;
- Status = copy_from_user( &gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength);
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer Contents from user space err:%d",Status);
- Status = -EFAULT;
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
+ return -EINVAL;
+ if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
Status = rdmaltWithLock( Adapter, ( UINT) GPIO_MODE_REGISTER, ( PUINT) ucResetValue, sizeof( UINT));
if( STATUS_SUCCESS != Status)
@@ -769,7 +689,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
- Status = -EFAULT;
break;
}
}
@@ -783,24 +702,20 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case IOCTL_IDLE_REQ:
{
PVOID pvBuffer=NULL;
+
/* Copy Ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
- pvBuffer=kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ /* FIXME: don't accept any length from user */
+ pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
if(!pvBuffer)
- {
return -ENOMEM;
- }
- if(copy_from_user(pvBuffer, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
+ if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
{
Status = -EFAULT;
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
break;
}
@@ -820,10 +735,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
cntrlEnd:
up(&Adapter->LowPowerModeSync);
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
break;
}
-#ifndef BCM_SHM_INTERFACE
case IOCTL_BCM_BUFFER_DOWNLOAD_START:
{
INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock) ;
@@ -844,7 +758,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = reset_card_proc(Adapter);
if(Status)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "reset_card_proc Failed!\n");
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
break;
@@ -862,7 +776,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
case IOCTL_BCM_BUFFER_DOWNLOAD:
{
- FIRMWARE_INFO *psFwInfo=NULL;
+ FIRMWARE_INFO *psFwInfo = NULL;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
do{
if(!down_trylock(&Adapter->fw_download_sema))
@@ -871,29 +785,23 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status=-EINVAL;
break;
}
+
/* Copy Ioctl Buffer structure */
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
+
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Length for FW DLD is : %lx\n",
IoBuffer.InputLength);
- psFwInfo=kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+
+ if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
+ return -EINVAL;
+
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
if(!psFwInfo)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Failed to allocate buffer!!!!\n");
- Status = -ENOMEM;
- break;
- }
- if(copy_from_user(psFwInfo, IoBuffer.InputBuffer,
- IoBuffer.InputLength))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_from_user 2 failed\n");
- Status = -EFAULT;
- break;
- }
+ return -ENOMEM;
+
+ if(copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ return -EFAULT;
if(!psFwInfo->pvMappedFirmwareAddress ||
(psFwInfo->u32FirmwareLength == 0))
@@ -929,7 +837,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status != STATUS_SUCCESS)
up(&Adapter->fw_download_sema);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
- bcm_kfree(psFwInfo);
+ kfree(psFwInfo);
break;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
@@ -946,7 +854,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Adapter->bBinDownloaded=TRUE;
Adapter->bCfgDownloaded=TRUE;
atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- atomic_set(&Adapter->RxRollOverCount, 0);
+
Adapter->CurrNumRecvDescs=0;
Adapter->downloadDDR = 0;
@@ -999,7 +907,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
up(&Adapter->NVMRdmWrmLock);
break;
}
-#endif
case IOCTL_BE_BUCKET_SIZE:
Status = 0;
if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
@@ -1050,22 +957,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
break;
case IOCTL_GET_PACK_INFO:
- if(copy_to_user(argp, &Adapter->PackInfo,
- sizeof(PacketInfo)*NO_OF_QUEUES))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_to_user(argp, &Adapter->PackInfo, sizeof(PacketInfo)*NO_OF_QUEUES))
+ return -EFAULT;
Status = STATUS_SUCCESS;
break;
case IOCTL_BCM_SWITCH_TRANSFER_MODE:
{
UINT uiData = 0;
if(copy_from_user(&uiData, argp, sizeof(UINT)))
- {
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
+
if(uiData) /* Allow All Packets */
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
@@ -1084,22 +985,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
/* Copy Ioctl Buffer structure */
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
- if(copy_to_user(IoBuffer.OutputBuffer,
- VER_FILEVERSION_STR, (UINT)IoBuffer.OutputLength))
- {
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
+
+ if(copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+ return -EFAULT;
Status = STATUS_SUCCESS;
break;
}
case IOCTL_BCM_GET_CURRENT_STATUS:
{
- LINK_STATE plink_state;
+ LINK_STATE link_state;
/* Copy Ioctl Buffer structure */
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
@@ -1108,19 +1003,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = -EFAULT;
break;
}
- if (IoBuffer.OutputLength != sizeof(plink_state)) {
+ if (IoBuffer.OutputLength != sizeof(link_state)) {
Status = -EINVAL;
break;
}
- if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
- Status = -EFAULT;
- break;
- }
- plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
- plink_state.bShutdownMode = Adapter->bShutStatus;
- plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
- if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
+ memset(&link_state, 0, sizeof(link_state));
+ link_state.bIdleMode = Adapter->IdleMode;
+ link_state.bShutdownMode = Adapter->bShutStatus;
+ link_state.ucLinkStatus = Adapter->LinkStatus;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &link_state,
+ min_t(size_t, sizeof(link_state), IoBuffer.OutputLength)))
+ {
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
Status = -EFAULT;
break;
@@ -1131,17 +1026,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case IOCTL_BCM_SET_MAC_TRACING:
{
UINT tracing_flag;
+
/* copy ioctl Buffer structure */
- if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
- if(copy_from_user(&tracing_flag, IoBuffer.InputBuffer,sizeof(UINT)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
+ if(copy_from_user(&tracing_flag,IoBuffer.InputBuffer,sizeof(UINT)))
+ return -EFAULT;
+
if (tracing_flag)
Adapter->pTarangs->MacTracingEnabled = TRUE;
else
@@ -1151,72 +1043,53 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case IOCTL_BCM_GET_DSX_INDICATION:
{
ULONG ulSFId=0;
- if(copy_from_user((PCHAR)&IoBuffer, argp,
- sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid IO buffer!!!" );
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
if(IoBuffer.OutputLength < sizeof(stLocalSFAddIndicationAlt))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Mismatch req: %lx needed is =0x%zx!!!",
- IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
+ "Mismatch req: %lx needed is =0x%zx!!!",
+ IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
return -EINVAL;
}
- if(copy_from_user(&ulSFId, IoBuffer.InputBuffer,
- sizeof(ulSFId)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Invalid SFID!!! %lu", ulSFId );
- Status = -EFAULT;
- break;
- }
+
+ if(copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
+ return -EFAULT;
+
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId );
- get_dsx_sf_data_to_application(Adapter, ulSFId,
- IoBuffer.OutputBuffer);
+ get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
Status=STATUS_SUCCESS;
}
break;
case IOCTL_BCM_GET_HOST_MIBS:
{
- PCHAR temp_buff;
+ PVOID temp_buff;
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_from user for IoBuff failed\n");
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
if(IoBuffer.OutputLength != sizeof(S_MIBS_HOST_STATS_MIBS))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Length Check failed %lu %zd\n", IoBuffer.OutputLength,
- sizeof(S_MIBS_HOST_STATS_MIBS));
- return -EINVAL;
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
+ "Length Check failed %lu %zd\n",
+ IoBuffer.OutputLength, sizeof(S_MIBS_HOST_STATS_MIBS));
+ return -EINVAL;
}
- temp_buff = (PCHAR)kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
-
+ /* FIXME: HOST_STATS are too big for kmalloc (122048)! */
+ temp_buff = kzalloc(sizeof(S_MIBS_HOST_STATS_MIBS), GFP_KERNEL);
if(!temp_buff)
- {
return STATUS_FAILURE;
- }
-
- Status = ProcessGetHostMibs(Adapter,
- (PUCHAR)temp_buff, IoBuffer.OutputLength);
- Status = GetDroppedAppCntrlPktMibs((PVOID)temp_buff,
- (PPER_TARANG_DATA)filp->private_data);
+ Status = ProcessGetHostMibs(Adapter, temp_buff);
+ GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
- if(copy_to_user(IoBuffer.OutputBuffer,(PCHAR)temp_buff,
- sizeof(S_MIBS_HOST_STATS_MIBS)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy to user failed\n");
- bcm_kfree(temp_buff);
- return -EFAULT;
- }
+ if (Status != STATUS_FAILURE)
+ if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
+ Status = -EFAULT;
- bcm_kfree(temp_buff);
+ kfree(temp_buff);
break;
}
@@ -1226,10 +1099,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Adapter->usIdleModePattern = ABORT_IDLE_MODE;
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
- #if 0
- Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceAbortIdlemode (Adapter, Adapter->usIdleModePattern);
- #endif
}
Status = STATUS_SUCCESS;
break;
@@ -1248,24 +1117,20 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Status = -EACCES;
break;
}
+
/* Copy Ioctl Buffer structure */
- if(copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
- pvBuffer=kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+ /* FIXME: restrict length */
+ pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
if(!pvBuffer)
- {
return -ENOMEM;
- break;
- }
/* Get WrmBuffer structure */
- if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+ if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
{
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
Status = -EFAULT;
break;
}
@@ -1275,7 +1140,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
((ULONG)pBulkBuffer->Register & 0x3))
{
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM Done On invalid Address : %x Access Denied.\n",(int)pBulkBuffer->Register);
Status = -EINVAL;
break;
@@ -1290,7 +1155,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(uiTempVar == EEPROM_REJECT_REG_4)) &&
(cmd == IOCTL_BCM_REGISTER_WRITE))
{
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"EEPROM Access Denied, not in VSG Mode\n");
Status = -EFAULT;
break;
@@ -1306,30 +1171,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
}
- bcm_kfree(pvBuffer);
+ kfree(pvBuffer);
break;
}
case IOCTL_BCM_GET_NVM_SIZE:
- {
-
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- //IOLog("failed NVM first");
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
+
if(Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH ) {
- if(copy_to_user(IoBuffer.OutputBuffer,
- (unsigned char *)&Adapter->uiNVMDSDSize, (UINT)sizeof(UINT)))
- {
- Status = -EFAULT;
- return Status;
- }
+ if(copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
+ return -EFAULT;
}
-
Status = STATUS_SUCCESS ;
- }
break;
case IOCTL_BCM_CAL_INIT :
@@ -1338,40 +1192,26 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
UINT uiSectorSize = 0 ;
if(Adapter->eNVMType == NVM_FLASH)
{
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
+ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- }
- if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
+
+ if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
return -EFAULT;
if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
{
-
- Status = copy_to_user(IoBuffer.OutputBuffer,
- (unsigned char *)&Adapter->uiSectorSize ,
- (UINT)sizeof(UINT));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Coping the sector size to use space failed. status:%d",Status);
- return -EFAULT;
- }
+ if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
+ sizeof(UINT)))
+ return -EFAULT;
}
else
{
if(IsFlash2x(Adapter))
{
- Status = copy_to_user(IoBuffer.OutputBuffer,
- (unsigned char *)&Adapter->uiSectorSize ,
- (UINT)sizeof(UINT));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Coping the sector size to use space failed. status:%d",Status);
- return -EFAULT;
- }
-
+ if (copy_to_user(IoBuffer.OutputBuffer,
+ &Adapter->uiSectorSize ,
+ sizeof(UINT)))
+ return -EFAULT;
}
else
{
@@ -1395,25 +1235,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
break;
case IOCTL_BCM_SET_DEBUG :
+#ifdef DEBUG
{
USER_BCM_DBG_STATE sUserDebugState;
// BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Entered the ioctl %x \n", IOCTL_BCM_SET_DEBUG );
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
- Status = copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy from user failed\n");
- Status = -EFAULT;
- break;
- }
- Status = copy_from_user(&sUserDebugState,IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE));
- if(Status)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IoBuffer.InputBuffer failed");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- }
+
+ if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE)))
+ return -EFAULT;
+
BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
sUserDebugState.OnOff, sUserDebugState.Type);
@@ -1436,15 +1270,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
BCM_SHOW_DEBUG_BITMAP(Adapter);
}
+#endif
break;
case IOCTL_BCM_NVM_READ:
case IOCTL_BCM_NVM_WRITE:
{
-
- NVM_READWRITE stNVMReadWrite = {};
+ NVM_READWRITE stNVMReadWrite;
PUCHAR pReadData = NULL;
- void __user * pBuffertobeCopied = NULL;
- ULONG ulDSDMagicNumInUsrBuff = 0 ;
+ ULONG ulDSDMagicNumInUsrBuff = 0;
struct timeval tv0, tv1;
memset(&tv0,0,sizeof(struct timeval));
memset(&tv1,0,sizeof(struct timeval));
@@ -1469,21 +1302,12 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
/* Copy Ioctl Buffer structure */
if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copy_from_user failed\n");
- Status = -EFAULT;
- break;
- }
- if(IOCTL_BCM_NVM_READ == cmd)
- pBuffertobeCopied = IoBuffer.OutputBuffer;
- else
- pBuffertobeCopied = IoBuffer.InputBuffer;
+ return -EFAULT;
- if(copy_from_user(&stNVMReadWrite, pBuffertobeCopied,sizeof(NVM_READWRITE)))
- {
- Status = -EFAULT;
- break;
- }
+ if(copy_from_user(&stNVMReadWrite,
+ (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
+ sizeof(NVM_READWRITE)))
+ return -EFAULT;
//
// Deny the access if the offset crosses the cal area limit.
@@ -1496,18 +1320,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
break;
}
- pReadData =(PCHAR)kmalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
-
+ pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
if(!pReadData)
return -ENOMEM;
- memset(pReadData,0,stNVMReadWrite.uiNumBytes);
-
if(copy_from_user(pReadData, stNVMReadWrite.pBuffer,
stNVMReadWrite.uiNumBytes))
{
Status = -EFAULT;
- bcm_kfree(pReadData);
+ kfree(pReadData);
break;
}
@@ -1522,7 +1343,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadData);
+ kfree(pReadData);
return -EACCES;
}
@@ -1533,13 +1354,12 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status != STATUS_SUCCESS)
{
- bcm_kfree(pReadData);
+ kfree(pReadData);
return Status;
}
- if(copy_to_user(stNVMReadWrite.pBuffer,
- pReadData, (UINT)stNVMReadWrite.uiNumBytes))
+ if(copy_to_user(stNVMReadWrite.pBuffer,pReadData, stNVMReadWrite.uiNumBytes))
{
- bcm_kfree(pReadData);
+ kfree(pReadData);
Status = -EFAULT;
}
}
@@ -1554,7 +1374,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadData);
+ kfree(pReadData);
return -EACCES;
}
@@ -1582,7 +1402,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadData);
+ kfree(pReadData);
return Status;
}
@@ -1591,7 +1411,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadData);
+ kfree(pReadData);
return Status;
}
}
@@ -1608,7 +1428,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status != STATUS_SUCCESS)
{
- bcm_kfree(pReadData);
+ kfree(pReadData);
return Status;
}
}
@@ -1616,7 +1436,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n",(tv1.tv_sec - tv0.tv_sec)*1000 +(tv1.tv_usec - tv0.tv_usec)/1000);
- bcm_kfree(pReadData);
+ kfree(pReadData);
Status = STATUS_SUCCESS;
}
break;
@@ -1629,7 +1449,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
UINT BuffSize = 0;
UINT ReadBytes = 0;
UINT ReadOffset = 0;
- char __user *OutPutBuff = NULL;
+ void __user *OutPutBuff;
if(IsFlash2x(Adapter) != TRUE)
{
@@ -1638,20 +1458,12 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- }
//Reading FLASH 2.x READ structure
- Status = copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,sizeof(FLASH2X_READWRITE));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of Input Buffer failed");
+ if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,sizeof(FLASH2X_READWRITE)))
return -EFAULT;
- }
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xRead.Section);
@@ -1687,7 +1499,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadBuff);
+ kfree(pReadBuff);
return -EACCES;
}
@@ -1715,7 +1527,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Copy to use failed with status :%d", Status);
- Status = -EFAULT;
break;
}
NOB = NOB - ReadBytes;
@@ -1727,15 +1538,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadBuff);
+ kfree(pReadBuff);
}
break ;
case IOCTL_BCM_FLASH2X_SECTION_WRITE :
{
FLASH2X_READWRITE sFlash2xWrite = {0};
- PUCHAR pWriteBuff = NULL;
- void __user *InputAddr = NULL;
+ PUCHAR pWriteBuff;
+ void __user *InputAddr;
UINT NOB = 0;
UINT BuffSize = 0;
UINT WriteOffset = 0;
@@ -1752,33 +1563,17 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- }
//Reading FLASH 2.x READ structure
- Status = copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Reading of output Buffer from IOCTL buffer fails");
+ if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE)))
return -EFAULT;
- }
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xWrite.Section);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.offset :%d" ,sFlash2xWrite.offset);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.numOfBytes :%x" ,sFlash2xWrite.numOfBytes);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.bVerify :%x\n" ,sFlash2xWrite.bVerify);
- #if 0
- if((sFlash2xWrite.Section == ISO_IMAGE1) ||(sFlash2xWrite.Section == ISO_IMAGE2) ||
- (sFlash2xWrite.Section == DSD0) || (sFlash2xWrite.Section == DSD1) || (sFlash2xWrite.Section == DSD2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"ISO/DSD Image write is not allowed.... ");
- return STATUS_FAILURE ;
- }
- #endif
if((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) &&
(sFlash2xWrite.Section != VSA2) )
{
@@ -1798,12 +1593,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
else
BuffSize = NOB ;
- pWriteBuff = (PCHAR)kmalloc(BuffSize, GFP_KERNEL);
+ pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
if(pWriteBuff == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
return -ENOMEM;
- }
+
//extracting the remainder of the given offset.
WriteBytes = Adapter->uiSectorSize ;
@@ -1820,7 +1613,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pWriteBuff);
+ kfree(pWriteBuff);
return -EACCES;
}
@@ -1831,7 +1624,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to user failed with status :%d", Status);
- Status = -EFAULT;
break ;
}
BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pWriteBuff,WriteBytes);
@@ -1859,28 +1651,22 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
} while(NOB > 0);
BcmFlash2xWriteSig(Adapter,sFlash2xWrite.Section);
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pWriteBuff);
+ kfree(pWriteBuff);
}
break ;
case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP :
{
- PFLASH2X_BITMAP psFlash2xBitMap = NULL ;
+ PFLASH2X_BITMAP psFlash2xBitMap;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- }
+
if(IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Structure size mismatch Lib :0x%lx Driver :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH2X_BITMAP));
- break;
- }
+ return -EINVAL;
- psFlash2xBitMap = (PFLASH2X_BITMAP)kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
+ psFlash2xBitMap = kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
if(psFlash2xBitMap == NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory is not available");
@@ -1895,20 +1681,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(psFlash2xBitMap);
+ kfree(psFlash2xBitMap);
return -EACCES;
}
BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
up(&Adapter->NVMRdmWrmLock);
- Status = copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash2x bitMap failed");
- bcm_kfree(psFlash2xBitMap);
- return -EFAULT;
- }
- bcm_kfree(psFlash2xBitMap);
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
+ Status = -EFAULT;
+
+ kfree(psFlash2xBitMap);
}
break ;
case IOCTL_BCM_SET_ACTIVE_SECTION :
@@ -1926,14 +1708,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
+ return Status;
}
Status = copy_from_user(&eFlash2xSectionVal,IoBuffer.InputBuffer, sizeof(INT));
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return -EFAULT;
+ return Status;
}
down(&Adapter->NVMRdmWrmLock);
@@ -1961,29 +1743,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
Adapter->bAllDSDWriteAllow = FALSE ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
- #if 0
- SECTION_TYPE section = 0 ;
-
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION Called");
- Status = copy_from_user((PCHAR)&IoBuffer, (PCHAR)arg, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
- }
- Status = copy_from_user((PCHAR)section,(PCHAR)&IoBuffer, sizeof(INT));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy of section type failed failed");
- return -EFAULT;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Read Section :%d", section);
- if(section == DSD)
- Adapter->ulFlashCalStart = Adapter->uiActiveDSDOffsetAtFwDld ;
- else
- Status = STATUS_FAILURE ;
- #endif
Status = STATUS_SUCCESS ;
}
break ;
@@ -2004,14 +1763,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
- return -EFAULT;
+ return Status;
}
- Status = copy_from_user(&sCopySectStrut,IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
+ Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
- return -EFAULT;
+ return Status;
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
@@ -2082,7 +1841,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- Status = -EFAULT;
break;
}
if(Adapter->eNVMType != NVM_FLASH)
@@ -2095,35 +1853,18 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
{
if(IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0," Passed buffer size:0x%lX is insufficient for the CS structure.. \nRequired size :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH2X_CS_INFO));
- Status = -EINVAL;
- break;
- }
+ return -EINVAL;
- Status = copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash2x cs info failed");
- Status = -EFAULT;
- break;
- }
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO)))
+ return -EFAULT;
}
else
{
if(IoBuffer.OutputLength < sizeof(FLASH_CS_INFO))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0," Passed buffer size:0x%lX is insufficient for the CS structure.. Required size :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH_CS_INFO));
- Status = -EINVAL;
- break;
- }
- Status = copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash CS info failed");
- Status = -EFAULT;
- break;
- }
+ return -EINVAL;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO)))
+ return -EFAULT;
}
}
@@ -2145,13 +1886,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return -EFAULT;
+ return Status;
}
- Status = copy_from_user(&eFlash2xSectionVal,IoBuffer.InputBuffer, sizeof(INT));
+ Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return -EFAULT;
+ return Status;
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Read Section :%d", eFlash2xSectionVal);
@@ -2181,13 +1922,13 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
case IOCTL_BCM_NVM_RAW_READ :
{
- NVM_READWRITE stNVMRead = {};
+ NVM_READWRITE stNVMRead;
INT NOB ;
INT BuffSize ;
INT ReadOffset = 0;
UINT ReadBytes = 0 ;
- PUCHAR pReadBuff = NULL ;
- char __user *OutPutBuff = NULL ;
+ PUCHAR pReadBuff;
+ void __user *OutPutBuff;
if(Adapter->eNVMType != NVM_FLASH)
{
@@ -2204,10 +1945,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
if(copy_from_user(&stNVMRead, IoBuffer.OutputBuffer,sizeof(NVM_READWRITE)))
- {
- Status = -EFAULT;
- break;
- }
+ return -EFAULT;
NOB = stNVMRead.uiNumBytes;
//In Raw-Read max Buff size : 64MB
@@ -2217,11 +1955,10 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
else
BuffSize = NOB ;
- ReadOffset = stNVMRead.uiOffset ;
+ ReadOffset = stNVMRead.uiOffset;
OutPutBuff = stNVMRead.pBuffer;
-
- pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
+ pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
if(pReadBuff == NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
@@ -2235,7 +1972,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
(Adapter->bPreparingForLowPowerMode ==TRUE))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
- bcm_kfree(pReadBuff);
+ kfree(pReadBuff);
up(&Adapter->NVMRdmWrmLock);
return -EACCES;
}
@@ -2256,13 +1993,12 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
break;
}
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff, ReadBytes);
+ BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff,ReadBytes);
Status = copy_to_user(OutPutBuff, pReadBuff,ReadBytes);
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to use failed with status :%d", Status);
- Status = -EFAULT;
break;
}
NOB = NOB - ReadBytes;
@@ -2275,7 +2011,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
Adapter->bFlashRawRead = FALSE ;
up(&Adapter->NVMRdmWrmLock);
- bcm_kfree(pReadBuff);
+ kfree(pReadBuff);
break ;
}
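The raw-read case above keeps one fixed shape: allocate a bounce buffer, take NVMRdmWrmLock, and make every exit path both release the semaphore and free the buffer with plain kfree(). A condensed sketch of that shape, assuming the driver's BeceemFlashBulkRead() helper and adapter fields (the wrapper function itself is illustrative):

static int example_raw_flash_read(PMINI_ADAPTER Adapter, void __user *dst,
                                  UINT offset, UINT bytes)
{
        PUCHAR buf;
        int status;

        buf = kzalloc(bytes, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        down(&Adapter->NVMRdmWrmLock);

        /* read flash contents into the kernel bounce buffer */
        status = BeceemFlashBulkRead(Adapter, (PUINT)buf, offset, bytes);
        if (!status && copy_to_user(dst, buf, bytes))
                status = -EFAULT;

        /* single exit: the lock is dropped and the buffer freed on every path */
        up(&Adapter->NVMRdmWrmLock);
        kfree(buf);
        return status;
}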
@@ -2288,7 +2024,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of Ioctl buffer is failed from user space");
- Status = -EFAULT;
break;
}
@@ -2296,7 +2031,6 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if(Status)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of control bit mask failed from user space");
- Status = -EFAULT;
break;
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
@@ -2315,71 +2049,44 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
DevInfo.u32NVMType = Adapter->eNVMType;
DevInfo.u32InterfaceType = BCM_USB;
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- Status = -EFAULT;
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
if(IoBuffer.OutputLength < sizeof(DevInfo))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"User Passed buffer length is less than actural buffer size");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"user passed buffer size :0x%lX, expected size :0x%zx",IoBuffer.OutputLength, sizeof(DevInfo));
- Status = -EINVAL;
- break;
- }
- Status = copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copying Dev info structure to user space buffer failed");
- Status = -EFAULT;
- break;
- }
+ return -EINVAL;
+
+ if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
+ return -EFAULT;
}
break ;
case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
{
ST_TIME_ELAPSED stTimeElapsedSinceNetEntry = {0};
- struct timeval tv = {0} ;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
- Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- Status = -EFAULT;
- break;
- }
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+ return -EFAULT;
+
if(IoBuffer.OutputLength < sizeof(ST_TIME_ELAPSED))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"User Passed buffer length:0x%lx is less than expected buff size :0x%zX",IoBuffer.OutputLength,sizeof(ST_TIME_ELAPSED));
- Status = -EINVAL;
- break;
- }
+ return -EINVAL;
- //stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = Adapter->liTimeSinceLastNetEntry;
- do_gettimeofday(&tv);
- stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = tv.tv_sec - Adapter->liTimeSinceLastNetEntry;
+ stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
- Status = copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copying ST_TIME_ELAPSED structure to user space buffer failed");
- Status = -EFAULT;
- break;
- }
+ if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED)))
+ return -EFAULT;
}
break;
- default:
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "wrong input %x",cmd);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In default ioctl %d\n", cmd);
- Status = STATUS_FAILURE;
+ case IOCTL_CLOSE_NOTIFICATION:
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_CLOSE_NOTIFICATION");
+ break;
+ default:
+ pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
+ Status = STATUS_FAILURE;
break;
}
return Status;
@@ -2395,59 +2102,37 @@ static struct file_operations bcm_fops = {
.llseek = no_llseek,
};
+extern struct class *bcm_class;
int register_control_device_interface(PMINI_ADAPTER Adapter)
{
+
if(Adapter->major>0)
- return Adapter->major;
- Adapter->major = register_chrdev(0, "tarang", &bcm_fops);
- if(Adapter->major < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "register_chrdev:Failed to registering WiMax control char device!");
- return Adapter->major;
- }
-
- bcm_class = NULL;
- bcm_class = class_create (THIS_MODULE, "tarang");
- if(IS_ERR (bcm_class))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unable to create class\n");
- unregister_chrdev(Adapter->major, "tarang");
- Adapter->major = 0;
- return -ENODEV;
+ return Adapter->major;
+
+ Adapter->major = register_chrdev(0, DEV_NAME, &bcm_fops);
+ if(Adapter->major < 0) {
+ pr_err(DRV_NAME ": could not created character device\n");
+ return Adapter->major;
}
+
Adapter->pstCreatedClassDevice = device_create (bcm_class, NULL,
- MKDEV(Adapter->major, 0),
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
- NULL ,
-#endif
- "tarang");
+ MKDEV(Adapter->major, 0), Adapter,
+ DEV_NAME);
- if(IS_ERR(Adapter->pstCreatedClassDevice))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "class device did not get created : %ld", PTR_ERR(Adapter->pstCreatedClassDevice) );
+ if(IS_ERR(Adapter->pstCreatedClassDevice)) {
+ pr_err(DRV_NAME ": class device create failed\n");
+ unregister_chrdev(Adapter->major, DEV_NAME);
+ return PTR_ERR(Adapter->pstCreatedClassDevice);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Got Major No: %d", Adapter->major);
- return 0;
+
+ return 0;
}
void unregister_control_device_interface(PMINI_ADAPTER Adapter)
{
- if(Adapter->major > 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "destroying class device");
+ if(Adapter->major > 0) {
device_destroy (bcm_class, MKDEV(Adapter->major, 0));
+ unregister_chrdev(Adapter->major, DEV_NAME);
}
- if(!IS_ERR(bcm_class))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "destroying created class ");
- class_destroy (bcm_class);
- bcm_class = NULL;
- }
- if(Adapter->major > 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"unregistering character interface");
- unregister_chrdev(Adapter->major, "tarang");
- }
-
}
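Most of the bcm_char_ioctl() hunks above converge on the usual kernel idiom for user copies: a non-zero return from copy_from_user()/copy_to_user() means the copy faulted, so the handler returns -EFAULT (or -EINVAL for an undersized buffer) immediately instead of threading an error through a local Status variable. A minimal sketch of the idiom, reusing the driver's IOCTL_BUFFER layout (the handler name and the FLASH2X_CS_INFO payload are only examples):

static long example_get_cs_info(PMINI_ADAPTER Adapter, void __user *argp)
{
        IOCTL_BUFFER IoBuffer;

        /* a non-zero return means part of the copy faulted */
        if (copy_from_user(&IoBuffer, argp, sizeof(IoBuffer)))
                return -EFAULT;

        /* validate the caller's buffer before writing into it */
        if (IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
                return -EINVAL;

        if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo,
                         sizeof(FLASH2X_CS_INFO)))
                return -EFAULT;

        return STATUS_SUCCESS;
}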
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index bc2969821421..a6ce2396c791 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -1,264 +1,238 @@
#include "headers.h"
-static INT bcm_notify_event(struct notifier_block *nb, ULONG event, PVOID dev)
+struct net_device *gblpnetdev;
+
+static INT bcm_open(struct net_device *dev)
{
- struct net_device *ndev = (struct net_device*)dev;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- //PMINI_ADAPTER Adapter = (PMINI_ADAPTER)ndev->priv;
- if(strncmp(ndev->name,gblpnetdev->name,5)==0)
- {
- switch(event)
- {
- case NETDEV_CHANGEADDR:
- case NETDEV_GOING_DOWN:
- /*ignore this */
- break;
- case NETDEV_DOWN:
- break;
-
- case NETDEV_UP:
- break;
-
- case NETDEV_REGISTER:
- /* Increment the Reference Count for "veth0" */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register RefCount: %x\n",
- netdev_refcnt_read(ndev));
- dev_hold(ndev);
- break;
-
- case NETDEV_UNREGISTER:
- /* Decrement the Reference Count for "veth0" */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregister RefCnt: %x\n",
- netdev_refcnt_read(ndev));
- dev_put(ndev);
- break;
- };
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+ if (Adapter->fw_download_done == FALSE) {
+ pr_notice(PFX "%s: link up failed (download in progress)\n",
+ dev->name);
+ return -EBUSY;
}
- return NOTIFY_DONE;
-}
-/* Notifier block to receive netdevice events */
-static struct notifier_block bcm_notifier_block =
-{
- .notifier_call = bcm_notify_event,
-};
+ if (netif_msg_ifup(Adapter))
+ pr_info(PFX "%s: enabling interface\n", dev->name);
-struct net_device *gblpnetdev;
-/***************************************************************************************/
-/* proto-type of lower function */
-#ifdef BCM_SHM_INTERFACE
-const char *bcmVirtDeviceName="bcmeth";
-#endif
+ if (Adapter->LinkUpStatus) {
+ if (netif_msg_link(Adapter))
+ pr_info(PFX "%s: link up\n", dev->name);
-static INT bcm_open(struct net_device *dev)
-{
- PMINI_ADAPTER Adapter = NULL ; //(PMINI_ADAPTER)dev->priv;
- Adapter = GET_BCM_ADAPTER(dev);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "======>");
- if(Adapter->fw_download_done==FALSE)
- return -EINVAL;
- Adapter->if_up=1;
- if(Adapter->LinkUpStatus == 1){
- if(netif_queue_stopped(Adapter->dev)){
- netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
- }
+ netif_carrier_on(Adapter->dev);
+ netif_start_queue(Adapter->dev);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "<======");
- return 0;
+ return 0;
}
static INT bcm_close(struct net_device *dev)
{
- PMINI_ADAPTER Adapter = NULL ;//gpadapter ;
- Adapter = GET_BCM_ADAPTER(dev);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "=====>");
- Adapter->if_up=0;
- if(!netif_queue_stopped(dev)) {
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"<=====");
- return 0;
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+ if (netif_msg_ifdown(Adapter))
+ pr_info(PFX "%s: disabling interface\n", dev->name);
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ return ClassifyPacket(netdev_priv(dev), skb);
}
-static struct net_device_stats *bcm_get_stats(struct net_device *dev)
+/*******************************************************************
+* Function - bcm_transmit()
+*
+* Description - Main transmit entry point for the virtual
+* interface (eth0). The packet is mapped to a
+* suitable service flow queue, enqueued, and the
+* transmit thread is woken to send it out.
+*
+* Parameter - skb - Pointer to the socket buffer structure
+* dev - Pointer to the virtual net device structure
+*
+*********************************************************************/
+
+static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev)
{
- PLINUX_DEP_DATA pLinuxData=NULL;
- PMINI_ADAPTER Adapter = NULL ;// gpadapter ;
- Adapter = GET_BCM_ADAPTER(dev);
- pLinuxData = (PLINUX_DEP_DATA)(Adapter->pvOsDepData);
-
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Dev = %p, pLinuxData = %p", dev, pLinuxData);
- pLinuxData->netstats.rx_packets=atomic_read(&Adapter->RxRollOverCount)*64*1024+Adapter->PrevNumRecvDescs;
- pLinuxData->netstats.rx_bytes=atomic_read(&Adapter->GoodRxByteCount)+atomic_read(&Adapter->BadRxByteCount);
- pLinuxData->netstats.rx_dropped=atomic_read(&Adapter->RxPacketDroppedCount);
- pLinuxData->netstats.rx_errors=atomic_read(&Adapter->RxPacketDroppedCount);
- pLinuxData->netstats.rx_length_errors=0;
- pLinuxData->netstats.rx_frame_errors=0;
- pLinuxData->netstats.rx_crc_errors=0;
- pLinuxData->netstats.tx_bytes=atomic_read(&Adapter->GoodTxByteCount);
- pLinuxData->netstats.tx_packets=atomic_read(&Adapter->TxTotalPacketCount);
- pLinuxData->netstats.tx_dropped=atomic_read(&Adapter->TxDroppedPacketCount);
-
- return &(pLinuxData->netstats);
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+ u16 qindex = skb_get_queue_mapping(skb);
+
+
+ if (Adapter->device_removed || !Adapter->LinkUpStatus)
+ goto drop;
+
+ if (Adapter->TransferMode != IP_PACKET_ONLY_MODE)
+ goto drop;
+
+ if (INVALID_QUEUE_INDEX == qindex)
+ goto drop;
+
+ if (Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >=
+ SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
+ return NETDEV_TX_BUSY;
+
+ /* Now Enqueue the packet */
+ if (netif_msg_tx_queued(Adapter))
+ pr_info(PFX "%s: enqueueing packet to queue %d\n",
+ dev->name, qindex);
+
+ spin_lock(&Adapter->PackInfo[qindex].SFQueueLock);
+ Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
+ Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++;
+
+ *((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies;
+ ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue,
+ Adapter->PackInfo[qindex].LastTxQueue, skb);
+ atomic_inc(&Adapter->TotalPacketCount);
+ spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock);
+
+ /* FIXME - this is racy and incorrect, replace with work queue */
+ if (!atomic_read(&Adapter->TxPktAvail)) {
+ atomic_set(&Adapter->TxPktAvail, 1);
+ wake_up(&Adapter->tx_packet_wait_queue);
+ }
+ return NETDEV_TX_OK;
+
+ drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
+
+
+
/**
@ingroup init_functions
Register other driver entry points with the kernel
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
-static struct net_device_ops bcmNetDevOps = {
+static const struct net_device_ops bcmNetDevOps = {
.ndo_open = bcm_open,
.ndo_stop = bcm_close,
- .ndo_get_stats = bcm_get_stats,
.ndo_start_xmit = bcm_transmit,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = bcm_select_queue,
};
-#endif
-int register_networkdev(PMINI_ADAPTER Adapter)
+static struct device_type wimax_type = {
+ .name = "wimax",
+};
+
+static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- int result=0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
- void **temp = NULL; /* actually we're *allocating* the device in alloc_etherdev */
-#endif
- Adapter->dev = alloc_etherdev(sizeof(PMINI_ADAPTER));
- if(!Adapter->dev)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "ERR: No Dev");
- return -ENOMEM;
- }
- gblpnetdev = Adapter->dev;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
- Adapter->dev->priv = Adapter;
-#else
- temp = netdev_priv(Adapter->dev);
- *temp = (void *)Adapter;
-#endif
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "init adapterptr: %x %x\n", (UINT)Adapter, temp);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
- Adapter->dev->netdev_ops = &bcmNetDevOps;
-#else
- Adapter->dev->open = bcm_open;
- Adapter->dev->stop = bcm_close;
- Adapter->dev->get_stats = bcm_get_stats;
- Adapter->dev->hard_start_xmit = bcm_transmit;
- Adapter->dev->hard_header_len = ETH_HLEN + LEADER_SIZE;
-#endif
-
-#ifndef BCM_SHM_INTERFACE
- Adapter->dev->mtu = MTU_SIZE; /* 1400 Bytes */
- /* Read the MAC Address from EEPROM */
- ReadMacAddressFromNVM(Adapter);
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_TP;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+static void bcm_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+ PS_INTERFACE_ADAPTER psIntfAdapter = Adapter->pvInterfaceAdapter;
+ struct usb_device *udev = interface_to_usbdev(psIntfAdapter->interface);
- /* Register the notifier block for getting netdevice events */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Registering netdevice notifier\n");
- result = register_netdevice_notifier(&bcm_notifier_block);
- if(result)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM Notifier Block did not get registered");
- Adapter->bNetdeviceNotifierRegistered = FALSE;
- return result;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM Notifier got Registered");
- Adapter->bNetdeviceNotifierRegistered = TRUE;
- }
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u",
+ Adapter->uiFlashLayoutMajorVersion,
+ Adapter->uiFlashLayoutMinorVersion);
-#else
-
- Adapter->dev->mtu = CPE_MTU_SIZE;
-
-#if 0
- //for CPE - harcode the virtual mac address
- Adapter->dev->dev_addr[0] = MII_WIMAX_MACADDRESS[0];
- Adapter->dev->dev_addr[1] = MII_WIMAX_MACADDRESS[1];
- Adapter->dev->dev_addr[2] = MII_WIMAX_MACADDRESS[2];
- Adapter->dev->dev_addr[3] = MII_WIMAX_MACADDRESS[3];
- Adapter->dev->dev_addr[4] = MII_WIMAX_MACADDRESS[4];
- Adapter->dev->dev_addr[5] = MII_WIMAX_MACADDRESS[5];
-#else
- ReadMacAddressFromNVM(Adapter);
-#endif
- strcpy(Adapter->dev->name, bcmVirtDeviceName); //Copy the device name
-
-#endif
-
- result = register_netdev(Adapter->dev);
- if (!result)
- {
- Adapter->bNetworkInterfaceRegistered = TRUE ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Beceem Network device name is %s!", Adapter->dev->name);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Network device can not be registered!");
- Adapter->bNetworkInterfaceRegistered = FALSE ;
- return result;
- }
+ usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
+}
-#if 0
- Adapter->stDebugState.debug_level = DBG_LVL_CURR;
- Adapter->stDebugState.type =(UINT)0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_OTHERS] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_RX] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_TX] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xffffffff;
+static u32 bcm_get_link(struct net_device *dev)
+{
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
- printk("-------ps_adapter->stDebugState.type=%x\n",Adapter->stDebugState.type);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_OTHERS]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_OTHERS]);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_RX]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_RX]);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_TX]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_TX]);
-#endif
+ return Adapter->LinkUpStatus;
+}
- return 0;
+static u32 bcm_get_msglevel(struct net_device *dev)
+{
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+ return Adapter->msg_enable;
}
-void bcm_unregister_networkdev(PMINI_ADAPTER Adapter)
+static void bcm_set_msglevel(struct net_device *dev, u32 level)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregistering the Net Dev...\n");
- if(Adapter->dev && !IS_ERR(Adapter->dev) && Adapter->bNetworkInterfaceRegistered)
- unregister_netdev(Adapter->dev);
- /* Unregister the notifier block */
- if(Adapter->bNetdeviceNotifierRegistered == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregistering netdevice notifier\n");
- unregister_netdevice_notifier(&bcm_notifier_block);
- }
+ PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+ Adapter->msg_enable = level;
}
-static int bcm_init(void)
+static const struct ethtool_ops bcm_ethtool_ops = {
+ .get_settings = bcm_get_settings,
+ .get_drvinfo = bcm_get_drvinfo,
+ .get_link = bcm_get_link,
+ .get_msglevel = bcm_get_msglevel,
+ .set_msglevel = bcm_set_msglevel,
+};
+
+int register_networkdev(PMINI_ADAPTER Adapter)
{
+ struct net_device *net = Adapter->dev;
+ PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter;
+ struct usb_interface *udev = IntfAdapter->interface;
+ struct usb_device *xdev = IntfAdapter->udev;
+
int result;
- result = InterfaceInitialize();
- if(result)
- {
- printk("Initialisation failed for usbbcm");
- }
- else
- {
- printk("Initialised usbbcm");
+
+ net->netdev_ops = &bcmNetDevOps;
+ net->ethtool_ops = &bcm_ethtool_ops;
+ net->mtu = MTU_SIZE; /* 1400 Bytes */
+ net->tx_queue_len = TX_QLEN;
+ net->flags |= IFF_NOARP;
+
+ netif_carrier_off(net);
+
+ SET_NETDEV_DEVTYPE(net, &wimax_type);
+
+ /* Read the MAC Address from EEPROM */
+ result = ReadMacAddressFromNVM(Adapter);
+ if (result != STATUS_SUCCESS) {
+ dev_err(&udev->dev,
+ PFX "Error in Reading the mac Address: %d", result);
+ return -EIO;
}
- return result;
-}
+ result = register_netdev(net);
+ if (result)
+ return result;
-static void bcm_exit(void)
-{
- printk("%s %s Calling InterfaceExit\n",__FILE__, __FUNCTION__);
- InterfaceExit();
- printk("%s %s InterfaceExit returned\n",__FILE__, __FUNCTION__);
-}
+ gblpnetdev = Adapter->dev;
-module_init(bcm_init);
-module_exit(bcm_exit);
-MODULE_LICENSE ("GPL");
+ if (netif_msg_probe(Adapter))
+ dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n",
+ net->name, xdev->bus->bus_name, xdev->devpath,
+ net->dev_addr);
+ return 0;
+}
+void unregister_networkdev(PMINI_ADAPTER Adapter)
+{
+ struct net_device *net = Adapter->dev;
+ PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter;
+ struct usb_interface *udev = IntfAdapter->interface;
+ struct usb_device *xdev = IntfAdapter->udev;
+
+ if (netif_msg_probe(Adapter))
+ dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n",
+ net->name, xdev->bus->bus_name, xdev->devpath);
+
+ unregister_netdev(Adapter->dev);
+}
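The rewritten Bcmnet.c hangs everything off net_device_ops and ethtool_ops and relies on ndo_select_queue() to spread packets over the per-service-flow TX queues. A sketch of how a multiqueue net_device would be allocated and wired to these ops; in this driver the allocation actually happens in the USB probe path, so the function below and its error handling are illustrative only:

static int example_alloc_netdev(struct usb_interface *intf)
{
        struct net_device *net;
        PMINI_ADAPTER Adapter;
        int ret;

        /* one TX queue per service flow */
        net = alloc_etherdev_mq(sizeof(MINI_ADAPTER), NO_OF_QUEUES);
        if (!net)
                return -ENOMEM;

        Adapter = netdev_priv(net);
        Adapter->dev = net;
        /* default level consumed by the netif_msg_*() checks used above */
        Adapter->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV | NETIF_MSG_LINK);

        net->netdev_ops = &bcmNetDevOps;
        net->ethtool_ops = &bcm_ethtool_ops;
        SET_NETDEV_DEV(net, &intf->dev);
        netif_carrier_off(net);

        ret = register_netdev(net);
        if (ret)
                free_netdev(net);
        return ret;
}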
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index 6f388a374ddc..5ac45820d564 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -15,6 +15,7 @@ typedef enum _E_CLASSIFIER_ACTION
eDeleteClassifier
}E_CLASSIFIER_ACTION;
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
/************************************************************
* Function - SearchSfid
@@ -28,7 +29,7 @@ typedef enum _E_CLASSIFIER_ACTION
* Returns - Queue index for this SFID(If matched)
Else Invalid Queue Index(If Not matched)
************************************************************/
-__inline INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
{
INT iIndex=0;
for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
@@ -47,26 +48,16 @@ __inline INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
* Returns - Queue index for the free SFID
* Else returns Invalid Index.
****************************************************************/
-__inline INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
{
UINT uiIndex=0;
+
for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
if(Adapter->PackInfo[uiIndex].ulSFID==0)
return uiIndex;
return NO_OF_QUEUES+1;
}
-__inline int SearchVcid(PMINI_ADAPTER Adapter,unsigned short usVcid)
-{
- int iIndex=0;
- for(iIndex=(NO_OF_QUEUES-1);iIndex>=0;iIndex--)
- if(Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
- return iIndex;
- return NO_OF_QUEUES+1;
-
-}
-
-
/*
Function: SearchClsid
Description: This routinue would search Classifier having specified ClassifierID as input parameter
@@ -76,7 +67,7 @@ Input parameters: PMINI_ADAPTER Adapter - Adapter Context
Return: int :Classifier table index of matching entry
*/
-__inline int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
+static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifierID)
{
unsigned int uiClassifierIndex = 0;
for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
@@ -94,7 +85,7 @@ __inline int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16 uiClassifi
This routinue would search Free available Classifier entry in classifier table.
@return free Classifier Entry index in classifier table for specified SF
*/
-static __inline int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
)
{
unsigned int uiClassifierIndex = 0;
@@ -106,7 +97,7 @@ static __inline int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
return MAX_CLASSIFIERS+1;
}
-VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
+static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
//deleting all the packet held in the SF
flush_queue(Adapter,uiSearchRuleIndex);
@@ -985,7 +976,7 @@ static VOID CopyToAdapter( register PMINI_ADAPTER Adapter, /**<Pointer to the A
if(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication)
{
- bcm_kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
+ kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = NULL;
}
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
@@ -1061,12 +1052,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinimumTolerableTrafficRate : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32MinimumTolerableTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32RequesttransmissionPolicy : 0x%X",
- pstAddIndication->sfAuthorizedSet.u32RequesttransmissionPolicy);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%X",
pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%X",
@@ -1114,13 +1099,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAuthorizedSet.u8PagingPreference);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval : 0x%X",
pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "MBSZoneIdentifierassignmentLength : 0x%X",
- pstAddIndication->sfAuthorizedSet.MBSZoneIdentifierassignmentLength);
- for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "MBSZoneIdentifierassignment : 0x%X",
- pstAddIndication->sfAuthorizedSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
*(unsigned int*)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
@@ -1158,11 +1136,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u8ProtocolLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol : 0x%02X ",
@@ -1278,14 +1251,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAdmittedSet.u8QosParamSet);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
pstAddIndication->sfAdmittedSet.u8TrafficPriority);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32MaxSustainedTrafficRate : 0x%02X",
- ntohl(pstAddIndication->sfAdmittedSet.u32MaxSustainedTrafficRate));
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32MinimumTolerableTrafficRate : 0x%X",
- pstAddIndication->sfAdmittedSet.u32MinimumTolerableTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32RequesttransmissionPolicy : 0x%X",
- pstAddIndication->sfAdmittedSet.u32RequesttransmissionPolicy);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
@@ -1339,13 +1304,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfAdmittedSet.u16TimeBase);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference : 0x%X",
pstAddIndication->sfAdmittedSet.u8PagingPreference);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "MBSZoneIdentifierassignmentLength : 0x%X",
- pstAddIndication->sfAdmittedSet.MBSZoneIdentifierassignmentLength);
- for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "MBSZoneIdentifierassignment : 0x%X",
- pstAddIndication->sfAdmittedSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference : 0x%02X",
@@ -1378,11 +1336,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolLength :0x%02X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8Protocol);
@@ -1497,20 +1450,10 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfActiveSet.u8QosParamSet);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority : 0x%02X",
pstAddIndication->sfActiveSet.u8TrafficPriority);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32MaxSustainedTrafficRate : 0x%02X",
- ntohl(pstAddIndication->sfActiveSet.u32MaxSustainedTrafficRate));
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst : 0x%X",
pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32MinimumTolerableTrafficRate : 0x%X",
- pstAddIndication->sfActiveSet.u32MinimumTolerableTrafficRate);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32RequesttransmissionPolicy : 0x%X",
- pstAddIndication->sfActiveSet.u32RequesttransmissionPolicy);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength : 0x%02X",
pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam : 0x%02X",
@@ -1558,13 +1501,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
pstAddIndication->sfActiveSet.u16TimeBase);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference : 0x%X",
pstAddIndication->sfActiveSet.u8PagingPreference);
-#if 0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " MBSZoneIdentifierassignmentLength : 0x%X",
- pstAddIndication->sfActiveSet.MBSZoneIdentifierassignmentLength);
- for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " MBSZoneIdentifierassignment : 0x%X",
- pstAddIndication->sfActiveSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference : 0x%X",
@@ -1597,11 +1533,6 @@ static VOID DumpCmControlPacket(PVOID pvBuffer)
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " u8ProtocolLength :0x%X ",
- psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol : 0x%X ",
psfCSType->cCPacketClassificationRule.u8Protocol);
@@ -1706,12 +1637,8 @@ static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet
return 0;
}
ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " RestoreSFParam: Total Words of DSX Message To Read: 0x%zx From Target At : 0x%lx ",
- nBytesToRead/sizeof(ULONG),ulAddrSFParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "sizeof(stServiceFlowParamSI) = %zx", sizeof(stServiceFlowParamSI));
//Read out the SF Param Set At the indicated Location
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "nBytesToRead = %x", nBytesToRead);
if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
return STATUS_FAILURE;
@@ -1719,23 +1646,20 @@ static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet
}
-static __inline ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG ulAddrSFParamSet)
{
UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
- UINT uiRetVal =0;
+ int ret = 0;
if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
{
return 0;
}
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " StoreSFParam: Total Words of DSX Message To Write: 0x%zX To Target At : 0x%lX ",(nBytesToWrite/sizeof(ULONG)),ulAddrSFParamSet);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "WRM with %x bytes",nBytesToWrite);
-
- uiRetVal = wrm(Adapter,ulAddrSFParamSet,(PUCHAR)pucSrcBuffer, nBytesToWrite);
- if(uiRetVal < 0) {
+ ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
+ if (ret < 0) {
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed",__FUNCTION__, __LINE__);
- return uiRetVal;
+ return ret;
}
return 1;
}
@@ -1778,7 +1702,7 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
}
// For DSA_REQ, only upto "psfAuthorizedSet" parameter should be accessed by driver!
- pstAddIndication=(stLocalSFAddIndication *)kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+ pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
if(NULL==pstAddIndication)
return 0;
@@ -1844,7 +1768,7 @@ ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *p
(*puBufferLength) = sizeof(stLocalSFAddIndication);
*(stLocalSFAddIndication *)pvBuffer = *pstAddIndication;
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
return 1;
}
@@ -1931,7 +1855,7 @@ static inline stLocalSFAddIndicationAlt
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
return pstAddIndicationDest;
failed_restore_sf_param:
- bcm_kfree(pstAddIndicationDest);
+ kfree(pstAddIndicationDest);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" );
return NULL;
}
@@ -1988,7 +1912,7 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
return 1;
}
-ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
{
ULONG ulTargetDSXBufferAddress;
ULONG ulTargetDsxBufferIndexToUse,ulMaxTry;
@@ -2049,7 +1973,7 @@ INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
if(Adapter->caDsxReqResp)
{
- bcm_kfree(Adapter->caDsxReqResp);
+ kfree(Adapter->caDsxReqResp);
}
return 0;
@@ -2102,7 +2026,7 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
}
break;
case DSA_RSP:
@@ -2118,7 +2042,7 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
case DSA_ACK:
{
UINT uiSearchRuleIndex=0;
- struct timeval tv = {0};
+
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
uiSearchRuleIndex=SearchFreeSfid(Adapter);
@@ -2169,7 +2093,7 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
}
else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0))
@@ -2200,14 +2124,13 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
if(!Adapter->LinkUpStatus)
{
netif_carrier_on(Adapter->dev);
- netif_start_queue(Adapter->dev);
+ netif_start_queue(Adapter->dev);
Adapter->LinkUpStatus = 1;
- do_gettimeofday(&tv);
-
+ if (netif_msg_link(Adapter))
+ pr_info(PFX "%s: link up\n", Adapter->dev->name);
atomic_set(&Adapter->TxPktAvail, 1);
wake_up(&Adapter->tx_packet_wait_queue);
- Adapter->liTimeSinceLastNetEntry = tv.tv_sec;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============Tx Service Flow Created!");
+ Adapter->liTimeSinceLastNetEntry = get_seconds();
}
}
}
@@ -2218,13 +2141,13 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
}
}
else
{
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
return FALSE;
}
}
@@ -2239,7 +2162,7 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
}
break;
case DSC_RSP:
@@ -2312,13 +2235,13 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
else if(pstChangeIndication->u8CC == 6)
{
deleteSFBySfid(Adapter,uiSearchRuleIndex);
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
}
}
else
{
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
return FALSE;
}
}
@@ -2355,7 +2278,7 @@ BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /**<Pointer to the Adap
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
break;
default:
- bcm_kfree(pstAddIndication);
+ kfree(pstAddIndication);
return FALSE ;
}
return TRUE;
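Several CmHost.c helpers (the Search*() lookups, deleteSFBySfid(), GetNextTargetBufferLocation()) drop their __inline markers and become static, which is why a forward declaration appears near the top of the file: a static function that is called before its definition still has to be declared first. The pattern, with illustrative names:

static int example_helper(int tid);             /* forward declaration */

static int example_caller(int tid)
{
        /* caller sits earlier in the file than the definition */
        return example_helper(tid);
}

static int example_helper(int tid)
{
        return tid + 1;
}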
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
index 847782c3765b..8f689769b4ba 100644
--- a/drivers/staging/bcm/CmHost.h
+++ b/drivers/staging/bcm/CmHost.h
@@ -150,8 +150,6 @@ typedef struct stLocalSFChangeIndicationAlt{
ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength);
-ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
-
INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter);
INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter);
@@ -159,7 +157,6 @@ ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter);
BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer);
-VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex);
#pragma pack (pop)
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 8907e211d483..1c7db81a1ee8 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -1,6 +1,5 @@
#include "headers.h"
-#ifndef BCM_SHM_INTERFACE
#define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
@@ -188,17 +187,6 @@ static DDR_SET_NODE asDPLL_266MHZ[] = {
{0x0f000840,0x0FFF1B00},
{0x0f000870,0x00000002}
};
-#if 0
-static DDR_SET_NODE asDPLL_800MHZ[] = {
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000810,0x00000F95},
- {0x0f000820,0x03F1365B},
- {0x0f000840,0x0FFF0000},
- {0x0f000880,0x000003DD},
- {0x0f000860,0x00000000}
- };
-#endif
#define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11 //index for 0x0F007000
static DDR_SET_NODE asT3B_DDRSetting133MHz[] = {// # DPLL Clock Setting
@@ -788,7 +776,7 @@ int ddr_init(MINI_ADAPTER *Adapter)
{
PDDR_SETTING psDDRSetting=NULL;
ULONG RegCount=0;
- ULONG value = 0;
+ UINT value = 0;
UINT uiResetValue = 0;
UINT uiClockSetting = 0;
int retval = STATUS_SUCCESS;
@@ -982,7 +970,7 @@ int ddr_init(MINI_ADAPTER *Adapter)
{
value = psDDRSetting->ulRegValue;
}
- retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, (PUINT)&value, sizeof(value));
+ retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
if(STATUS_SUCCESS != retval) {
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
break;
@@ -1298,5 +1286,4 @@ int download_ddr_settings(PMINI_ADAPTER Adapter)
return retval;
}
-#endif
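In ddr_init() the scratch register value is now a UINT so its address can be handed to wrmalt() directly; the old (PUINT) cast on a ULONG local hid a width mismatch on 64-bit builds, where ULONG is wider than UINT. A minimal sketch of the corrected call (the wrapper is illustrative; wrmalt() and the DDR_SETTING fields are the driver's):

static int example_write_ddr_reg(PMINI_ADAPTER Adapter, PDDR_SETTING psDDRSetting)
{
        /* UINT local, so &value matches wrmalt()'s expected buffer type */
        UINT value = psDDRSetting->ulRegValue;

        return wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
}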
diff --git a/drivers/staging/bcm/Debug.c b/drivers/staging/bcm/Debug.c
deleted file mode 100644
index 2703f304756d..000000000000
--- a/drivers/staging/bcm/Debug.c
+++ /dev/null
@@ -1,41 +0,0 @@
-#include "headers.h"
-
-static UINT current_debug_level=BCM_SCREAM;
-
-int bcm_print_buffer( UINT debug_level, const char *function_name,
- char *file_name, int line_number, unsigned char *buffer, int bufferlen, enum _BASE_TYPE base)
-{
- static const char * const buff_dump_base[] = {
- "DEC", "HEX", "OCT", "BIN"
- };
- if(debug_level>=current_debug_level)
- {
- int i=0;
- printk("\n%s:%s:%d:Buffer dump of size 0x%x in the %s:\n", file_name, function_name, line_number, bufferlen, buff_dump_base[1]);
- for(;i<bufferlen;i++)
- {
- if(i && !(i%16) )
- printk("\n");
- switch(base)
- {
- case BCM_BASE_TYPE_DEC:
- printk("%03d ", buffer[i]);
- break;
- case BCM_BASE_TYPE_OCT:
- printk("%0x03o ", buffer[i]);
- break;
- case BCM_BASE_TYPE_BIN:
- printk("%02x ", buffer[i]);
- break;
- case BCM_BASE_TYPE_HEX:
- default:
- printk("%02X ", buffer[i]);
- break;
- }
- }
- printk("\n");
- }
- return 0;
-}
-
-
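Debug.c and its hand-rolled bcm_print_buffer() can go because the core print_hex_dump() helper already produces the offset/hex layout the deleted function emulated. A one-line equivalent of the old dumper (the prefix string and wrapper are illustrative):

static void example_hex_dump(const void *buf, size_t len)
{
        /* 16 bytes per row, 1-byte groups, offset prefix, no ASCII column */
        print_hex_dump(KERN_DEBUG, "bcm: ", DUMP_PREFIX_OFFSET,
                       16, 1, buf, len, false);
}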
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
index 3d788b59ab57..3138729cf34f 100644
--- a/drivers/staging/bcm/Debug.h
+++ b/drivers/staging/bcm/Debug.h
@@ -9,34 +9,6 @@
#include <linux/string.h>
#define NONE 0xFFFF
-typedef enum _BASE_TYPE
-{
- BCM_BASE_TYPE_DEC,
- BCM_BASE_TYPE_OCT,
- BCM_BASE_TYPE_BIN,
- BCM_BASE_TYPE_HEX,
- BCM_BASE_TYPE_NONE,
-} BASE_TYPE, *PBASE_TYPE;
-
-int bcm_print_buffer( UINT debug_level, const char *function_name,
- char *file_name, int line_number, unsigned char *buffer, int bufferlen, BASE_TYPE base);
-
-#ifdef BCM_SHM_INTERFACE
-#define CPE_VIRTUAL_ERROR_CODE_BASE_ADDR (0xBFC02E00 + 0x4C)
-// ERROR codes for debugging
-extern unsigned char u32ErrorCounter ;
-#define ERROR_DEVICE_REMOVED 0x1
-#define ERROR_LEADER_LENGTH_ZERO 0x2
-#define ERROR_LEADER_LENGTH_CORRUPTED 0x3
-#define ERROR_NO_SKBUFF 0x4
-
-#define ERROR_DL_MODULE 0xaa000000
-extern void CPE_ERROR_LOG(unsigned int module,unsigned int code);
-
-#endif
-
-
-
//--------------------------------------------------------------------------------
@@ -242,44 +214,34 @@ typedef struct _S_BCM_DEBUG_STATE {
//--- Only for direct printk's; "hidden" to API.
#define DBG_TYPE_PRINTK 3
-#define PRINTKS_ON 1 // "hidden" from API, set to 0 to turn off all printk's
-
-#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) do { \
- if ((DBG_TYPE_PRINTK == Type) && (PRINTKS_ON)) { \
- printk ("%s:" string, __FUNCTION__, ##args); \
- printk("\n"); \
- } else if (!Adapter) \
- ; \
- else { \
- if (((dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level) && \
- ((Type & Adapter->stDebugState.type) && (SubType & Adapter->stDebugState.subtype[Type]))) { \
- if (dbg_level & DBG_NO_FUNC_PRINT) \
- printk (string, ##args); \
- else \
- { \
- printk ("%s:" string, __FUNCTION__, ##args); \
- printk("\n"); \
- } \
- } \
- } \
-} while (0)
-#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) do { \
- if ((DBG_TYPE_PRINTK == Type) && (PRINTKS_ON)) { \
- bcm_print_buffer( dbg_level, __FUNCTION__, __FILE__, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX); \
- } else if (!Adapter) \
- ; \
- else { \
- if (((dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level) && \
- ((Type & Adapter->stDebugState.type) && (SubType & Adapter->stDebugState.subtype[Type]))) { \
- if (dbg_level & DBG_NO_FUNC_PRINT) \
- bcm_print_buffer( dbg_level, NULL, NULL, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX); \
- else \
- bcm_print_buffer( dbg_level, __FUNCTION__, __FILE__, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX); \
- } \
- } \
+#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \
+ do { \
+ if (DBG_TYPE_PRINTK == Type) \
+ pr_info("%s:" string, __func__, ##args); \
+ else if (Adapter && \
+ (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
+ (Type & Adapter->stDebugState.type) && \
+ (SubType & Adapter->stDebugState.subtype[Type])) { \
+ if (dbg_level & DBG_NO_FUNC_PRINT) \
+ printk(KERN_DEBUG string, ##args); \
+ else \
+ printk(KERN_DEBUG "%s:" string, __func__, ##args); \
+ } \
} while (0)
+#define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level, buffer, bufferlen) do { \
+ if (DBG_TYPE_PRINTK == Type || \
+ (Adapter && \
+ (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
+ (Type & Adapter->stDebugState.type) && \
+ (SubType & Adapter->stDebugState.subtype[Type]))) { \
+ printk(KERN_DEBUG "%s:\n", __func__); \
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, \
+ 16, 1, buffer, bufferlen, false); \
+ } \
+} while (0)
+
#define BCM_SHOW_DEBUG_BITMAP(Adapter) do { \
int i; \
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 7b2ec28a4bc1..2b1e9e17e11c 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -11,8 +11,7 @@ When a control packet is received, analyze the
Enqueue the control packet for Application.
@return None
*/
-VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- struct sk_buff *skb) /**<Pointer to the socket buffer*/
+static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, struct sk_buff *skb)
{
PPER_TARANG_DATA pTarang = NULL;
BOOLEAN HighPriorityMessage = FALSE;
@@ -20,8 +19,10 @@ VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter
CHAR cntrl_msg_mask_bit = 0;
BOOLEAN drop_pkt_flag = TRUE ;
USHORT usStatus = *(PUSHORT)(skb->data);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "=====>");
- /* Get the Leader field */
+
+ if (netif_msg_pktdata(Adapter))
+ print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, skb->len, 0);
switch(usStatus)
{
@@ -134,7 +135,7 @@ VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter
}
up(&Adapter->RxAppControlQueuelock);
wake_up(&Adapter->process_read_wait_queue);
- bcm_kfree_skb(skb);
+ dev_kfree_skb(skb);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "After wake_up_interruptible");
}
@@ -185,33 +186,7 @@ int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter obje
{
DEQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail);
// Adapter->RxControlHead=ctrl_packet->next;
- ((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.rx_packets++;
- ((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.rx_bytes+=
- ((PLEADER)ctrl_packet->data)->PLength;
- }
- #if 0 //Idle mode debug profiling...
- if(*(PUSHORT)ctrl_packet->data == IDLE_MODE_STATUS)
- {
- puiBuffer = (PUINT)(ctrl_packet->data +sizeof(USHORT));
- if((ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD))
- {
- memset(&tv, 0, sizeof(tv));
- do_gettimeofday(&tv);
- if((ntohl(*(puiBuffer+1)) == 0))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "IdleMode Wake-up Msg from f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "IdleMode req Msg from f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
- }
- }
- else if((ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "GOT IDLE_MODE_SF_UPDATE MSG at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
- }
}
- #endif
spin_unlock_irqrestore (&Adapter->control_queue_lock, flags);
handle_rx_control_packet(Adapter, ctrl_packet);
@@ -234,7 +209,7 @@ INT flushAllAppQ(void)
{
PacketToDrop=pTarang->RxAppControlHead;
DEQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail);
- bcm_kfree_skb(PacketToDrop);
+ dev_kfree_skb(PacketToDrop);
}
pTarang->AppCtrlQueueLen = 0;
//dropped contrl packet statistics also should be reset.
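With bcm_kfree_skb() gone, queued control packets are dropped with the stock dev_kfree_skb(), as in the dequeue-then-free loop of flushAllAppQ() above. A condensed sketch of that loop, assuming the driver's PPER_TARANG_DATA fields and DEQUEUEPACKET macro:

static void example_flush_app_queue(PPER_TARANG_DATA pTarang)
{
        struct sk_buff *PacketToDrop;

        while (pTarang->RxAppControlHead) {
                PacketToDrop = pTarang->RxAppControlHead;
                DEQUEUEPACKET(pTarang->RxAppControlHead,
                              pTarang->RxAppControlTail);
                /* stock helper replaces the old bcm_kfree_skb() wrapper */
                dev_kfree_skb(PacketToDrop);
        }
        pTarang->AppCtrlQueueLen = 0;
}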
diff --git a/drivers/staging/bcm/HostMibs.h b/drivers/staging/bcm/HostMibs.h
deleted file mode 100644
index 28a578311378..000000000000
--- a/drivers/staging/bcm/HostMibs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _HOST_MIBS_H
-#define _HOST_MIBS_H
-
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
- PVOID ioBuffer,
- ULONG inputBufferLength);
-#endif
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
index 5ec3b896c6a7..91b6fbe33c91 100644
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ b/drivers/staging/bcm/IPv6Protocol.c
@@ -1,5 +1,9 @@
#include "headers.h"
+static BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
+static BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
+static VOID DumpIpv6Header(IPV6Header *pstIpv6Header);
+
static UCHAR * GetNextIPV6ChainedHeader(UCHAR **ppucPayload,UCHAR *pucNextHeader,BOOLEAN *bParseDone,USHORT *pusPayloadLength)
{
UCHAR *pucRetHeaderPtr = NULL;
@@ -257,7 +261,7 @@ USHORT IpVersion6(PMINI_ADAPTER Adapter, /**< Pointer to the driver control stru
}
-BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
+static BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
{
UINT uiLoopIndex=0;
UINT uiIpv6AddIndex=0;
@@ -310,7 +314,7 @@ BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pst
return FALSE;
}
-BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
+static BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
{
UINT uiLoopIndex=0;
UINT uiIpv6AddIndex=0;
@@ -376,7 +380,7 @@ VOID DumpIpv6Address(ULONG *puIpv6Address)
}
-VOID DumpIpv6Header(IPV6Header *pstIpv6Header)
+static VOID DumpIpv6Header(IPV6Header *pstIpv6Header)
{
UCHAR ucVersion;
UCHAR ucPrio ;
diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h
index b93f7902e283..a0db5a1de763 100644
--- a/drivers/staging/bcm/IPv6ProtocolHdr.h
+++ b/drivers/staging/bcm/IPv6ProtocolHdr.h
@@ -101,15 +101,12 @@ typedef enum _E_IPADDR_CONTEXT
//Function Prototypes
-BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
-BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
USHORT IpVersion6(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
PVOID pcIpHeader, /**<Pointer to the IP Hdr of the packet*/
S_CLASSIFIER_RULE *pstClassifierRule );
VOID DumpIpv6Address(ULONG *puIpv6Address);
-VOID DumpIpv6Header(IPV6Header *pstIpv6Header);
extern BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
extern BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 60c0f29f3eef..df64acb06126 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -1,20 +1,18 @@
#include "headers.h"
-#ifndef BCM_SHM_INTERFACE
int InterfaceFileDownload( PVOID arg,
struct file *flp,
unsigned int on_chip_loc)
{
- char *buff=NULL;
// unsigned int reg=0;
mm_segment_t oldfs={0};
int errno=0, len=0 /*,is_config_file = 0*/;
loff_t pos=0;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
//PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
+ char *buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
- buff=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
if(!buff)
{
return -ENOMEM;
@@ -49,7 +47,7 @@ int InterfaceFileDownload( PVOID arg,
on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
}/* End of for(;;)*/
- bcm_kfree(buff);
+ kfree(buff);
return errno;
}
@@ -57,7 +55,7 @@ int InterfaceFileReadbackFromChip( PVOID arg,
struct file *flp,
unsigned int on_chip_loc)
{
- char *buff=NULL, *buff_readback=NULL;
+ char *buff, *buff_readback;
unsigned int reg=0;
mm_segment_t oldfs={0};
int errno=0, len=0, is_config_file = 0;
@@ -66,12 +64,12 @@ int InterfaceFileReadbackFromChip( PVOID arg,
INT Status = STATUS_SUCCESS;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
- buff=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
- buff_readback=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
+ buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
+ buff_readback=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
if(!buff || !buff_readback)
{
- bcm_kfree(buff);
- bcm_kfree(buff_readback);
+ kfree(buff);
+ kfree(buff_readback);
return -ENOMEM;
}
@@ -138,8 +136,8 @@ int InterfaceFileReadbackFromChip( PVOID arg,
on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
}/* End of while(1)*/
exit:
- bcm_kfree(buff);
- bcm_kfree(buff_readback);
+ kfree(buff);
+ kfree(buff_readback);
return Status;
}
@@ -165,7 +163,7 @@ static int bcm_download_config_file(PMINI_ADAPTER Adapter,
psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
if(retval)
{
- bcm_kfree (Adapter->pstargetparams);
+ kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
return -EFAULT;
}
@@ -231,41 +229,6 @@ static int bcm_download_config_file(PMINI_ADAPTER Adapter,
return retval;
}
-#if 0
-static int bcm_download_buffer(PMINI_ADAPTER Adapter,
- unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress)
-{
- char *buff=NULL;
- unsigned int len = 0;
- int retval = STATUS_SUCCESS;
- buff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-
- len = u32FirmwareLength;
-
- while(u32FirmwareLength)
- {
- len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- if(STATUS_SUCCESS != (retval = copy_from_user(buff,
- (unsigned char *)mappedbuffer, len)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copy_from_user failed\n");
- break;
- }
- retval = wrm (Adapter, u32StartingAddress, buff, len);
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed\n");
- break;
- }
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer +=len;
- }
- bcm_kfree(buff);
- return retval;
-}
-#endif
static int bcm_compare_buff_contents(unsigned char *readbackbuff,
unsigned char *buff,unsigned int len)
{
@@ -297,58 +260,6 @@ static int bcm_compare_buff_contents(unsigned char *readbackbuff,
}
return retval;
}
-#if 0
-static int bcm_buffer_readback(PMINI_ADAPTER Adapter,
- unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
- unsigned long u32StartingAddress)
-{
- unsigned char *buff = NULL;
- unsigned char *readbackbuff = NULL;
- unsigned int len = u32FirmwareLength;
- int retval = STATUS_SUCCESS;
-
- buff=(unsigned char *)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
- if(NULL == buff)
- return -ENOMEM;
- readbackbuff = (unsigned char *)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,
- GFP_KERNEL);
- if(NULL == readbackbuff)
- {
- bcm_kfree(buff);
- return -ENOMEM;
- }
- while (u32FirmwareLength && !retval)
- {
- len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
-
- /* read from the appl buff and then read from the target, compare */
- if(STATUS_SUCCESS != (retval = copy_from_user(buff,
- (unsigned char *)mappedbuffer, len)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copy_from_user failed\n");
- break;
- }
- retval = rdm (Adapter, u32StartingAddress, readbackbuff, len);
- if(retval)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed\n");
- break;
- }
-
- if (STATUS_SUCCESS !=
- (retval = bcm_compare_buff_contents (readbackbuff, buff, len)))
- {
- break;
- }
- u32StartingAddress += len;
- u32FirmwareLength -= len;
- mappedbuffer +=len;
- }/* end of while (u32FirmwareLength && !retval) */
- bcm_kfree(buff);
- bcm_kfree(readbackbuff);
- return retval;
-}
-#endif
int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
{
int retval = STATUS_SUCCESS;
@@ -375,7 +286,7 @@ int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
else
{
- buff = (PUCHAR)kzalloc(psFwInfo->u32FirmwareLength,GFP_KERNEL);
+ buff = kzalloc(psFwInfo->u32FirmwareLength,GFP_KERNEL);
if(buff==NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Failed in allocation memory");
@@ -389,23 +300,6 @@ int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
goto error ;
}
- #if 0
- retval = bcm_download_buffer(Adapter,
- (unsigned char *)psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress);
- if(retval != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "User space buffer download fails....");
- }
- retval = bcm_buffer_readback (Adapter,
- (unsigned char *)psFwInfo->pvMappedFirmwareAddress,
- psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress);
-
- if(retval != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "read back verifier failed ....");
- }
- #endif
retval = buffDnldVerify(Adapter,
buff,
psFwInfo->u32FirmwareLength,
@@ -417,7 +311,7 @@ int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
}
}
error:
- bcm_kfree(buff);
+ kfree(buff);
return retval;
}
@@ -450,11 +344,10 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter,
PUCHAR mappedbuffer, UINT u32FirmwareLength,
ULONG u32StartingAddress)
{
- PUCHAR readbackbuff = NULL;
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
+ PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,GFP_KERNEL);
- readbackbuff = (PUCHAR)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,GFP_KERNEL);
if(NULL == readbackbuff)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
@@ -480,7 +373,7 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter,
u32FirmwareLength -= len;
mappedbuffer +=len;
}/* end of while (u32FirmwareLength && !retval) */
- bcm_kfree(readbackbuff);
+ kfree(readbackbuff);
return retval;
}
@@ -506,5 +399,4 @@ error:
return status;
}
-#endif
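
A minimal sketch (not part of the patch; names are illustrative only) of the allocation pattern the InterfaceDld.c hunks above converge on: kmalloc() returns void * so no cast is needed, and kfree(NULL) is a no-op, so a bcm_kfree()-style wrapper and its NULL checks add nothing.

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

static int example_alloc_pair(size_t len)
{
	char *buff = kmalloc(len, GFP_KERNEL);		/* no cast: void * converts implicitly */
	char *buff_readback = kmalloc(len, GFP_KERNEL);

	if (!buff || !buff_readback) {
		kfree(buff);				/* kfree(NULL) is a no-op */
		kfree(buff_readback);
		return -ENOMEM;
	}

	/* ... use the buffers ... */

	kfree(buff);
	kfree(buff_readback);
	return 0;
}
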
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 0750382733ff..bf5c0ad86610 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -98,14 +98,6 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
wake_up(&Adapter->lowpower_mode_wait_queue);
- #if 0
- if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"LED Thread is Running. Hence Setting the LED Event as IDLEMODE_EXIT");
- Adapter->DriverState = IDLEMODE_EXIT;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- #endif
}
else
@@ -154,17 +146,7 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
return status;
}
-
-VOID InterfaceWriteIdleModeWakePattern(PMINI_ADAPTER Adapter)
-{
-/* BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Low, 0x1d1e);
- BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Low, 0x1d1e);
- BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Upp, 0xd0ea);
- BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Upp, 0xd0ea);*/
- return;
-}
-
-int InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern)
+static int InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern)
{
int status = STATUS_SUCCESS;
unsigned int value;
diff --git a/drivers/staging/bcm/InterfaceIdleMode.h b/drivers/staging/bcm/InterfaceIdleMode.h
index 1bc723d2d72c..859a2ffba6b7 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.h
+++ b/drivers/staging/bcm/InterfaceIdleMode.h
@@ -7,8 +7,6 @@ INT InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int *puiBuffer);
VOID InterfaceWriteIdleModeWakePattern(PMINI_ADAPTER Adapter);
-INT InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern);
-
INT InterfaceWakeUp(PMINI_ADAPTER Adapter);
VOID InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter);
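
The InterfaceIdleMode hunks above follow a common cleanup: a function called from only one file (InterfaceAbortIdlemode) becomes static and its prototype leaves the header. A minimal sketch of the pattern, with hypothetical names:

/* foo.h: only the real entry point keeps a prototype */
int foo_entry(int x);

/* foo.c */
static int foo_helper(int x)	/* static: internal linkage, no header prototype needed */
{
	return x * 2;
}

int foo_entry(int x)		/* still declared in foo.h for other files */
{
	return foo_helper(x);
}
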
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index e97ad99b1bb4..d78d5ef1f298 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -1,54 +1,63 @@
#include "headers.h"
static struct usb_device_id InterfaceUsbtable[] = {
- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
- { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
{ USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
- {}
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
+ { }
};
+MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
-VOID InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int debug = -1;
+module_param(debug, int, 0600);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const u32 default_msg =
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER Adapter);
+
+static void InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
{
- INT i = 0;
- // Wake up the wait_queue...
- if(psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
- {
+ int i = 0;
+
+ /* Wake up the wait_queue... */
+ if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
psIntfAdapter->psAdapter->DriverState = DRIVER_HALT;
wake_up(&psIntfAdapter->psAdapter->LEDInfo.notify_led_event);
}
reset_card_proc(psIntfAdapter->psAdapter);
- //worst case time taken by the RDM/WRM will be 5 sec. will check after every 100 ms
- //to accertain the device is not being accessed. After this No RDM/WRM should be made.
- while(psIntfAdapter->psAdapter->DeviceAccess)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Device is being Accessed \n");
+ /*
+	 * Worst-case time taken by the RDM/WRM will be 5 sec; will check after every 100 ms
+	 * to ascertain the device is not being accessed. After this no RDM/WRM should be made.
+ */
+ while (psIntfAdapter->psAdapter->DeviceAccess) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Device is being accessed.\n");
msleep(100);
}
/* Free interrupt URB */
- //psIntfAdapter->psAdapter->device_removed = TRUE;
- if(psIntfAdapter->psInterruptUrb)
- {
- usb_free_urb(psIntfAdapter->psInterruptUrb);
- }
+ /* psIntfAdapter->psAdapter->device_removed = TRUE; */
+ usb_free_urb(psIntfAdapter->psInterruptUrb);
/* Free transmit URBs */
- for(i = 0; i < MAXIMUM_USB_TCB; i++)
- {
- if(psIntfAdapter->asUsbTcb[i].urb != NULL)
- {
+ for (i = 0; i < MAXIMUM_USB_TCB; i++) {
+ if (psIntfAdapter->asUsbTcb[i].urb != NULL) {
usb_free_urb(psIntfAdapter->asUsbTcb[i].urb);
psIntfAdapter->asUsbTcb[i].urb = NULL;
}
}
/* Free receive URB and buffers */
- for(i = 0; i < MAXIMUM_USB_RCB; i++)
- {
- if (psIntfAdapter->asUsbRcb[i].urb != NULL)
- {
- bcm_kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
+ for (i = 0; i < MAXIMUM_USB_RCB; i++) {
+ if (psIntfAdapter->asUsbRcb[i].urb != NULL) {
+ kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
usb_free_urb(psIntfAdapter->asUsbRcb[i].urb);
psIntfAdapter->asUsbRcb[i].urb = NULL;
}
@@ -56,151 +65,109 @@ VOID InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
AdapterFree(psIntfAdapter->psAdapter);
}
-
-
-static int usbbcm_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int usbbcm_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static ssize_t usbbcm_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- return 0;
-}
-
-static ssize_t usbbcm_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *ppos)
+static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
{
- return 0;
-}
+ unsigned long ulReg = 0;
+ int ret;
-
-VOID ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
-{
- ULONG ulReg = 0;
-
-// Program EP2 MAX_PKT_SIZE
+ /* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x128,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x128, 4, TRUE);
ulReg = ntohl(EP2_MPS);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x12C,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x12C, 4, TRUE);
ulReg = ntohl(EP2_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x132,4,TRUE);
- if(((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE)
- {
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
+ if (((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE) {
ulReg = ntohl(EP2_CFG_INT);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x136,4,TRUE);
- }
- else
- {
-// USE BULK EP as TX in FS mode.
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
+ } else {
+ /* USE BULK EP as TX in FS mode. */
ulReg = ntohl(EP2_CFG_BULK);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x136,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
}
-
-// Program EP4 MAX_PKT_SIZE.
+ /* Program EP4 MAX_PKT_SIZE. */
ulReg = ntohl(EP4_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x13C,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x13C, 4, TRUE);
ulReg = ntohl(EP4_MPS);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x140,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
-// Program TX EP as interrupt (Alternate Setting)
- if( rdmalt(Adapter,0x0F0110F8, (PUINT)&ulReg,4))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reading of Tx EP is failing");
- return ;
+ /* Program TX EP as interrupt(Alternate Setting) */
+ ret = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ if (ret) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "reading of Tx EP failed\n");
+ return;
}
ulReg |= 0x6;
ulReg = ntohl(ulReg);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1CC,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1CC, 4, TRUE);
ulReg = ntohl(EP4_CFG_REG);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1C8,4,TRUE);
-// Program ISOCHRONOUS EP size to zero.
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C8, 4, TRUE);
+ /* Program ISOCHRONOUS EP size to zero. */
ulReg = ntohl(ISO_MPS_REG);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1D2,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D2, 4, TRUE);
ulReg = ntohl(ISO_MPS);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1D6,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D6, 4, TRUE);
-// Update EEPROM Version.
-// Read 4 bytes from 508 and modify 511 and 510.
-//
- ReadBeceemEEPROM(Adapter,0x1FC,(PUINT)&ulReg);
+ /*
+ * Update EEPROM Version.
+ * Read 4 bytes from 508 and modify 511 and 510.
+ */
+ ReadBeceemEEPROM(Adapter, 0x1FC, (PUINT)&ulReg);
ulReg &= 0x0101FFFF;
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1FC,4,TRUE);
-//
-//Update length field if required. Also make the string NULL terminated.
-//
- ReadBeceemEEPROM(Adapter,0xA8,(PUINT)&ulReg);
- if((ulReg&0x00FF0000)>>16 > 0x30)
- {
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
+
+ /* Update length field if required. Also make the string NULL terminated. */
+
+ ReadBeceemEEPROM(Adapter, 0xA8, (PUINT)&ulReg);
+ if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0xA8,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
}
- ReadBeceemEEPROM(Adapter,0x148,(PUINT)&ulReg);
- if((ulReg&0x00FF0000)>>16 > 0x30)
- {
+ ReadBeceemEEPROM(Adapter, 0x148, (PUINT)&ulReg);
+ if ((ulReg&0x00FF0000)>>16 > 0x30) {
ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x148,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
}
ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x122,4,TRUE);
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x122, 4, TRUE);
ulReg = 0;
- BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1C2,4,TRUE);
-
+ BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE);
}
-static struct file_operations usbbcm_fops = {
- .open = usbbcm_open,
- .release = usbbcm_release,
- .read = usbbcm_read,
- .write = usbbcm_write,
- .owner = THIS_MODULE,
- .llseek = no_llseek,
-};
-
-static struct usb_class_driver usbbcm_class = {
- .name = "usbbcm",
- .fops = &usbbcm_fops,
- .minor_base = BCM_USB_MINOR_BASE,
-};
-
static int
usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
- int retval =0 ;
- PMINI_ADAPTER psAdapter = NULL;
- PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
- struct usb_device *udev = NULL;
-
-// BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Usbbcm probe!!");
- if((intf == NULL) || (id == NULL))
- {
- // BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "intf or id is NULL");
- return -EINVAL;
- }
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int retval;
+ PMINI_ADAPTER psAdapter;
+ PS_INTERFACE_ADAPTER psIntfAdapter;
+ struct net_device *ndev;
- /* Allocate Adapter structure */
- if((psAdapter = kzalloc(sizeof(MINI_ADAPTER), GFP_KERNEL)) == NULL)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0, "Out of memory");
+ /* Reserve one extra queue for the bit-bucket */
+ ndev = alloc_etherdev_mq(sizeof(MINI_ADAPTER), NO_OF_QUEUES+1);
+ if (ndev == NULL) {
+ dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
return -ENOMEM;
}
- /* Init default driver debug state */
+ SET_NETDEV_DEV(ndev, &intf->dev);
- psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
+ psAdapter = netdev_priv(ndev);
+ psAdapter->dev = ndev;
+ psAdapter->msg_enable = netif_msg_init(debug, default_msg);
+
+ /* Init default driver debug state */
+
+ psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
psAdapter->stDebugState.type = DBG_TYPE_INITEXIT;
- memset (psAdapter->stDebugState.subtype, 0, sizeof (psAdapter->stDebugState.subtype));
- /* Technically, one can start using BCM_DEBUG_PRINT after this point.
+ /*
+ * Technically, one can start using BCM_DEBUG_PRINT after this point.
* However, realize that by default the Type/Subtype bitmaps are all zero now;
* so no prints will actually appear until the TestApp turns on debug paths via
* the ioctl(); so practically speaking, in early init, no logging happens.
@@ -211,160 +178,128 @@ usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
* Further, we turn this OFF once init_module() completes.
*/
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
+ psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
BCM_SHOW_DEBUG_BITMAP(psAdapter);
retval = InitAdapter(psAdapter);
- if(retval)
- {
- BCM_DEBUG_PRINT (psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InitAdapter Failed\n");
+ if (retval) {
+ dev_err(&udev->dev, DRV_NAME ": InitAdapter Failed\n");
AdapterFree(psAdapter);
return retval;
}
/* Allocate interface adapter structure */
- if((psAdapter->pvInterfaceAdapter =
- kmalloc(sizeof(S_INTERFACE_ADAPTER), GFP_KERNEL)) == NULL)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0, "Out of memory");
- AdapterFree (psAdapter);
+ psIntfAdapter = kzalloc(sizeof(S_INTERFACE_ADAPTER), GFP_KERNEL);
+ if (psIntfAdapter == NULL) {
+ dev_err(&udev->dev, DRV_NAME ": no memory for Interface adapter\n");
+ AdapterFree(psAdapter);
return -ENOMEM;
}
- memset(psAdapter->pvInterfaceAdapter, 0, sizeof(S_INTERFACE_ADAPTER));
- psIntfAdapter = InterfaceAdapterGet(psAdapter);
+ psAdapter->pvInterfaceAdapter = psIntfAdapter;
psIntfAdapter->psAdapter = psAdapter;
/* Store usb interface in Interface Adapter */
psIntfAdapter->interface = intf;
usb_set_intfdata(intf, psIntfAdapter);
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "psIntfAdapter 0x%p",psIntfAdapter);
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "psIntfAdapter 0x%p\n", psIntfAdapter);
retval = InterfaceAdapterInit(psIntfAdapter);
- if(retval)
- {
+ if (retval) {
/* If the Firmware/Cfg File is not present
- * then return success, let the application
- * download the files.
- */
- if(-ENOENT == retval){
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "File Not Found, Use App to Download\n");
+ * then return success, let the application
+ * download the files.
+ */
+ if (-ENOENT == retval) {
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "File Not Found. Use app to download.\n");
return STATUS_SUCCESS;
}
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InterfaceAdapterInit Failed \n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "InterfaceAdapterInit failed.\n");
usb_set_intfdata(intf, NULL);
- udev = interface_to_usbdev (intf);
+ udev = interface_to_usbdev(intf);
usb_put_dev(udev);
- if(psAdapter->bUsbClassDriverRegistered == TRUE)
- usb_deregister_dev (intf, &usbbcm_class);
InterfaceAdapterFree(psIntfAdapter);
- return retval ;
+ return retval;
}
- if(psAdapter->chip_id > T3)
- {
- uint32_t uiNackZeroLengthInt=4;
- if(wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt)))
- {
- return -EIO;;
- }
+ if (psAdapter->chip_id > T3) {
+ uint32_t uiNackZeroLengthInt = 4;
+
+ retval = wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt));
+ if (retval)
+ return retval;
}
- udev = interface_to_usbdev (intf);
/* Check whether the USB-Device Supports remote Wake-Up */
- if(USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes)
- {
+ if (USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes) {
/* If Suspend then only support dynamic suspend */
- if(psAdapter->bDoSuspend)
- {
+ if (psAdapter->bDoSuspend) {
#ifdef CONFIG_PM
pm_runtime_set_autosuspend_delay(&udev->dev, 0);
intf->needs_remote_wakeup = 1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- udev->autosuspend_disabled = 0;
-#else
usb_enable_autosuspend(udev);
-#endif
- device_init_wakeup(&intf->dev,1);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
- usb_autopm_disable(intf);
-#endif
+ device_init_wakeup(&intf->dev, 1);
INIT_WORK(&psIntfAdapter->usbSuspendWork, putUsbSuspend);
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Enabling USB Auto-Suspend\n");
+ BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Enabling USB Auto-Suspend\n");
#endif
- }
- else
- {
+ } else {
intf->needs_remote_wakeup = 0;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- udev->autosuspend_disabled = 1;
-#else
usb_disable_autosuspend(udev);
-#endif
}
}
- psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
- return retval;
+ psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
+ return retval;
}
-static void usbbcm_disconnect (struct usb_interface *intf)
+static void usbbcm_disconnect(struct usb_interface *intf)
{
- PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
- PMINI_ADAPTER psAdapter = NULL;
- struct usb_device *udev = NULL;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Usb disconnected");
- if(intf == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "intf pointer is NULL");
- return;
- }
- psIntfAdapter = usb_get_intfdata(intf);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "psIntfAdapter 0x%p",psIntfAdapter);
- if(psIntfAdapter == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InterfaceAdapter pointer is NULL");
+ PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
+ PMINI_ADAPTER psAdapter;
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ if (psIntfAdapter == NULL)
return;
- }
+
psAdapter = psIntfAdapter->psAdapter;
- if(psAdapter->bDoSuspend)
+ netif_device_detach(psAdapter->dev);
+
+ if (psAdapter->bDoSuspend)
intf->needs_remote_wakeup = 0;
psAdapter->device_removed = TRUE ;
usb_set_intfdata(intf, NULL);
InterfaceAdapterFree(psIntfAdapter);
- udev = interface_to_usbdev (intf);
usb_put_dev(udev);
- usb_deregister_dev (intf, &usbbcm_class);
}
-
-static __inline int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
{
int i = 0;
- for(i = 0; i < MAXIMUM_USB_TCB; i++)
- {
- if((psIntfAdapter->asUsbTcb[i].urb =
- usb_alloc_urb(0, GFP_KERNEL)) == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Tx urb for index %d", i);
+
+ for (i = 0; i < MAXIMUM_USB_TCB; i++) {
+ if ((psIntfAdapter->asUsbTcb[i].urb =
+ usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Tx urb for index %d\n", i);
return -ENOMEM;
}
}
- for(i = 0; i < MAXIMUM_USB_RCB; i++)
- {
+ for (i = 0; i < MAXIMUM_USB_RCB; i++) {
if ((psIntfAdapter->asUsbRcb[i].urb =
- usb_alloc_urb(0, GFP_KERNEL)) == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Rx urb for index %d", i);
+ usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx urb for index %d\n", i);
return -ENOMEM;
}
- if((psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
- kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL)) == NULL)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Rx buffer for index %d", i);
+ if ((psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
+ kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL)) == NULL) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "Can't allocate Rx buffer for index %d\n", i);
return -ENOMEM;
}
psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length = MAX_DATA_BUFFER_SIZE;
@@ -372,77 +307,41 @@ static __inline int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
return 0;
}
-
-
static int device_run(PS_INTERFACE_ADAPTER psIntfAdapter)
{
- INT value = 0;
+ int value = 0;
UINT status = STATUS_SUCCESS;
status = InitCardAndDownloadFirmware(psIntfAdapter->psAdapter);
- if(status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "InitCardAndDownloadFirmware failed.\n");
+ if (status != STATUS_SUCCESS) {
+		pr_err(DRV_NAME ": InitCardAndDownloadFirmware failed.\n");
return status;
}
- if(TRUE == psIntfAdapter->psAdapter->fw_download_done)
- {
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Sending first interrupt URB down......");
- if(StartInterruptUrb(psIntfAdapter))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Cannot send interrupt in URB");
+ if (TRUE == psIntfAdapter->psAdapter->fw_download_done) {
+ if (StartInterruptUrb(psIntfAdapter)) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Cannot send interrupt in URB\n");
}
- //now register the cntrl interface.
- //after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
+ /*
+ * now register the cntrl interface.
+ * after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
+ */
psIntfAdapter->psAdapter->waiting_to_fw_download_done = FALSE;
value = wait_event_timeout(psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
psIntfAdapter->psAdapter->waiting_to_fw_download_done, 5*HZ);
- if(value == 0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Mailbox Interrupt has not reached to Driver..");
- }
- else
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Got the mailbox interrupt ...Registering control interface...\n ");
- }
- if(register_control_device_interface(psIntfAdapter->psAdapter) < 0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Register Control Device failed...");
+ if (value == 0)
+ pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
+
+ if (register_control_device_interface(psIntfAdapter->psAdapter) < 0) {
+ pr_err(DRV_NAME ": Register Control Device failed.\n");
return -EIO;
}
}
return 0;
}
-#if 0
-static void print_usb_interface_desc(struct usb_interface_descriptor *usb_intf_desc)
-{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "**************** INTERFACE DESCRIPTOR *********************");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bLength: %x", usb_intf_desc->bLength);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bDescriptorType: %x", usb_intf_desc->bDescriptorType);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceNumber: %x", usb_intf_desc->bInterfaceNumber);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bAlternateSetting: %x", usb_intf_desc->bAlternateSetting);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bNumEndpoints: %x", usb_intf_desc->bNumEndpoints);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceClass: %x", usb_intf_desc->bInterfaceClass);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceSubClass: %x", usb_intf_desc->bInterfaceSubClass);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceProtocol: %x", usb_intf_desc->bInterfaceProtocol);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "iInterface :%x\n",usb_intf_desc->iInterface);
-}
-static void print_usb_endpoint_descriptor(struct usb_endpoint_descriptor *usb_ep_desc)
-{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "**************** ENDPOINT DESCRIPTOR *********************");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bLength :%x ", usb_ep_desc->bLength);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bDescriptorType :%x ", usb_ep_desc->bDescriptorType);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bEndpointAddress :%x ", usb_ep_desc->bEndpointAddress);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bmAttributes :%x ", usb_ep_desc->bmAttributes);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "wMaxPacketSize :%x ",usb_ep_desc->wMaxPacketSize);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterval :%x ",usb_ep_desc->bInterval);
-}
-
-#endif
static inline int bcm_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
{
@@ -518,124 +417,111 @@ static inline int bcm_usb_endpoint_is_isoc_out(const struct usb_endpoint_descrip
return (bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd));
}
-INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
{
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
size_t buffer_size;
- ULONG value;
- INT retval = 0;
- INT usedIntOutForBulkTransfer = 0 ;
+ unsigned long value;
+ int retval = 0;
+	int usedIntOutForBulkTransfer = 0;
BOOLEAN bBcm16 = FALSE;
UINT uiData = 0;
/* Store the usb dev into interface adapter */
- psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(
- psIntfAdapter->interface));
-
- if((psIntfAdapter->udev->speed == USB_SPEED_HIGH))
- {
- psIntfAdapter->bHighSpeedDevice = TRUE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "MODEM IS CONFIGURED TO HIGH_SPEED ");
- }
- else
- {
- psIntfAdapter->bHighSpeedDevice = FALSE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "MODEM IS CONFIGURED TO FULL_SPEED ");
- }
+ psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
+ psIntfAdapter->bHighSpeedDevice = (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
- if(rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG, (PUINT)&(psIntfAdapter->psAdapter->chip_id), sizeof(UINT)) < 0)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
- return STATUS_FAILURE;
- }
- if(0xbece3200==(psIntfAdapter->psAdapter->chip_id&~(0xF0)))
- {
- psIntfAdapter->psAdapter->chip_id=(psIntfAdapter->psAdapter->chip_id&~(0xF0));
+ retval = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
+ (u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
+ return retval;
}
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "First RDM Chip ID 0x%lx\n", psIntfAdapter->psAdapter->chip_id);
+ if (0xbece3200 == (psIntfAdapter->psAdapter->chip_id & ~(0xF0)))
+ psIntfAdapter->psAdapter->chip_id &= ~0xF0;
- iface_desc = psIntfAdapter->interface->cur_altsetting;
- //print_usb_interface_desc(&(iface_desc->desc));
+ dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
+ psIntfAdapter->psAdapter->chip_id);
- if(psIntfAdapter->psAdapter->chip_id == T3B)
- {
+ iface_desc = psIntfAdapter->interface->cur_altsetting;
- //
- //T3B device will have EEPROM,check if EEPROM is proper and BCM16 can be done or not.
- //
- BeceemEEPROMBulkRead(psIntfAdapter->psAdapter,&uiData,0x0,4);
- if(uiData == BECM)
- {
+ if (psIntfAdapter->psAdapter->chip_id == T3B) {
+ /* T3B device will have EEPROM, check if EEPROM is proper and BCM16 can be done or not. */
+ BeceemEEPROMBulkRead(psIntfAdapter->psAdapter, &uiData, 0x0, 4);
+ if (uiData == BECM)
bBcm16 = TRUE;
- }
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Number of Altsetting aviailable for This Modem 0x%x\n", psIntfAdapter->interface->num_altsetting);
- if(bBcm16 == TRUE)
- {
- //selecting alternate setting one as a default setting for High Speed modem.
- if(psIntfAdapter->bHighSpeedDevice)
- retval= usb_set_interface(psIntfAdapter->udev,DEFAULT_SETTING_0,ALTERNATE_SETTING_1);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM16 is Applicable on this dongle");
- if(retval || (psIntfAdapter->bHighSpeedDevice == FALSE))
- {
+
+ dev_info(&psIntfAdapter->udev->dev, "number of alternate setting %d\n",
+ psIntfAdapter->interface->num_altsetting);
+
+ if (bBcm16 == TRUE) {
+ /* selecting alternate setting one as a default setting for High Speed modem. */
+ if (psIntfAdapter->bHighSpeedDevice)
+				retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "BCM16 is applicable on this dongle\n");
+ if (retval || (psIntfAdapter->bHighSpeedDevice == FALSE)) {
usedIntOutForBulkTransfer = EP2 ;
endpoint = &iface_desc->endpoint[EP2].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Interface altsetting got failed or Moemd is configured to FS.hence will work on default setting 0 \n");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
/*
- If Modem is high speed device EP2 should be INT OUT End point
- If Mode is FS then EP2 should be bulk end point
- */
- if(((psIntfAdapter->bHighSpeedDevice ==TRUE ) && (bcm_usb_endpoint_is_int_out(endpoint)== FALSE))
- ||((psIntfAdapter->bHighSpeedDevice == FALSE)&& (bcm_usb_endpoint_is_bulk_out(endpoint)== FALSE)))
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Configuring the EEPROM ");
- //change the EP2, EP4 to INT OUT end point
+ * If Modem is high speed device EP2 should be INT OUT End point
+ * If Mode is FS then EP2 should be bulk end point
+ */
+ if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == FALSE))
+ || ((psIntfAdapter->bHighSpeedDevice == FALSE) && (bcm_usb_endpoint_is_bulk_out(endpoint) == FALSE))) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Configuring the EEPROM\n");
+ /* change the EP2, EP4 to INT OUT end point */
ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
/*
- It resets the device and if any thing gets changed in USB descriptor it will show fail and
- re-enumerate the device
- */
+ * It resets the device and if any thing gets changed
+ * in USB descriptor it will show fail and re-enumerate
+ * the device
+ */
retval = usb_reset_device(psIntfAdapter->udev);
- if(retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reset got failed. hence Re-enumerating the device \n");
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
return retval ;
}
}
- if((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint))
- {
- // Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail.
+ if ((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+ /* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
UINT _uiData = ntohl(EP2_CFG_INT);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Reverting Bulk to INT as it is FS MODE");
- BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter,(PUCHAR)&_uiData,0x136,4,TRUE);
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Reverting Bulk to INT as it is in Full Speed mode.\n");
+ BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter, (PUCHAR)&_uiData, 0x136, 4, TRUE);
}
- }
- else
- {
+ } else {
usedIntOutForBulkTransfer = EP4 ;
endpoint = &iface_desc->endpoint[EP4].desc;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Choosing AltSetting as a default setting");
- if( bcm_usb_endpoint_is_int_out(endpoint) == FALSE)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, " Dongle does not have BCM16 Fix");
- //change the EP2, EP4 to INT OUT end point and use EP4 in altsetting
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Choosing AltSetting as a default setting.\n");
+ if (bcm_usb_endpoint_is_int_out(endpoint) == FALSE) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Dongle does not have BCM16 Fix.\n");
+ /* change the EP2, EP4 to INT OUT end point and use EP4 in altsetting */
ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
/*
- It resets the device and if any thing gets changed in USB descriptor it will show fail and
- re-enumerate the device
- */
+ * It resets the device and if any thing gets changed in
+ * USB descriptor it will show fail and re-enumerate the
+ * device
+ */
retval = usb_reset_device(psIntfAdapter->udev);
- if(retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reset got failed. hence Re-enumerating the device \n");
- return retval ;
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "reset failed. Re-enumerating the device.\n");
+ return retval;
}
}
@@ -644,99 +530,67 @@ INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
}
iface_desc = psIntfAdapter->interface->cur_altsetting;
- //print_usb_interface_desc(&(iface_desc->desc));
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Current number of endpoints :%x \n", iface_desc->desc.bNumEndpoints);
- for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value)
- {
- endpoint = &iface_desc->endpoint[value].desc;
- //print_usb_endpoint_descriptor(endpoint);
-
- if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint))
- {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
- psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkIn.bulk_in_pipe =
+
+ for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
+ endpoint = &iface_desc->endpoint[value].desc;
+
+ if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint)) {
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
+ psIntfAdapter->sBulkIn.bulk_in_endpointAddr = endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkIn.bulk_in_pipe =
usb_rcvbulkpipe(psIntfAdapter->udev,
psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
- }
-
- if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint))
- {
+ }
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndbulkpipe(psIntfAdapter->udev,
+ if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe =
+ usb_sndbulkpipe(psIntfAdapter->udev,
psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- }
-
- if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint))
- {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrIn.int_in_size = buffer_size;
- psIntfAdapter->sIntrIn.int_in_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
- psIntfAdapter->sIntrIn.int_in_buffer =
+ }
+
+ if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint)) {
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sIntrIn.int_in_size = buffer_size;
+ psIntfAdapter->sIntrIn.int_in_endpointAddr = endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
+ psIntfAdapter->sIntrIn.int_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
- if (!psIntfAdapter->sIntrIn.int_in_buffer) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Could not allocate interrupt_in_buffer");
- return -EINVAL;
- }
- //psIntfAdapter->sIntrIn.int_in_pipe =
- }
-
- if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint))
- {
-
- if( !psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
- (psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer))
- {
- //use first intout end point as a bulk out end point
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
- //printk("\nINT OUT Endpoing buffer size :%x endpoint :%x\n", buffer_size, value +1);
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sBulkOut.bulk_out_pipe =
- usb_sndintpipe(psIntfAdapter->udev,
- psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
- psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
+ if (!psIntfAdapter->sIntrIn.int_in_buffer) {
+ dev_err(&psIntfAdapter->udev->dev,
+ "could not allocate interrupt_in_buffer\n");
+ return -EINVAL;
+ }
+ }
+ if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint)) {
+ if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
+ (psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer)) {
+ /* use first intout end point as a bulk out end point */
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
+ psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndintpipe(psIntfAdapter->udev,
+ psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
+ psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
+ } else if (value == EP6) {
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ psIntfAdapter->sIntrOut.int_out_size = buffer_size;
+ psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
+ psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
+				psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!psIntfAdapter->sIntrOut.int_out_buffer) {
+ dev_err(&psIntfAdapter->udev->dev,
+ "could not allocate interrupt_out_buffer\n");
+ return -EINVAL;
+ }
}
- else if(value == EP6)
- {
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
- psIntfAdapter->sIntrOut.int_out_size = buffer_size;
- psIntfAdapter->sIntrOut.int_out_endpointAddr =
- endpoint->bEndpointAddress;
- psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
- psIntfAdapter->sIntrOut.int_out_buffer= kmalloc(buffer_size,
- GFP_KERNEL);
- if (!psIntfAdapter->sIntrOut.int_out_buffer)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Could not allocate interrupt_out_buffer");
- return -EINVAL;
- }
- }
- }
- }
- usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
- retval = usb_register_dev(psIntfAdapter->interface, &usbbcm_class);
- if(retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "usb register dev failed = %d", retval);
- psIntfAdapter->psAdapter->bUsbClassDriverRegistered = FALSE;
- return retval;
- }
- else
- {
- psIntfAdapter->psAdapter->bUsbClassDriverRegistered = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "usb dev registered");
+ }
}
+ usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
+
psIntfAdapter->psAdapter->bcm_file_download = InterfaceFileDownload;
psIntfAdapter->psAdapter->bcm_file_readback_from_chip =
InterfaceFileReadbackFromChip;
@@ -744,67 +598,51 @@ INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
retval = CreateInterruptUrb(psIntfAdapter);
- if(retval)
- {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cannot create interrupt urb");
+ if (retval) {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+ "Cannot create interrupt urb\n");
return retval;
}
retval = AllocUsbCb(psIntfAdapter);
- if(retval)
- {
+ if (retval)
return retval;
- }
-
- retval = device_run(psIntfAdapter);
- if(retval)
- {
- return retval;
- }
-
-
- return 0;
+ return device_run(psIntfAdapter);
}
-static int InterfaceSuspend (struct usb_interface *intf, pm_message_t message)
+static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
{
PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "=================================\n");
- //Bcm_kill_all_URBs(psIntfAdapter);
+
psIntfAdapter->bSuspended = TRUE;
- if(TRUE == psIntfAdapter->bPreparingForBusSuspend)
- {
+ if (TRUE == psIntfAdapter->bPreparingForBusSuspend) {
psIntfAdapter->bPreparingForBusSuspend = FALSE;
- if(psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE)
- {
+ if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
psIntfAdapter->psAdapter->IdleMode = TRUE ;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Host Entered in PMU Idle Mode..");
- }
- else
- {
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Host Entered in PMU Idle Mode.\n");
+ } else {
psIntfAdapter->psAdapter->bShutStatus = TRUE;
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Host Entered in PMU Shutdown Mode..");
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+ "Host Entered in PMU Shutdown Mode.\n");
}
}
psIntfAdapter->psAdapter->bPreparingForLowPowerMode = FALSE;
- //Signaling the control pkt path
+ /* Signaling the control pkt path */
wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue);
return 0;
}
-static int InterfaceResume (struct usb_interface *intf)
+static int InterfaceResume(struct usb_interface *intf)
{
- PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
- printk("=================================\n");
+ PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
mdelay(100);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
- intf->pm_usage_cnt =1 ;
-#endif
+
psIntfAdapter->bSuspended = FALSE;
StartInterruptUrb(psIntfAdapter);
@@ -812,57 +650,41 @@ static int InterfaceResume (struct usb_interface *intf)
return 0;
}
-static int InterfacePreReset(struct usb_interface *intf)
-{
- printk("====================>");
- return STATUS_SUCCESS;
-}
-
-static int InterfacePostReset(struct usb_interface *intf)
-{
- printk("Do Post chip reset setting here if it is required");
- return STATUS_SUCCESS;
-}
static struct usb_driver usbbcm_driver = {
- .name = "usbbcm",
- .probe = usbbcm_device_probe,
- .disconnect = usbbcm_disconnect,
- .suspend = InterfaceSuspend,
- .resume = InterfaceResume,
- .pre_reset=InterfacePreReset,
- .post_reset=InterfacePostReset,
- .id_table = InterfaceUsbtable,
- .supports_autosuspend = 1,
+ .name = "usbbcm",
+ .probe = usbbcm_device_probe,
+ .disconnect = usbbcm_disconnect,
+ .suspend = InterfaceSuspend,
+ .resume = InterfaceResume,
+ .id_table = InterfaceUsbtable,
+ .supports_autosuspend = 1,
};
+struct class *bcm_class;
-/*
-Function: InterfaceInitialize
-
-Description: This is the hardware specific initialization Function.
- Registering the driver with NDIS , other device specific NDIS
- and hardware initializations are done here.
-
-Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
+static __init int bcm_init(void)
+{
+ printk(KERN_INFO "%s: %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
+ printk(KERN_INFO "%s\n", DRV_COPYRIGHT);
+ bcm_class = class_create(THIS_MODULE, DRV_NAME);
+ if (IS_ERR(bcm_class)) {
+ printk(KERN_ERR DRV_NAME ": could not create class\n");
+ return PTR_ERR(bcm_class);
+ }
-Return: BCM_STATUS_SUCCESS - If Initialization of the
- HW Interface was successful.
- Other - If an error occured.
-*/
-INT InterfaceInitialize(void)
-{
-// BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Registering Usb driver!!");
return usb_register(&usbbcm_driver);
}
-INT InterfaceExit(void)
+static __exit void bcm_exit(void)
{
- //PMINI_ADAPTER psAdapter = NULL;
- int status = 0;
-
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Deregistering Usb driver!!");
usb_deregister(&usbbcm_driver);
- return status;
+ class_destroy(bcm_class);
}
-MODULE_LICENSE ("GPL");
+
+module_init(bcm_init);
+module_exit(bcm_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
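
The InterfaceInit.c rewrite above replaces the per-interface usb_register_dev()/usb_class_driver machinery with a module-level class plus module_init()/module_exit(). A minimal sketch of that shape, with assumed names (example_*); for robustness the sketch also destroys the class if usb_register() fails.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb.h>

static struct class *example_class;

static struct usb_driver example_driver = {
	.name = "example_usb",
	/* .probe, .disconnect, .id_table elided in this sketch */
};

static int __init example_init(void)
{
	int ret;

	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);

	ret = usb_register(&example_driver);
	if (ret)
		class_destroy(example_class);	/* unwind on registration failure */
	return ret;
}

static void __exit example_exit(void)
{
	usb_deregister(&example_driver);
	class_destroy(example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
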
diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
index e7a96e5c5c50..058315a64c05 100644
--- a/drivers/staging/bcm/InterfaceInit.h
+++ b/drivers/staging/bcm/InterfaceInit.h
@@ -8,9 +8,11 @@
#define BCM_USB_PRODUCT_ID_T3 0x0300
#define BCM_USB_PRODUCT_ID_T3B 0x0210
#define BCM_USB_PRODUCT_ID_T3L 0x0220
+#define BCM_USB_PRODUCT_ID_SM250 0xbccd
#define BCM_USB_PRODUCT_ID_SYM 0x15E
#define BCM_USB_PRODUCT_ID_1901 0xe017
#define BCM_USB_PRODUCT_ID_226 0x0132
+#define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
#define BCM_USB_MINOR_BASE 192
@@ -19,33 +21,7 @@ INT InterfaceInitialize(void);
INT InterfaceExit(void);
-#ifndef BCM_SHM_INTERFACE
-INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER Adapter);
-
INT usbbcm_worker_thread(PS_INTERFACE_ADAPTER psIntfAdapter);
-VOID InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter);
-
-#else
-INT InterfaceAdapterInit(PMINI_ADAPTER Adapter);
-#endif
-
-
-#if 0
-
-ULONG InterfaceClaimAdapter(PMINI_ADAPTER Adapter);
-
-VOID InterfaceDDRControllerInit(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceReset(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceRegisterResources(PMINI_ADAPTER Adapter);
-
-VOID InterfaceUnRegisterResources(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceFirmwareDownload(PMINI_ADAPTER Adapter);
-
-#endif
-
#endif
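
The new product IDs above only take effect because the id table is both extended and exported via MODULE_DEVICE_TABLE(), which is what lets udev/modprobe autoload the driver when a matching VID:PID enumerates. A minimal sketch with hypothetical IDs:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID	0x198f		/* hypothetical vendor id */
#define EXAMPLE_PID	0xbccd		/* hypothetical product id */

static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
	{ }				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, example_table);
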
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
index f928fe4d564d..220ff922bdcf 100644
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ b/drivers/staging/bcm/InterfaceIsr.c
@@ -1,6 +1,5 @@
#include "headers.h"
-#ifndef BCM_SHM_INTERFACE
static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
@@ -8,6 +7,10 @@ static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)urb->context;
PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter ;
+ if (netif_msg_intr(Adapter))
+ pr_info(PFX "%s: interrupt status %d\n",
+ Adapter->dev->name, status);
+
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Device has Got Removed.");
@@ -87,7 +90,7 @@ static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt IN endPoint has got halted/stalled...need to clear this");
Adapter->bEndPointHalted = TRUE ;
wake_up(&Adapter->tx_packet_wait_queue);
- urb->status = STATUS_SUCCESS ;;
+		urb->status = STATUS_SUCCESS;
return;
}
/* software-driven interface shutdown */
@@ -164,40 +167,3 @@ INT StartInterruptUrb(PS_INTERFACE_ADAPTER psIntfAdapter)
return status;
}
-/*
-Function: InterfaceEnableInterrupt
-
-Description: This is the hardware specific Function for configuring
- and enabling the interrupts on the device.
-
-Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
-
-
-Return: BCM_STATUS_SUCCESS - If configuring the interrupts was successful.
- Other - If an error occured.
-*/
-
-void InterfaceEnableInterrupt(PMINI_ADAPTER Adapter)
-{
-
-}
-
-/*
-Function: InterfaceDisableInterrupt
-
-Description: This is the hardware specific Function for disabling the interrupts on the device.
-
-Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
-
-
-Return: BCM_STATUS_SUCCESS - If disabling the interrupts was successful.
- Other - If an error occured.
-*/
-
-void InterfaceDisableInterrupt(PMINI_ADAPTER Adapter)
-{
-
-}
-
-#endif
-
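
The pr_info() added to read_int_callback() above is gated on netif_msg_intr(), i.e. on the msg_enable bitmask initialised from the debug module parameter introduced in InterfaceInit.c. A minimal sketch of that idiom, with assumed names (example_*):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>

static int debug = -1;				/* -1: fall back to the default mask */
module_param(debug, int, 0600);

struct example_priv {				/* hypothetical private data */
	u32 msg_enable;				/* netif_msg_*() macros test this field */
};

static void example_setup(struct example_priv *priv)
{
	/* netif_msg_init() turns a bit count or -1 into a NETIF_MSG_* bitmask */
	priv->msg_enable = netif_msg_init(debug,
			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_INTR);
}

static void example_int_callback(struct urb *urb)
{
	struct example_priv *priv = urb->context;

	if (netif_msg_intr(priv))		/* only log when the INTR bit is set */
		pr_info("example: interrupt status %d\n", urb->status);

	/* ... normal completion handling, then resubmit the URB ... */
}
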
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index 8fc893b37fe4..a51185b522cf 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -1,17 +1,5 @@
#include "headers.h"
-#ifndef BCM_SHM_INTERFACE
-
-PS_INTERFACE_ADAPTER
-InterfaceAdapterGet(PMINI_ADAPTER psAdapter)
-{
- if(psAdapter == NULL)
- {
- return NULL;
- }
- return (PS_INTERFACE_ADAPTER)(psAdapter->pvInterfaceAdapter);
-}
-
INT
InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
UINT addr,
@@ -102,7 +90,7 @@ InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
if((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB))
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL,"Currently Xaction is not allowed on the bus...");
- return EACCES;
+ return -EACCES;
}
if(psIntfAdapter->bSuspended ==TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE)
@@ -236,9 +224,7 @@ VOID Bcm_kill_all_URBs(PS_INTERFACE_ADAPTER psIntfAdapter)
}
/* Cancel All submitted TX URB's */
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cancelling All Submitted TX Urbs \n");
-
- for(i = 0; i < MAXIMUM_USB_TCB; i++)
+ for(i = 0; i < MAXIMUM_USB_TCB; i++)
{
tempUrb = psIntfAdapter->asUsbTcb[i].urb;
if(tempUrb)
@@ -248,9 +234,6 @@ VOID Bcm_kill_all_URBs(PS_INTERFACE_ADAPTER psIntfAdapter)
}
}
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cancelling All submitted Rx Urbs \n");
-
for(i = 0; i < MAXIMUM_USB_RCB; i++)
{
tempUrb = psIntfAdapter->asUsbRcb[i].urb;
@@ -261,16 +244,11 @@ VOID Bcm_kill_all_URBs(PS_INTERFACE_ADAPTER psIntfAdapter)
}
}
-
atomic_set(&psIntfAdapter->uNumTcbUsed, 0);
atomic_set(&psIntfAdapter->uCurrTcb, 0);
atomic_set(&psIntfAdapter->uNumRcbUsed, 0);
atomic_set(&psIntfAdapter->uCurrRcb, 0);
-
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "TCB: used- %d cur-%d\n", atomic_read(&psIntfAdapter->uNumTcbUsed), atomic_read(&psIntfAdapter->uCurrTcb));
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "RCB: used- %d cur-%d\n", atomic_read(&psIntfAdapter->uNumRcbUsed), atomic_read(&psIntfAdapter->uCurrRcb));
-
}
VOID putUsbSuspend(struct work_struct *work)
@@ -282,9 +260,6 @@ VOID putUsbSuspend(struct work_struct *work)
if(psIntfAdapter->bSuspended == FALSE)
usb_autopm_put_interface(intf);
- else
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Interface Resumed Completely\n");
}
-#endif
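
The EACCES fix above follows the kernel convention that functions report failure as a negative errno value and 0 on success, so callers can simply test the sign. A tiny sketch of the convention, using a hypothetical function:

#include <linux/errno.h>
#include <linux/types.h>

static int example_write_reg(bool bus_stopped)
{
	if (bus_stopped)
		return -EACCES;		/* negative errno, never the positive constant */

	/* ... perform the register write ... */
	return 0;			/* 0 means success */
}
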
diff --git a/drivers/staging/bcm/InterfaceMisc.h b/drivers/staging/bcm/InterfaceMisc.h
index 74c81d45cff4..6c9e39bf9889 100644
--- a/drivers/staging/bcm/InterfaceMisc.h
+++ b/drivers/staging/bcm/InterfaceMisc.h
@@ -1,9 +1,6 @@
#ifndef __INTERFACE_MISC_H
#define __INTERFACE_MISC_H
-PS_INTERFACE_ADAPTER
-InterfaceAdapterGet(PMINI_ADAPTER psAdapter);
-
INT
InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
UINT addr,
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index 6fee9684f2ef..533f8ebe0f84 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -1,5 +1,15 @@
#include "headers.h"
-extern int SearchVcid(PMINI_ADAPTER , unsigned short);
+
+static int SearchVcid(PMINI_ADAPTER Adapter, unsigned short usVcid)
+{
+	int iIndex = 0;
+
+	for (iIndex = (NO_OF_QUEUES-1); iIndex >= 0; iIndex--)
+		if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
+			return iIndex;
+	return NO_OF_QUEUES+1;
+
+}
static PUSB_RCB
@@ -38,13 +48,9 @@ static void read_bulk_callback(struct urb *urb)
PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
PLEADER pLeader = urb->transfer_buffer;
-
- #if 0
- int *puiBuffer = NULL;
- struct timeval tv;
- memset(&tv, 0, sizeof(tv));
- do_gettimeofday(&tv);
- #endif
+ if (unlikely(netif_msg_rx_status(Adapter)))
+ pr_info(PFX "%s: rx urb status %d length %d\n",
+ Adapter->dev->name, urb->status, urb->actual_length);
if((Adapter->device_removed == TRUE) ||
(TRUE == Adapter->bEndPointHalted) ||
@@ -89,10 +95,10 @@ static void read_bulk_callback(struct urb *urb)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Corrupted leader length...%d\n",
- pLeader->PLength);
- atomic_inc(&Adapter->RxPacketDroppedCount);
- atomic_add(pLeader->PLength, &Adapter->BadRxByteCount);
+ if (netif_msg_rx_err(Adapter))
+ pr_info(PFX "%s: corrupted leader length...%d\n",
+ Adapter->dev->name, pLeader->PLength);
+ ++Adapter->dev->stats.rx_dropped;
atomic_dec(&psIntfAdapter->uNumRcbUsed);
return;
}
@@ -145,10 +151,9 @@ static void read_bulk_callback(struct urb *urb)
skb_put (skb, pLeader->PLength + ETH_HLEN);
Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
- atomic_add(pLeader->PLength, &Adapter->GoodRxByteCount);
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Recived Data pkt of len :0x%X", pLeader->PLength);
- if(Adapter->if_up)
+ if(netif_running(Adapter->dev))
{
/* Moving ahead by ETH_HLEN to the data ptr as received from FW */
skb_pull(skb, ETH_HLEN);
@@ -173,9 +178,12 @@ static void read_bulk_callback(struct urb *urb)
else
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
- bcm_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
- atomic_inc(&Adapter->GoodRxPktCount);
+
+ ++Adapter->dev->stats.rx_packets;
+ Adapter->dev->stats.rx_bytes += pLeader->PLength;
+
for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
{
if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
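
The InterfaceRx.c hunks above replace the driver's private atomic RX counters with the standard net_device statistics and gate verbose output behind the netif_msg_*() helpers. A kernel-style sketch of that pattern, assuming a hypothetical my_priv structure with a msg_enable field (none of these names come from this driver):

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
	u32 msg_enable;		/* consulted by the netif_msg_*() helpers */
};

static void my_rx_complete(struct my_priv *priv, unsigned int len, bool bad)
{
	if (bad) {
		if (netif_msg_rx_err(priv))
			netdev_info(priv->dev, "dropping corrupted frame\n");
		priv->dev->stats.rx_dropped++;	/* standard per-device counter */
		return;
	}

	priv->dev->stats.rx_packets++;
	priv->dev->stats.rx_bytes += len;
}

Keeping the counters in dev->stats means tools such as ifconfig and ip -s see them without any driver-private ioctl.
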
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
index 771f7b34d2ec..a842de9de6b5 100644
--- a/drivers/staging/bcm/InterfaceTx.c
+++ b/drivers/staging/bcm/InterfaceTx.c
@@ -1,50 +1,5 @@
#include "headers.h"
-#ifndef BCM_SHM_INTERFACE
-
-/*
-Function: InterfaceTxDataPacket
-
-Description: This is the hardware specific Function for Transmitting
- data packet to the device.
-
-Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
- PVOID Packet - Packet Containing the data to be transmitted
- USHORT usVcid - VCID on which data packet is to be sent
-
-
-Return: BCM_STATUS_SUCCESS - If Tx was successful.
- Other - If an error occured.
-*/
-
-ULONG InterfaceTxDataPacket(PMINI_ADAPTER Adapter,PVOID Packet,USHORT usVcid)
-{
- ULONG Status = 0;
- return Status;
-}
-
-/*
-Function: InterfaceTxControlPacket
-
-Description: This is the hardware specific Function for Transmitting
- control packet to the device.
-
-Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
- PVOID pvBuffer - Buffer containg control packet
- UINT uiBufferLength - Buffer Length
-
-Return: BCM_STATUS_SUCCESS - If control packet transmit was successful.
- Other - If an error occured.
-*/
-
-ULONG InterfaceTxControlPacket(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT uiBufferLength)
-{
- ULONG Status = 0;
-
-
-
- return Status;
-}
/*this is transmit call-back(BULK OUT)*/
static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
{
@@ -54,10 +9,10 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
PMINI_ADAPTER psAdapter = psIntfAdapter->psAdapter ;
BOOLEAN bpowerDownMsg = FALSE ;
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-#if 0
- struct timeval tv;
- UINT time_ms = 0;
-#endif
+
+ if (unlikely(netif_msg_tx_done(Adapter)))
+ pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
+
if(urb->status != STATUS_SUCCESS)
{
if(urb->status == -EPIPE)
@@ -78,11 +33,6 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
if(TRUE == psAdapter->bPreparingForLowPowerMode)
{
- #if 0
- do_gettimeofday(&tv);
- time_ms = tv.tv_sec *1000 + tv.tv_usec/1000;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, " %s Idle Mode ACK_Sent got from device at time :0x%x", __FUNCTION__, time_ms);
- #endif
if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
(pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)))
@@ -152,17 +102,12 @@ static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
}
err_exit :
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
-#else
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
-#endif
}
-static __inline PUSB_TCB GetBulkOutTcb(PS_INTERFACE_ADAPTER psIntfAdapter)
+static PUSB_TCB GetBulkOutTcb(PS_INTERFACE_ADAPTER psIntfAdapter)
{
PUSB_TCB pTcb = NULL;
UINT index = 0;
@@ -183,20 +128,14 @@ static __inline PUSB_TCB GetBulkOutTcb(PS_INTERFACE_ADAPTER psIntfAdapter)
return pTcb;
}
-static __inline int TransmitTcb(PS_INTERFACE_ADAPTER psIntfAdapter, PUSB_TCB pTcb, PVOID data, int len)
+static int TransmitTcb(PS_INTERFACE_ADAPTER psIntfAdapter, PUSB_TCB pTcb, PVOID data, int len)
{
struct urb *urb = pTcb->urb;
int retval = 0;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- urb->transfer_buffer = usb_buffer_alloc(psIntfAdapter->udev, len,
- GFP_ATOMIC, &urb->transfer_dma);
-#else
urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
GFP_ATOMIC, &urb->transfer_dma);
-#endif
-
if (!urb->transfer_buffer)
{
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
@@ -255,5 +194,4 @@ int InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len)
return TransmitTcb(psIntfAdapter, pTcb, data, len);
}
-#endif
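
The InterfaceTx.c hunks above drop the pre-2.6.35 usb_buffer_alloc()/usb_buffer_free() compatibility branches in favour of usb_alloc_coherent()/usb_free_coherent(). A sketch of how that pair is typically used around a bulk URB; my_submit and my_complete are illustrative, not functions from this driver, and the urb is assumed to have been allocated and filled elsewhere:

#include <linux/usb.h>

static int my_submit(struct usb_device *udev, struct urb *urb, size_t len)
{
	urb->transfer_buffer = usb_alloc_coherent(udev, len, GFP_ATOMIC,
						  &urb->transfer_dma);
	if (!urb->transfer_buffer)
		return -ENOMEM;

	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	/* buffer is already DMA-mapped */
	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void my_complete(struct urb *urb)
{
	/* release with the same length and DMA handle used at allocation */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
}
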
diff --git a/drivers/staging/bcm/InterfaceTx.h b/drivers/staging/bcm/InterfaceTx.h
index 053f631e2042..273147577c17 100644
--- a/drivers/staging/bcm/InterfaceTx.h
+++ b/drivers/staging/bcm/InterfaceTx.h
@@ -3,11 +3,5 @@
INT InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len);
-
-ULONG InterfaceTxDataPacket(PMINI_ADAPTER Adapter,PVOID Packet,USHORT usVcid);
-
-ULONG InterfaceTxControlPacket(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT uiBufferLength);
-
-
#endif
diff --git a/drivers/staging/bcm/Interfacemain.h b/drivers/staging/bcm/Interfacemain.h
deleted file mode 100644
index e0db563c5e0f..000000000000
--- a/drivers/staging/bcm/Interfacemain.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _MAIN_
-#define _MAIN_
-#if 0
-typedef struct _MINI_ADAPTER
-{
- S_INTERFACE_ADAPTER stInterfaceAdapter;
-}MINI_ADAPTER,*PMINI_ADAPTER;
-
-#endif
-#endif
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
index cae382313ce9..f4cf41c0e46b 100644
--- a/drivers/staging/bcm/LeakyBucket.c
+++ b/drivers/staging/bcm/LeakyBucket.c
@@ -75,14 +75,14 @@ static VOID UpdateTokenCount(register PMINI_ADAPTER Adapter)
* Returns - The number of bytes allowed for transmission.
*
***********************************************************************/
-static __inline ULONG GetSFTokenCount(PMINI_ADAPTER Adapter, PacketInfo *psSF)
+static ULONG GetSFTokenCount(PMINI_ADAPTER Adapter, PacketInfo *psSF)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
/* Validate the parameters */
if(NULL == Adapter || (psSF < Adapter->PackInfo &&
(uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority]))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %ld\n", Adapter, (psSF-Adapter->PackInfo));
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
return 0;
}
@@ -94,51 +94,27 @@ static __inline ULONG GetSFTokenCount(PMINI_ADAPTER Adapter, PacketInfo *psSF)
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %ld Available %u\n",
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
psSF->uiPendedLast = 1;
}
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %ld not valid\n", psSF-Adapter->PackInfo);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
return 0;
}
-static __inline void RemovePacketFromQueue(PacketInfo *pPackInfo , struct sk_buff *Packet)
-{
- struct sk_buff *psQueueCurrent=NULL, *psLastQueueNode=NULL;
- psQueueCurrent = pPackInfo->FirstTxQueue;
- while(psQueueCurrent)
- {
- if(Packet == psQueueCurrent)
- {
- if(psQueueCurrent == pPackInfo->FirstTxQueue)
- {
- pPackInfo->FirstTxQueue=psQueueCurrent->next;
- if(psQueueCurrent==pPackInfo->LastTxQueue)
- pPackInfo->LastTxQueue=NULL;
- }
- else
- {
- psLastQueueNode->next=psQueueCurrent->next;
- }
- break;
- }
- psLastQueueNode = psQueueCurrent;
- psQueueCurrent=psQueueCurrent->next;
- }
-}
/**
@ingroup tx_functions
This function despatches packet from the specified queue.
@return Zero(success) or Negative value(failure)
*/
-static __inline INT SendPacketFromQueue(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
- PacketInfo *psSF, /**<Queue identifier*/
- struct sk_buff* Packet) /**<Pointer to the packet to be sent*/
+static INT SendPacketFromQueue(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
+ PacketInfo *psSF, /**<Queue identifier*/
+ struct sk_buff* Packet) /**<Pointer to the packet to be sent*/
{
INT Status=STATUS_FAILURE;
UINT uiIndex =0,PktLen = 0;
@@ -180,8 +156,7 @@ static __inline INT SendPacketFromQueue(PMINI_ADAPTER Adapter,/**<Logical Adapte
* Returns - None.
*
****************************************************************************/
-static __inline VOID CheckAndSendPacketFromIndex
-(PMINI_ADAPTER Adapter, PacketInfo *psSF)
+static VOID CheckAndSendPacketFromIndex(PMINI_ADAPTER Adapter, PacketInfo *psSF)
{
struct sk_buff *QueuePacket=NULL;
char *pControlPacket = NULL;
@@ -189,7 +164,7 @@ static __inline VOID CheckAndSendPacketFromIndex
int iPacketLen=0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%ld ====>", (psSF-Adapter->PackInfo));
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
if((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount))//Get data packet
{
if(!psSF->ucDirection )
@@ -197,10 +172,8 @@ static __inline VOID CheckAndSendPacketFromIndex
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle Mode..Hence blocking Data Packets..\n");
- return;
- }
+ return; /* in idle mode */
+
// Check for Free Descriptors
if(atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS)
{
@@ -208,9 +181,6 @@ static __inline VOID CheckAndSendPacketFromIndex
return ;
}
-#if 0
- PruneQueue(Adapter,(psSF-Adapter->PackInfo));
-#endif
spin_lock_bh(&psSF->SFQueueLock);
QueuePacket=psSF->FirstTxQueue;
@@ -240,7 +210,7 @@ static __inline VOID CheckAndSendPacketFromIndex
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %ld\n", psSF-Adapter->PackInfo);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
psSF->uiCurrentTokenCount, iPacketLen);
//this part indicates that becuase of non-availability of the tokens
@@ -290,17 +260,6 @@ static __inline VOID CheckAndSendPacketFromIndex
}
}
}
-
- if(Status != STATUS_SUCCESS) //Tx of data packet to device Failed
- {
- if(Adapter->bcm_jiffies == 0)
- Adapter->bcm_jiffies = jiffies;
- }
- else
- {
- Adapter->bcm_jiffies = 0;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<=====");
}
@@ -387,12 +346,7 @@ VOID transmit_packets(PMINI_ADAPTER Adapter)
if(exit_flag == TRUE )
break ;
}/* end of inner while loop */
- if(Adapter->bcm_jiffies == 0 &&
- atomic_read(&Adapter->TotalPacketCount) != 0 &&
- uiPrevTotalCount == atomic_read(&Adapter->TotalPacketCount))
- {
- Adapter->bcm_jiffies = jiffies;
- }
+
update_per_cid_rx (Adapter);
Adapter->txtransmit_running = 0;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
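
The LeakyBucket.c hunks print the queue index, which is the difference of two PacketInfo pointers, with %zd instead of %ld. A small stand-alone C illustration of why a pointer difference needs its own length modifier (ptrdiff_t, %td in standard C; the kernel's printk also accepts %zd, which is what the hunks use):

#include <stdio.h>
#include <stddef.h>

struct packet_info { int tokens; };

int main(void)
{
	struct packet_info queues[8];
	struct packet_info *sf = &queues[3];
	ptrdiff_t index = sf - queues;	/* element count, not bytes */

	printf("queue index %td\n", index);	/* prints 3 */
	return 0;
}
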
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
index 0241234605f1..feb351578c8b 100644
--- a/drivers/staging/bcm/Macros.h
+++ b/drivers/staging/bcm/Macros.h
@@ -4,10 +4,6 @@
#ifndef __MACROS_H__
#define __MACROS_H__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define kthread_run(threadfn,data,datafmt)(struct task_struct *)kernel_thread(threadfn,data,0)
-#endif
-
#define TX_TIMER_PERIOD 10 //10 msec
#define MAX_CLASSIFIERS 100
//#define MAX_CLASSIFIERS_PER_SF 20
@@ -17,10 +13,9 @@
#define MAX_DATA_PKTS 200
#define MAX_ETH_SIZE 1536
#define MAX_CNTL_PKT_SIZE 2048
-/* TIMER RELATED */
-#define JIFFIES_2_QUADPART() (ULONG)(jiffies * 10000) // jiffies(1msec) to Quadpart(100nsec)
#define MTU_SIZE 1400
+#define TX_QLEN 5
#define MAC_ADDR_REGISTER 0xbf60d000
@@ -266,7 +261,7 @@ typedef enum _E_PHS_DSC_ACTION
#define FIRMWARE_BEGIN_ADDR 0xBFC00000
-#define INVALID_QUEUE_INDEX (USHORT)-1
+#define INVALID_QUEUE_INDEX NO_OF_QUEUES
#define INVALID_PID (pid_t)-1
#define DDR_80_MHZ 0
@@ -300,12 +295,7 @@ typedef enum _E_PHS_DSC_ACTION
/* Idle Mode Related Registers */
#define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C
-#ifdef BCM_SHM_INTERFACE
-#define SW_ABORT_IDLEMODE_LOC 0xbfc02f9c
-#define CPE_VIRTUAL_MAILBOX_REG 0xBFC02E58
-#else
#define SW_ABORT_IDLEMODE_LOC 0x0FF01FFC
-#endif
#define SW_ABORT_IDLEMODE_PATTERN 0xd0ea1d1e
#define DEVICE_INT_OUT_EP_REG0 0x0F011870
@@ -355,12 +345,7 @@ typedef enum ePMU_MODES
HYBRID_MODE_6 = 2
}PMU_MODE;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
-#define MAX_RDM_WRM_RETIRES 16
-#else
#define MAX_RDM_WRM_RETIRES 1
-#endif
-
enum eAbortPattern {
ABORT_SHUTDOWN_MODE = 1,
@@ -369,27 +354,6 @@ enum eAbortPattern {
ABORT_IDLE_SYNCDOWN = 3
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
- #define GET_BCM_ADAPTER(net_dev) ({\
- PMINI_ADAPTER __Adapter = NULL; \
- if (net_dev) { \
- __Adapter = (PMINI_ADAPTER)(net_dev->priv); \
- } \
- else { \
- __Adapter = NULL; \
- }__Adapter;} )
-#else
- #define GET_BCM_ADAPTER(net_dev) ({\
- PMINI_ADAPTER __Adapter = NULL; \
- if (net_dev) { \
- __Adapter = (PMINI_ADAPTER)(*((unsigned long *)netdev_priv(net_dev))); \
- } \
- else { \
- __Adapter = NULL; \
- }__Adapter;})
-
-
-#endif
/* Offsets used by driver in skb cb variable */
#define SKB_CB_CLASSIFICATION_OFFSET 0
diff --git a/drivers/staging/bcm/Makefile b/drivers/staging/bcm/Makefile
index c3ae25af670a..652b7f87737c 100644
--- a/drivers/staging/bcm/Makefile
+++ b/drivers/staging/bcm/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_BCM_WIMAX) += bcm_wimax.o
bcm_wimax-y := InterfaceDld.o InterfaceIdleMode.o InterfaceInit.o InterfaceRx.o \
InterfaceIsr.o InterfaceMisc.o InterfaceTx.o \
- Arp.o CmHost.o Debug.o IPv6Protocol.o Qos.o Transmit.o\
+ CmHost.o IPv6Protocol.o Qos.o Transmit.o\
Bcmnet.o DDRInit.o HandleControlPacket.o\
LeakyBucket.o Misc.o sort.o Bcmchar.o hostmibs.o PHSModule.o\
- Osal_Misc.o led_control.o nvm.o vendorspecificextn.o
+ led_control.o nvm.o vendorspecificextn.o
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index 22550f745917..f585aae9cf8b 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -1,5 +1,12 @@
#include "headers.h"
+static int BcmFileDownload(PMINI_ADAPTER Adapter, const char *path,
+ unsigned int loc);
+static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter);
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer);
+static int bcm_parse_target_params(PMINI_ADAPTER Adapter);
+static void beceem_protocol_reset (PMINI_ADAPTER Adapter);
+
static VOID default_wimax_protocol_initialize(PMINI_ADAPTER Adapter)
{
@@ -60,21 +67,11 @@ InitAdapter(PMINI_ADAPTER psAdapter)
//init_waitqueue_head(&psAdapter->device_wake_queue);
psAdapter->fw_download_done=FALSE;
- psAdapter->pvOsDepData = (PLINUX_DEP_DATA) kmalloc(sizeof(LINUX_DEP_DATA),
- GFP_KERNEL);
-
- if(psAdapter->pvOsDepData == NULL)
- {
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Linux Specific Data allocation failed");
- return -ENOMEM;
- }
- memset(psAdapter->pvOsDepData, 0, sizeof(LINUX_DEP_DATA));
default_wimax_protocol_initialize(psAdapter);
for (i=0;i<MAX_CNTRL_PKTS;i++)
{
- psAdapter->txctlpacket[i] = (char *)kmalloc(MAX_CNTL_PKT_SIZE,
- GFP_KERNEL);
+ psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL);
if(!psAdapter->txctlpacket[i])
{
BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i);
@@ -117,7 +114,7 @@ InitAdapter(PMINI_ADAPTER psAdapter)
VOID AdapterFree(PMINI_ADAPTER Adapter)
{
- INT count = 0;
+ int count;
beceem_protocol_reset(Adapter);
@@ -125,72 +122,66 @@ VOID AdapterFree(PMINI_ADAPTER Adapter)
if(Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
kthread_stop (Adapter->control_packet_handler);
+
if(Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
- kthread_stop (Adapter->transmit_packet_thread);
- wake_up(&Adapter->process_read_wait_queue);
+ kthread_stop (Adapter->transmit_packet_thread);
+
+ wake_up(&Adapter->process_read_wait_queue);
+
if(Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
kthread_stop (Adapter->LEDInfo.led_cntrl_threadid);
- bcm_unregister_networkdev(Adapter);
+
+ unregister_networkdev(Adapter);
+
+ /* FIXME: use proper wait_event and refcounting */
while(atomic_read(&Adapter->ApplicationRunning))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n",atomic_read(&Adapter->ApplicationRunning));
msleep(100);
}
unregister_control_device_interface(Adapter);
- if(Adapter->dev && !IS_ERR(Adapter->dev))
- free_netdev(Adapter->dev);
- if(Adapter->pstargetparams != NULL)
- {
- bcm_kfree(Adapter->pstargetparams);
- }
+
+ kfree(Adapter->pstargetparams);
+
for (count =0;count < MAX_CNTRL_PKTS;count++)
- {
- if(Adapter->txctlpacket[count])
- bcm_kfree(Adapter->txctlpacket[count]);
- }
+ kfree(Adapter->txctlpacket[count]);
+
FreeAdapterDsxBuffer(Adapter);
- if(Adapter->pvOsDepData)
- bcm_kfree (Adapter->pvOsDepData);
- if(Adapter->pvInterfaceAdapter)
- bcm_kfree(Adapter->pvInterfaceAdapter);
+
+ kfree(Adapter->pvInterfaceAdapter);
//Free the PHS Interface
PhsCleanup(&Adapter->stBCMPhsContext);
-#ifndef BCM_SHM_INTERFACE
BcmDeAllocFlashCSStructure(Adapter);
-#endif
- bcm_kfree (Adapter);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "<========\n");
+ free_netdev(Adapter->dev);
}
-
-int create_worker_threads(PMINI_ADAPTER psAdapter)
+static int create_worker_threads(PMINI_ADAPTER psAdapter)
{
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Init Threads...");
// Rx Control Packets Processing
psAdapter->control_packet_handler = kthread_run((int (*)(void *))
- control_packet_handler, psAdapter, "CtrlPktHdlr");
+ control_packet_handler, psAdapter, "%s-rx", DRV_NAME);
if(IS_ERR(psAdapter->control_packet_handler))
{
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Kernel Thread, but still returning success\n");
+ pr_notice(DRV_NAME ": could not create control thread\n");
return PTR_ERR(psAdapter->control_packet_handler);
}
+
// Tx Thread
psAdapter->transmit_packet_thread = kthread_run((int (*)(void *))
- tx_pkt_handler, psAdapter, "TxPktThread");
+ tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME);
if(IS_ERR (psAdapter->transmit_packet_thread))
{
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Kernel Thread, but still returning success");
+		pr_notice(DRV_NAME ": could not create transmit thread\n");
kthread_stop(psAdapter->control_packet_handler);
return PTR_ERR(psAdapter->transmit_packet_thread);
}
return 0;
}
-
-static inline struct file *open_firmware_file(PMINI_ADAPTER Adapter, char *path)
+static struct file *open_firmware_file(PMINI_ADAPTER Adapter, const char *path)
{
struct file *flp=NULL;
mm_segment_t oldfs;
@@ -200,26 +191,20 @@ static inline struct file *open_firmware_file(PMINI_ADAPTER Adapter, char *path)
set_fs(oldfs);
if(IS_ERR(flp))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable To Open File %s, err %lx",
- path, PTR_ERR(flp));
- flp = NULL;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got file descriptor pointer of %s!",
- path);
+		pr_err(DRV_NAME ": Unable To Open File %s, err %ld\n",
+ path, PTR_ERR(flp));
+ flp = NULL;
}
- if(Adapter->device_removed)
- {
- flp = NULL;
- }
+
+ if(Adapter->device_removed)
+ flp = NULL;
return flp;
}
-int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
- char *path, /**< path to image file */
+static int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
+ const char *path, /**< path to image file */
unsigned int loc /**< Download Address on the chip*/
)
{
@@ -248,9 +233,7 @@ int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
goto exit_download;
}
oldfs=get_fs();set_fs(get_ds());
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
vfs_llseek(flp, 0, 0);
-#endif
set_fs(oldfs);
if(Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter,
flp, loc))
@@ -265,29 +248,8 @@ exit_download:
if(flp && !(IS_ERR(flp)))
filp_close(flp, current->files);
set_fs(oldfs);
- do_gettimeofday(&tv);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "file download done at %lx", ((tv.tv_sec * 1000) +
- (tv.tv_usec/1000)));
- return errorno;
-}
-
-void bcm_kfree_skb(struct sk_buff *skb)
-{
- if(skb)
- {
- kfree_skb(skb);
- }
- skb = NULL ;
-}
-
-VOID bcm_kfree(VOID *ptr)
-{
- if(ptr)
- {
- kfree(ptr);
- }
- ptr = NULL ;
+ return errorno;
}
/**
@@ -395,13 +357,6 @@ INT CopyBufferToControlPacket(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
/*Setting bIdleMode_tx_from_host to TRUE to indicate LED control thread to represent
the wake up from idlemode is from host*/
//Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE;
-#if 0
- if(STATUS_SUCCESS != InterfaceIdleModeWakeup(Adapter))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Idle Mode Wake up Failed\n");
- return STATUS_FAILURE;
- }
-#endif
Adapter->bWakeUpDevice = TRUE;
wake_up(&Adapter->process_rx_cntrlpkt);
@@ -489,9 +444,6 @@ INT CopyBufferToControlPacket(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
atomic_inc(&Adapter->index_wr_txcntrlpkt);
BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Calling transmit_packets");
atomic_set(&Adapter->TxPktAvail, 1);
-#ifdef BCM_SHM_INTERFACE
- virtual_mail_box_interrupt();
-#endif
wake_up(&Adapter->tx_packet_wait_queue);
}
else
@@ -530,18 +482,6 @@ static VOID SendStatisticsPointerRequest(PMINI_ADAPTER Adapter,
#endif
-void SendLinkDown(PMINI_ADAPTER Adapter)
-{
- LINK_REQUEST stLinkDownRequest;
- memset(&stLinkDownRequest, 0, sizeof(LINK_REQUEST));
- stLinkDownRequest.Leader.Status=LINK_UP_CONTROL_REQ;
- stLinkDownRequest.Leader.PLength=sizeof(ULONG);//minimum 4 bytes
- stLinkDownRequest.szData[0]=LINK_DOWN_REQ_PAYLOAD;
- Adapter->bLinkDownRequested = TRUE;
-
- CopyBufferToControlPacket(Adapter,&stLinkDownRequest);
-}
-
/******************************************************************
* Function - LinkMessage()
*
@@ -552,7 +492,7 @@ void SendLinkDown(PMINI_ADAPTER Adapter)
*
* Returns - None.
*******************************************************************/
-__inline VOID LinkMessage(PMINI_ADAPTER Adapter)
+VOID LinkMessage(PMINI_ADAPTER Adapter)
{
PLINK_REQUEST pstLinkRequest=NULL;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
@@ -594,7 +534,7 @@ __inline VOID LinkMessage(PMINI_ADAPTER Adapter)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
CopyBufferToControlPacket(Adapter, pstLinkRequest);
- bcm_kfree(pstLinkRequest);
+ kfree(pstLinkRequest);
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
return;
@@ -614,8 +554,8 @@ __inline VOID LinkMessage(PMINI_ADAPTER Adapter)
VOID StatisticsResponse(PMINI_ADAPTER Adapter,PVOID pvBuffer)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>",__FUNCTION__);
- Adapter->StatisticsPointer = ntohl(*(PULONG)pvBuffer);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %lx", Adapter->StatisticsPointer);
+ Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (UINT)Adapter->StatisticsPointer);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====",__FUNCTION__);
return;
}
@@ -764,7 +704,7 @@ void SendIdleModeResponse(PMINI_ADAPTER Adapter)
/* Wake the LED Thread with IDLEMODE_ENTER State */
Adapter->DriverState = LOWPOWER_MODE_ENTER;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld",jiffies);;
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld",jiffies);
wake_up(&Adapter->LEDInfo.notify_led_event);
/* Wait for 1 SEC for LED to OFF */
@@ -787,12 +727,10 @@ void SendIdleModeResponse(PMINI_ADAPTER Adapter)
down(&Adapter->rdmwrmsync);
Adapter->bPreparingForLowPowerMode = TRUE;
up(&Adapter->rdmwrmsync);
-#ifndef BCM_SHM_INTERFACE
//Killing all URBS.
if(Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
}
else
{
@@ -811,9 +749,7 @@ void SendIdleModeResponse(PMINI_ADAPTER Adapter)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"fail to send the Idle mode Request \n");
Adapter->bPreparingForLowPowerMode = FALSE;
-#ifndef BCM_SHM_INTERFACE
StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
}
do_gettimeofday(&tv);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitter to Q :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
@@ -980,12 +916,10 @@ VOID DumpPackInfo(PMINI_ADAPTER Adapter)
}
-
-__inline int reset_card_proc(PMINI_ADAPTER ps_adapter)
+int reset_card_proc(PMINI_ADAPTER ps_adapter)
{
int retval = STATUS_SUCCESS;
-#ifndef BCM_SHM_INTERFACE
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
unsigned int value = 0, uiResetValue = 0;
@@ -1006,11 +940,9 @@ __inline int reset_card_proc(PMINI_ADAPTER ps_adapter)
wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
}
-#ifndef BCM_SHM_INTERFACE
//killing all submitted URBs.
psIntfAdapter->psAdapter->StopAllXaction = TRUE ;
Bcm_kill_all_URBs(psIntfAdapter);
-#endif
/* Reset the UMA-B Device */
if(ps_adapter->chip_id >= T3LPB)
{
@@ -1111,11 +1043,10 @@ __inline int reset_card_proc(PMINI_ADAPTER ps_adapter)
err_exit :
psIntfAdapter->psAdapter->StopAllXaction = FALSE ;
-#endif
return retval;
}
-__inline int run_card_proc(PMINI_ADAPTER ps_adapter )
+int run_card_proc(PMINI_ADAPTER ps_adapter )
{
unsigned int value=0;
{
@@ -1146,21 +1077,17 @@ __inline int run_card_proc(PMINI_ADAPTER ps_adapter )
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
{
- UINT status = STATUS_SUCCESS;
+ int status;
UINT value = 0;
-#ifdef BCM_SHM_INTERFACE
- unsigned char *pConfigFileAddr = (unsigned char *)CPE_MACXVI_CFG_ADDR;
-#endif
/*
* Create the threads first and then download the
* Firm/DDR Settings..
*/
- if((status = create_worker_threads(ps_adapter))<0)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Cannot create thread");
+ status = create_worker_threads(ps_adapter);
+ if (status<0)
return status;
- }
+
/*
* For Downloading the Firm, parse the cfg file first.
*/
@@ -1169,7 +1096,6 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
return status;
}
-#ifndef BCM_SHM_INTERFACE
if(ps_adapter->chip_id >= T3LPB)
{
rdmalt(ps_adapter, SYS_CFG, &value, sizeof (value));
@@ -1187,7 +1113,7 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
status = ddr_init(ps_adapter);
if(status)
{
- BCM_DEBUG_PRINT (ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "ddr_init Failed\n");
+		pr_err(DRV_NAME ": ddr_init Failed\n");
return status;
}
@@ -1201,7 +1127,6 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
goto OUT;
}
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "CFG file downloaded");
if(register_networkdev(ps_adapter))
{
@@ -1266,12 +1191,6 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
goto OUT;
}
}
-#if 0
- else if(psAdapter->eNVMType == NVM_EEPROM)
- {
- PropagateCalParamsFromEEPROMToMemory();
- }
-#endif
/* Download Firmare */
if ((status = BcmFileDownload( ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR)))
@@ -1280,7 +1199,6 @@ int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
goto OUT;
}
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "BIN file downloaded");
status = run_card_proc(ps_adapter);
if(status)
{
@@ -1299,68 +1217,19 @@ OUT:
wake_up(&ps_adapter->LEDInfo.notify_led_event);
}
-#else
-
- ps_adapter->bDDRInitDone = TRUE;
- //Initializing the NVM.
- BcmInitNVM(ps_adapter);
-
- //Propagating the cal param from Flash to DDR
- value = 0;
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
- wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
- if(ps_adapter->eNVMType == NVM_FLASH)
- {
- status = PropagateCalParamsFromFlashToMemory(ps_adapter);
- if(status)
- {
- printk("\nPropogation of Cal param from flash to DDR failed ..\n" );
- }
- }
-
- //Copy config file param to DDR.
- memcpy(pConfigFileAddr,ps_adapter->pstargetparams, sizeof(STARGETPARAMS));
-
- if(register_networkdev(ps_adapter))
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
- return -EIO;
- }
-
-
- status = InitLedSettings (ps_adapter);
- if(status)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_PRINTK, 0, 0,"INIT LED FAILED\n");
- return status;
- }
-
-
- if(register_control_device_interface(ps_adapter) < 0)
- {
- BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
- return -EIO;
- }
-
- ps_adapter->fw_download_done = TRUE;
-#endif
return status;
}
-int bcm_parse_target_params(PMINI_ADAPTER Adapter)
+static int bcm_parse_target_params(PMINI_ADAPTER Adapter)
{
-#ifdef BCM_SHM_INTERFACE
- extern void read_cfg_file(PMINI_ADAPTER Adapter);
-#endif
struct file *flp=NULL;
mm_segment_t oldfs={0};
- char *buff = NULL;
+ char *buff;
int len = 0;
loff_t pos = 0;
- buff=(PCHAR)kmalloc(BUFFER_1K, GFP_KERNEL);
+ buff=kmalloc(BUFFER_1K, GFP_KERNEL);
if(!buff)
{
return -ENOMEM;
@@ -1368,14 +1237,14 @@ int bcm_parse_target_params(PMINI_ADAPTER Adapter)
if((Adapter->pstargetparams =
kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL)) == NULL)
{
- bcm_kfree(buff);
+ kfree(buff);
return -ENOMEM;
}
flp=open_firmware_file(Adapter, CFG_FILE);
if(!flp) {
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE \n", CFG_FILE);
- bcm_kfree(buff);
- bcm_kfree(Adapter->pstargetparams);
+ kfree(buff);
+ kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
return -ENOENT;
}
@@ -1386,8 +1255,8 @@ int bcm_parse_target_params(PMINI_ADAPTER Adapter)
if(len != sizeof(STARGETPARAMS))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Mismatch in Target Param Structure!\n");
- bcm_kfree(buff);
- bcm_kfree(Adapter->pstargetparams);
+ kfree(buff);
+ kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
filp_close(flp, current->files);
return -ENOENT;
@@ -1399,37 +1268,34 @@ int bcm_parse_target_params(PMINI_ADAPTER Adapter)
* Values in Adapter->pstargetparams are in network byte order
*/
memcpy(Adapter->pstargetparams, buff, sizeof(STARGETPARAMS));
- bcm_kfree (buff);
+ kfree (buff);
beceem_parse_target_struct(Adapter);
-#ifdef BCM_SHM_INTERFACE
- read_cfg_file(Adapter);
-
-#endif
return STATUS_SUCCESS;
}
void beceem_parse_target_struct(PMINI_ADAPTER Adapter)
{
- UINT uiHostDrvrCfg6 =0, uiEEPROMFlag = 0;;
+ UINT uiHostDrvrCfg6 =0, uiEEPROMFlag = 0;
if(ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoSyncup is Disabled\n");
+ pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
Adapter->AutoSyncup = FALSE;
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoSyncup is Enabled\n");
+ pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
Adapter->AutoSyncup = TRUE;
}
+
if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Enabling autolink up");
+ pr_info(DRV_NAME ": Enabling autolink up");
Adapter->AutoLinkUp = TRUE;
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Disabling autolink up");
+ pr_info(DRV_NAME ": Disabling autolink up");
Adapter->AutoLinkUp = FALSE;
}
// Setting the DDR Setting..
@@ -1438,59 +1304,54 @@ void beceem_parse_target_struct(PMINI_ADAPTER Adapter)
Adapter->ulPowerSaveMode =
(ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT,DBG_LVL_ALL, "Power Save Mode: %lx\n",
- Adapter->ulPowerSaveMode);
+ pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting);
+ pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode);
if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Enabling Auto Firmware Download\n");
+ pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
Adapter->AutoFirmDld = TRUE;
}
else
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Disabling Auto Firmware Download\n");
+ pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
Adapter->AutoFirmDld = FALSE;
}
uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"MIPSConfig : 0x%X\n",Adapter->bMipsConfig);
+ pr_info(DRV_NAME ": MIPSConfig : 0x%X\n",Adapter->bMipsConfig);
//used for backward compatibility.
Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01;
Adapter->PmuMode= (uiHostDrvrCfg6 >> 24 ) & 0x03;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "PMU MODE: %x", Adapter->PmuMode);
+ pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode);
if((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT ) & (0x01))
{
Adapter->bDoSuspend = TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Making DoSuspend TRUE as per configFile");
+ pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
}
uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "uiEEPROMFlag : 0x%X\n",uiEEPROMFlag);
+ pr_info(DRV_NAME ": uiEEPROMFlag : 0x%X\n",uiEEPROMFlag);
Adapter->eNVMType = (NVM_TYPE)((uiEEPROMFlag>>4)&0x3);
-
Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
- //printk(("bStatusWrite : 0x%X\n", Adapter->bStatusWrite));
Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
- //printk(("uiSectorSize : 0x%X\n", Adapter->uiSectorSizeInCFG));
Adapter->bSectorSizeOverride =(bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
- //printk(MP_INIT,("bSectorSizeOverride : 0x%X\n",Adapter->bSectorSizeOverride));
if(ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) &0x01)
Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE;
- //autocorrection part
+
if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
doPowerAutoCorrection(Adapter);
}
-VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
+static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
{
- UINT reporting_mode = 0;
+ UINT reporting_mode;
reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) &0x02 ;
psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1);
@@ -1504,20 +1365,9 @@ VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB))
{
//If reporting mode is enable, switch PMU to PMC
- #if 0
- if(reporting_mode == FALSE)
- {
- psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN;
- psAdapter->bDoSuspend = TRUE;
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"PMU selected ....");
-
- }
- else
- #endif
{
psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
psAdapter->bDoSuspend =FALSE;
- BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"PMC selected..");
}
@@ -1540,12 +1390,10 @@ VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
#if 0
static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
{
- unsigned char *pucmacaddr = NULL;
- int status = 0, i=0;
- unsigned int temp =0;
+ int status = 0, i = 0;
+ unsigned int temp = 0;
+ unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
-
- pucmacaddr = (unsigned char *)kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
if(!pucmacaddr)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
@@ -1558,7 +1406,7 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
if(status != STATUS_SUCCESS)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm Failed..\n");
- bcm_kfree(pucmacaddr);
+ kfree(pucmacaddr);
pucmacaddr = NULL;
goto OUT;
}
@@ -1568,7 +1416,7 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
if(status != STATUS_SUCCESS)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
- bcm_kfree(pucmacaddr);
+ kfree(pucmacaddr);
pucmacaddr = NULL;
goto OUT;
}
@@ -1580,43 +1428,6 @@ OUT:
}
#endif
-#if 0
-INT ReadMacAddressFromEEPROM(PMINI_ADAPTER Adapter)
-{
- unsigned char *puMacAddr = NULL;
- int i =0;
-
- puMacAddr = ReadMacAddrEEPROM(Adapter,0x200);
- if(!puMacAddr)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Couldn't retrieve the Mac Address\n");
- return STATUS_FAILURE;
- }
- else
- {
- if((puMacAddr[0] == 0x0 && puMacAddr[1] == 0x0 &&
- puMacAddr[2] == 0x0 && puMacAddr[3] == 0x0 &&
- puMacAddr[4] == 0x0 && puMacAddr[5] == 0x0) ||
- (puMacAddr[0] == 0xFF && puMacAddr[1] == 0xFF &&
- puMacAddr[2] == 0xFF && puMacAddr[3] == 0xFF &&
- puMacAddr[4] == 0xFF && puMacAddr[5] == 0xFF))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Invalid Mac Address\n");
- bcm_kfree(puMacAddr);
- return STATUS_FAILURE;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "The Mac Address received is: \n");
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%02x ", Adapter->dev->dev_addr[i]);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"\n");
- bcm_kfree(puMacAddr);
- }
- return STATUS_SUCCESS;
-}
-#endif
static void convertEndian(B_UINT8 rwFlag, PUINT puiBuffer, UINT uiByteCount)
{
@@ -1640,81 +1451,21 @@ int rdm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
INT uiRetVal =0;
-#ifndef BCM_SHM_INTERFACE
uiRetVal = Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
uiAddress, pucBuff, sSize);
if(uiRetVal < 0)
return uiRetVal;
-#else
- int indx;
- uiRetVal = STATUS_SUCCESS;
- if(uiAddress & 0x10000000) {
- // DDR Memory Access
- uiAddress |= CACHE_ADDRESS_MASK;
- memcpy(pucBuff,(unsigned char *)uiAddress ,sSize);
- }
- else {
- // Register, SPRAM, Flash
- uiAddress |= UNCACHE_ADDRESS_MASK;
- if ((uiAddress & FLASH_ADDR_MASK) == (FLASH_CONTIGIOUS_START_ADDR_BCS350 & FLASH_ADDR_MASK))
- {
- #if defined(FLASH_DIRECT_ACCESS)
- memcpy(pucBuff,(unsigned char *)uiAddress ,sSize);
- #else
- printk("\nInvalid GSPI ACCESS :Addr :%#X", uiAddress);
- uiRetVal = STATUS_FAILURE;
- #endif
- }
- else if(((unsigned int )uiAddress & 0x3) ||
- ((unsigned int )pucBuff & 0x3) ||
- ((unsigned int )sSize & 0x3)) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"rdmalt :unalligned register access uiAddress = %x,pucBuff = %x size = %x\n",(unsigned int )uiAddress,(unsigned int )pucBuff,(unsigned int )sSize);
- uiRetVal = STATUS_FAILURE;
- }
- else {
- for (indx=0;indx<sSize;indx+=4){
- *(PUINT)(pucBuff + indx) = *(PUINT)(uiAddress + indx);
- }
- }
- }
-#endif
return uiRetVal;
}
int wrm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
int iRetVal;
-#ifndef BCM_SHM_INTERFACE
iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter,
uiAddress, pucBuff, sSize);
-#else
- int indx;
- if(uiAddress & 0x10000000) {
- // DDR Memory Access
- uiAddress |= CACHE_ADDRESS_MASK;
- memcpy((unsigned char *)(uiAddress),pucBuff,sSize);
- }
- else {
- // Register, SPRAM, Flash
- uiAddress |= UNCACHE_ADDRESS_MASK;
-
- if(((unsigned int )uiAddress & 0x3) ||
- ((unsigned int )pucBuff & 0x3) ||
- ((unsigned int )sSize & 0x3)) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"wrmalt: unalligned register access uiAddress = %x,pucBuff = %x size = %x\n",(unsigned int )uiAddress,(unsigned int )pucBuff,(unsigned int )sSize);
- iRetVal = STATUS_FAILURE;
- }
- else {
- for (indx=0;indx<sSize;indx+=4) {
- *(PUINT)(uiAddress + indx) = *(PUINT)(pucBuff + indx);
- }
- }
- }
- iRetVal = STATUS_SUCCESS;
-#endif
return iRetVal;
}
@@ -1735,26 +1486,7 @@ int rdmalt (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t size)
return uiRetVal;
}
-int rdmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
-{
-
- INT status = STATUS_SUCCESS ;
- down(&Adapter->rdmwrmsync);
-
- if((Adapter->IdleMode == TRUE) ||
- (Adapter->bShutStatus ==TRUE) ||
- (Adapter->bPreparingForLowPowerMode ==TRUE))
- {
- status = -EACCES;
- goto exit;
- }
- status = rdm(Adapter, uiAddress, pucBuff, sSize);
-
-exit:
- up(&Adapter->rdmwrmsync);
- return status ;
-}
int wrmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
INT status = STATUS_SUCCESS ;
@@ -1921,10 +1653,8 @@ static VOID SendShutModeResponse(PMINI_ADAPTER Adapter)
Adapter->bPreparingForLowPowerMode = TRUE;
up(&Adapter->rdmwrmsync);
//Killing all URBS.
-#ifndef BCM_SHM_INTERFACE
if(Adapter->bDoSuspend == TRUE)
Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
}
else
{
@@ -1943,14 +1673,12 @@ static VOID SendShutModeResponse(PMINI_ADAPTER Adapter)
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"fail to send the Idle mode Request \n");
Adapter->bPreparingForLowPowerMode = FALSE;
-#ifndef BCM_SHM_INTERFACE
StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
}
}
-void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
{
B_UINT32 uiResetValue = 0;
@@ -2077,11 +1805,7 @@ void update_per_sf_desc_cnts( PMINI_ADAPTER Adapter)
if(!atomic_read (&Adapter->uiMBupdate))
return;
-#ifdef BCM_SHM_INTERFACE
- if(rdmalt(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS)<0)
-#else
if(rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS)<0)
-#endif
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
return;
@@ -2107,9 +1831,7 @@ void update_per_sf_desc_cnts( PMINI_ADAPTER Adapter)
void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex)
{
struct sk_buff* PacketToDrop=NULL;
- struct net_device_stats* netstats=NULL;
-
- netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
+ struct net_device_stats* netstats = &Adapter->dev->stats;
spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
@@ -2130,25 +1852,23 @@ void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex)
Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len;
Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
- bcm_kfree_skb(PacketToDrop);
+ dev_kfree_skb(PacketToDrop);
atomic_dec(&Adapter->TotalPacketCount);
- atomic_inc(&Adapter->TxDroppedPacketCount);
-
}
}
spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
}
-void beceem_protocol_reset (PMINI_ADAPTER Adapter)
+static void beceem_protocol_reset (PMINI_ADAPTER Adapter)
{
- int i =0;
+ int i;
- if(NULL != Adapter->dev)
- {
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- }
+ if (netif_msg_link(Adapter))
+ pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name);
+
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
Adapter->IdleMode = FALSE;
Adapter->LinkUpStatus = FALSE;
@@ -2166,78 +1886,18 @@ void beceem_protocol_reset (PMINI_ADAPTER Adapter)
Adapter->TimerActive = FALSE;
memset(Adapter->astFragmentedPktClassifierTable, 0,
- sizeof(S_FRAGMENTED_PACKET_INFO) *
- MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
+ sizeof(S_FRAGMENTED_PACKET_INFO) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
for(i = 0;i<HiPriority;i++)
{
//resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF.
// It is same between MIBs and SF.
- memset((PVOID)&Adapter->PackInfo[i],0,sizeof(S_MIBS_SERVICEFLOW_TABLE));
+ memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable,
+ 0, sizeof(S_MIBS_EXTSERVICEFLOW_PARAMETERS));
}
}
-#ifdef BCM_SHM_INTERFACE
-
-
-#define GET_GTB_DIFF(start, end) \
-( (start) < (end) )? ( (end) - (start) ) : ( ~0x0 - ( (start) - (end)) +1 )
-
-void usdelay ( unsigned int a) {
- unsigned int start= *(unsigned int *)0xaf8051b4;
- unsigned int end = start+1;
- unsigned int diff = 0;
-
- while(1) {
- end = *(unsigned int *)0xaf8051b4;
- diff = (GET_GTB_DIFF(start,end))/80;
- if (diff >= a)
- break;
- }
-}
-void read_cfg_file(PMINI_ADAPTER Adapter) {
-
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Config File Version = 0x%x \n",Adapter->pstargetparams->m_u32CfgVersion );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Center Frequency = 0x%x \n",Adapter->pstargetparams->m_u32CenterFrequency );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band A Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandAScan );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band B Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandBScan );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band C Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandCScan );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ERTPS Options = 0x%x \n",Adapter->pstargetparams->m_u32ErtpsOptions );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PHS Enable = 0x%x \n",Adapter->pstargetparams->m_u32PHSEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Handoff Enable = 0x%x \n",Adapter->pstargetparams->m_u32HoEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HO Reserved1 = 0x%x \n",Adapter->pstargetparams->m_u32HoReserved1 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HO Reserved2 = 0x%x \n",Adapter->pstargetparams->m_u32HoReserved2 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MIMO Enable = 0x%x \n",Adapter->pstargetparams->m_u32MimoEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PKMv2 Enable = 0x%x \n",Adapter->pstargetparams->m_u32SecurityEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Powersaving Modes Enable = 0x%x \n",Adapter->pstargetparams->m_u32PowerSavingModesEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Power Saving Mode Options = 0x%x \n",Adapter->pstargetparams->m_u32PowerSavingModeOptions );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ARQ Enable = 0x%x \n",Adapter->pstargetparams->m_u32ArqEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Harq Enable = 0x%x \n",Adapter->pstargetparams->m_u32HarqEnable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"EEPROM Flag = 0x%x \n",Adapter->pstargetparams->m_u32EEPROMFlag );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Customize = 0x%x \n",Adapter->pstargetparams->m_u32Customize );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Bandwidth = 0x%x \n",Adapter->pstargetparams->m_u32ConfigBW );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ShutDown Timer Value = 0x%x \n",Adapter->pstargetparams->m_u32ShutDownInitThresholdTimer );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RadioParameter = 0x%x \n",Adapter->pstargetparams->m_u32RadioParameter );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter1 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter1 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter2 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter2 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter3 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter3 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"m_u32TestOptions = 0x%x \n",Adapter->pstargetparams->m_u32TestOptions );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MaxMACDataperDLFrame = 0x%x \n",Adapter->pstargetparams->m_u32MaxMACDataperDLFrame );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MaxMACDataperULFrame = 0x%x \n",Adapter->pstargetparams->m_u32MaxMACDataperULFrame );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Corr2MacFlags = 0x%x \n",Adapter->pstargetparams->m_u32Corr2MacFlags );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig1 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig1 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig2 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig2 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig3 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig3 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig4 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig4 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig5 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig5 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig6 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig6 );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Segmented PUSC Enable = 0x%x \n",Adapter->pstargetparams->m_u32SegmentedPUSCenable );
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BamcEnable = 0x%x \n",Adapter->pstargetparams->m_u32BandAMCEnable );
-}
-
-#endif
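
The Misc.c hunks above pass a printf-style name to kthread_run() ("%s-rx" and "%s-tx" built from DRV_NAME) and propagate PTR_ERR() on failure instead of logging and carrying on. A hedged sketch of that usage with invented names (my_rx_thread, my_start_threads, "mydrv"):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int my_rx_thread(void *data)
{
	while (!kthread_should_stop())
		msleep(100);	/* placeholder work loop */
	return 0;
}

static int my_start_threads(void *priv)
{
	struct task_struct *task;

	/* last arguments form the thread name, printf style */
	task = kthread_run(my_rx_thread, priv, "%s-rx", "mydrv");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM or -EINTR */

	return 0;
}
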
diff --git a/drivers/staging/bcm/Osal_Misc.c b/drivers/staging/bcm/Osal_Misc.c
deleted file mode 100644
index feefd20a5291..000000000000
--- a/drivers/staging/bcm/Osal_Misc.c
+++ /dev/null
@@ -1,27 +0,0 @@
- /*++
-
- Copyright (c) Beceem Communications Inc.
-
- Module Name:
- WIN_Misc.c
-
- Abstract:
- Implements the Miscelanneous OS Construts
- Linked Lists
- Dispatcher Objects(Events,Semaphores,Spin Locks and the like)
- Files
-
- Revision History:
- Who When What
- -------- -------- ----------------------------------------------
- Name Date Created/reviewed/modified
- Rajeev 24/1/08 Created
- Notes:
-
- --*/
-#include "headers.h"
-
-bool OsalMemCompare(void *dest, void *src, UINT len)
-{
- return (memcmp(src, dest, len));
-}
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index 8a38cf43e795..d1ca1912a74b 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -1,10 +1,54 @@
#include "headers.h"
+static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static UINT CreateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI);
+
+static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_ENTRY *pstClassifierEntry,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static BOOLEAN ValidatePHSRuleComplete(S_PHS_RULE *psPhsRule);
+
+static BOOLEAN DerefPhsRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule);
+
+static UINT GetClassifierEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext, S_CLASSIFIER_ENTRY **ppstClassifierEntry);
+
+static UINT GetPhsRuleEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,S_PHS_RULE **ppstPhsRule);
+
+static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable);
+
+static int phs_compress(S_PHS_RULE *phs_members,unsigned char *in_buf,
+ unsigned char *out_buf,unsigned int *header_size,UINT *new_header_size );
+
+
+static int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
+ unsigned char *phsf,unsigned char *phsm,unsigned int phss,unsigned int phsv,UINT *new_header_size );
+
+static int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,\
+ S_PHS_RULE *phs_rules,UINT *header_size);
+
+
+static ULONG PhsCompress(void* pvContext,
+ B_UINT16 uiVcid,
+ B_UINT16 uiClsId,
+ void *pvInputBuffer,
+ void *pvOutputBuffer,
+ UINT *pOldHeaderSize,
+ UINT *pNewHeaderSize );
+
+static ULONG PhsDeCompress(void* pvContext,
+ B_UINT16 uiVcid,
+ void *pvInputBuffer,
+ void *pvOutputBuffer,
+ UINT *pInHeaderSize,
+ UINT *pOutHeaderSize);
+
+
+
#define IN
#define OUT
-void DumpDataPacketHeader(PUCHAR pPkt);
-
/*
Function: PHSTransmit
@@ -81,8 +125,6 @@ int PHSTransmit(PMINI_ADAPTER Adapter,
{
- //DumpDataPacketHeader(pucPHSPktHdrInBuf);
-
// Step 2 Supress Header using PHS and fill into intermediate ucaPHSPktHdrOutBuf.
// Suppress only if IP Header and PHS Enabled For the Service Flow
if(((usPacketType == ETHERNET_FRAMETYPE_IPV4) ||
@@ -120,15 +162,15 @@ int PHSTransmit(PMINI_ADAPTER Adapter,
if(newPacket == NULL)
return STATUS_FAILURE;
- bcm_kfree_skb(Packet);
+ dev_kfree_skb(Packet);
*pPacket = Packet = newPacket;
pucPHSPktHdrInBuf = Packet->data + BytesToRemove;
}
numBytesCompressed = unPhsOldHdrSize - (unPHSNewPktHeaderLen+PHSI_LEN);
- OsalMemMove(pucPHSPktHdrInBuf + numBytesCompressed, pucPHSPktHdrOutBuf, unPHSNewPktHeaderLen + PHSI_LEN);
- OsalMemMove(Packet->data + numBytesCompressed, Packet->data, BytesToRemove);
+ memcpy(pucPHSPktHdrInBuf + numBytesCompressed, pucPHSPktHdrOutBuf, unPHSNewPktHeaderLen + PHSI_LEN);
+	memmove(Packet->data + numBytesCompressed, Packet->data, BytesToRemove);	/* regions may overlap */
skb_pull(Packet, numBytesCompressed);
return STATUS_SUCCESS;
@@ -223,23 +265,12 @@ int PHSRecieve(PMINI_ADAPTER Adapter,
}
}
- OsalMemMove(packet->data, Adapter->ucaPHSPktRestoreBuf, nStandardPktHdrLen);
+ memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf, nStandardPktHdrLen);
}
return STATUS_SUCCESS;
}
-void DumpDataPacketHeader(PUCHAR pPkt)
-{
- struct iphdr *iphd = (struct iphdr*)pPkt;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Phs Send/Recieve : IP Packet Hdr \n");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"TOS : %x \n",iphd->tos);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Src IP : %x \n",iphd->saddr);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Dest IP : %x \n \n",iphd->daddr);
-
-}
-
void DumpFullPacket(UCHAR *pBuf,UINT nPktLen)
{
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -270,15 +301,9 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter)
return -EINVAL;
pPhsdeviceExtension->pstServiceFlowPhsRulesTable =
- (S_SERVICEFLOW_TABLE*)OsalMemAlloc(sizeof(S_SERVICEFLOW_TABLE),
- PHS_MEM_TAG);
+ kzalloc(sizeof(S_SERVICEFLOW_TABLE), GFP_KERNEL);
- if(pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
- {
- OsalZeroMemory(pPhsdeviceExtension->pstServiceFlowPhsRulesTable,
- sizeof(S_SERVICEFLOW_TABLE));
- }
- else
+ if(!pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation ServiceFlowPhsRulesTable failed");
return -ENOMEM;
@@ -288,14 +313,8 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter)
for(i=0;i<MAX_SERVICEFLOWS;i++)
{
S_SERVICEFLOW_ENTRY sServiceFlow = pstServiceFlowTable->stSFList[i];
- sServiceFlow.pstClassifierTable = (S_CLASSIFIER_TABLE*)OsalMemAlloc(
- sizeof(S_CLASSIFIER_TABLE), PHS_MEM_TAG);
- if(sServiceFlow.pstClassifierTable)
- {
- OsalZeroMemory(sServiceFlow.pstClassifierTable,sizeof(S_CLASSIFIER_TABLE));
- pstServiceFlowTable->stSFList[i].pstClassifierTable = sServiceFlow.pstClassifierTable;
- }
- else
+ sServiceFlow.pstClassifierTable = kzalloc(sizeof(S_CLASSIFIER_TABLE), GFP_KERNEL);
+ if(!sServiceFlow.pstClassifierTable)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
free_phs_serviceflow_rules(pPhsdeviceExtension->
@@ -305,9 +324,7 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter)
}
}
-
- pPhsdeviceExtension->CompressedTxBuffer =
- OsalMemAlloc(PHS_BUFFER_SIZE,PHS_MEM_TAG);
+ pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
if(pPhsdeviceExtension->CompressedTxBuffer == NULL)
{
@@ -317,12 +334,11 @@ int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter)
return -ENOMEM;
}
- pPhsdeviceExtension->UnCompressedRxBuffer =
- OsalMemAlloc(PHS_BUFFER_SIZE,PHS_MEM_TAG);
+ pPhsdeviceExtension->UnCompressedRxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
if(pPhsdeviceExtension->UnCompressedRxBuffer == NULL)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
- OsalMemFree(pPhsdeviceExtension->CompressedTxBuffer,PHS_BUFFER_SIZE);
+ kfree(pPhsdeviceExtension->CompressedTxBuffer);
free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
return -ENOMEM;
@@ -343,16 +359,11 @@ int PhsCleanup(IN PPHS_DEVICE_EXTENSION pPHSDeviceExt)
pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL;
}
- if(pPHSDeviceExt->CompressedTxBuffer)
- {
- OsalMemFree(pPHSDeviceExt->CompressedTxBuffer,PHS_BUFFER_SIZE);
- pPHSDeviceExt->CompressedTxBuffer = NULL;
- }
- if(pPHSDeviceExt->UnCompressedRxBuffer)
- {
- OsalMemFree(pPHSDeviceExt->UnCompressedRxBuffer,PHS_BUFFER_SIZE);
- pPHSDeviceExt->UnCompressedRxBuffer = NULL;
- }
+ kfree(pPHSDeviceExt->CompressedTxBuffer);
+ pPHSDeviceExt->CompressedTxBuffer = NULL;
+
+ kfree(pPHSDeviceExt->UnCompressedRxBuffer);
+ pPHSDeviceExt->UnCompressedRxBuffer = NULL;
return 0;
}
@@ -478,20 +489,12 @@ ULONG PhsDeletePHSRule(IN void* pvContext,IN B_UINT16 uiVcid,IN B_UINT8 u8PHSI)
{
if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].bUsed && pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule)
{
- if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
- .pstPhsRule->u8PHSI == u8PHSI)
- {
- if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule
- ->u8RefCnt)
- pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule
- ->u8RefCnt--;
- if(0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
- .pstPhsRule->u8RefCnt)
- OsalMemFree(pstClassifierRulesTable
- ->stActivePhsRulesList[nClsidIndex].pstPhsRule,
- sizeof(S_PHS_RULE));
- OsalZeroMemory(&pstClassifierRulesTable
- ->stActivePhsRulesList[nClsidIndex],
+ if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8PHSI == u8PHSI) {
+ if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
+ pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt--;
+ if(0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
+ kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
+ memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0,
sizeof(S_CLASSIFIER_ENTRY));
}
}
@@ -548,10 +551,10 @@ ULONG PhsDeleteClassifierRule(IN void* pvContext,IN B_UINT16 uiVcid ,IN B_UINT16
if(pstClassifierEntry->pstPhsRule->u8RefCnt)
pstClassifierEntry->pstPhsRule->u8RefCnt--;
if(0==pstClassifierEntry->pstPhsRule->u8RefCnt)
- OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
+ kfree(pstClassifierEntry->pstPhsRule);
}
- OsalZeroMemory(pstClassifierEntry,sizeof(S_CLASSIFIER_ENTRY));
+ memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
}
nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
@@ -559,10 +562,8 @@ ULONG PhsDeleteClassifierRule(IN void* pvContext,IN B_UINT16 uiVcid ,IN B_UINT16
if((nClsidIndex != PHS_INVALID_TABLE_INDEX) && (!pstClassifierEntry->bUnclassifiedPHSRule))
{
- if(pstClassifierEntry->pstPhsRule)
- //Delete the classifier entry
- OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
- OsalZeroMemory(pstClassifierEntry,sizeof(S_CLASSIFIER_ENTRY));
+ kfree(pstClassifierEntry->pstPhsRule);
+ memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
}
}
return lStatus;
@@ -619,14 +620,11 @@ ULONG PhsDeleteSFRules(IN void* pvContext,IN B_UINT16 uiVcid)
.pstPhsRule->u8RefCnt--;
if(0==pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
.pstPhsRule->u8RefCnt)
- OsalMemFree(pstClassifierRulesTable
- ->stActivePhsRulesList[nClsidIndex].pstPhsRule,
- sizeof(S_PHS_RULE));
+ kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
.pstPhsRule = NULL;
}
- OsalZeroMemory(&pstClassifierRulesTable
- ->stActivePhsRulesList[nClsidIndex],sizeof(S_CLASSIFIER_ENTRY));
+ memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule)
{
if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
@@ -635,15 +633,12 @@ ULONG PhsDeleteSFRules(IN void* pvContext,IN B_UINT16 uiVcid)
.pstPhsRule->u8RefCnt--;
if(0 == pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
.pstPhsRule->u8RefCnt)
- OsalMemFree(pstClassifierRulesTable
- ->stOldPhsRulesList[nClsidIndex].pstPhsRule,
- sizeof(S_PHS_RULE));
+ kfree(pstClassifierRulesTable
+ ->stOldPhsRulesList[nClsidIndex].pstPhsRule);
pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
.pstPhsRule = NULL;
}
- OsalZeroMemory(&pstClassifierRulesTable
- ->stOldPhsRulesList[nClsidIndex],
- sizeof(S_CLASSIFIER_ENTRY));
+ memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
}
}
pstServiceFlowEntry->bUsed = FALSE;
@@ -849,7 +844,7 @@ ULONG PhsDeCompress(IN void* pvContext,
// Does not return any value.
//-----------------------------------------------------------------------------
-void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
+static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
{
int i,j;
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -876,8 +871,7 @@ void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
->u8RefCnt--;
if(0==pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule
->u8RefCnt)
- OsalMemFree(pstClassifierRulesTable->stActivePhsRulesList[j].
- pstPhsRule, sizeof(S_PHS_RULE));
+ kfree(pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule);
pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule = NULL;
}
if(pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule)
@@ -888,24 +882,23 @@ void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
->u8RefCnt--;
if(0==pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule
->u8RefCnt)
- OsalMemFree(pstClassifierRulesTable->stOldPhsRulesList[j]
- .pstPhsRule,sizeof(S_PHS_RULE));
+ kfree(pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule);
pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule = NULL;
}
}
- OsalMemFree(pstClassifierRulesTable,sizeof(S_CLASSIFIER_TABLE));
+ kfree(pstClassifierRulesTable);
stServiceFlowEntry.pstClassifierTable = pstClassifierRulesTable = NULL;
}
}
}
- OsalMemFree(psServiceFlowRulesTable,sizeof(S_SERVICEFLOW_TABLE));
- psServiceFlowRulesTable = NULL;
+ kfree(psServiceFlowRulesTable);
+ psServiceFlowRulesTable = NULL;
}
-BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
+static BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
{
if(psPhsRule)
{
@@ -988,9 +981,9 @@ UINT GetClassifierEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
return PHS_INVALID_TABLE_INDEX;
}
-UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
- IN B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
- OUT S_PHS_RULE **ppstPhsRule)
+static UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
+ IN B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
+ OUT S_PHS_RULE **ppstPhsRule)
{
int i;
S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
@@ -1102,7 +1095,7 @@ UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
if(psPhsRule->u8PHSFLength)
{
//update PHSF
- OsalMemMove(pstClassifierEntry->pstPhsRule->u8PHSF,
+ memcpy(pstClassifierEntry->pstPhsRule->u8PHSF,
psPhsRule->u8PHSF , MAX_PHS_LENGTHS);
}
if(psPhsRule->u8PHSFLength)
@@ -1114,7 +1107,7 @@ UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
if(psPhsRule->u8PHSMLength)
{
//update PHSM
- OsalMemMove(pstClassifierEntry->pstPhsRule->u8PHSM,
+ memcpy(pstClassifierEntry->pstPhsRule->u8PHSM,
psPhsRule->u8PHSM, MAX_PHS_LENGTHS);
}
if(psPhsRule->u8PHSMLength)
@@ -1147,7 +1140,7 @@ UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
return uiStatus;
}
-UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
+static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI)
{
@@ -1234,8 +1227,7 @@ UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
{
if(psClassifierRules->pstPhsRule == NULL)
{
- psClassifierRules->pstPhsRule = (S_PHS_RULE*)OsalMemAlloc
- (sizeof(S_PHS_RULE),PHS_MEM_TAG);
+ psClassifierRules->pstPhsRule = kmalloc(sizeof(S_PHS_RULE),GFP_KERNEL);
if(NULL == psClassifierRules->pstPhsRule)
return ERR_PHSRULE_MEMALLOC_FAIL;
@@ -1247,7 +1239,7 @@ UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
psClassifierRules->bUnclassifiedPHSRule = psPhsRule->bUnclassifiedPHSRule;
/* Update The PHS rule */
- OsalMemMove(psClassifierRules->pstPhsRule,
+ memcpy(psClassifierRules->pstPhsRule,
psPhsRule, sizeof(S_PHS_RULE));
}
else
@@ -1259,7 +1251,7 @@ UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
}
-UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
+static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
IN S_CLASSIFIER_ENTRY *pstClassifierEntry,
S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
B_UINT8 u8AssociatedPHSI)
@@ -1289,13 +1281,13 @@ UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
//Step 2.a PHS Rule Does Not Exist .Create New PHS Rule for uiClsId
if(FALSE == bPHSRuleOrphaned)
{
- pstClassifierEntry->pstPhsRule = (S_PHS_RULE*)OsalMemAlloc(sizeof(S_PHS_RULE),PHS_MEM_TAG);
+ pstClassifierEntry->pstPhsRule = kmalloc(sizeof(S_PHS_RULE), GFP_KERNEL);
if(NULL == pstClassifierEntry->pstPhsRule)
{
return ERR_PHSRULE_MEMALLOC_FAIL;
}
}
- OsalMemMove(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(S_PHS_RULE));
+ memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(S_PHS_RULE));
}
else
@@ -1304,14 +1296,8 @@ UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nTying Classifier to Existing PHS Rule");
if(bPHSRuleOrphaned)
{
- if(pstClassifierEntry->pstPhsRule)
- {
- //Just Free the PHS Rule as Ref Count is Zero
- OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
+ kfree(pstClassifierEntry->pstPhsRule);
pstClassifierEntry->pstPhsRule = NULL;
-
- }
-
}
pstClassifierEntry->pstPhsRule = pstAddPhsRule;
@@ -1326,7 +1312,7 @@ UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
}
-BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule)
+static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule)
{
if(pstPhsRule==NULL)
return FALSE;
@@ -1345,22 +1331,6 @@ BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable
}
}
-static void DumpBuffer(PVOID BuffVAddress, int xferSize)
-{
- int i;
- int iPrintLength;
- PUCHAR temp=(PUCHAR)BuffVAddress;
- PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- iPrintLength=(xferSize<32?xferSize:32);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\n");
-
- for (i=0;i < iPrintLength;i++) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "%x|",temp[i]);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\n");
-}
-
-
void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension)
{
int i,j,k,l;
@@ -1520,8 +1490,8 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
// size - the number of bytes copied into the output buffer, i.e. the dynamic fields
// 0    - if the PHS rule is NULL, if the PHSV field is not set, or if the verification fails.
//-----------------------------------------------------------------------------
-int phs_compress(S_PHS_RULE *phs_rule,unsigned char *in_buf
- ,unsigned char *out_buf,UINT *header_size,UINT *new_header_size)
+static int phs_compress(S_PHS_RULE *phs_rule,unsigned char *in_buf
+ ,unsigned char *out_buf,UINT *header_size,UINT *new_header_size)
{
unsigned char *old_addr = out_buf;
int supress = 0;
@@ -1581,9 +1551,9 @@ int phs_compress(S_PHS_RULE *phs_rule,unsigned char *in_buf
// 0 -Packet has failed the verification.
//-----------------------------------------------------------------------------
- int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
- unsigned char *phsf,unsigned char *phsm,unsigned int phss,
- unsigned int phsv,UINT* new_header_size)
+static int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
+ unsigned char *phsf,unsigned char *phsm,unsigned int phss,
+ unsigned int phsv,UINT* new_header_size)
{
unsigned int size=0;
int bit,i=0;
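
The compression path above follows the usual 802.16 PHS scheme: PHSF carries the expected header bytes, PHSM marks which of the PHSS header bytes are suppressed, and PHSV asks for verification before suppressing. A minimal illustrative sketch of that idea follows; it is not this driver's code, and the mask bit ordering and names are assumptions.

/* Illustrative only: generic PHS suppression with optional verification. */
static int phs_suppress_sketch(const unsigned char *hdr, unsigned char *out,
			       const unsigned char *phsf, const unsigned char *phsm,
			       unsigned int phss, int phsv)
{
	unsigned int i, outlen = 0;

	for (i = 0; i < phss; i++) {
		int suppressed = (phsm[i / 8] >> (i % 8)) & 1;	/* assumed bit order */

		if (!suppressed)
			out[outlen++] = hdr[i];	/* byte stays in the compressed header */
		else if (phsv && hdr[i] != phsf[i])
			return -1;		/* verification failed: do not suppress */
	}
	return outlen;				/* bytes actually written to out */
}
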
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
index bf2b5763252c..0dd05a7c55d9 100644
--- a/drivers/staging/bcm/PHSModule.h
+++ b/drivers/staging/bcm/PHSModule.h
@@ -27,19 +27,6 @@ void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension);
int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter);
-void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable);
-
-int phs_compress(S_PHS_RULE *phs_members,unsigned char *in_buf,
- unsigned char *out_buf,unsigned int *header_size,UINT *new_header_size );
-
-
-int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
- unsigned char *phsf,unsigned char *phsm,unsigned int phss,unsigned int phsv,UINT *new_header_size );
-
-int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,\
- S_PHS_RULE *phs_rules,UINT *header_size);
-
-
int PhsCleanup(PPHS_DEVICE_EXTENSION pPHSDeviceExt);
//Utility Functions
@@ -52,42 +39,10 @@ ULONG PhsDeleteClassifierRule(void* pvContext, B_UINT16 uiVcid ,B_UINT16 uiClsI
ULONG PhsDeleteSFRules(void* pvContext,B_UINT16 uiVcid) ;
-ULONG PhsCompress(void* pvContext,
- B_UINT16 uiVcid,
- B_UINT16 uiClsId,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pOldHeaderSize,
- UINT *pNewHeaderSize );
-
-ULONG PhsDeCompress(void* pvContext,
- B_UINT16 uiVcid,
- void *pvInputBuffer,
- void *pvOutputBuffer,
- UINT *pInHeaderSize,
- UINT *pOutHeaderSize);
-
-
BOOLEAN ValidatePHSRule(S_PHS_RULE *psPhsRule);
-BOOLEAN ValidatePHSRuleComplete(S_PHS_RULE *psPhsRule);
-
UINT GetServiceFlowEntry(S_SERVICEFLOW_TABLE *psServiceFlowTable,B_UINT16 uiVcid,S_SERVICEFLOW_ENTRY **ppstServiceFlowEntry);
-UINT GetClassifierEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext, S_CLASSIFIER_ENTRY **ppstClassifierEntry);
-
-UINT GetPhsRuleEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,S_PHS_RULE **ppstPhsRule);
-
-
-UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16 uiClsId,S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-UINT CreateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI);
-
-UINT UpdateClassifierPHSRule(B_UINT16 uiClsId,S_CLASSIFIER_ENTRY *pstClassifierEntry,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-BOOLEAN DerefPhsRule(B_UINT16 uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule);
void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension);
diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h
index 00f1cc12356a..b8a4009bdf0c 100644
--- a/drivers/staging/bcm/Protocol.h
+++ b/drivers/staging/bcm/Protocol.h
@@ -85,10 +85,10 @@ typedef struct _ETH_CS_ETH2_FRAME
ETH_HEADER_STRUC EThHdr;
} __attribute__((packed)) ETH_CS_ETH2_FRAME;
+#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
+#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
+#define ETHERNET_FRAMETYPE_802QVLAN ntohs(0x8100)
-#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
-#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
-#define ETHERNET_FRAMETYPE_802QVLAN 0x8100
//Per SF CS Specification Encodings
typedef enum _E_SERVICEFLOW_CS_SPEC_
{
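
The frame-type constants above are wrapped in ntohs() (equivalent to htons() for a 16-bit swap), presumably so the raw on-wire EtherType field can be compared against them without a per-packet byte swap; the patch also makes the VLAN constant consistent with the other two. A hedged sketch of the same pattern using only standard kernel definitions (the function name is illustrative):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Illustrative only: the constant is converted once, the per-packet field
 * is used as-is, and both sides of the comparison are in network order. */
static bool frame_is_ipv4_sketch(const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	return eth->h_proto == htons(ETH_P_IP);
}
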
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
index 70ec8bcafd1e..b80b806c90a3 100644
--- a/drivers/staging/bcm/Prototypes.h
+++ b/drivers/staging/bcm/Prototypes.h
@@ -1,23 +1,12 @@
#ifndef _PROTOTYPES_H_
#define _PROTOTYPES_H_
-int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
- char *path, /**< path to image file */
- unsigned int loc /**< Download Address on the chip*/
- );
VOID LinkControlResponseMessage(PMINI_ADAPTER Adapter, PUCHAR pucBuffer);
VOID StatisticsResponse(PMINI_ADAPTER Adapter,PVOID pvBuffer);
VOID IdleModeResponse(PMINI_ADAPTER Adapter,PUINT puiBuffer);
-void bcm_kfree_skb(struct sk_buff *skb);
-VOID bcm_kfree(VOID *ptr);
-
-
-VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, /**<Pointer to the Adapter structure*/
- struct sk_buff *skb); /**<Pointer to the socket buffer*/
-
int control_packet_handler (PMINI_ADAPTER Adapter);
VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex);
@@ -38,25 +27,16 @@ VOID SortClassifiers(PMINI_ADAPTER Adapter);
VOID flush_all_queues(PMINI_ADAPTER Adapter);
-USHORT IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
- struct iphdr *iphd, /**<Pointer to the IP Hdr of the packet*/
- S_CLASSIFIER_RULE *pstClassifierRule );
-
-VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structure*/
- INT iIndex/**<Queue Index*/
- );
-
VOID PruneQueueAllSF(PMINI_ADAPTER Adapter);
INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid);
-USHORT GetPacketQueueIndex(PMINI_ADAPTER Adapter, /**<Pointer to the driver control structure */
- struct sk_buff* Packet /**< Pointer to the Packet to be sent*/
- );
+USHORT ClassifyPacket(PMINI_ADAPTER Adapter,struct sk_buff* skb);
+
+BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
+BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
+BOOLEAN MatchProtocol(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucProtocol);
-VOID
-reply_to_arp_request(struct sk_buff *skb /**<sk_buff of ARP request*/
- );
INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
struct sk_buff *Packet, /**<data buffer*/
@@ -70,11 +50,9 @@ INT SendControlPacket(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
char *pControlPacket/**<Control Packet*/
);
-INT bcm_transmit(struct sk_buff *skb, /**< skb */
- struct net_device *dev /**< net device pointer */
- );
int register_networkdev(PMINI_ADAPTER Adapter);
+void unregister_networkdev(PMINI_ADAPTER Adapter);
INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter);
@@ -82,8 +60,6 @@ VOID AdapterFree(PMINI_ADAPTER Adapter);
INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter);
-int create_worker_threads(PMINI_ADAPTER psAdapter);
-
int tx_pkt_handler(PMINI_ADAPTER Adapter);
int reset_card_proc(PMINI_ADAPTER Adapter );
@@ -92,7 +68,6 @@ int run_card_proc(PMINI_ADAPTER Adapter );
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter);
-int bcm_parse_target_params(PMINI_ADAPTER Adapter);
INT ReadMacAddressFromNVM(PMINI_ADAPTER Adapter);
@@ -110,26 +85,15 @@ int rdmalt (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t sSize);
int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user * user_buffer);
-void SendLinkDown(PMINI_ADAPTER Adapter);
-
void SendIdleModeResponse(PMINI_ADAPTER Adapter);
-void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer);
-
-int ProcessGetHostMibs(PMINI_ADAPTER Adapter, PVOID ioBuffer,
- ULONG inputBufferLength);
-int GetDroppedAppCntrlPktMibs(PVOID ioBuffer, PPER_TARANG_DATA pTarang);
+int ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *buf);
+void GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *ioBuffer, PPER_TARANG_DATA pTarang);
void beceem_parse_target_struct(PMINI_ADAPTER Adapter);
-void doPowerAutoCorrection(PMINI_ADAPTER psAdapter);
-
int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo);
-void bcm_unregister_networkdev(PMINI_ADAPTER Adapter);
-
-int SearchVcid(PMINI_ADAPTER Adapter,unsigned short usVcid);
-
void CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex);
@@ -149,7 +113,6 @@ void update_per_sf_desc_cnts( PMINI_ADAPTER Adapter);
void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll);
-void beceem_protocol_reset (PMINI_ADAPTER Adapter);
void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex);
@@ -164,31 +127,11 @@ INT BeceemEEPROMBulkRead(
UINT uiNumBytes);
-INT BeceemFlashBulkRead(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
INT WriteBeceemEEPROM(PMINI_ADAPTER Adapter,UINT uiEEPROMOffset, UINT uiData);
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize);
-
-INT BeceemFlashBulkWrite(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- BOOLEAN bVerify);
-
INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter);
-INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter);
-
INT BeceemEEPROMBulkWrite(
PMINI_ADAPTER Adapter,
@@ -198,11 +141,8 @@ INT BeceemEEPROMBulkWrite(
BOOLEAN bVerify);
-INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData, UINT dwNumData);
-
INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData);
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
INT BeceemNVMRead(
PMINI_ADAPTER Adapter,
@@ -217,24 +157,12 @@ INT BeceemNVMWrite(
UINT uiNumBytes,
BOOLEAN bVerify);
-INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
INT BcmInitNVM(PMINI_ADAPTER Adapter);
-INT BcmGetNvmSize(PMINI_ADAPTER Adapter);
-
-INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
-
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
-
-VOID ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter);
+INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
+BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
-INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
-INT ReadDSDHeader(PMINI_ADAPTER Adapter, PDSD_HEADER psDSDHeader, FLASH2X_SECTION_VAL dsd);
-INT BcmGetActiveDSD(PMINI_ADAPTER Adapter);
-INT ReadISOHeader(PMINI_ADAPTER Adapter, PISO_HEADER psISOHeader, FLASH2X_SECTION_VAL IsoImage);
-INT BcmGetActiveISO(PMINI_ADAPTER Adapter);
-B_UINT8 IsOffsetWritable(PMINI_ADAPTER Adapter, UINT uiOffset);
INT BcmGetFlash2xSectionalBitMap(PMINI_ADAPTER Adapter, PFLASH2X_BITMAP psFlash2xBitMap);
INT BcmFlash2xBulkWrite(
@@ -251,7 +179,6 @@ INT BcmFlash2xBulkRead(
FLASH2X_SECTION_VAL eFlashSectionVal,
UINT uiOffsetWithinSectionVal,
UINT uiNumBytes);
-INT BcmGetSectionValEndOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
INT BcmGetSectionValStartOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
@@ -264,34 +191,13 @@ INT BcmFlash2xCorruptSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSect
INT BcmFlash2xWriteSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
INT validateFlash2xReadWrite(PMINI_ADAPTER Adapter, PFLASH2X_READWRITE psFlash2xReadWrite);
INT IsFlash2x(PMINI_ADAPTER Adapter);
-INT GetFlashBaseAddr(PMINI_ADAPTER Adapter);
-INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiSectAlignAddr);
INT BcmCopySection(PMINI_ADAPTER Adapter,
FLASH2X_SECTION_VAL SrcSection,
FLASH2X_SECTION_VAL DstSection,
UINT offset,
UINT numOfBytes);
-INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset);
-INT BcmMakeFlashCSActive(PMINI_ADAPTER Adapter, UINT offset);
-INT ReadDSDSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
-INT ReadDSDPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
-FLASH2X_SECTION_VAL getHighestPriDSD(PMINI_ADAPTER Adapter);
-INT ReadISOSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
-INT ReadISOPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
-FLASH2X_SECTION_VAL getHighestPriISO(PMINI_ADAPTER Adapter);
-INT WriteToFlashWithoutSectorErase(PMINI_ADAPTER Adapter,
- PUINT pBuff,
- FLASH2X_SECTION_VAL eFlash2xSectionVal,
- UINT uiOffset,
- UINT uiNumBytes
- );
-
-//UINT getNumOfSubSectionWithWRPermisson(PMINI_ADAPTER Adapter, SECTION_TYPE secType);
-BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
-INT IsSectionWritable(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL Section);
-INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
-INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+
BOOLEAN IsNonCDLessDevice(PMINI_ADAPTER Adapter);
@@ -300,7 +206,6 @@ VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter,PUINT puiBuffer);
int wrmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t sSize);
int rdmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t sSize);
-int rdmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
int wrmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
unsigned long u32StartingAddress);
@@ -309,11 +214,6 @@ INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned
VOID putUsbSuspend(struct work_struct *work);
BOOLEAN IsReqGpioIsLedInNVM(PMINI_ADAPTER Adapter, UINT gpios);
-#ifdef BCM_SHM_INTERFACE
-INT beceem_virtual_device_init(void);
-VOID virtual_mail_box_interrupt(void);
-INT beceem_virtual_device_exit(void);
-#endif
#endif
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 75b2b879633f..feade9451b2e 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -4,15 +4,14 @@ This file contains the routines related to Quality of Service.
*/
#include "headers.h"
-BOOLEAN MatchSrcIpAddress(S_CLASSIFIER_RULE *pstClassifierRule,ULONG ulSrcIP);
-BOOLEAN MatchTos(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucTypeOfService);
-BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
-BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushDestPort);
-BOOLEAN MatchProtocol(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucProtocol);
-BOOLEAN MatchDestIpAddress(S_CLASSIFIER_RULE *pstClassifierRule,ULONG ulDestIP);
-USHORT ClassifyPacket(PMINI_ADAPTER Adapter,struct sk_buff* skb);
-void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo);
-BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport);
+static void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo);
+static BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport);
+
+static USHORT IpVersion4(PMINI_ADAPTER Adapter, struct iphdr *iphd,
+ S_CLASSIFIER_RULE *pstClassifierRule );
+
+static VOID PruneQueue(PMINI_ADAPTER Adapter, INT iIndex);
+
/*******************************************************************
* Function - MatchSrcIpAddress()
@@ -205,11 +204,10 @@ BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushDestPort)
Compares IPV4 Ip address and port number
@return Queue Index.
*/
-USHORT IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
- struct iphdr *iphd, /**<Pointer to the IP Hdr of the packet*/
- S_CLASSIFIER_RULE *pstClassifierRule )
+static USHORT IpVersion4(PMINI_ADAPTER Adapter,
+ struct iphdr *iphd,
+ S_CLASSIFIER_RULE *pstClassifierRule )
{
- //IPHeaderFormat *pIpHeader=NULL;
xporthdr *xprt_hdr=NULL;
BOOLEAN bClassificationSucceed=FALSE;
@@ -261,15 +259,6 @@ USHORT IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control stru
//if protocol is not TCP or UDP then no need of comparing source port and destination port
if(iphd->protocol!=TCP && iphd->protocol!=UDP)
break;
-#if 0
- //check if memory is available of src and Dest port
- if(ETH_AND_IP_HEADER_LEN + L4_SRC_PORT_LEN + L4_DEST_PORT_LEN > Packet->len)
- {
- //This is not an erroneous condition and pkt will be checked for next classification.
- bClassificationSucceed = FALSE;
- break;
- }
-#endif
//******************Checking Transport Layer Header field if present *****************//
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
(iphd->protocol==UDP)?xprt_hdr->uhdr.source:xprt_hdr->thdr.source);
@@ -312,29 +301,6 @@ USHORT IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control stru
return bClassificationSucceed;
}
-/**
-@ingroup tx_functions
-@return Queue Index based on priority.
-*/
-USHORT GetPacketQueueIndex(PMINI_ADAPTER Adapter, /**<Pointer to the driver control structure */
- struct sk_buff* Packet /**< Pointer to the Packet to be sent*/
- )
-{
- USHORT usIndex=-1;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "=====>");
-
- if(NULL==Adapter || NULL==Packet)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "Got NULL Values<======");
- return -1;
- }
-
- usIndex = ClassifyPacket(Adapter,Packet);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "Got Queue Index %x",usIndex);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "GetPacketQueueIndex <==============");
- return usIndex;
-}
VOID PruneQueueAllSF(PMINI_ADAPTER Adapter)
{
@@ -357,23 +323,21 @@ is less than number of bytes in the queue. If so -
drops packets from the Head till the number of bytes is
less than or equal to max queue size for the queue.
*/
-VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structure*/
- INT iIndex/**<Queue Index*/
- )
+static VOID PruneQueue(PMINI_ADAPTER Adapter, INT iIndex)
{
struct sk_buff* PacketToDrop=NULL;
- struct net_device_stats* netstats=NULL;
+ struct net_device_stats *netstats;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d",iIndex);
if(iIndex == HiPriority)
- return;
+ return;
if(!Adapter || (iIndex < 0) || (iIndex > HiPriority))
return;
/* To Store the netdevice statistic */
- netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
+ netstats = &Adapter->dev->stats;
spin_lock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
@@ -395,9 +359,12 @@ VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structur
if(PacketToDrop)
{
- if(netstats)
- netstats->tx_dropped++;
- atomic_inc(&Adapter->TxDroppedPacketCount);
+ if (netif_msg_tx_err(Adapter))
+ pr_info(PFX "%s: tx queue %d overlimit\n",
+ Adapter->dev->name, iIndex);
+
+ netstats->tx_dropped++;
+
DEQUEUEPACKET(Adapter->PackInfo[iIndex].FirstTxQueue,
Adapter->PackInfo[iIndex].LastTxQueue);
/// update current bytes and packets count
@@ -407,7 +374,7 @@ VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structur
/// update dropped bytes and packets counts
Adapter->PackInfo[iIndex].uiDroppedCountBytes += PacketToDrop->len;
Adapter->PackInfo[iIndex].uiDroppedCountPackets++;
- bcm_kfree_skb(PacketToDrop);
+ dev_kfree_skb(PacketToDrop);
}
@@ -416,7 +383,6 @@ VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structur
Adapter->PackInfo[iIndex].uiDroppedCountPackets);
atomic_dec(&Adapter->TotalPacketCount);
- Adapter->bcm_jiffies = jiffies;
}
spin_unlock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
@@ -430,16 +396,15 @@ VOID flush_all_queues(PMINI_ADAPTER Adapter)
{
INT iQIndex;
UINT uiTotalPacketLength;
- struct sk_buff* PacketToDrop=NULL;
- struct net_device_stats* netstats=NULL;
+ struct sk_buff* PacketToDrop=NULL;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
- /* To Store the netdevice statistic */
- netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
// down(&Adapter->data_packet_queue_lock);
for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
{
+ struct net_device_stats *netstats = &Adapter->dev->stats;
+
spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
while(Adapter->PackInfo[iQIndex].FirstTxQueue)
{
@@ -448,7 +413,6 @@ VOID flush_all_queues(PMINI_ADAPTER Adapter)
{
uiTotalPacketLength = PacketToDrop->len;
netstats->tx_dropped++;
- atomic_inc(&Adapter->TxDroppedPacketCount);
}
else
uiTotalPacketLength = 0;
@@ -457,7 +421,7 @@ VOID flush_all_queues(PMINI_ADAPTER Adapter)
Adapter->PackInfo[iQIndex].LastTxQueue);
/* Free the skb */
- bcm_kfree_skb(PacketToDrop);
+ dev_kfree_skb(PacketToDrop);
/// update current bytes and packets count
Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= uiTotalPacketLength;
@@ -559,12 +523,6 @@ USHORT ClassifyPacket(PMINI_ADAPTER Adapter,struct sk_buff* skb)
for(uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
{
- if (Adapter->device_removed)
- {
- bClassificationSucceed = FALSE;
- break;
- }
-
if(bClassificationSucceed)
break;
//Iterate through all classifiers which are already in order of priority
@@ -810,7 +768,10 @@ static BOOLEAN EthCSMatchVLANRules(S_CLASSIFIER_RULE *pstClassifierRule,struct s
}
-BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport)
+static BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,
+ PS_ETHCS_PKT_INFO pstEthCsPktInfo,
+ S_CLASSIFIER_RULE *pstClassifierRule,
+ B_UINT8 EthCSCupport)
{
BOOLEAN bClassificationSucceed = FALSE;
bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,((ETH_HEADER_STRUC *)(skb->data))->au8SourceAddress);
@@ -840,9 +801,11 @@ BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_
return bClassificationSucceed;
}
-void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo)
+static void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,
+ PS_ETHCS_PKT_INFO pstEthCsPktInfo)
{
USHORT u16Etype = ntohs(((ETH_HEADER_STRUC*)pvEthPayload)->u16Etype);
+
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : Eth Hdr Type : %X\n",u16Etype);
if(u16Etype > 0x5dc)
{
diff --git a/drivers/staging/bcm/TODO b/drivers/staging/bcm/TODO
index 366634be5fe1..cd3e9f2ed87a 100644
--- a/drivers/staging/bcm/TODO
+++ b/drivers/staging/bcm/TODO
@@ -1,15 +1,22 @@
+This driver is barely functional in its current state.
+
+BIG:
+ - the existing API (/dev/tarang) should be replaced.
+   Is it possible to use the same API as the Intel WiMAX stack
+   and share the same user-level components?
+ - the QoS and queue model is non-standard and inflexible.
+   Use the existing TC QoS infrastructure?
+
TODO:
+ - support more than one board - eliminate global variables
+ - remove the developer debug BCM_DEBUG() macros and
+   add a limited number of messages through netif_msg() (see the sketch below)
- fix non-standard kernel style
- - sparse warnings
- checkpatch warnings
- - remove compatiablity code for older kernels
- - remove #ifdef's
- - fix bogus device nameing and reference counting (see bcm_notify_event)
- - fix use of file I/O to load config
- - request firmware
- - update to current network device API
- - merge some files together
+ - use request_firmware()
+ - replace the file I/O used to load the config with a better API
+ - merge some files together?
- cleanup/eliminate debug messages
- - integrate with existing Wimax stack?
+
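
For the netif_msg() item above, the conventional message-level pattern looks roughly like the hedged sketch below. Only netif_msg_init(), the NETIF_MSG_* flags, netif_msg_tx_err() and netdev_err() are standard kernel APIs; the struct and function names are stand-ins, not this driver's.

#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;			/* -1 means "use the default mask below" */
module_param(debug, int, 0600);

struct sketch_priv {			/* stand-in for the driver's adapter structure */
	struct net_device *dev;
	u32 msg_enable;			/* bitmask consulted by netif_msg_*() */
};

static void sketch_init_messages(struct sketch_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					  NETIF_MSG_LINK | NETIF_MSG_TX_ERR);
}

static void sketch_report_tx_error(struct sketch_priv *priv, int status)
{
	if (netif_msg_tx_err(priv))
		netdev_err(priv->dev, "transmit error %d\n", status);
}
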
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
index 12f9e13457db..d5e4a7404f71 100644
--- a/drivers/staging/bcm/Transmit.c
+++ b/drivers/staging/bcm/Transmit.c
@@ -6,7 +6,7 @@
digraph transmit1 {
node[shape=box]
edge[weight=5;color=red]
-bcm_transmit->reply_to_arp_request[label="ARP"]
+
bcm_transmit->GetPacketQueueIndex[label="IP Packet"]
GetPacketQueueIndex->IpVersion4[label="IPV4"]
GetPacketQueueIndex->IpVersion6[label="IPV6"]
@@ -35,169 +35,16 @@ SendPacketFromQueue->SetupNextSend->bcm_cmd53
#include "headers.h"
-/*******************************************************************
-* Function - bcm_transmit()
-*
-* Description - This is the main transmit function for our virtual
-* interface(veth0). It handles the ARP packets. It
-* clones this packet and then Queue it to a suitable
-* Queue. Then calls the transmit_packet().
-*
-* Parameter - skb - Pointer to the socket buffer structure
-* dev - Pointer to the virtual net device structure
-*
-* Returns - zero (success) or -ve value (failure)
-*
-*********************************************************************/
-
-INT bcm_transmit(struct sk_buff *skb, /**< skb */
- struct net_device *dev /**< net device pointer */
- )
-{
- PMINI_ADAPTER Adapter = NULL;
- USHORT qindex=0;
- struct timeval tv;
- UINT pkt_type = 0;
- UINT calltransmit = 0;
-
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "\n%s====>\n",__FUNCTION__);
-
- memset(&tv, 0, sizeof(tv));
- /* Check for valid parameters */
- if(skb == NULL || dev==NULL)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX,TX_OSAL_DBG, DBG_LVL_ALL, "Got NULL skb or dev\n");
- return -EINVAL;
- }
-
- Adapter = GET_BCM_ADAPTER(dev);
- if(!Adapter)
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Got Invalid Adapter\n");
- return -EINVAL;
- }
- if(Adapter->device_removed == TRUE || !Adapter->LinkUpStatus)
- {
- if(!netif_queue_stopped(dev)) {
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- }
- return STATUS_FAILURE;
- }
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Packet size : %d\n", skb->len);
-
- /*Add Ethernet CS check here*/
- if(Adapter->TransferMode == IP_PACKET_ONLY_MODE )
- {
- pkt_type = ntohs(*(PUSHORT)(skb->data + 12));
- /* Get the queue index where the packet is to be queued */
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Getting the Queue Index.....");
-
- qindex = GetPacketQueueIndex(Adapter,skb);
-
- if((SHORT)INVALID_QUEUE_INDEX==(SHORT)qindex)
- {
- if(pkt_type == ETH_ARP_FRAME)
- {
- /*
- Reply directly to ARP request packet
- ARP Spoofing only if NO ETH CS rule matches for it
- */
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,"ARP OPCODE = %02x",
-
- (*(PUCHAR)(skb->data + 21)));
-
- reply_to_arp_request(skb);
-
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX,TX_OSAL_DBG, DBG_LVL_ALL,"After reply_to_arp_request \n");
-
- }
- else
- {
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,
- "Invalid queue index, dropping pkt\n");
-
- bcm_kfree_skb(skb);
- }
- return STATUS_SUCCESS;
- }
-
- if(Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >= SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
- {
- atomic_inc(&Adapter->TxDroppedPacketCount);
- bcm_kfree_skb(skb);
- return STATUS_SUCCESS;
- }
-
- /* Now Enqueue the packet */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "bcm_transmit Enqueueing the Packet To Queue %d",qindex);
- spin_lock(&Adapter->PackInfo[qindex].SFQueueLock);
- Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
- Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++;
-
- *((B_UINT32 *)skb->cb + SKB_CB_LATENCY_OFFSET ) = jiffies;
- ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue,
- Adapter->PackInfo[qindex].LastTxQueue, skb);
- atomic_inc(&Adapter->TotalPacketCount);
- spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock);
- do_gettimeofday(&tv);
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,"ENQ: \n");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Pkt Len = %d, sec: %ld, usec: %ld\n",
- (skb->len-ETH_HLEN), tv.tv_sec, tv.tv_usec);
-
-#ifdef BCM_SHM_INTERFACE
- spin_lock(&Adapter->txtransmitlock);
- if(Adapter->txtransmit_running == 0)
- {
- Adapter->txtransmit_running = 1;
- calltransmit = 1;
- }
- else
- calltransmit = 0;
-
- spin_unlock(&Adapter->txtransmitlock);
-#endif
- if(calltransmit == 1)
- transmit_packets(Adapter);
- else
- {
- if(!atomic_read(&Adapter->TxPktAvail))
- {
- atomic_set(&Adapter->TxPktAvail, 1);
-#ifdef BCM_SHM_INTERFACE
- virtual_mail_box_interrupt();
-#endif
- wake_up(&Adapter->tx_packet_wait_queue);
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "<====");
- }
- else
- bcm_kfree_skb(skb);
-
- return STATUS_SUCCESS;
-}
-
/**
@ingroup ctrl_pkt_functions
This function dispatches control packet to the h/w interface
@return zero(success) or -ve value(failure)
*/
-INT SendControlPacket(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
- char *pControlPacket/**<Control Packet*/
- )
+INT SendControlPacket(PMINI_ADAPTER Adapter, char *pControlPacket)
{
- PLEADER PLeader = NULL;
- struct timeval tv;
- memset(&tv, 0, sizeof(tv));
-
-
-
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "========>");
+ PLEADER PLeader = (PLEADER)pControlPacket;
- PLeader=(PLEADER)pControlPacket;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx");
if(!pControlPacket || !Adapter)
{
@@ -208,12 +55,6 @@ INT SendControlPacket(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1))
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "NO FREE DESCRIPTORS TO SEND CONTROL PACKET");
- if(Adapter->bcm_jiffies == 0)
- {
- Adapter->bcm_jiffies = jiffies;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "UPDATED TIME(hex): %lu",
- Adapter->bcm_jiffies);
- }
return STATUS_FAILURE;
}
@@ -224,76 +65,33 @@ INT SendControlPacket(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Leader Length: %x",PLeader->PLength);
if(Adapter->device_removed)
return 0;
-#ifndef BCM_SHM_INTERFACE
- Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
- pControlPacket, (PLeader->PLength + LEADER_SIZE));
-#else
- tx_pkts_to_firmware(pControlPacket,(PLeader->PLength + LEADER_SIZE),1);
- if(PLeader->Status==IDLE_MESSAGE)
- {
- if(((CONTROL_MESSAGE*)PLeader)->szData[0] == GO_TO_IDLE_MODE_PAYLOAD &&
- ((CONTROL_MESSAGE*)PLeader)->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Ack Sent to the Device\n");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Host Entering into Idle Mode\n");
- do_gettimeofday(&tv);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "IdleMode Msg sent to f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
- if(Adapter->bDoSuspend != TRUE)
- {
- Adapter->IdleMode = TRUE;
- Adapter->bPreparingForLowPowerMode = FALSE ;
- }
- }
- }
- if((PLeader->Status == LINK_UP_CONTROL_REQ) &&
- ((PUCHAR)pControlPacket)[sizeof(LEADER)] == LINK_UP_ACK &&
- ((PUCHAR)pControlPacket)[sizeof(LEADER)+1] ==
- LINK_SHUTDOWN_REQ_FROM_FIRMWARE &&
- ((PUCHAR)pControlPacket)[sizeof(LEADER)+2] == SHUTDOWN_ACK_FROM_DRIVER)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shut Down ACK Sent and Host entering Shut State \n");
- if(Adapter->bDoSuspend != TRUE)
- {
- Adapter->bShutStatus = TRUE;
- Adapter->bPreparingForLowPowerMode = FALSE;
- Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
- }
+ if (netif_msg_pktdata(Adapter))
+ print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE,
+ 16, 1, pControlPacket, PLeader->PLength + LEADER_SIZE, 0);
- }
-#endif
+ Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
+ pControlPacket, (PLeader->PLength + LEADER_SIZE));
- ((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.tx_packets++;
- ((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.tx_bytes+=
- PLeader->PLength;
atomic_dec(&Adapter->CurrNumFreeTxDesc);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<=========");
return STATUS_SUCCESS;
}
-static LEADER Leader={0};
+
/**
@ingroup tx_functions
This function despatches the IP packets with the given vcid
to the target via the host h/w interface.
@return zero(success) or -ve value(failure)
*/
-INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
- struct sk_buff *Packet, /**<data buffer*/
- USHORT Vcid) /**<VCID for this packet*/
+INT SetupNextSend(PMINI_ADAPTER Adapter, struct sk_buff *Packet, USHORT Vcid)
{
int status=0;
-#ifdef GDMA_INTERFACE
- int dontfree = 0;
-#endif
BOOLEAN bHeaderSupressionEnabled = FALSE;
B_UINT16 uiClassifierRuleID;
- int QueueIndex = NO_OF_QUEUES + 1;
+ u16 QueueIndex = skb_get_queue_mapping(Packet);
+ LEADER Leader={0};
- if(!Adapter || !Packet)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got NULL Adapter or Packet");
- return -EINVAL;
- }
if(Packet->len > MAX_DEVICE_DESC_SIZE)
{
status = STATUS_FAILURE;
@@ -302,14 +100,10 @@ INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
/* Get the Classifier Rule ID */
uiClassifierRuleID = *((UINT32*) (Packet->cb)+SKB_CB_CLASSIFICATION_OFFSET);
- QueueIndex = SearchVcid( Adapter,Vcid);
- if(QueueIndex < NO_OF_QUEUES)
- {
- bHeaderSupressionEnabled =
- Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
- bHeaderSupressionEnabled =
- bHeaderSupressionEnabled & Adapter->bPHSEnabled;
- }
+
+ bHeaderSupressionEnabled = Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled
+ & Adapter->bPHSEnabled;
+
if(Adapter->device_removed)
{
status = STATUS_FAILURE;
@@ -327,15 +121,10 @@ INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
Leader.Vcid = Vcid;
- if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending TCP ACK\n");
+ if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
Leader.Status = LEADER_STATUS_TCP_ACK;
- }
else
- {
Leader.Status = LEADER_STATUS;
- }
if(Adapter->PackInfo[QueueIndex].bEthCSSupport)
{
@@ -351,68 +140,53 @@ INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
skb_push(Packet, LEADER_SIZE);
memcpy(Packet->data, &Leader, LEADER_SIZE);
}
-
else
{
Leader.PLength = Packet->len - ETH_HLEN;
memcpy((LEADER*)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)), &Leader, LEADER_SIZE);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Packet->len = %d", Packet->len);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Vcid = %d", Vcid);
-
-#ifndef BCM_SHM_INTERFACE
status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
Packet->data, (Leader.PLength + LEADER_SIZE));
-#else
- status = tx_pkts_to_firmware(Packet,Packet->len,0);
-#endif
if(status)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx Failed..\n");
+ ++Adapter->dev->stats.tx_errors;
+ if (netif_msg_tx_err(Adapter))
+ pr_info(PFX "%s: transmit error %d\n", Adapter->dev->name,
+ status);
}
else
{
+ struct net_device_stats *netstats = &Adapter->dev->stats;
Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
- atomic_add(Leader.PLength, &Adapter->GoodTxByteCount);
- atomic_inc(&Adapter->TxTotalPacketCount);
-#ifdef GDMA_INTERFACE
- dontfree = 1;
-#endif
- }
- atomic_dec(&Adapter->CurrNumFreeTxDesc);
-
-errExit:
+ netstats->tx_bytes += Leader.PLength;
+ ++netstats->tx_packets;
- if(STATUS_SUCCESS == status)
- {
Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
Adapter->PackInfo[QueueIndex].uiSentPackets++;
Adapter->PackInfo[QueueIndex].NumOfPacketsSent++;
atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount);
-#ifdef BCM_SHM_INTERFACE
- if(atomic_read(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount) < 0)
- {
- atomic_set(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount, 0);
- }
-#endif
Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength;
}
+ atomic_dec(&Adapter->CurrNumFreeTxDesc);
-#ifdef GDMA_INTERFACE
- if(!dontfree){
- bcm_kfree_skb(Packet);
- }
-#else
- bcm_kfree_skb(Packet);
-#endif
+errExit:
+
+ dev_kfree_skb(Packet);
return status;
}
+static int tx_pending(PMINI_ADAPTER Adapter)
+{
+ return (atomic_read(&Adapter->TxPktAvail)
+ && MINIMUM_PENDING_DESCRIPTORS < atomic_read(&Adapter->CurrNumFreeTxDesc))
+ || Adapter->device_removed || (1 == Adapter->downloadDDR);
+}
+
/**
@ingroup tx_functions
Transmit thread
@@ -420,57 +194,26 @@ Transmit thread
int tx_pkt_handler(PMINI_ADAPTER Adapter /**< pointer to adapter object*/
)
{
-#ifndef BCM_SHM_INTERFACE
int status = 0;
-#endif
-
- UINT calltransmit = 1;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Entring to wait for signal from the interrupt service thread!Adapter = %p",Adapter);
-
- while(1)
- {
- if(Adapter->LinkUpStatus){
+ while(! kthread_should_stop()) {
+		/* FIXME - the timeout looks like a workaround for racy usage of TxPktAvail */
+ if(Adapter->LinkUpStatus)
wait_event_timeout(Adapter->tx_packet_wait_queue,
- ((atomic_read(&Adapter->TxPktAvail) &&
- (MINIMUM_PENDING_DESCRIPTORS <
- atomic_read(&Adapter->CurrNumFreeTxDesc)) &&
- (Adapter->device_removed == FALSE))) ||
- (1 == Adapter->downloadDDR) || kthread_should_stop()
-#ifndef BCM_SHM_INTERFACE
- || (TRUE == Adapter->bEndPointHalted)
-#endif
- , msecs_to_jiffies(10));
- }
- else{
- wait_event(Adapter->tx_packet_wait_queue,
- ((atomic_read(&Adapter->TxPktAvail) &&
- (MINIMUM_PENDING_DESCRIPTORS <
- atomic_read(&Adapter->CurrNumFreeTxDesc)) &&
- (Adapter->device_removed == FALSE))) ||
- (1 == Adapter->downloadDDR) || kthread_should_stop()
-#ifndef BCM_SHM_INTERFACE
- || (TRUE == Adapter->bEndPointHalted)
-#endif
- );
- }
-
- if(kthread_should_stop() || Adapter->device_removed)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Exiting the tx thread..\n");
- Adapter->transmit_packet_thread = NULL;
- return 0;
- }
+ tx_pending(Adapter), msecs_to_jiffies(10));
+ else
+ wait_event_interruptible(Adapter->tx_packet_wait_queue,
+ tx_pending(Adapter));
-#ifndef BCM_SHM_INTERFACE
+ if (Adapter->device_removed)
+ break;
if(Adapter->downloadDDR == 1)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Downloading DDR Settings\n");
Adapter->downloadDDR +=1;
status = download_ddr_settings(Adapter);
if(status)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "DDR DOWNLOAD FAILED!\n");
+ pr_err(PFX "DDR DOWNLOAD FAILED! %d\n", status);
continue;
}
@@ -489,7 +232,6 @@ int tx_pkt_handler(PMINI_ADAPTER Adapter /**< pointer to adapter object*/
update_per_sf_desc_cnts(Adapter);
}
}
-#endif
if( atomic_read(&Adapter->CurrNumFreeTxDesc) &&
Adapter->LinkStatus == SYNC_UP_REQUEST &&
@@ -507,49 +249,12 @@ int tx_pkt_handler(PMINI_ADAPTER Adapter /**< pointer to adapter object*/
wake_up(&Adapter->process_rx_cntrlpkt);
}
-#ifdef BCM_SHM_INTERFACE
- spin_lock_bh(&Adapter->txtransmitlock);
- if(Adapter->txtransmit_running == 0)
- {
- Adapter->txtransmit_running = 1;
- calltransmit = 1;
- }
- else
- calltransmit = 0;
- spin_unlock_bh(&Adapter->txtransmitlock);
-#endif
-
- if(calltransmit)
- transmit_packets(Adapter);
+ transmit_packets(Adapter);
atomic_set(&Adapter->TxPktAvail, 0);
}
- return 0;
-}
-
-#ifdef BCM_SHM_INTERFACE
-extern PMINI_ADAPTER psAdaptertest;
-void virtual_mail_box_interrupt(void)
-{
-
-#ifndef GDMA_INTERFACE
- PUINT ptr = (PUINT)CPE_VIRTUAL_MAILBOX_REG;
- UINT intval = (UINT)((*ptr & 0xFF00) >> 8);
- if (intval != 0)
- {
- atomic_set(&psAdaptertest->CurrNumFreeTxDesc, intval);
- atomic_set (&psAdaptertest->uiMBupdate, TRUE);
- //make it to 0
- *ptr = *ptr & 0xffff00ff;
- }
-#endif
-}
-unsigned int total_tx_pkts_pending(void)
-{
- return atomic_read(&psAdaptertest->TotalPacketCount);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Exiting the tx thread..\n");
+ Adapter->transmit_packet_thread = NULL;
+ return 0;
}
-
-#endif
-
-
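
The reworked tx_pkt_handler() above has the standard shape of a kernel worker thread: sleep on a wait queue, re-check the wake condition, and exit cleanly when kthread_should_stop() is set. Below is a minimal hedged sketch of that shape with illustrative names (not this driver's structures); the thread would be started with kthread_run() and stopped with kthread_stop() elsewhere.

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

struct sketch_ctx {
	wait_queue_head_t wq;		/* woken whenever new work is queued */
	atomic_t work_available;
};

static int sketch_tx_thread(void *arg)
{
	struct sketch_ctx *ctx = arg;

	while (!kthread_should_stop()) {
		/* wake on pending work, a stop request, or a short timeout */
		wait_event_timeout(ctx->wq,
				   atomic_read(&ctx->work_available) ||
				   kthread_should_stop(),
				   msecs_to_jiffies(10));

		if (kthread_should_stop())
			break;

		/* ... drain the pending work here ... */
		atomic_set(&ctx->work_available, 0);
	}
	return 0;
}
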
diff --git a/drivers/staging/bcm/cntrl_SignalingInterface.h b/drivers/staging/bcm/cntrl_SignalingInterface.h
index 4cbe30022248..890778450a86 100644
--- a/drivers/staging/bcm/cntrl_SignalingInterface.h
+++ b/drivers/staging/bcm/cntrl_SignalingInterface.h
@@ -2,19 +2,6 @@
#define CNTRL_SIGNALING_INTERFACE_
-#ifdef BECEEM_TARGET
-
-#include <mac_common.h>
-#include <msg_Dsa.h>
-#include <msg_Dsc.h>
-#include <msg_Dsd.h>
-#include <sch_definitions.h>
-using namespace Beceem;
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
-extern B_UINT32 g_u32Corr2MacFlags;
-#endif
-
-#else
#define DSA_REQ 11
@@ -28,7 +15,6 @@ extern B_UINT32 g_u32Corr2MacFlags;
#define DSD_ACK 19
#define MAX_CLASSIFIERS_IN_SF 4
-#endif
#define MAX_STRING_LEN 20
#define MAX_PHS_LENGTHS 255
@@ -57,37 +43,7 @@ extern B_UINT32 g_u32Corr2MacFlags;
////////////////////////structure Definitions///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// \brief class cCPacketClassificationRule
-#ifdef BECEEM_TARGET
-class CCPacketClassificationRuleSI{
- public:
- /// \brief Constructor for the class
- CCPacketClassificationRuleSI():
- u8ClassifierRulePriority(mClassifierRulePriority),
- u8IPTypeOfServiceLength(mIPTypeOfService),
- u8Protocol(mProtocol),
- u8IPMaskedSourceAddressLength(0),
- u8IPDestinationAddressLength(0),
- u8ProtocolSourcePortRangeLength(0),
- u8ProtocolDestPortRangeLength(0),
- u8EthernetDestMacAddressLength(0),
- u8EthernetSourceMACAddressLength(0),
- u8EthertypeLength(0),
- u16UserPriority(mUserPriority),
- u16VLANID(mVLANID),
- u8AssociatedPHSI(mAssociatedPHSI),
- u16PacketClassificationRuleIndex(mPacketClassifierRuleIndex),
- u8VendorSpecificClassifierParamLength(mVendorSpecificClassifierParamLength),
- u8IPv6FlowLableLength(mIPv6FlowLableLength),
- u8ClassifierActionRule(mClassifierActionRule)
-
- {}
- void Reset()
- {
- CCPacketClassificationRuleSI();
- }
-#else
struct _stCPacketClassificationRuleSI{
-#endif
/** 16bit UserPriority Of The Service Flow*/
B_UINT16 u16UserPriority;
@@ -145,29 +101,10 @@ struct _stCPacketClassificationRuleSI{
B_UINT8 u8ClassifierActionRule;
B_UINT16 u16ValidityBitMap;
};
-#ifndef BECEEM_TARGET
typedef struct _stCPacketClassificationRuleSI CCPacketClassificationRuleSI,stCPacketClassificationRuleSI, *pstCPacketClassificationRuleSI;
-#endif
/// \brief class CPhsRuleSI
-#ifdef BECEEM_TARGET
-class CPhsRuleSI{
- public:
- /// \brief Constructor for the class
- CPhsRuleSI():
- u8PHSI(mPHSI),
- u8PHSFLength(0),
- u8PHSMLength(0),
- u8PHSS(mPHSS),
- u8PHSV(mPHSV),
- u8VendorSpecificPHSParamsLength(mVendorSpecificPHSParamLength){}
- void Reset()
- {
- CPhsRuleSI();
- }
-#else
typedef struct _stPhsRuleSI {
-#endif
/** 8bit PHS Index Of The Service Flow*/
B_UINT8 u8PHSI;
/** PHSF Length Of The Service Flow*/
@@ -188,31 +125,11 @@ typedef struct _stPhsRuleSI {
B_UINT8 u8VendorSpecificPHSParams[VENDOR_PHS_PARAM_LENGTH];
B_UINT8 u8Padding[2];
-#ifdef BECEEM_TARGET
-};
-#else
}stPhsRuleSI,*pstPhsRuleSI;
typedef stPhsRuleSI CPhsRuleSI;
-#endif
/// \brief structure cConvergenceSLTypes
-#ifdef BECEEM_TARGET
-class CConvergenceSLTypes{
- public:
- /// \brief Constructor for the class
- CConvergenceSLTypes():
- u8ClassfierDSCAction(mClassifierDSCAction),
- u8PhsDSCAction (mPhsDSCAction)
- {}
- void Reset()
- {
- CConvergenceSLTypes();
- cCPacketClassificationRule.Reset();
- cPhsRule.Reset();
- }
-#else
struct _stConvergenceSLTypes{
-#endif
/** 8bit Phs Classfier Action Of The Service Flow*/
B_UINT8 u8ClassfierDSCAction;
/** 8bit Phs DSC Action Of The Service Flow*/
@@ -220,111 +137,15 @@ struct _stConvergenceSLTypes{
/** 16bit Padding */
B_UINT8 u8Padding[2];
/// \brief class cCPacketClassificationRule
-#ifdef BECEEM_TARGET
- CCPacketClassificationRuleSI cCPacketClassificationRule;
-#else
stCPacketClassificationRuleSI cCPacketClassificationRule;
-#endif
/// \brief class CPhsRuleSI
-#ifdef BECEEM_TARGET
- CPhsRuleSI cPhsRule;
-#else
struct _stPhsRuleSI cPhsRule;
-#endif
};
-#ifndef BECEEM_TARGET
typedef struct _stConvergenceSLTypes stConvergenceSLTypes,CConvergenceSLTypes, *pstConvergenceSLTypes;
-#endif
/// \brief structure CServiceFlowParamSI
-#ifdef BECEEM_TARGET
-class CServiceFlowParamSI{
- public:
- /// \brief Constructor for the class
- CServiceFlowParamSI():
- u32SFID(mSFid),
- u16CID(mCid),
- u8ServiceClassNameLength(mServiceClassNameLength),
- u8MBSService(mMBSService),
- u8QosParamSet(mQosParamSetType),
- u8TrafficPriority(mTrafficPriority),
- u32MaxSustainedTrafficRate(mMaximumSustainedTrafficRate),
- u32MaxTrafficBurst(mMaximumTrafficBurst),
- u32MinReservedTrafficRate(mMinimumReservedTrafficRate),
- u8ServiceFlowSchedulingType(mServiceFlowSchedulingType),
- u8RequesttransmissionPolicy(mRequestTransmissionPolicy),
- u32ToleratedJitter(mToleratedJitter),
- u32MaximumLatency(mMaximumLatency),
- u8FixedLengthVSVariableLengthSDUIndicator
- (mFixedLengthVSVariableLength),
- u8SDUSize(mSDUSize),
- u16TargetSAID(mTargetSAID),
- u8ARQEnable(mARQEnable),
- u16ARQWindowSize(mARQWindowSize),
- u16ARQBlockLifeTime(mARQBlockLifeTime),
- u16ARQSyncLossTimeOut(mARQSyncLossTimeOut),
- u8ARQDeliverInOrder(mARQDeliverInOrder),
- u16ARQRxPurgeTimeOut(mARQRXPurgeTimeOut),
- //Add ARQ BLOCK SIZE, ARQ TX and RX delay initializations here
- //after we move to only CORR2
- u8RxARQAckProcessingTime(mRxARQAckProcessingTime),
- u8CSSpecification(mCSSpecification),
- u8TypeOfDataDeliveryService(mTypeOfDataDeliveryService),
- u16SDUInterArrivalTime(mSDUInterArrivalTime),
- u16TimeBase(mTimeBase),
- u8PagingPreference(mPagingPreference),
- u8MBSZoneIdentifierassignment(mMBSZoneIdentifierassignmentLength),
- u8TrafficIndicationPreference(mTrafficIndicationPreference),
- u8GlobalServicesClassNameLength(mGlobalServicesClassNameLength),
- u8SNFeedbackEnabled(mSNFeedbackEnabled),
- u8FSNSize(mFSNSize),
- u8CIDAllocation4activeBSsLength(mCIDAllocation4activeBSsLength),
- u16UnsolicitedGrantInterval(mUnsolicitedGrantInterval),
- u16UnsolicitedPollingInterval(mUnsolicitedPollingInterval),
- u8PDUSNExtendedSubheader4HarqReordering(mPDUSNExtendedSubheader4HarqReordering),
- u8MBSContentsIDLength(mMBSContentsIDLength),
- u8HARQServiceFlows(mHARQServiceFlows),
- u8AuthTokenLength(mAuthTokenLength),
- u8HarqChannelMappingLength(mHarqChannelMappingLength),
- u8VendorSpecificQoSParamLength(mVendorSpecificQoSParamLength),
- bValid(FALSE),
- u8TotalClassifiers()
-{
-//Remove the bolck after we move to Corr2 only code
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
- if((g_u32Corr2MacFlags & CORR_2_DSX) || (g_u32Corr2MacFlags & CORR_2_ARQ))
- {
- /* IEEE Comment #627 / MTG Comment #426 */
- u16ARQBlockSize = mARQBlockSize;
- if(g_u32Corr2MacFlags & CORR_2_ARQ) {
- u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelay;
- if(g_u32VENDOR_TYPE == VENDOR_ALCATEL) {
- u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelay_ALU;
- } else {
- u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelay;
- }
- }
- else
- {
- u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelayCorr1;
- u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelayCorr1;
- }
- }
- else
-#endif
- {
- u16ARQBlockSize = mARQBlockSizeCorr1;
- u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelayCorr1;
- u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelayCorr1;
- }
-}
-
- void ComputeMacOverhead(B_UINT8 u8SecOvrhead);
- B_UINT16 GetMacOverhead() { return u16MacOverhead; }
-#else
typedef struct _stServiceFlowParamSI{
-#endif //end of ifdef BECEEM_TARGET
/** 32bitSFID Of The Service Flow*/
B_UINT32 u32SFID;
@@ -367,11 +188,6 @@ typedef struct _stServiceFlowParamSI{
/** 16bit ARQ Purge timeout */
B_UINT16 u16ARQRxPurgeTimeOut;
-#if 0 //def ENABLE_CORRIGENDUM2_UPDATE
-/* IEEE Comment #627 / MTG Comment #426 */
- /// \brief Size of an ARQ block, changed from 2 bytes to 1
- B_UINT8 u8ARQBlockSize;
-#endif
//TODO::Remove this once we move to a new CORR2 driver
/// \brief Size of an ARQ block
B_UINT16 u16ARQBlockSize;
@@ -496,35 +312,18 @@ typedef struct _stServiceFlowParamSI{
B_UINT8 bValid; /**< Validity flag */
B_UINT8 u8Padding; /**< Padding byte*/
-#ifdef BECEEM_TARGET
-/**
-Structure for Convergence SubLayer Types with a maximum of 4 classifiers
-*/
- CConvergenceSLTypes cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-#else
/**
Structure for Convergence SubLayer Types with a maximum of 4 classifiers
*/
stConvergenceSLTypes cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-#endif
-#ifdef BECEEM_TARGET
-};
-#else
} stServiceFlowParamSI, *pstServiceFlowParamSI;
typedef stServiceFlowParamSI CServiceFlowParamSI;
-#endif
/**
structure stLocalSFAddRequest
*/
typedef struct _stLocalSFAddRequest{
-#ifdef BECEEM_TARGET
- _stLocalSFAddRequest( ) :
- u8Type(0x00), eConnectionDir(0x00),
- u16TID(0x0000), u16CID(0x0000), u16VCID(0x0000)
- {}
-#endif
B_UINT8 u8Type; /**< Type*/
B_UINT8 eConnectionDir; /**< Connection direction*/
@@ -535,19 +334,9 @@ typedef struct _stLocalSFAddRequest{
/// \brief 16bitVCID
B_UINT16 u16VCID; /**< 16bit VCID*/
/// \brief structure ParameterSet
-#ifdef BECEEM_SIGNALLING_INTERFACE_API
- CServiceFlowParamSI sfParameterSet;
-#endif
-#ifdef BECEEM_TARGET
- CServiceFlowParamSI *psfParameterSet;
-#else
stServiceFlowParamSI *psfParameterSet; /**< structure ParameterSet*/
-#endif
-#ifdef USING_VXWORKS
- USE_DATA_MEMORY_MANAGER();
-#endif
}stLocalSFAddRequest, *pstLocalSFAddRequest;
@@ -555,12 +344,6 @@ typedef struct _stLocalSFAddRequest{
structure stLocalSFAddIndication
*/
typedef struct _stLocalSFAddIndication{
-#ifdef BECEEM_TARGET
- _stLocalSFAddIndication( ) :
- u8Type(0x00), eConnectionDir(0x00),
- u16TID(0x0000), u16CID(0x0000), u16VCID(0x0000)
- {}
-#endif
B_UINT8 u8Type; /**< Type*/
B_UINT8 eConnectionDir; /**< Connection Direction*/
@@ -571,37 +354,19 @@ typedef struct _stLocalSFAddIndication{
/// \brief 16bitVCID
B_UINT16 u16VCID; /**< 16bitVCID*/
-#ifdef BECEEM_SIGNALLING_INTERFACE_API
- CServiceFlowParamSI sfAuthorizedSet;
- /// \brief structure AdmittedSet
- CServiceFlowParamSI sfAdmittedSet;
- /// \brief structure ActiveSet
- CServiceFlowParamSI sfActiveSet;
-#endif
/// \brief structure AuthorizedSet
-#ifdef BECEEM_TARGET
- CServiceFlowParamSI *psfAuthorizedSet;
- /// \brief structure AdmittedSet
- CServiceFlowParamSI *psfAdmittedSet;
- /// \brief structure ActiveSet
- CServiceFlowParamSI *psfActiveSet;
-#else
/// \brief structure AuthorizedSet
stServiceFlowParamSI *psfAuthorizedSet; /**< AuthorizedSet of type stServiceFlowParamSI*/
/// \brief structure AdmittedSet
stServiceFlowParamSI *psfAdmittedSet; /**< AdmittedSet of type stServiceFlowParamSI*/
/// \brief structure ActiveSet
stServiceFlowParamSI *psfActiveSet; /**< sfActiveSet of type stServiceFlowParamSI*/
-#endif
B_UINT8 u8CC; /**< Confirmation Code*/
B_UINT8 u8Padd; /**< 8-bit Padding */
B_UINT16 u16Padd; /**< 16 bit Padding */
-#ifdef USING_VXWORKS
- USE_DATA_MEMORY_MANAGER();
-#endif
}stLocalSFAddIndication;
@@ -619,33 +384,17 @@ typedef struct _stLocalSFAddIndication stLocalSFChangeIndication, *pstLocalSFCha
structure stLocalSFDeleteRequest
*/
typedef struct _stLocalSFDeleteRequest{
-#ifdef BECEEM_TARGET
- _stLocalSFDeleteRequest( ) :
- u8Type(0x00), u8Padding(0x00),
- u16TID(0x0000), u32SFID (0x00000000)
- {}
-#endif
B_UINT8 u8Type; /**< Type*/
B_UINT8 u8Padding; /**< Padding byte*/
B_UINT16 u16TID; /**< TID*/
/// \brief 32bitSFID
B_UINT32 u32SFID; /**< SFID*/
-#ifdef USING_VXWORKS
- USE_DATA_MEMORY_MANAGER();
-#endif
}stLocalSFDeleteRequest, *pstLocalSFDeleteRequest;
/**
structure stLocalSFDeleteIndication
*/
typedef struct stLocalSFDeleteIndication{
-#ifdef BECEEM_TARGET
- stLocalSFDeleteIndication( ) :
- u8Type(0x00), u8Padding(0x00),
- u16TID(0x0000), u16CID(0x0000),
- u16VCID(0x0000),u32SFID (0x00000000)
- {}
-#endif
B_UINT8 u8Type; /**< Type */
B_UINT8 u8Padding; /**< Padding */
B_UINT16 u16TID; /**< TID */
@@ -658,9 +407,6 @@ typedef struct stLocalSFDeleteIndication{
/// \brief 8bit Confirmation code
B_UINT8 u8ConfirmationCode; /**< Confirmation code */
B_UINT8 u8Padding1[3]; /**< 3 byte Padding */
-#ifdef USING_VXWORKS
- USE_DATA_MEMORY_MANAGER();
-#endif
}stLocalSFDeleteIndication;
typedef struct _stIM_SFHostNotify
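With the BECEEM_TARGET C++ variants removed above, the signalling-interface types are plain C structs and typedefs. A minimal usage sketch, assuming only the typedefs and members shown in the hunks above (the function name and values are illustrative, not part of the patch):

static void example_fill_sf_request(stLocalSFAddRequest *pReq,
				    stServiceFlowParamSI *pParams)
{
	/* illustrative only */
	memset(pParams, 0, sizeof(*pParams));
	pParams->u32SFID = 1;					/* arbitrary SFID */
	pParams->bValid = 1;
	pParams->cConvergenceSLTypes[0].cPhsRule.u8PHSI = 1;	/* first classifier's PHS rule */

	pReq->u8Type = 0;
	pReq->psfParameterSet = pParams;			/* plain pointer, no C++ wrapper */
}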
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 9d4e3aca1b34..1148e5e22eb9 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -22,7 +22,6 @@
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/wait.h>
-#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
@@ -36,26 +35,10 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/kthread.h>
-#endif
#include <linux/tcp.h>
#include <linux/udp.h>
-#ifndef BCM_SHM_INTERFACE
#include <linux/usb.h>
-#endif
-#ifdef BECEEM_TARGET
-
-#include <mac_common.h>
-#include <msg_Dsa.h>
-#include <msg_Dsc.h>
-#include <msg_Dsd.h>
-#include <sch_definitions.h>
-using namespace Beceem;
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
-extern B_UINT32 g_u32Corr2MacFlags;
-#endif
-#endif
#include "Typedefs.h"
#include "Version.h"
@@ -71,39 +54,28 @@ extern B_UINT32 g_u32Corr2MacFlags;
#include "CmHost.h"
#include "DDRInit.h"
#include "Debug.h"
-#include "HostMibs.h"
#include "IPv6ProtocolHdr.h"
-#include "osal_misc.h"
#include "PHSModule.h"
#include "Protocol.h"
#include "Prototypes.h"
#include "Queue.h"
#include "vendorspecificextn.h"
-#ifndef BCM_SHM_INTERFACE
#include "InterfaceMacros.h"
#include "InterfaceAdapter.h"
#include "InterfaceIsr.h"
-#include "Interfacemain.h"
#include "InterfaceMisc.h"
#include "InterfaceRx.h"
#include "InterfaceTx.h"
-#endif
#include "InterfaceIdleMode.h"
#include "InterfaceInit.h"
-#ifdef BCM_SHM_INTERFACE
-#include <linux/cpe_config.h>
-
-#ifdef GDMA_INTERFACE
-#include "GdmaInterface.h"
-#include "symphony.h"
-#else
-#include "virtual_interface.h"
-
-#endif
-
-#endif
+#define DRV_NAME "beceem"
+#define DEV_NAME "tarang"
+#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
+#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc"
+#define DRV_VERSION VER_FILEVERSION_STR
+#define PFX DRV_NAME " "
#endif
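The constants added above replace the target-specific includes. A hedged sketch of how macros like these are typically consumed (module metadata and log prefixes); whether this driver wires them up exactly this way elsewhere is an assumption:

/* assumes <linux/module.h> is pulled in via headers.h */
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
/* log messages would then use the prefix, e.g. printk(KERN_INFO PFX "device ready\n"); */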
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index e9da513b3c24..c13ea5c9a2aa 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -10,12 +10,8 @@
*/
#include "headers.h"
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
- PVOID ioBuffer,
- ULONG inputBufferLength)
+INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
-
- S_MIBS_HOST_STATS_MIBS *pstHostMibs = NULL;
S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
S_PHS_RULE *pstPhsRule = NULL;
S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
@@ -30,15 +26,6 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
return STATUS_FAILURE;
}
- if(ioBuffer == NULL)
- {
- return -EINVAL;
- }
- memset(ioBuffer,0,sizeof(S_MIBS_HOST_STATS_MIBS));
-
- pstHostMibs = (S_MIBS_HOST_STATS_MIBS *)ioBuffer;
-
-
//Copy the classifier Table
for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
nClassifierIndex++)
@@ -54,7 +41,7 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
{
if(Adapter->PackInfo[nSfIndex].bValid)
{
- OsalMemMove((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
+ memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
else
{
@@ -83,7 +70,7 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
- OsalMemMove(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
+ memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
&pstPhsRule->u8PHSI,
sizeof(S_PHS_RULE));
nPhsTableIndex++;
@@ -95,12 +82,9 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
}
-
//copy other Host Statistics parameters
- pstHostMibs->stHostInfo.GoodTransmits =
- atomic_read(&Adapter->TxTotalPacketCount);
- pstHostMibs->stHostInfo.GoodReceives =
- atomic_read(&Adapter->GoodRxPktCount);
+ pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
+ pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
pstHostMibs->stHostInfo.CurrNumFreeDesc =
atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
@@ -115,13 +99,10 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
}
-INT GetDroppedAppCntrlPktMibs(PVOID ioBuffer, PPER_TARANG_DATA pTarang)
+VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
- S_MIBS_HOST_STATS_MIBS *pstHostMibs = (S_MIBS_HOST_STATS_MIBS *)ioBuffer;
-
- memcpy((PVOID)&(pstHostMibs->stDroppedAppCntrlMsgs),(PVOID)&(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
-
- return STATUS_SUCCESS ;
+ memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
+ &(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
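ProcessGetHostMibs() and GetDroppedAppCntrlPktMibs() now take a typed S_MIBS_HOST_STATS_MIBS pointer instead of a raw ioBuffer, so buffer validation, zeroing and the copy to user space move to the caller. A hedged sketch of that calling convention (the wrapper name and error handling are assumptions; only the two helper signatures come from the hunks above):

static int example_get_mibs(PMINI_ADAPTER Adapter, PPER_TARANG_DATA pTarang,
			    void __user *argp)
{
	S_MIBS_HOST_STATS_MIBS *pstHostMibs;
	int status;

	/* the caller now owns allocation and zeroing */
	pstHostMibs = kzalloc(sizeof(*pstHostMibs), GFP_KERNEL);
	if (!pstHostMibs)
		return -ENOMEM;

	status = ProcessGetHostMibs(Adapter, pstHostMibs);
	GetDroppedAppCntrlPktMibs(pstHostMibs, pTarang);

	if (status == STATUS_SUCCESS &&
	    copy_to_user(argp, pstHostMibs, sizeof(*pstHostMibs)))
		status = -EFAULT;

	kfree(pstHostMibs);
	return status;
}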
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 97adaae7dfc0..16e939fa15d6 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -108,52 +108,16 @@ static INT LED_Proportional_Blink(PMINI_ADAPTER Adapter, UCHAR GPIO_Num_tx,
ulong timeout = 0;
/*Read initial value of packets sent/received */
- Initial_num_of_packts_tx = atomic_read(&Adapter->TxTotalPacketCount);
- Initial_num_of_packts_rx = atomic_read(&Adapter->GoodRxPktCount);
+ Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
+ Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
+
/*Scale the rate of transfer to no of blinks.*/
num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
while((Adapter->device_removed == FALSE))
{
- #if 0
- if(0 == num_of_time_tx && 0 == num_of_time_rx)
- {
- timeout = 1000;
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies (timeout));
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
- return EVENT_SIGNALED;
- }
- if(Status)
- return EVENT_SIGNALED;
-
- }
- #endif
-
timeout = 50;
- #if 0
- /*Turn on LED if Tx is high bandwidth*/
- if(num_of_time_tx > MAX_NUM_OF_BLINKS)
- {
- TURN_ON_LED(1<<GPIO_Num_tx, uiTxLedIndex);
- num_of_time_tx = 0;
- bBlinkBothLED = FALSE;
- num_of_time = num_of_time_rx;
- }
- /*Turn on LED if Rx is high bandwidth*/
- if(num_of_time_rx > MAX_NUM_OF_BLINKS)
- {
- TURN_ON_LED(1<<GPIO_Num_rx, uiRxLedIndex);
- num_of_time_rx = 0;
- bBlinkBothLED = FALSE;
- num_of_time = num_of_time_tx;
- }
- #endif
/*Blink Tx and Rx LED when both Tx and Rx is in normal bandwidth*/
if(bBlinkBothLED)
{
@@ -249,9 +213,10 @@ static INT LED_Proportional_Blink(PMINI_ADAPTER Adapter, UCHAR GPIO_Num_tx,
* Read the Tx & Rx packets transmission after 1 second and
* calculate rate of transfer
*/
- Final_num_of_packts_tx = atomic_read(&Adapter->TxTotalPacketCount);
+ Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
+ Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
+
rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx;
- Final_num_of_packts_rx = atomic_read(&Adapter->GoodRxPktCount);
rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx;
/*Read initial value of packets sent/received */
@@ -293,7 +258,7 @@ static INT ValidateDSDParamsChecksum(
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",ulParamOffset, usParamLen);
- puBuffer = OsalMemAlloc(usParamLen,"!MEM");
+ puBuffer = kmalloc(usParamLen, GFP_KERNEL);
if(!puBuffer)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum Allocation failed");
@@ -341,10 +306,7 @@ static INT ValidateDSDParamsChecksum(
}
exit:
- if(puBuffer)
- {
- OsalMemFree(puBuffer, usParamLen);
- }
+ kfree(puBuffer);
return Status;
}
@@ -497,12 +459,10 @@ static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread
{
int Status = STATUS_SUCCESS;
UCHAR GPIO_Array[NUM_OF_LEDS+1]; /*Array to store GPIO numbers from EEPROM*/
-#ifndef BCM_SHM_INTERFACE
UINT uiIndex = 0;
UINT uiNum_of_LED_Type = 0;
PUCHAR puCFGData = NULL;
UCHAR bData = 0;
-#endif
memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
if(!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams))
@@ -524,10 +484,6 @@ static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread
*bEnableThread = FALSE;
return Status;
}
-#ifdef BCM_SHM_INTERFACE
- *bEnableThread = FALSE;
- return Status ;
-#else
/*
* CONFIG file read successfully. Deallocate the memory of
* uiFileNameBufferSize
@@ -578,23 +534,7 @@ static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread
}
if(uiNum_of_LED_Type >= NUM_OF_LEDS)
*bEnableThread = FALSE;
-#endif
-#if 0
- for(uiIndex=0; uiIndex<NUM_OF_LEDS; uiIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_Type = %x\n", uiIndex,
- Adapter->LEDInfo.LEDState[uiIndex].LED_Type);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_On_State = %x\n", uiIndex,
- Adapter->LEDInfo.LEDState[uiIndex].LED_On_State);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_Blink_State = %x\n", uiIndex,
- Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].GPIO_Num = %x\n", uiIndex,
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Polarity = %d\n",
- Adapter->LEDInfo.BitPolarty);
-#endif
return Status;
}
//--------------------------------------------------------------------------
@@ -721,20 +661,6 @@ static VOID LEDControlThread(PMINI_ADAPTER Adapter)
TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
return ;//STATUS_FAILURE;
}
- #if 0
- if(Adapter->device_removed)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"Device removed hence exiting from Led Thread..");
- return ; //-ENODEV;
- }
- #endif
- #if 0
- if((GPIO_num != DISABLE_GPIO_NUM) &&
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != IDLEMODE_EXIT)))
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- #endif
if(GPIO_num != DISABLE_GPIO_NUM)
{
@@ -752,10 +678,6 @@ static VOID LEDControlThread(PMINI_ADAPTER Adapter)
case DRIVER_INIT:
{
currdriverstate = DRIVER_INIT;//Adapter->DriverState;
- #if 0
- LedGpioInit(Adapter);
- Adapter->LEDInfo.bLedInitDone = TRUE;
- #endif
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
if(GPIO_num != DISABLE_GPIO_NUM)
@@ -768,13 +690,6 @@ static VOID LEDControlThread(PMINI_ADAPTER Adapter)
{
//BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FW_DN_DONE called\n");
currdriverstate = FW_DOWNLOAD;
- #if 0
- if(Adapter->LEDInfo.bLedInitDone == FALSE)
- {
- LedGpioInit(Adapter);
- Adapter->LEDInfo.bLedInitDone = TRUE;
- }
- #endif
BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
if(GPIO_num != DISABLE_GPIO_NUM)
@@ -796,12 +711,6 @@ static VOID LEDControlThread(PMINI_ADAPTER Adapter)
break;
case SHUTDOWN_EXIT:
- #if 0
- if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN)
- {
- LedGpioInit(Adapter);
- }
- #endif
//no break, continue to NO_NETWORK_ENTRY state as well.
case NO_NETWORK_ENTRY:
@@ -875,34 +784,6 @@ static VOID LEDControlThread(PMINI_ADAPTER Adapter)
break;
case IDLEMODE_EXIT:
{
-#if 0
- UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
- UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
- UCHAR uiTxLedIndex = 0;
- UCHAR uiRxLedIndex = 0;
-
- currdriverstate = IDLEMODE_EXIT;
- if(DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN == Adapter->ulPowerSaveMode)
- {
- LedGpioInit(Adapter);
- }
- BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiTxLedIndex,&uiRxLedIndex,currdriverstate);
-
- Adapter->LEDInfo.bIdle_led_off = FALSE;
-
- if((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM))
- {
- GPIO_num = DISABLE_GPIO_NUM ;
- }
- else
- {
- timeout = 50;
- if(Adapter->LEDInfo.bIdleMode_tx_from_host)
- LED_Blink(Adapter, 1<<GPIO_num_tx, uiTxLedIndex, timeout, -1,currdriverstate);
- else
- LED_Blink(Adapter, 1<<GPIO_num_rx, uiRxLedIndex, timeout, -1,currdriverstate);
- }
-#endif
}
break;
case DRIVER_HALT:
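The LED code above now reads packet counts from the standard net_device statistics instead of driver-private atomics. A minimal sketch of the sampling it performs (the helper name and the fixed one-second window are illustrative; only the stats fields come from the patch):

static void example_sample_rates(PMINI_ADAPTER Adapter,
				 unsigned long *tx_rate, unsigned long *rx_rate)
{
	unsigned long tx0 = Adapter->dev->stats.tx_packets;
	unsigned long rx0 = Adapter->dev->stats.rx_packets;

	msleep(1000);						/* sample window; msleep() from <linux/delay.h> */

	*tx_rate = Adapter->dev->stats.tx_packets - tx0;	/* packets per second */
	*rx_rate = Adapter->dev->stats.rx_packets - rx0;
}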
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 41c9ab8a2385..c7292373a65f 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -1,6 +1,56 @@
#include "headers.h"
#define DWORD unsigned int
+
+static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset);
+static INT BcmGetActiveDSD(PMINI_ADAPTER Adapter);
+static INT BcmGetActiveISO(PMINI_ADAPTER Adapter);
+static UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
+static INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
+static UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize);
+
+static VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
+static INT BcmGetNvmSize(PMINI_ADAPTER Adapter);
+static UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
+static NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
+
+static INT BcmGetSectionValEndOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+
+static B_UINT8 IsOffsetWritable(PMINI_ADAPTER Adapter, UINT uiOffset);
+static INT IsSectionWritable(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL Section);
+static INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
+
+static INT ReadDSDPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
+static INT ReadDSDSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
+static INT ReadISOPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
+static INT ReadISOSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
+
+static INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+static INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+static INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiSectAlignAddr);
+static INT WriteToFlashWithoutSectorErase(PMINI_ADAPTER Adapter, PUINT pBuff,
+ FLASH2X_SECTION_VAL eFlash2xSectionVal,
+ UINT uiOffset, UINT uiNumBytes);
+static FLASH2X_SECTION_VAL getHighestPriDSD(PMINI_ADAPTER Adapter);
+static FLASH2X_SECTION_VAL getHighestPriISO(PMINI_ADAPTER Adapter);
+
+static INT BeceemFlashBulkRead(
+ PMINI_ADAPTER Adapter,
+ PUINT pBuffer,
+ UINT uiOffset,
+ UINT uiNumBytes);
+
+static INT BeceemFlashBulkWrite(
+ PMINI_ADAPTER Adapter,
+ PUINT pBuffer,
+ UINT uiOffset,
+ UINT uiNumBytes,
+ BOOLEAN bVerify);
+
+static INT GetFlashBaseAddr(PMINI_ADAPTER Adapter);
+
+static INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData, UINT dwNumData);
+
// Procedure: ReadEEPROMStatusRegister
//
// Description: Reads the standard EEPROM Status Register.
@@ -228,213 +278,27 @@ INT ReadBeceemEEPROM( PMINI_ADAPTER Adapter,
ReadBeceemEEPROMBulk(Adapter, uiTempOffset + MAX_RW_SIZE, (PUINT)&uiData[4], 4);
}
- OsalMemMove( (PUCHAR) pBuffer, ( ((PUCHAR)&uiData[0]) + uiByteOffset ), 4);
+ memcpy( (PUCHAR) pBuffer, ( ((PUCHAR)&uiData[0]) + uiByteOffset ), 4);
return STATUS_SUCCESS;
} /* ReadBeceemEEPROM() */
-#if 0
-//-----------------------------------------------------------------------------
-// Procedure: IsEEPROMWriteDone
-//
-// Description: Reads the SPI status to see the status of previous write.
-//
-// Arguments:
-// Adapter - ptr to Adapter object instance
-//
-// Returns:
-// BOOLEAN - TRUE - write went through
-// - FALSE - Write Failed.
-//-----------------------------------------------------------------------------
-
-BOOLEAN IsEEPROMWriteDone(PMINI_ADAPTER Adapter)
-{
- UINT uiRetries = 16;
- //UINT uiStatus = 0;
- UINT value;
-
- //sleep for 1.2ms ..worst case EEPROM write can take up to 1.2ms.
- mdelay(2);
-
- value = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
- while(((value >> 14) & 1) == 1)
- {
- // EEPROM_SPI_Q_STATUS1_REG will be cleared only if write back to that.
- value = (0x1 << 14);
- wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- udelay(1000);
- uiRetries--;
- if(uiRetries == 0)
- {
- return FALSE;
- }
- value = 0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
- }
- return TRUE;
-
-
-}
-
-
-//-----------------------------------------------------------------------------
-// Procedure: ReadBeceemEEPROMBulk
-//
-// Description: This routine reads 16Byte data from EEPROM
-//
-// Arguments:
-// Adapter - ptr to Adapter object instance
-// dwAddress - EEPROM Offset to read the data from.
-// pdwData - Pointer to double word where data needs to be stored in.
-//
-// Returns:
-// OSAL_STATUS_CODE:
-//-----------------------------------------------------------------------------
-
-INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,DWORD dwAddress, DWORD *pdwData)
-{
- DWORD dwRetries = 16;
- DWORD dwIndex = 0;
- UINT value, tmpVal;
-
-
- value = 0;
- rdmalt (Adapter, 0x0f003008, &value, sizeof(value));
-
- //read 0x0f003020 untill bit 1 of 0x0f003008 is set.
- while(((value >> 1) & 1) == 0)
- {
-
- rdmalt (Adapter, 0x0f003020, &tmpVal, sizeof(tmpVal));
- dwRetries--;
- if(dwRetries == 0)
- {
- return -1;
- }
- value = 0;
- rdmalt (Adapter, 0x0f003008, &value, sizeof(value));
- }
-
- value = dwAddress | 0xfb000000;
- wrmalt (Adapter, 0x0f003018, &value, sizeof(value));
-
- udelay(1000);
- value = 0;
- for(dwIndex = 0;dwIndex < 4 ; dwIndex++)
- {
- value = 0;
- rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
- pdwData[dwIndex] = value;
-
- value = 0;
- rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
- pdwData[dwIndex] |= (value << 8);
-
- value = 0;
- rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
- pdwData[dwIndex] |= (value << 16);
-
- value = 0;
- rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
- pdwData[dwIndex] |= (value << 24);
-
- }
- return 0;
-}
-
-//-----------------------------------------------------------------------------
-// Procedure: ReadBeceemEEPROM
-//
-// Description: This routine reads 4Byte data from EEPROM
-//
-// Arguments:
-// Adapter - ptr to Adapter object instance
-// dwAddress - EEPROM Offset to read the data from.
-// pdwData - Pointer to double word where data needs to be stored in.
-//
-// Returns:
-// OSAL_STATUS_CODE:
-//-----------------------------------------------------------------------------
-
-INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,DWORD dwAddress, DWORD *pdwData)
-{
-
- DWORD dwReadValue = 0;
- DWORD dwRetries = 16, dwCompleteWord = 0;
- UINT value, tmpVal;
-
- rdmalt(Adapter, 0x0f003008, &value, sizeof(value));
- while (((value >> 1) & 1) == 0) {
- rdmalt(Adapter, 0x0f003020, &tmpVal, sizeof(tmpVal));
-
- if (dwRetries == 0) {
- return -1;
- }
- rdmalt(Adapter, 0x0f003008, &value, sizeof(value));
- }
-
-
- //wrm (0x0f003018, 0xNbXXXXXX) // N is the number of bytes u want to read (0 means 1, f means 16, b is the opcode for page read)
- // Follow it up by N executions of rdm(0x0f003020) to read the rxed bytes from rx queue.
- dwAddress |= 0x3b000000;
- wrmalt(Adapter, 0x0f003018,&dwAddress,4);
- mdelay(10);
- rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
- dwCompleteWord=dwReadValue;
- rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
- dwCompleteWord|=(dwReadValue<<8);
- rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
- dwCompleteWord|=(dwReadValue<<16);
- rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
- dwCompleteWord|=(dwReadValue<<24);
-
- *pdwData = dwCompleteWord;
-
- return 0;
-}
-#endif
INT ReadMacAddressFromNVM(PMINI_ADAPTER Adapter)
{
- INT Status=0, i;
- unsigned char puMacAddr[6] = {0};
- INT AllZeroMac = 0;
- INT AllFFMac = 0;
+ INT Status;
+ unsigned char puMacAddr[6];
Status = BeceemNVMRead(Adapter,
(PUINT)&puMacAddr[0],
INIT_PARAMS_1_MACADDRESS_ADDRESS,
MAC_ADDRESS_SIZE);
- if(Status != STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Error in Reading the mac Addres with status :%d", Status);
- return Status;
- }
-
- memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Modem MAC Addr :");
- BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_PRINTK, 0, DBG_LVL_ALL,&Adapter->dev->dev_addr[0],MAC_ADDRESS_SIZE);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
- {
-
- if(Adapter->dev->dev_addr[i] == 0x00)
- AllZeroMac++;
- if(Adapter->dev->dev_addr[i] == 0xFF)
- AllFFMac++;
-
- }
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\n");
- if(AllZeroMac == MAC_ADDRESS_SIZE)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Warning :: MAC Address has all 00's");
- if(AllFFMac == MAC_ADDRESS_SIZE)
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Warning :: MAC Address has all FF's");
+ if(Status == STATUS_SUCCESS)
+ memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
return Status;
-
}
//-----------------------------------------------------------------------------
@@ -476,7 +340,7 @@ INT BeceemEEPROMBulkRead(
ReadBeceemEEPROMBulk(Adapter,uiTempOffset,(PUINT)&uiData[0],4);
if(uiBytesRemaining >= (MAX_RW_SIZE - uiExtraBytes))
{
- OsalMemMove(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),MAX_RW_SIZE - uiExtraBytes);
+ memcpy(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),MAX_RW_SIZE - uiExtraBytes);
uiBytesRemaining -= (MAX_RW_SIZE - uiExtraBytes);
uiIndex += (MAX_RW_SIZE - uiExtraBytes);
@@ -484,7 +348,7 @@ INT BeceemEEPROMBulkRead(
}
else
{
- OsalMemMove(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),uiBytesRemaining);
+ memcpy(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),uiBytesRemaining);
uiIndex += uiBytesRemaining;
uiOffset += uiBytesRemaining;
uiBytesRemaining = 0;
@@ -508,7 +372,7 @@ INT BeceemEEPROMBulkRead(
* We read 4 Dwords of data */
if(0 == ReadBeceemEEPROMBulk(Adapter,uiOffset,&uiData[0],4))
{
- OsalMemMove(pcBuff+uiIndex,&uiData[0],MAX_RW_SIZE);
+ memcpy(pcBuff+uiIndex,&uiData[0],MAX_RW_SIZE);
uiOffset += MAX_RW_SIZE;
uiBytesRemaining -= MAX_RW_SIZE;
uiIndex += MAX_RW_SIZE;
@@ -523,7 +387,7 @@ INT BeceemEEPROMBulkRead(
{
if(0 == ReadBeceemEEPROM(Adapter,uiOffset,&uiData[0]))
{
- OsalMemMove(pcBuff+uiIndex,&uiData[0],4);
+ memcpy(pcBuff+uiIndex,&uiData[0],4);
uiOffset += 4;
uiBytesRemaining -= 4;
uiIndex +=4;
@@ -540,7 +404,7 @@ INT BeceemEEPROMBulkRead(
pCharBuff += uiIndex;
if(0 == ReadBeceemEEPROM(Adapter,uiOffset,&uiData[0]))
{
- OsalMemMove(pCharBuff,&uiData[0],uiBytesRemaining);//copy only bytes requested.
+ memcpy(pCharBuff,&uiData[0],uiBytesRemaining);//copy only bytes requested.
uiBytesRemaining = 0;
}
else
@@ -571,7 +435,7 @@ INT BeceemEEPROMBulkRead(
// <FAILURE> - if failed.
//-----------------------------------------------------------------------------
-INT BeceemFlashBulkRead(
+static INT BeceemFlashBulkRead(
PMINI_ADAPTER Adapter,
PUINT pBuffer,
UINT uiOffset,
@@ -653,16 +517,8 @@ INT BeceemFlashBulkRead(
//
//-----------------------------------------------------------------------------
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter)
+static UINT BcmGetFlashSize(PMINI_ADAPTER Adapter)
{
-#if 0
- if(Adapter->bDDRInitDone)
- {
- return rdm(Adapter,FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT|FLASH_SIZE_ADDR);
- }
-
- return rdm(Adapter,FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT|FLASH_SIZE_ADDR);
-#endif
if(IsFlash2x(Adapter))
return (Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER));
else
@@ -684,7 +540,7 @@ UINT BcmGetFlashSize(PMINI_ADAPTER Adapter)
//
//-----------------------------------------------------------------------------
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter)
+static UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter)
{
UINT uiData = 0;
UINT uiIndex = 0;
@@ -733,60 +589,6 @@ UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter)
return 0;
}
-#if 0
-/***********************************************************************************/
-//
-// WriteBeceemEEPROM: Writes 4 byte data to EEPROM offset.
-//
-// uiEEPROMOffset - Offset to be written to.
-// uiData - Data to be written.
-//
-/***********************************************************************************/
-
-INT WriteBeceemEEPROM(PMINI_ADAPTER Adapter,UINT uiEEPROMOffset, UINT uiData)
-{
- INT Status = 0;
- ULONG ulRdBk = 0;
- ULONG ulRetryCount = 3;
- UINT value;
-
- if(uiEEPROMOffset > EEPROM_END)
- {
-
- return -1;
- }
-
- uiData = htonl(uiData);
- while(ulRetryCount--)
- {
- value = 0x06000000;
- wrmalt(Adapter, 0x0F003018,&value, sizeof(value));//flush the EEPROM FIFO.
- wrmalt(Adapter, 0x0F00301C,&uiData, sizeof(uiData));
- value = 0x3A000000 | uiEEPROMOffset;
- wrmalt(Adapter, 0x0F003018,&value, sizeof(value));
- __udelay(100000);
- //read back and verify.
- Status = ReadBeceemEEPROM(Adapter,uiEEPROMOffset,(UINT *)&ulRdBk);
- if(Status == 0)
- {
- if(ulRdBk == uiData)
- {
- return Status;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WriteBeceemEEPROM: Readback does not match\n");
- }
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WriteBeceemEEPROM: Readback failed\n");
- }
- }
-
- return 0;
-}
-#endif
//-----------------------------------------------------------------------------
// Procedure: FlashSectorErase
@@ -973,7 +775,7 @@ static INT flashWrite(
// need not write 0xFFFFFFFF because write requires an erase and erase will
// make whole sector 0xFFFFFFFF.
//
- if (!OsalMemCompare(pData, uiErasePattern, MAX_RW_SIZE))
+ if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
{
return 0;
}
@@ -1138,7 +940,7 @@ static INT flashWriteStatus(
// need not write 0xFFFFFFFF because write requires an erase and erase will
// make whole sector 0xFFFFFFFF.
//
- if (!OsalMemCompare(pData,uiErasePattern,MAX_RW_SIZE))
+ if (!memcmp(pData,uiErasePattern,MAX_RW_SIZE))
{
return 0;
}
@@ -1332,7 +1134,7 @@ static ULONG BcmFlashUnProtectBlock(PMINI_ADAPTER Adapter,UINT uiOffset, UINT ui
//
//-----------------------------------------------------------------------------
-INT BeceemFlashBulkWrite(
+static INT BeceemFlashBulkWrite(
PMINI_ADAPTER Adapter,
PUINT pBuffer,
UINT uiOffset,
@@ -1353,15 +1155,6 @@ INT BeceemFlashBulkWrite(
UINT uiTemp = 0;
UINT index = 0;
UINT uiPartOffset = 0;
- #if 0
- struct timeval tv1 = {0};
- struct timeval tv2 = {0};
-
- struct timeval tr = {0};
- struct timeval te = {0};
- struct timeval tw = {0};
- struct timeval twv = {0};
- #endif
#if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
Status = bcmflash_raw_write((uiOffset/FLASH_PART_SIZE),(uiOffset % FLASH_PART_SIZE),( unsigned char *)pBuffer,uiNumBytes);
@@ -1377,12 +1170,9 @@ INT BeceemFlashBulkWrite(
uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
- //pTempBuff = OsalMemAlloc(MAX_SECTOR_SIZE,'!MVN');
- pTempBuff = OsalMemAlloc(Adapter->uiSectorSize ,"!MVN");
+ pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
if(NULL == pTempBuff)
- {
goto BeceemFlashBulkWrite_EXIT;
- }
//
// check if the data to be written is overlapped accross sectors
//
@@ -1399,7 +1189,6 @@ INT BeceemFlashBulkWrite(
uiNumSectTobeRead++;
}
}
- #if 1
//Check whether Requested sector is writable or not in case of flash2x write. But if write call is
// for DSD calibration, allow it without checking of sector permission
@@ -1420,7 +1209,6 @@ INT BeceemFlashBulkWrite(
index = index + 1 ;
}
}
- #endif
Adapter->SelectedChip = RESET_CHIP_SELECT;
while(uiNumSectTobeRead)
{
@@ -1448,13 +1236,13 @@ INT BeceemFlashBulkWrite(
if(uiNumSectTobeRead > 1)
{
- OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
+ memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
pcBuffer += ((uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr)));
uiNumBytes -= (uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
}
else
{
- OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
+ memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
}
if(IsFlash2x(Adapter))
@@ -1503,7 +1291,7 @@ INT BeceemFlashBulkWrite(
}
else
{
- if(OsalMemCompare(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
+ if(memcmp(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
{
if(STATUS_SUCCESS != (*Adapter->fpFlashWriteWithStatusCheck)(Adapter,uiPartOffset+uiIndex,&pTempBuff[uiIndex]))
{
@@ -1541,10 +1329,8 @@ BeceemFlashBulkWrite_EXIT:
{
BcmRestoreBlockProtectStatus(Adapter,ulStatus);
}
- if(pTempBuff)
- {
- OsalMemFree(pTempBuff,Adapter->uiSectorSize);
- }
+
+ kfree(pTempBuff);
Adapter->SelectedChip = RESET_CHIP_SELECT;
return Status;
@@ -1599,14 +1385,10 @@ static INT BeceemFlashBulkWriteStatus(
uiCurrSectOffsetAddr = uiOffset & (Adapter->uiSectorSize - 1);
uiSectBoundary = uiSectAlignAddr + Adapter->uiSectorSize;
-
-
-// pTempBuff = OsalMemAlloc(MAX_SECTOR_SIZE,'!MVN');
- pTempBuff = OsalMemAlloc(Adapter->uiSectorSize,"!MVN");
+ pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
if(NULL == pTempBuff)
- {
goto BeceemFlashBulkWriteStatus_EXIT;
- }
+
//
// check if the data to be written is overlapped accross sectors
//
@@ -1662,13 +1444,13 @@ static INT BeceemFlashBulkWriteStatus(
if(uiNumSectTobeRead > 1)
{
- OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
+ memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
pcBuffer += ((uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr)));
uiNumBytes -= (uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
}
else
{
- OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
+ memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
}
if(IsFlash2x(Adapter))
@@ -1698,25 +1480,10 @@ static INT BeceemFlashBulkWriteStatus(
{
for(uiIndex = 0;uiIndex < Adapter->uiSectorSize;uiIndex += MAX_RW_SIZE)
{
-#if 0
- if(0 == BeceemFlashBulkRead(Adapter,uiReadBk,uiOffsetFromSectStart+uiIndex + Adapter->ulFlashCalStart ,MAX_RW_SIZE))
- {
- for(uiReadIndex = 0;uiReadIndex < 4; uiReadIndex++)
- {
- if(*((PUINT)&pTempBuff[uiIndex+uiReadIndex*4]) != uiReadBk[uiReadIndex])
- {
- Status = -1;
- goto BeceemFlashBulkWriteStatus_EXIT;
-
- }
- }
-
- }
-#endif
if(STATUS_SUCCESS == BeceemFlashBulkRead(Adapter,(PUINT)ucReadBk,uiOffsetFromSectStart+uiIndex,MAX_RW_SIZE))
{
- if(OsalMemCompare(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
+ if(memcmp(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
{
Status = STATUS_FAILURE;
goto BeceemFlashBulkWriteStatus_EXIT;
@@ -1747,10 +1514,8 @@ BeceemFlashBulkWriteStatus_EXIT:
{
BcmRestoreBlockProtectStatus(Adapter,ulStatus);
}
- if(pTempBuff)
- {
- OsalMemFree(pTempBuff,Adapter->uiSectorSize);
- }
+
+ kfree(pTempBuff);
Adapter->SelectedChip = RESET_CHIP_SELECT;
return Status;
@@ -1771,7 +1536,7 @@ BeceemFlashBulkWriteStatus_EXIT:
INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter)
{
- PCHAR pBuff = OsalMemAlloc(BUFFER_4K,"3MVN");
+ PCHAR pBuff = kmalloc(BUFFER_4K, GFP_KERNEL);
UINT uiEepromSize = 0;
UINT uiIndex = 0;
UINT uiBytesToCopy = 0;
@@ -1787,14 +1552,14 @@ INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter)
if(0 != BeceemEEPROMBulkRead(Adapter,&uiEepromSize,EEPROM_SIZE_OFFSET,4))
{
- OsalMemFree(pBuff,BUFFER_4K);
+ kfree(pBuff);
return -1;
}
uiEepromSize >>= 16;
if(uiEepromSize > 1024*1024)
{
- OsalMemFree(pBuff,BUFFER_4K);
+ kfree(pBuff);
return -1;
}
@@ -1820,7 +1585,7 @@ INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter)
wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC-4,&value, sizeof(value));
value = 0xbeadbead;
wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC-8,&value, sizeof(value));
- OsalMemFree(pBuff,MAX_RW_SIZE);
+ kfree(pBuff);
return Status;
@@ -1873,16 +1638,13 @@ INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter)
return -1;
}
- pBuff = OsalMemAlloc(uiEepromSize, 0);
-
+ pBuff = kmalloc(uiEepromSize, GFP_KERNEL);
if ( pBuff == NULL )
- {
return -1;
- }
if(0 != BeceemNVMRead(Adapter,(PUINT)pBuff,uiCalStartAddr, uiEepromSize))
{
- OsalMemFree(pBuff, 0);
+ kfree(pBuff);
return -1;
}
@@ -1905,7 +1667,7 @@ INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter)
uiBytesToCopy = MIN(BUFFER_4K,uiEepromSize);
}
- OsalMemFree(pBuff, 0);
+ kfree(pBuff);
return Status;
}
@@ -1947,14 +1709,14 @@ static INT BeceemEEPROMReadBackandVerify(
{// for the requests more than or equal to MAX_RW_SIZE bytes, use bulk read function to make the access faster.
BeceemEEPROMBulkRead(Adapter,&auiData[0],uiOffset,MAX_RW_SIZE);
- if(OsalMemCompare(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
+ if(memcmp(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
{
// re-write
BeceemEEPROMBulkWrite(Adapter,(PUCHAR)(pBuffer+uiIndex),uiOffset,MAX_RW_SIZE,FALSE);
mdelay(3);
BeceemEEPROMBulkRead(Adapter,&auiData[0],uiOffset,MAX_RW_SIZE);
- if(OsalMemCompare(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
+ if(memcmp(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
{
return -1;
}
@@ -1986,7 +1748,7 @@ static INT BeceemEEPROMReadBackandVerify(
else
{ // Handle the reads less than 4 bytes...
uiData = 0;
- OsalMemMove(&uiData,((PUCHAR)pBuffer)+(uiIndex*sizeof(UINT)),uiNumBytes);
+ memcpy(&uiData,((PUCHAR)pBuffer)+(uiIndex*sizeof(UINT)),uiNumBytes);
BeceemEEPROMBulkRead(Adapter,&uiRdbk,uiOffset,4);
if(memcmp(&uiData, &uiRdbk, uiNumBytes))
@@ -2186,7 +1948,7 @@ INT BeceemEEPROMBulkWrite(
if(uiBytesToCopy >= (16 -uiExtraBytes))
{
- OsalMemMove((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,MAX_RW_SIZE- uiExtraBytes);
+ memcpy((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,MAX_RW_SIZE- uiExtraBytes);
if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiTempOffset ) )
return STATUS_FAILURE;
@@ -2197,7 +1959,7 @@ INT BeceemEEPROMBulkWrite(
}
else
{
- OsalMemMove((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,uiBytesToCopy);
+ memcpy((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,uiBytesToCopy);
if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiTempOffset ) )
return STATUS_FAILURE;
@@ -2233,7 +1995,7 @@ INT BeceemEEPROMBulkWrite(
// To program non 16byte aligned data, read 16byte and then update.
//
BeceemEEPROMBulkRead(Adapter,&uiData[0],uiOffset,16);
- OsalMemMove(&uiData[0],pBuffer+uiIndex,uiBytesToCopy);
+ memcpy(&uiData[0],pBuffer+uiIndex,uiBytesToCopy);
if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiOffset ) )
@@ -2535,7 +2297,7 @@ INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize)
//
//-----------------------------------------------------------------------------
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize)
+static UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize)
{
UINT uiSectorSize = 0;
UINT uiSectorSig = 0;
@@ -2642,20 +2404,8 @@ static INT BcmInitEEPROMQueues(PMINI_ADAPTER Adapter)
INT BcmInitNVM(PMINI_ADAPTER ps_adapter)
{
-#ifdef BCM_SHM_INTERFACE
-#ifdef FLASH_DIRECT_ACCESS
- unsigned int data,data1,data2 = 1;
- wrm(ps_adapter, PAD_SELECT_REGISTER, &data2, 4);
- data1 = rdm(ps_adapter,SYS_CFG,&data,4);
- data1 = rdm(ps_adapter,SYS_CFG,&data,4);
- data2 = (data | 0x80 | 0x8000);
- wrm(ps_adapter,SYS_CFG, &data2,4); // over-write as Flash boot mode
-#endif
- ps_adapter->eNVMType = NVM_FLASH;
-#else
BcmValidateNvmType(ps_adapter);
BcmInitEEPROMQueues(ps_adapter);
-#endif
if(ps_adapter->eNVMType == NVM_AUTODETECT)
{
@@ -2684,7 +2434,7 @@ INT BcmInitNVM(PMINI_ADAPTER ps_adapter)
*/
/***************************************************************************/
-INT BcmGetNvmSize(PMINI_ADAPTER Adapter)
+static INT BcmGetNvmSize(PMINI_ADAPTER Adapter)
{
if(Adapter->eNVMType == NVM_EEPROM)
{
@@ -2708,7 +2458,7 @@ INT BcmGetNvmSize(PMINI_ADAPTER Adapter)
// Returns:
// <VOID>
//-----------------------------------------------------------------------------
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter)
+static VOID BcmValidateNvmType(PMINI_ADAPTER Adapter)
{
//
@@ -2775,7 +2525,7 @@ INT BcmAllocFlashCSStructure(PMINI_ADAPTER psAdapter)
if(psAdapter->psFlash2xCSInfo == NULL)
{
BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0,"Can't Allocate memory for Flash 2.x");
- bcm_kfree(psAdapter->psFlashCSInfo);
+ kfree(psAdapter->psFlashCSInfo);
return -ENOMEM;
}
@@ -2783,8 +2533,8 @@ INT BcmAllocFlashCSStructure(PMINI_ADAPTER psAdapter)
if(psAdapter->psFlash2xVendorInfo == NULL)
{
BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0,"Can't Allocate Vendor Info Memory for Flash 2.x");
- bcm_kfree(psAdapter->psFlashCSInfo);
- bcm_kfree(psAdapter->psFlash2xCSInfo);
+ kfree(psAdapter->psFlashCSInfo);
+ kfree(psAdapter->psFlash2xCSInfo);
return -ENOMEM;
}
@@ -2798,9 +2548,9 @@ INT BcmDeAllocFlashCSStructure(PMINI_ADAPTER psAdapter)
BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0," Adapter structure point is NULL");
return -EINVAL;
}
- bcm_kfree(psAdapter->psFlashCSInfo);
- bcm_kfree(psAdapter->psFlash2xCSInfo);
- bcm_kfree(psAdapter->psFlash2xVendorInfo);
+ kfree(psAdapter->psFlashCSInfo);
+ kfree(psAdapter->psFlash2xCSInfo);
+ kfree(psAdapter->psFlash2xVendorInfo);
return STATUS_SUCCESS ;
}
@@ -2954,7 +2704,7 @@ static INT ConvertEndianOfCSStructure(PFLASH_CS_INFO psFlashCSInfo)
return STATUS_SUCCESS;
}
-INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
+static INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
{
return ( Adapter->uiVendorExtnFlag &&
(Adapter->psFlash2xVendorInfo->VendorSection[section].AccessFlags & FLASH2X_SECTION_PRESENT) &&
@@ -3052,7 +2802,7 @@ static VOID UpdateVendorInfo(PMINI_ADAPTER Adapter)
// <VOID>
//-----------------------------------------------------------------------------
-INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
+static INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
{
//FLASH_CS_INFO sFlashCsInfo = {0};
@@ -3070,7 +2820,6 @@ INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
memset(Adapter->psFlashCSInfo, 0 ,sizeof(FLASH_CS_INFO));
memset(Adapter->psFlash2xCSInfo, 0 ,sizeof(FLASH2X_CS_INFO));
-#ifndef BCM_SHM_INTERFACE
if(!Adapter->bDDRInitDone)
{
{
@@ -3079,7 +2828,6 @@ INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
}
}
-#endif
// Reading first 8 Bytes to get the Flash Layout
// MagicNumber(4 bytes) +FlashLayoutMinorVersion(2 Bytes) +FlashLayoutMajorVersion(2 Bytes)
@@ -3147,9 +2895,7 @@ INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
return STATUS_FAILURE;
}
ConvertEndianOf2XCSStructure(Adapter->psFlash2xCSInfo);
-#ifndef BCM_SHM_INTERFACE
BcmDumpFlash2XCSStructure(Adapter->psFlash2xCSInfo,Adapter);
-#endif
if((FLASH_CONTROL_STRUCT_SIGNATURE == Adapter->psFlash2xCSInfo->MagicNumber) &&
(SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlash2xCSInfo->SCSIFirmwareVersion)) &&
(FLASH_SECTOR_SIZE_SIG == Adapter->psFlash2xCSInfo->FlashSectorSizeSig) &&
@@ -3181,21 +2927,10 @@ INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
Concerns: what if CS sector size does not match with this sector size ???
what is the indication of AccessBitMap in CS in flash 2.x ????
*/
-#ifndef BCM_SHM_INTERFACE
Adapter->ulFlashID = BcmReadFlashRDID(Adapter);
-#endif
Adapter->uiFlashLayoutMajorVersion = uiFlashLayoutMajorVersion;
- #if 0
- if(FLASH_PART_SST25VF080B == Adapter->ulFlashID)
- {
- //
- // 1MB flash has been selected. we have to use 64K as sector size no matter what is kept in FLASH_CS.
- //
- Adapter->uiSectorSize = 0x10000;
- }
- #endif
return STATUS_SUCCESS ;
}
@@ -3214,7 +2949,7 @@ INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
//
//-----------------------------------------------------------------------------
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter)
+static NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter)
{
UINT uiData = 0;
@@ -3569,39 +3304,6 @@ INT BcmFlash2xBulkWrite(
}
/**
-* ReadDSDHeader : Read the DSD map for the DSD Section val provided in Argument.
-* @Adapter : Beceem Private Data Structure
-* @psDSDHeader :Pointer of the buffer where header has to be read
-* @dsd :value of the Dyanmic DSD like DSD0 of DSD1 or DSD2
-*
-* Return Value:-
-* if suceeds return STATUS_SUCCESS or negative error code.
-**/
-INT ReadDSDHeader(PMINI_ADAPTER Adapter, PDSD_HEADER psDSDHeader, FLASH2X_SECTION_VAL dsd)
-{
- INT Status = STATUS_SUCCESS;
-
- Status =BcmFlash2xBulkRead(Adapter,
- (PUINT)psDSDHeader,
- dsd,
- Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader,
- sizeof(DSD_HEADER));
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageMagicNumber :0X%x", ntohl(psDSDHeader->DSDImageMagicNumber));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageSize :0X%x ",ntohl(psDSDHeader->DSDImageSize));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageCRC :0X%x",ntohl(psDSDHeader->DSDImageCRC));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImagePriority :0X%x",ntohl(psDSDHeader->DSDImagePriority));
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"DSD Header read is failed with status :%d", Status);
- }
-
- return Status;
-}
-
-/**
* BcmGetActiveDSD : Set the Active DSD in Adapter Structure which has to be dumped in DDR
* @Adapter :-Drivers private Data Structure
*
@@ -3609,7 +3311,7 @@ INT ReadDSDHeader(PMINI_ADAPTER Adapter, PDSD_HEADER psDSDHeader, FLASH2X_SECTIO
* Return STATUS_SUCESS if get sucess in setting the right DSD else negaive error code
*
**/
-INT BcmGetActiveDSD(PMINI_ADAPTER Adapter)
+static INT BcmGetActiveDSD(PMINI_ADAPTER Adapter)
{
FLASH2X_SECTION_VAL uiHighestPriDSD = 0 ;
@@ -3647,39 +3349,6 @@ INT BcmGetActiveDSD(PMINI_ADAPTER Adapter)
return STATUS_SUCCESS;
}
-/**
-* ReadISOUnReservedBytes : Read the ISO map for the ISO Section val provided in Argument.
-* @Adapter : Driver Private Data Structure
-* @psISOHeader :Pointer of the location where header has to be read
-* @IsoImage :value of the Dyanmic ISO like ISO_IMAGE1 of ISO_IMAGE2
-*
-* Return Value:-
-* if suceeds return STATUS_SUCCESS or negative error code.
-**/
-
-INT ReadISOHeader(PMINI_ADAPTER Adapter, PISO_HEADER psISOHeader, FLASH2X_SECTION_VAL IsoImage)
-{
- INT Status = STATUS_SUCCESS;
-
- Status = BcmFlash2xBulkRead(Adapter,
- (PUINT)psISOHeader,
- IsoImage,
- 0,
- sizeof(ISO_HEADER));
-
- if(Status == STATUS_SUCCESS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageMagicNumber :0X%x", ntohl(psISOHeader->ISOImageMagicNumber));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageSize :0X%x ",ntohl(psISOHeader->ISOImageSize));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageCRC :0X%x",ntohl(psISOHeader->ISOImageCRC));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImagePriority :0X%x",ntohl(psISOHeader->ISOImagePriority));
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ISO Header Read failed");
- }
- return Status;
-}
/**
* BcmGetActiveISO :- Set the Active ISO in Adapter Data Structue
@@ -3691,7 +3360,7 @@ INT ReadISOHeader(PMINI_ADAPTER Adapter, PISO_HEADER psISOHeader, FLASH2X_SECTIO
*
**/
-INT BcmGetActiveISO(PMINI_ADAPTER Adapter)
+static INT BcmGetActiveISO(PMINI_ADAPTER Adapter)
{
INT HighestPriISO = 0 ;
@@ -4588,7 +4257,7 @@ INT BcmCopyISO(PMINI_ADAPTER Adapter, FLASH2X_COPY_SECTION sCopySectStrut)
}
- bcm_kfree(Buff);
+ kfree(Buff);
return Status;
}
@@ -4789,7 +4458,7 @@ Return Value:-
Success :- Base Address of the Flash
**/
-INT GetFlashBaseAddr(PMINI_ADAPTER Adapter)
+static INT GetFlashBaseAddr(PMINI_ADAPTER Adapter)
{
UINT uiBaseAddr = 0;
@@ -4866,20 +4535,6 @@ INT BcmCopySection(PMINI_ADAPTER Adapter,
return -EINVAL;
}
- #if 0
- else
- {
- if((SrcSection == VSA0) || (SrcSection == VSA1) || (SrcSection == VSA2))
- {
- if((DstSection != VSA0) && (DstSection != VSA1) && (DstSection != VSA2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Source and Destion secton is not of same type");
- return -EINVAL;
- }
- }
-
- }
- #endif
//if offset zero means have to copy complete secton
if(numOfBytes == 0)
@@ -4954,7 +4609,7 @@ INT BcmCopySection(PMINI_ADAPTER Adapter,
BytesToBeCopied = numOfBytes;
}
}while(numOfBytes > 0) ;
- bcm_kfree(pBuff);
+ kfree(pBuff);
Adapter->bHeaderChangeAllowed = FALSE ;
return Status;
}
@@ -4979,14 +4634,6 @@ INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiOffset)
UINT uiSectAlignAddr = 0;
UINT sig = 0;
- #if 0
- //if Chenges in Header is allowed, Return back
- if(Adapter->bHeaderChangeAllowed == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Header Change is allowed");
- return STATUS_SUCCESS ;
- }
- #endif
//making the offset sector alligned
uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
@@ -5024,7 +4671,7 @@ INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiOffset)
//Replace Buffer content with Header
memcpy(pBuff +offsetToProtect,pTempBuff,HeaderSizeToProtect);
- bcm_kfree(pTempBuff);
+ kfree(pTempBuff);
}
if(bHasHeader && Adapter->bSigCorrupted)
{
@@ -5044,29 +4691,7 @@ INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiOffset)
return STATUS_SUCCESS ;
}
-INT BcmMakeFlashCSActive(PMINI_ADAPTER Adapter, UINT offset)
-{
- UINT GPIOConfig = 0 ;
-
-
- if(Adapter->bFlashRawRead == FALSE)
- {
- //Applicable for Flash2.x
- if(IsFlash2x(Adapter) == FALSE)
- return STATUS_SUCCESS;
- }
- if(offset/FLASH_PART_SIZE)
- {
- //bit[14..12] -> will select make Active CS1, CS2 or CS3
- // Select CS1, CS2 and CS3 (CS0 is dedicated pin)
- rdmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- GPIOConfig |= (7 << 12);
- wrmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
- }
-
- return STATUS_SUCCESS ;
-}
/**
BcmDoChipSelect : This will selcet the appropriate chip for writing.
@Adapater :- Bcm Driver Private Data Structure
@@ -5074,7 +4699,7 @@ BcmDoChipSelect : This will selcet the appropriate chip for writing.
OutPut:-
Select the Appropriate chip and retrn status Sucess
**/
-INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
+static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
{
UINT FlashConfig = 0;
INT ChipNum = 0;
@@ -5365,39 +4990,6 @@ INT WriteToFlashWithoutSectorErase(PMINI_ADAPTER Adapter,
return Status;
}
-#if 0
-UINT getNumOfSubSectionWithWRPermisson(PMINI_ADAPTER Adapter, SECTION_TYPE secType)
-{
-
- UINT numOfWRSubSec = 0;
- switch(secType)
- {
- case ISO :
- if(IsSectionWritable(Adapter,ISO_IMAGE1))
- numOfWRSubSec = numOfWRSubSec + 1;
- if(IsSectionWritable(Adapter,ISO_IMAGE2))
- numOfWRSubSec = numOfWRSubSec + 1;
- break;
-
- case DSD :
- if(IsSectionWritable(Adapter,DSD2))
- numOfWRSubSec = numOfWRSubSec + 1;
- if(IsSectionWritable(Adapter,DSD1))
- numOfWRSubSec = numOfWRSubSec + 1;
- if(IsSectionWritable(Adapter,DSD0))
- numOfWRSubSec = numOfWRSubSec + 1;
- break ;
-
- case VSA :
- //for VSA Add code Here
- default :
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Invalid secton<%d> is passed", secType);\
- numOfWRSubSec = 0;
-
- }
- return numOfWRSubSec;
-}
-#endif
BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
{
@@ -5479,7 +5071,7 @@ INT IsSectionWritable(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL Section)
return Status ;
}
-INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
+static INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
{
PUCHAR pBuff = NULL;
@@ -5543,16 +5135,16 @@ INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
else
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BCM Signature is not present in header");
- bcm_kfree(pBuff);
+ kfree(pBuff);
return STATUS_FAILURE;
}
- bcm_kfree(pBuff);
+ kfree(pBuff);
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Corrupted the signature");
return STATUS_SUCCESS ;
}
-INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
+static INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
{
PUCHAR pBuff = NULL;
@@ -5593,14 +5185,14 @@ INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
else
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BCM Signature is not present in header");
- bcm_kfree(pBuff);
+ kfree(pBuff);
return STATUS_FAILURE;
}
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Corrupted the signature");
BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,pBuff,MAX_RW_SIZE);
- bcm_kfree(pBuff);
+ kfree(pBuff);
return STATUS_SUCCESS ;
}
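The declarations added at the top of nvm.c let the file-local helpers become static without reordering their definitions. A minimal sketch of the pattern, using hypothetical helper names and nothing beyond standard C:

static int helper_b(int x);		/* forward declaration */

static int helper_a(int x)
{
	return helper_b(x) + 1;		/* may call helper_b before its definition */
}

static int helper_b(int x)
{
	return x * 2;
}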
diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h
index 6ec6ca85b501..651b5a455b32 100644
--- a/drivers/staging/bcm/nvm.h
+++ b/drivers/staging/bcm/nvm.h
@@ -323,15 +323,6 @@ typedef struct _ISO_HEADER
-#ifdef BCM_SHM_INTERFACE
-
-#define FLASH_ADDR_MASK 0x1F000000
-extern int bcmflash_raw_read(unsigned int flash_id, unsigned int offset, unsigned char *inbuf, unsigned int len);
-extern int bcmflash_raw_write(unsigned int flash_id, unsigned int offset, unsigned char *outbuf, unsigned int len);
-extern int bcmflash_raw_writenoerase(unsigned int flash_id, unsigned int offset, unsigned char *outbuf, unsigned int len);
-
-
-#endif
#define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT 0x1C000000
#define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT 0x1F000000
@@ -414,76 +405,5 @@ extern int bcmflash_raw_writenoerase(unsigned int flash_id, unsigned int offset,
#define FIELD_OFFSET_IN_HEADER(HeaderPointer,Field) ((PUCHAR)&((HeaderPointer)(NULL))->Field - (PUCHAR)(NULL))
-#if 0
-INT BeceemEEPROMBulkRead(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-
-INT BeceemFlashBulkRead(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter);
-
-
-
-INT BeceemFlashBulkWrite(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- BOOLEAN bVerify);
-
-INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter);
-
-INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter);
-
-
-INT BeceemEEPROMBulkWrite(
- PMINI_ADAPTER Adapter,
- PUCHAR pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- BOOLEAN bVerify);
-
-
-INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData);
-
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
-
-INT BeceemNVMRead(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes);
-
-INT BeceemNVMWrite(
- PMINI_ADAPTER Adapter,
- PUINT pBuffer,
- UINT uiOffset,
- UINT uiNumBytes,
- BOOLEAN bVerify);
-
-INT ReadMacAddressFromEEPROM(PMINI_ADAPTER Adapter);
-
-INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
-
-INT BcmInitNVM(PMINI_ADAPTER Adapter);
-
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
-
-VOID BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
-
-#endif
-
#endif
diff --git a/drivers/staging/bcm/osal_misc.h b/drivers/staging/bcm/osal_misc.h
deleted file mode 100644
index ff4adde17cd8..000000000000
--- a/drivers/staging/bcm/osal_misc.h
+++ /dev/null
@@ -1,49 +0,0 @@
- /*++
-
- Copyright (c) Beceem Communications Inc.
-
- Module Name:
- OSAL_Misc.h
-
- Abstract:
- Provides the OS Abstracted macros to access:
- Linked Lists
- Dispatcher Objects(Events,Semaphores,Spin Locks and the like)
- Files
-
-
- Revision History:
- Who When What
- -------- -------- ----------------------------------------------
- Name Date Created/reviewed/modified
- Rajeev 24/1/08 Created
- Notes:
-
- --*/
-#ifndef _OSAL_MISC_H_
-#define _OSAL_MISC_H_
-//OSAL Macros
-//OSAL Primitives
-typedef PUCHAR POSAL_NW_PACKET ; //Nw packets
-
-
-#define OsalMemAlloc(n,t) kmalloc(n,GFP_KERNEL)
-
-#define OsalMemFree(x,n) bcm_kfree(x)
-
-#define OsalMemMove(dest, src, len) \
-{ \
- memcpy(dest,src, len); \
-}
-
-#define OsalZeroMemory(pDest, Len) \
-{ \
- memset(pDest,0,Len); \
-}
-
-//#define OsalMemSet(pSrc,Char,Len) memset(pSrc,Char,Len)
-
-bool OsalMemCompare(void *dest, void *src, UINT len);
-
-#endif
-
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index a27bb0b4f581..99e67669f26b 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -43,14 +43,8 @@ Firmware is available from the Linux firmware repository at:
http://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
https://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
-For all chips, copy brcm/bcm43xx-0-610-809-0.fw and
-brcm/bcm43xx_hdr-0-610-809-0.fw to /lib/firmware/brcm (or wherever firmware is
-normally installed on the system). In the /lib/firmware/brcm directory, then
-create the following symlinks:
-
- ln -s bcm43xx-0-610-809-0.fw bcm43xx-0.fw
- ln -s bcm43xx_hdr-0-610-809-0.fw bcm43xx_hdr-0.fw
-
+For all chips, copy brcm/bcm43xx-0.fw and brcm/bcm43xx_hdr-0.fw to
+/lib/firmware/brcm (or wherever firmware is normally installed on your system).
Currently supported chips
==============
diff --git a/drivers/staging/brcm80211/brcmfmac/README b/drivers/staging/brcm80211/brcmfmac/README
index 43601fa8b17e..be29e4236920 100644
--- a/drivers/staging/brcm80211/brcmfmac/README
+++ b/drivers/staging/brcm80211/brcmfmac/README
@@ -25,8 +25,9 @@ Firmware is available from the Linux firmware repository at:
http://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
https://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
-For 4329 chip, copy brcm/bcm4329-fullmac-4-218-248-5.bin and
-bcm4329-fullmac-4-218-248-5.txt to /lib/firmware/brcm
+For 4329 chip, copy brcm/bcm4329-fullmac-4.bin and brcm/bcm4329-fullmac-4.txt
+to /lib/firmware/brcm (or wherever firmware is normally installed on your
+system).
Contact Info:
=============
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
index 4c613da3553a..acf43a365081 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
@@ -16,13 +16,14 @@
/* ****************** BCMSDH Interface Functions *************************** */
#include <linux/types.h>
+#include <linux/netdevice.h>
#include <bcmdefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
+#include <osl.h>
#include <bcmutils.h>
#include <hndsoc.h>
#include <siutils.h>
-#include <osl.h>
#include <bcmsdh.h> /* BRCM API for SDIO
clients (such as wl, dhd) */
@@ -38,7 +39,7 @@ struct bcmsdh_info {
bool init_success; /* underlying driver successfully attached */
void *sdioh; /* handler for sdioh */
u32 vendevid; /* Target Vendor and Device ID on SD bus */
- osl_t *osh;
+ struct osl_info *osh;
bool regfail; /* Save status of last
reg_read/reg_write call */
u32 sbwad; /* Save backplane window address */
@@ -55,7 +56,8 @@ void bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
}
#endif
-bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+bcmsdh_info_t *bcmsdh_attach(struct osl_info *osh, void *cfghdl,
+ void **regsva, uint irq)
{
bcmsdh_info_t *bcmsdh;
@@ -84,7 +86,7 @@ bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
return bcmsdh;
}
-int bcmsdh_detach(osl_t *osh, void *sdh)
+int bcmsdh_detach(struct osl_info *osh, void *sdh)
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
@@ -451,7 +453,7 @@ bool bcmsdh_regfail(void *sdh)
int
bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
+ u8 *buf, uint nbytes, struct sk_buff *pkt,
bcmsdh_cmplt_fn_t complete, void *handle)
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
index 9028cd01d9d0..d24b5e7d753c 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
@@ -20,8 +20,7 @@
#define __UNDEF_NO_VERSION__
-#include <linuxver.h>
-
+#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/completion.h>
@@ -57,7 +56,7 @@ struct bcmsdh_hc {
#else
struct pci_dev *dev; /* pci device handle */
#endif /* BCMPLATFORM_BUS */
- osl_t *osh;
+ struct osl_info *osh;
void *regs; /* SDIO Host Controller address */
bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
void *ch;
@@ -139,22 +138,11 @@ static int __devexit bcmsdh_remove(struct device *dev);
#endif /* BCMLXSDMMC */
#ifndef BCMLXSDMMC
-static struct device_driver bcmsdh_driver = {
- .name = "pxa2xx-mci",
- .bus = &platform_bus_type,
- .probe = bcmsdh_probe,
- .remove = bcmsdh_remove,
- .suspend = NULL,
- .resume = NULL,
-};
-#endif /* BCMLXSDMMC */
-
-#ifndef BCMLXSDMMC
static
#endif /* BCMLXSDMMC */
int bcmsdh_probe(struct device *dev)
{
- osl_t *osh = NULL;
+ struct osl_info *osh = NULL;
bcmsdh_hc_t *sdhc = NULL;
unsigned long regs = 0;
bcmsdh_info_t *sdh = NULL;
@@ -189,7 +177,7 @@ int bcmsdh_probe(struct device *dev)
}
#endif /* defined(OOB_INTR_ONLY) */
/* allocate SDIO Host Controller state info */
- osh = osl_attach(dev, PCI_BUS, false);
+ osh = osl_attach(dev, PCI_BUS);
if (!osh) {
SDLX_MSG(("%s: osl_attach failed\n", __func__));
goto err;
@@ -258,7 +246,7 @@ static
int bcmsdh_remove(struct device *dev)
{
bcmsdh_hc_t *sdhc, *prev;
- osl_t *osh;
+ struct osl_info *osh;
sdhc = sdhcinfo;
drvinfo.detach(sdhc->ch);
@@ -291,269 +279,23 @@ int bcmsdh_remove(struct device *dev)
return 0;
}
-
-#else /* BCMPLATFORM_BUS */
-
-#if !defined(BCMLXSDMMC)
-/* forward declarations for PCI probe and remove functions. */
-static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
-
-/**
- * pci id table
- */
-static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
-{
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = 0,
-},
-{0,}
-};
-
-MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
-
-/**
- * SDIO Host Controller pci driver info
- */
-static struct pci_driver bcmsdh_pci_driver = {
- .node = {},
- .name = "bcmsdh",
- .id_table = bcmsdh_pci_devid,
- .probe = bcmsdh_pci_probe,
- .remove = bcmsdh_pci_remove,
- .suspend = NULL,
- .resume = NULL,
-};
-
-extern uint sd_pci_slot; /* Force detection to a particular PCI */
- /* slot only . Allows for having multiple */
- /* WL devices at once in a PC */
- /* Only one instance of dhd will be */
- /* usable at a time */
- /* Upper word is bus number, */
- /* lower word is slot number */
- /* Default value of 0xFFFFffff turns this */
- /* off */
-module_param(sd_pci_slot, uint, 0);
-
-/**
- * Detect supported SDIO Host Controller and attach if found.
- *
- * Determine if the device described by pdev is a supported SDIO Host
- * Controller. If so, attach to it and attach to the target device.
- */
-static int __devinit
-bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- osl_t *osh = NULL;
- bcmsdh_hc_t *sdhc = NULL;
- unsigned long regs;
- bcmsdh_info_t *sdh = NULL;
- int rc;
-
- if (sd_pci_slot != 0xFFFFffff) {
- if (pdev->bus->number != (sd_pci_slot >> 16) ||
- PCI_SLOT(pdev->devfn) != (sd_pci_slot & 0xffff)) {
- SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
- __func__,
- bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
- "Found compatible SDIOHC" :
- "Probing unknown device",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- pdev->vendor, pdev->device));
- return -ENODEV;
- }
- SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X "
- "(good PCI location)\n", __func__,
- bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
- "Using compatible SDIOHC" : "WARNING, forced use "
- "of unkown device",
- pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
- pdev->device));
- }
-
- if ((pdev->vendor == VENDOR_TI)
- && ((pdev->device == PCIXX21_FLASHMEDIA_ID)
- || (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
- u32 config_reg;
-
- SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n",
- __func__));
- osh = osl_attach(pdev, PCI_BUS, false);
- if (!osh) {
- SDLX_MSG(("%s: osl_attach failed\n", __func__));
- goto err;
- }
-
- config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
-
- /*
- * Set MMC_SD_DIS bit in FlashMedia Controller.
- * Disbling the SD/MMC Controller in the FlashMedia Controller
- * allows the Standard SD Host Controller to take over control
- * of the SD Slot.
- */
- config_reg |= 0x02;
- OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
- osl_detach(osh);
- }
- /* match this pci device with what we support */
- /* we can't solely rely on this to believe it is
- our SDIO Host Controller! */
- if (!bcmsdh_chipmatch(pdev->vendor, pdev->device))
- return -ENODEV;
-
- /* this is a pci device we might support */
- SDLX_MSG(("%s: Found possible SDIO Host Controller: "
- "bus %d slot %d func %d irq %d\n", __func__,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq));
-
- /* use bcmsdh_query_device() to get the vendor ID of the target device
- * so it will eventually appear in the Broadcom string on the console
- */
-
- /* allocate SDIO Host Controller state info */
- osh = osl_attach(pdev, PCI_BUS, false);
- if (!osh) {
- SDLX_MSG(("%s: osl_attach failed\n", __func__));
- goto err;
- }
- sdhc = kzalloc(sizeof(bcmsdh_hc_t), GFP_ATOMIC);
- if (!sdhc) {
- SDLX_MSG(("%s: out of memory\n", __func__));
- goto err;
- }
- sdhc->osh = osh;
-
- sdhc->dev = pdev;
-
- /* map to address where host can access */
- pci_set_master(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- SDLX_MSG(("%s: Cannot enable PCI device\n", __func__));
- goto err;
- }
- sdh = bcmsdh_attach(osh, (void *)(unsigned long)pci_resource_start(pdev, 0),
- (void **)&regs, pdev->irq);
- if (!sdh) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__));
- goto err;
- }
-
- sdhc->sdh = sdh;
-
- /* try to attach to the target device */
- sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
- bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
- (void *)regs, NULL, sdh);
- if (!sdhc->ch) {
- SDLX_MSG(("%s: device attach failed\n", __func__));
- goto err;
- }
-
- /* chain SDIO Host Controller info together */
- sdhc->next = sdhcinfo;
- sdhcinfo = sdhc;
-
- return 0;
-
- /* error handling */
-err:
- if (sdhc->sdh)
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
- if (sdhc)
- kfree(sdhc);
- if (osh)
- osl_detach(osh);
- return -ENODEV;
-}
-
-/**
- * Detach from target devices and SDIO Host Controller
- */
-static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev)
-{
- bcmsdh_hc_t *sdhc, *prev;
- osl_t *osh;
-
- /* find the SDIO Host Controller state for this
- pdev and take it out from the list */
- for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
- if (sdhc->dev == pdev) {
- if (prev)
- prev->next = sdhc->next;
- else
- sdhcinfo = NULL;
- break;
- }
- prev = sdhc;
- }
- if (!sdhc)
- return;
-
- drvinfo.detach(sdhc->ch);
-
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
-
- /* release SDIO Host Controller info */
- osh = sdhc->osh;
- kfree(sdhc);
- osl_detach(osh);
-}
-#endif /* BCMLXSDMMC */
#endif /* BCMPLATFORM_BUS */
extern int sdio_function_init(void);
int bcmsdh_register(bcmsdh_driver_t *driver)
{
- int error = 0;
-
drvinfo = *driver;
-#if defined(BCMPLATFORM_BUS)
-#if defined(BCMLXSDMMC)
SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
- error = sdio_function_init();
-#else
- SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
- error = driver_register(&bcmsdh_driver);
-#endif /* defined(BCMLXSDMMC) */
- return error;
-#endif /* defined(BCMPLATFORM_BUS) */
-
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
- error = pci_register_driver(&bcmsdh_pci_driver);
- if (!error)
- return 0;
-
- SDLX_MSG(("%s: pci_register_driver failed 0x%x\n", __func__, error));
-#endif /* BCMPLATFORM_BUS */
-
- return error;
+ return sdio_function_init();
}
extern void sdio_function_cleanup(void);
void bcmsdh_unregister(void)
{
-#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
- driver_unregister(&bcmsdh_driver);
-#endif
-#if defined(BCMLXSDMMC)
sdio_function_cleanup();
-#endif /* BCMLXSDMMC */
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
- pci_unregister_driver(&bcmsdh_pci_driver);
-#endif /* BCMPLATFORM_BUS */
}
#if defined(OOB_INTR_ONLY)
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index f6c9c4541813..d399b5c76f94 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -14,11 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
+#include <linux/netdevice.h>
#include <bcmdefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
-#include <bcmutils.h>
#include <osl.h>
+#include <bcmutils.h>
#include <sdio.h> /* SDIO Device and Protocol Specs */
#include <sdioh.h> /* SDIO Host Controller Specification */
#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
@@ -111,7 +112,7 @@ static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
/*
* Public entry points & extern's
*/
-extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq)
+extern sdioh_info_t *sdioh_attach(struct osl_info *osh, void *bar0, uint irq)
{
sdioh_info_t *sd;
int err_ret;
@@ -174,7 +175,7 @@ extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq)
return sd;
}
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+extern SDIOH_API_RC sdioh_detach(struct osl_info *osh, sdioh_info_t *sd)
{
sd_trace(("%s\n", __func__));
@@ -750,7 +751,7 @@ sdioh_cis_read(sdioh_info_t *sd, uint func, u8 *cisd, u32 length)
sd_trace(("%s: Func = %d\n", __func__, func));
if (!sd->func_cis_ptr[func]) {
- bzero(cis, length);
+ memset(cis, 0, length);
sd_err(("%s: no func_cis_ptr[%d]\n", __func__, func));
return SDIOH_API_RC_FAIL;
}
@@ -927,13 +928,13 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func,
static SDIOH_API_RC
sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
- uint addr, void *pkt)
+ uint addr, struct sk_buff *pkt)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
- void *pnext;
+ struct sk_buff *pnext;
sd_trace(("%s: Enter\n", __func__));
@@ -943,8 +944,8 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
/* Claim host controller */
sdio_claim_host(gInstance->func[func]);
- for (pnext = pkt; pnext; pnext = PKTNEXT(pnext)) {
- uint pkt_len = PKTLEN(pnext);
+ for (pnext = pkt; pnext; pnext = pnext->next) {
+ uint pkt_len = pnext->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
@@ -961,23 +962,23 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
* is supposed to give
* us something we can work with.
*/
- ASSERT(((u32) (PKTDATA(pkt)) & DMA_ALIGN_MASK) == 0);
+ ASSERT(((u32) (pkt->data) & DMA_ALIGN_MASK) == 0);
if ((write) && (!fifo)) {
err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
- ((u8 *) PKTDATA(pnext)),
+ ((u8 *) (pnext->data)),
pkt_len);
} else if (write) {
err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
- ((u8 *) PKTDATA(pnext)),
+ ((u8 *) (pnext->data)),
pkt_len);
} else if (fifo) {
err_ret = sdio_readsb(gInstance->func[func],
- ((u8 *) PKTDATA(pnext)),
+ ((u8 *) (pnext->data)),
addr, pkt_len);
} else {
err_ret = sdio_memcpy_fromio(gInstance->func[func],
- ((u8 *) PKTDATA(pnext)),
+ ((u8 *) (pnext->data)),
addr, pkt_len);
}
@@ -1025,10 +1026,10 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write,
uint func, uint addr, uint reg_width, uint buflen_u,
- u8 *buffer, void *pkt)
+ u8 *buffer, struct sk_buff *pkt)
{
SDIOH_API_RC Status;
- void *mypkt = NULL;
+ struct sk_buff *mypkt = NULL;
sd_trace(("%s: Enter\n", __func__));
@@ -1038,52 +1039,52 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write,
if (pkt == NULL) {
sd_data(("%s: Creating new %s Packet, len=%d\n",
__func__, write ? "TX" : "RX", buflen_u));
- mypkt = PKTGET(sd->osh, buflen_u, write ? true : false);
+ mypkt = pkt_buf_get_skb(sd->osh, buflen_u);
if (!mypkt) {
- sd_err(("%s: PKTGET failed: len %d\n",
+ sd_err(("%s: pkt_buf_get_skb failed: len %d\n",
__func__, buflen_u));
return SDIOH_API_RC_FAIL;
}
/* For a write, copy the buffer data into the packet. */
if (write)
- bcopy(buffer, PKTDATA(mypkt), buflen_u);
+ bcopy(buffer, mypkt->data, buflen_u);
Status =
sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!write)
- bcopy(PKTDATA(mypkt), buffer, buflen_u);
+ bcopy(mypkt->data, buffer, buflen_u);
- PKTFREE(sd->osh, mypkt, write ? true : false);
- } else if (((u32) (PKTDATA(pkt)) & DMA_ALIGN_MASK) != 0) {
+ pkt_buf_free_skb(sd->osh, mypkt, write ? true : false);
+ } else if (((u32) (pkt->data) & DMA_ALIGN_MASK) != 0) {
/* Case 2: We have a packet, but it is unaligned. */
/* In this case, we cannot have a chain. */
- ASSERT(PKTNEXT(pkt) == NULL);
+ ASSERT(pkt->next == NULL);
sd_data(("%s: Creating aligned %s Packet, len=%d\n",
- __func__, write ? "TX" : "RX", PKTLEN(pkt)));
- mypkt = PKTGET(sd->osh, PKTLEN(pkt), write ? true : false);
+ __func__, write ? "TX" : "RX", pkt->len));
+ mypkt = pkt_buf_get_skb(sd->osh, pkt->len);
if (!mypkt) {
- sd_err(("%s: PKTGET failed: len %d\n",
- __func__, PKTLEN(pkt)));
+ sd_err(("%s: pkt_buf_get_skb failed: len %d\n",
+ __func__, pkt->len));
return SDIOH_API_RC_FAIL;
}
/* For a write, copy the buffer data into the packet. */
if (write)
- bcopy(PKTDATA(pkt), PKTDATA(mypkt), PKTLEN(pkt));
+ bcopy(pkt->data, mypkt->data, pkt->len);
Status =
sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!write)
- bcopy(PKTDATA(mypkt), PKTDATA(pkt), PKTLEN(mypkt));
+ bcopy(mypkt->data, pkt->data, mypkt->len);
- PKTFREE(sd->osh, mypkt, write ? true : false);
+ pkt_buf_free_skb(sd->osh, mypkt, write ? true : false);
} else { /* case 3: We have a packet and
it is aligned. */
sd_data(("%s: Aligned %s Packet, direct DMA\n",
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
index ae7b566b11d7..ceaa47490680 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
@@ -15,7 +15,9 @@
*/
#include <linux/types.h>
#include <linux/sched.h> /* request_irq() */
+#include <linux/netdevice.h>
#include <bcmdefs.h>
+#include <osl.h>
#include <bcmutils.h>
#include <sdio.h> /* SDIO Specs */
#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
@@ -211,7 +213,7 @@ int sdio_function_init(void)
if (!gInstance)
return -ENOMEM;
- bzero(&sdmmc_dev, sizeof(sdmmc_dev));
+ memset(&sdmmc_dev, 0, sizeof(sdmmc_dev));
error = sdio_register_driver(&bcmsdh_sdmmc_driver);
return error;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd.h b/drivers/staging/brcm80211/brcmfmac/dhd.h
index 57d06b2da46f..69c6a0272812 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd.h
@@ -77,7 +77,7 @@ enum dhd_prealloc_index {
/* Common structure for module and instance linkage */
typedef struct dhd_pub {
/* Linkage pointers */
- osl_t *osh; /* OSL handle */
+ struct osl_info *osh; /* OSL handle */
struct dhd_bus *bus; /* Bus module handle */
struct dhd_prot *prot; /* Protocol module handle */
struct dhd_info *info; /* Info module handle */
@@ -277,15 +277,16 @@ typedef struct dhd_if_event {
*/
/* To allow osl_attach/detach calls from os-independent modules */
-osl_t *dhd_osl_attach(void *pdev, uint bustype);
-void dhd_osl_detach(osl_t *osh);
+struct osl_info *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(struct osl_info *osh);
/* Indication from bus module regarding presence/insertion of dongle.
* Return dhd_pub_t pointer, used as handle to OS module in later calls.
* Returned structure should have bus and prot pointers filled in.
* bus_hdrlen specifies required headroom for bus module header.
*/
-extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern dhd_pub_t *dhd_attach(struct osl_info *osh, struct dhd_bus *bus,
+ uint bus_hdrlen);
extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
/* Indication from bus module regarding removal/absence of dongle */
@@ -294,10 +295,12 @@ extern void dhd_detach(dhd_pub_t *dhdp);
/* Indication from bus module to change flow-control state */
extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
-extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q,
+ struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt);
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx,
+ struct sk_buff *rxp, int numpkt);
/* Return pointer to interface name */
extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
@@ -306,7 +309,7 @@ extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
extern void dhd_sched_dpc(dhd_pub_t *dhdp);
/* Notify tx completion */
-extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+extern void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success);
/* Query ioctl */
extern int dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
@@ -377,7 +380,7 @@ extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, unsigned char * cp,
int len);
/* Send packet to dongle via data channel */
-extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pkt);
/* Send event to host */
extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event,
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
index 3b39c9966f81..cd0d5400bf07 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
@@ -26,8 +26,8 @@ extern int dhd_bus_register(void);
extern void dhd_bus_unregister(void);
/* Download firmware image and nvram image */
-extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t * osh,
- char *fw_path, char *nv_path);
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus,
+ struct osl_info *osh, char *fw_path, char *nv_path);
/* Stop bus module: clear pending frames, disable data flow */
extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
@@ -36,7 +36,7 @@ extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+extern int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *txp);
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
index bcbaac9bcdcc..b7b527f5024c 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
@@ -15,6 +15,7 @@
*/
#include <linux/types.h>
+#include <linux/netdevice.h>
#include <bcmdefs.h>
#include <osl.h>
@@ -297,6 +298,15 @@ done:
return ret;
}
+#define PKTSUMNEEDED(skb) \
+ (((struct sk_buff *)(skb))->ip_summed == CHECKSUM_PARTIAL)
+#define PKTSETSUMGOOD(skb, x) \
+ (((struct sk_buff *)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because
+ skb->ip_summed is overloaded */
+
int
dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
void *params, int plen, void *arg, int len, bool set)
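For reference, a minimal self-contained sketch of the checksum handling the two macros above express; the inline helper names are hypothetical equivalents, and only <linux/skbuff.h> is assumed:

#include <linux/skbuff.h>

/* Hypothetical inline equivalents of PKTSUMNEEDED/PKTSETSUMGOOD. On transmit
 * the stack requests checksum offload by setting ip_summed to
 * CHECKSUM_PARTIAL; on receive the driver reports the result by writing
 * CHECKSUM_UNNECESSARY (verified) or CHECKSUM_NONE (not verified) into the
 * same field, which is why separate "set needed"/"is good" macros cannot
 * coexist on one sk_buff.
 */
static inline bool pkt_sum_needed(const struct sk_buff *skb)
{
        return skb->ip_summed == CHECKSUM_PARTIAL;
}

static inline void pkt_set_sum_good(struct sk_buff *skb, bool good)
{
        skb->ip_summed = good ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
}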
@@ -309,7 +319,7 @@ void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
}
-void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, struct sk_buff *pktbuf)
{
#ifdef BDC
struct bdc_header *h;
@@ -320,33 +330,33 @@ void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
#ifdef BDC
/* Push BDC header used to convey priority for buses that don't */
- PKTPUSH(pktbuf, BDC_HEADER_LEN);
+ skb_push(pktbuf, BDC_HEADER_LEN);
- h = (struct bdc_header *)PKTDATA(pktbuf);
+ h = (struct bdc_header *)(pktbuf->data);
h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
if (PKTSUMNEEDED(pktbuf))
h->flags |= BDC_FLAG_SUM_NEEDED;
- h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+ h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
h->rssi = 0;
#endif /* BDC */
BDC_SET_IF_IDX(h, ifidx);
}
-bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, u8 * fcbits)
+bool dhd_proto_fcinfo(dhd_pub_t *dhd, struct sk_buff *pktbuf, u8 * fcbits)
{
#ifdef BDC
struct bdc_header *h;
- if (PKTLEN(pktbuf) < BDC_HEADER_LEN) {
+ if (pktbuf->len < BDC_HEADER_LEN) {
DHD_ERROR(("%s: rx data too short (%d < %d)\n",
- __func__, PKTLEN(pktbuf), BDC_HEADER_LEN));
+ __func__, pktbuf->len, BDC_HEADER_LEN));
return BCME_ERROR;
}
- h = (struct bdc_header *)PKTDATA(pktbuf);
+ h = (struct bdc_header *)(pktbuf->data);
*fcbits = h->priority >> BDC_PRIORITY_FC_SHIFT;
if ((h->flags2 & BDC_FLAG2_FC_FLAG) == BDC_FLAG2_FC_FLAG)
@@ -355,7 +365,7 @@ bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, u8 * fcbits)
return false;
}
-int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, struct sk_buff *pktbuf)
{
#ifdef BDC
struct bdc_header *h;
@@ -366,13 +376,13 @@ int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
#ifdef BDC
/* Pop BDC header used to convey priority for buses that don't */
- if (PKTLEN(pktbuf) < BDC_HEADER_LEN) {
+ if (pktbuf->len < BDC_HEADER_LEN) {
DHD_ERROR(("%s: rx data too short (%d < %d)\n", __func__,
- PKTLEN(pktbuf), BDC_HEADER_LEN));
+ pktbuf->len, BDC_HEADER_LEN));
return BCME_ERROR;
}
- h = (struct bdc_header *)PKTDATA(pktbuf);
+ h = (struct bdc_header *)(pktbuf->data);
*ifidx = BDC_GET_IF_IDX(h);
if (*ifidx >= DHD_MAX_IFS) {
@@ -395,9 +405,9 @@ int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
PKTSETSUMGOOD(pktbuf, true);
}
- PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+ pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
- PKTPULL(pktbuf, BDC_HEADER_LEN);
+ skb_pull(pktbuf, BDC_HEADER_LEN);
#endif /* BDC */
return 0;
@@ -467,7 +477,7 @@ int dhd_prot_init(dhd_pub_t *dhd)
dhd_os_proto_unblock(dhd);
return ret;
}
- memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+ memcpy(dhd->mac.octet, buf, ETH_ALEN);
dhd_os_proto_unblock(dhd);
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_common.c b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
index 703188fc28ec..3dbf72eebd4a 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
+#include <linux/netdevice.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
@@ -326,9 +327,10 @@ void dhd_store_conn_status(u32 event, u32 status, u32 reason)
}
}
-bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
+ int prec)
{
- void *p;
+ struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
bool discard_oldest;
@@ -366,7 +368,7 @@ bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
ASSERT(p);
}
- PKTFREE(dhdp->osh, p, true);
+ pkt_buf_free_skb(dhdp->osh, p, true);
}
/* Enqueue */
@@ -832,7 +834,7 @@ wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata,
u16 flags;
int evlen;
- if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+ if (memcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
DHD_ERROR(("%s: mismatched OUI, bailing\n", __func__));
return BCME_ERROR;
}
@@ -1254,7 +1256,7 @@ int dhd_preinit_ioctls(dhd_pub_t *dhd)
*/
ret = dhd_custom_get_mac_address(ea_addr.octet);
if (!ret) {
- bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN,
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETH_ALEN,
buf, sizeof(buf));
ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
if (ret < 0) {
@@ -1262,7 +1264,7 @@ int dhd_preinit_ioctls(dhd_pub_t *dhd)
__func__, ret));
} else
memcpy(dhd->mac.octet, (void *)&ea_addr,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
}
#endif /* GET_CUSTOM_MAC_ENABLE */
@@ -1532,7 +1534,7 @@ int dhd_iscan_delete_bss(void *dhdp, void *addr, iscan_buf_t *iscan_skip)
break;
if (!memcmp
- (bi->BSSID.octet, addr, ETHER_ADDR_LEN)) {
+ (bi->BSSID.octet, addr, ETH_ALEN)) {
DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] "
"%X:%X:%X:%X:%X:%X\n",
__func__, l, i, bi->BSSID.octet[0],
@@ -1670,7 +1672,7 @@ int dhd_iscan_request(void *dhdp, u16 action)
char buf[WLC_IOCTL_SMLEN];
memset(&params, 0, sizeof(wl_iscan_params_t));
- memcpy(&params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+ memcpy(&params.params.bssid, &ether_bcast, ETH_ALEN);
params.params.bss_type = DOT11_BSSTYPE_ANY;
params.params.scan_type = DOT11_SCANTYPE_ACTIVE;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c b/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
index f647034f36d6..c3f18bb3b27c 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
@@ -14,7 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linuxver.h>
+#include <linux/netdevice.h>
#include <osl.h>
#include <bcmutils.h>
@@ -24,8 +24,8 @@
#include <wlioctl.h>
#include <wl_iw.h>
-#define WL_ERROR(x) printf x
-#define WL_TRACE(x)
+#define WL_ERROR(fmt, args...) printk(fmt, ##args)
+#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
#ifdef CUSTOMER_HW
extern void bcm_wlan_power_off(int);
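To make the call-convention change concrete, here is a minimal sketch (not part of the patch; the function name and gpio argument are hypothetical) of a call site using the new variadic macro, where the old object-like macro would instead have required WL_ERROR((...)) with doubled parentheses:

#include <linux/kernel.h>

#define WL_ERROR(fmt, args...) printk(fmt, ##args)

/* Illustrative only: with the variadic macro a call site passes a plain
 * printk-style argument list, which is what the hunks below convert to.
 */
static void wl_error_example(int gpio)
{
        WL_ERROR("%s: customer specific Host GPIO number is (%d)\n",
                 __func__, gpio);
}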
@@ -67,13 +67,13 @@ int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
#endif
if (dhd_oob_gpio_num < 0) {
- WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined\n",
- __func__));
+ WL_ERROR("%s: ERROR customer specific Host GPIO is NOT defined\n",
+ __func__);
return dhd_oob_gpio_num;
}
- WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
- __func__, dhd_oob_gpio_num));
+ WL_ERROR("%s: customer specific Host GPIO number is (%d)\n",
+ __func__, dhd_oob_gpio_num);
#if defined CUSTOMER_HW
host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
@@ -93,40 +93,40 @@ void dhd_customer_gpio_wlan_ctrl(int onoff)
{
switch (onoff) {
case WLAN_RESET_OFF:
- WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
- __func__));
+ WL_TRACE("%s: call customer specific GPIO to insert WLAN RESET\n",
+ __func__);
#ifdef CUSTOMER_HW
bcm_wlan_power_off(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
wifi_set_power(0, 0);
#endif
- WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+ WL_ERROR("=========== WLAN placed in RESET ========\n");
break;
case WLAN_RESET_ON:
- WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
- __func__));
+ WL_TRACE("%s: callc customer specific GPIO to remove WLAN RESET\n",
+ __func__);
#ifdef CUSTOMER_HW
bcm_wlan_power_on(2);
#endif /* CUSTOMER_HW */
#ifdef CUSTOMER_HW2
wifi_set_power(1, 0);
#endif
- WL_ERROR(("=========== WLAN going back to live ========\n"));
+ WL_ERROR("=========== WLAN going back to live ========\n");
break;
case WLAN_POWER_OFF:
- WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
- __func__));
+ WL_TRACE("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+ __func__);
#ifdef CUSTOMER_HW
bcm_wlan_power_off(1);
#endif /* CUSTOMER_HW */
break;
case WLAN_POWER_ON:
- WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
- __func__));
+ WL_TRACE("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+ __func__);
#ifdef CUSTOMER_HW
bcm_wlan_power_on(1);
#endif /* CUSTOMER_HW */
@@ -140,7 +140,7 @@ void dhd_customer_gpio_wlan_ctrl(int onoff)
/* Function to get custom MAC address */
int dhd_custom_get_mac_address(unsigned char *buf)
{
- WL_TRACE(("%s Enter\n", __func__));
+ WL_TRACE("%s Enter\n", __func__);
if (!buf)
return -EINVAL;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index 9335f02029aa..db4508378775 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -32,7 +32,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <bcmdefs.h>
-#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
@@ -211,7 +210,7 @@ typedef struct dhd_if {
int idx; /* iface idx in dongle */
int state; /* interface state */
uint subunit; /* subunit */
- u8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ]; /* linux interface name */
@@ -709,7 +708,7 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
/* Send down the multicast list first. */
- buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
bufp = buf = kmalloc(buflen, GFP_ATOMIC);
if (!bufp) {
DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
@@ -727,8 +726,8 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt)
break;
- memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
+ memcpy(bufp, ha->addr, ETH_ALEN);
+ bufp += ETH_ALEN;
cnt--;
}
@@ -812,7 +811,7 @@ _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
DHD_TRACE(("%s enter\n", __func__));
if (!bcm_mkiovar
- ("cur_etheraddr", (char *)addr, ETHER_ADDR_LEN, buf, 32)) {
+ ("cur_etheraddr", (char *)addr, ETH_ALEN, buf, 32)) {
DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n",
dhd_ifname(&dhd->pub, ifidx)));
return -1;
@@ -828,7 +827,7 @@ _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
DHD_ERROR(("%s: set cur_etheraddr failed\n",
dhd_ifname(&dhd->pub, ifidx)));
} else {
- memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETH_ALEN);
}
return ret;
@@ -998,7 +997,7 @@ static int dhd_set_mac_address(struct net_device *dev, void *addr)
return -1;
ASSERT(dhd->sysioc_tsk);
- memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+ memcpy(&dhd->macvalue, sa->sa_data, ETH_ALEN);
dhd->set_macaddress = true;
up(&dhd->sysioc_sem);
@@ -1019,7 +1018,7 @@ static void dhd_set_multicast_list(struct net_device *dev)
up(&dhd->sysioc_sem);
}
-int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf)
{
int ret;
dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
@@ -1029,13 +1028,13 @@ int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
return -ENODEV;
/* Update multicast statistic */
- if (PKTLEN(pktbuf) >= ETHER_ADDR_LEN) {
- u8 *pktdata = (u8 *) PKTDATA(pktbuf);
+ if (pktbuf->len >= ETH_ALEN) {
+ u8 *pktdata = (u8 *) (pktbuf->data);
struct ether_header *eh = (struct ether_header *)pktdata;
- if (ETHER_ISMULTI(eh->ether_dhost))
+ if (is_multicast_ether_addr(eh->ether_dhost))
dhdp->tx_multicast++;
- if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+ if (ntoh16(eh->ether_type) == ETH_P_PAE)
atomic_inc(&dhd->pend_8021x_cnt);
}
@@ -1053,6 +1052,32 @@ int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
return ret;
}
+static inline void *
+osl_pkt_frmnative(struct osl_info *osh, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ for (nskb = skb; nskb; nskb = nskb->next)
+ osh->pktalloced++;
+
+ return (void *)skb;
+}
+#define PKTFRMNATIVE(osh, skb) \
+ osl_pkt_frmnative((osh), (struct sk_buff *)(skb))
+
+static inline struct sk_buff *
+osl_pkt_tonative(struct osl_info *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next)
+ osh->pktalloced--;
+
+ return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt) \
+ osl_pkt_tonative((osh), (pkt))
+
static int dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
@@ -1133,13 +1158,15 @@ void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
netif_wake_queue(net);
}
-void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
+void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
+ int numpkt)
{
dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
struct sk_buff *skb;
unsigned char *eth;
uint len;
- void *data, *pnext, *save_pktbuf;
+ void *data;
+ struct sk_buff *pnext, *save_pktbuf;
int i;
dhd_if_t *ifp;
wl_event_msg_t event;
@@ -1150,8 +1177,8 @@ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
- pnext = PKTNEXT(pktbuf);
- PKTSETNEXT(pktbuf, NULL);
+ pnext = pktbuf->next;
+ pktbuf->next = NULL;
skb = PKTTONATIVE(dhdp->osh, pktbuf);
@@ -1190,7 +1217,7 @@ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
/* Process special event packets and then discard them */
if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM)
dhd_wl_host_event(dhd, &ifidx,
- skb->mac_header,
+ skb_mac_header(skb),
&event, &data);
ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
@@ -1223,7 +1250,7 @@ void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
return;
}
-void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success)
{
uint ifidx;
dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
@@ -1232,10 +1259,10 @@ void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
dhd_prot_hdrpull(dhdp, &ifidx, txp);
- eh = (struct ether_header *)PKTDATA(txp);
+ eh = (struct ether_header *)(txp->data);
type = ntoh16(eh->ether_type);
- if (type == ETHER_TYPE_802_1X)
+ if (type == ETH_P_PAE)
atomic_dec(&dhd->pend_8021x_cnt);
}
@@ -1621,6 +1648,51 @@ static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
return 0;
}
+static s16 linuxbcmerrormap[] = { 0, /* 0 */
+ -EINVAL, /* BCME_ERROR */
+ -EINVAL, /* BCME_BADARG */
+ -EINVAL, /* BCME_BADOPTION */
+ -EINVAL, /* BCME_NOTUP */
+ -EINVAL, /* BCME_NOTDOWN */
+ -EINVAL, /* BCME_NOTAP */
+ -EINVAL, /* BCME_NOTSTA */
+ -EINVAL, /* BCME_BADKEYIDX */
+ -EINVAL, /* BCME_RADIOOFF */
+ -EINVAL, /* BCME_NOTBANDLOCKED */
+ -EINVAL, /* BCME_NOCLK */
+ -EINVAL, /* BCME_BADRATESET */
+ -EINVAL, /* BCME_BADBAND */
+ -E2BIG, /* BCME_BUFTOOSHORT */
+ -E2BIG, /* BCME_BUFTOOLONG */
+ -EBUSY, /* BCME_BUSY */
+ -EINVAL, /* BCME_NOTASSOCIATED */
+ -EINVAL, /* BCME_BADSSIDLEN */
+ -EINVAL, /* BCME_OUTOFRANGECHAN */
+ -EINVAL, /* BCME_BADCHAN */
+ -EFAULT, /* BCME_BADADDR */
+ -ENOMEM, /* BCME_NORESOURCE */
+ -EOPNOTSUPP, /* BCME_UNSUPPORTED */
+ -EMSGSIZE, /* BCME_BADLENGTH */
+ -EINVAL, /* BCME_NOTREADY */
+ -EPERM, /* BCME_NOTPERMITTED */
+ -ENOMEM, /* BCME_NOMEM */
+ -EINVAL, /* BCME_ASSOCIATED */
+ -ERANGE, /* BCME_RANGE */
+ -EINVAL, /* BCME_NOTFOUND */
+ -EINVAL, /* BCME_WME_NOT_ENABLED */
+ -EINVAL, /* BCME_TSPEC_NOTFOUND */
+ -EINVAL, /* BCME_ACM_NOTSUPPORTED */
+ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
+ -EIO, /* BCME_SDIO_ERROR */
+ -ENODEV, /* BCME_DONGLE_DOWN */
+ -EINVAL, /* BCME_VERSION */
+ -EIO, /* BCME_TXFAIL */
+ -EIO, /* BCME_RXFAIL */
+ -EINVAL, /* BCME_NODEVICE */
+ -EINVAL, /* BCME_NMODE_DISABLED */
+ -ENODATA, /* BCME_NONRESIDENT */
+};
+
static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
@@ -1742,7 +1814,12 @@ done:
if (buf)
kfree(buf);
- return OSL_ERROR(bcmerror);
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+ return linuxbcmerrormap[-bcmerror];
}
static int dhd_stop(struct net_device *net)
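The translation in the hunk above can be read as a small helper; a sketch of that pattern follows (the helper name bcmerror_to_errno is hypothetical, while BCME_ERROR, BCME_LAST and linuxbcmerrormap[] come from the patch):

/* Illustrative only: clamp a BCME_* status and look it up in the
 * linuxbcmerrormap[] table added above. Positive values count as success,
 * and anything below BCME_LAST is folded into the generic BCME_ERROR slot
 * so the table index stays in range.
 */
static int bcmerror_to_errno(int bcmerror)
{
        if (bcmerror > 0)
                bcmerror = 0;
        else if (bcmerror < BCME_LAST)
                bcmerror = BCME_ERROR;

        return linuxbcmerrormap[-bcmerror];
}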
@@ -1789,7 +1866,7 @@ static int dhd_open(struct net_device *net)
}
atomic_set(&dhd->pend_8021x_cnt, 0);
- memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETH_ALEN);
#ifdef TOE
/* Get current TOE mode from dongle */
@@ -1814,12 +1891,12 @@ static int dhd_open(struct net_device *net)
return ret;
}
-osl_t *dhd_osl_attach(void *pdev, uint bustype)
+struct osl_info *dhd_osl_attach(void *pdev, uint bustype)
{
- return osl_attach(pdev, bustype, true);
+ return osl_attach(pdev, bustype);
}
-void dhd_osl_detach(osl_t *osh)
+void dhd_osl_detach(struct osl_info *osh)
{
osl_detach(osh);
}
@@ -1845,7 +1922,7 @@ dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
dhd->iflist[ifidx] = ifp;
strlcpy(ifp->name, name, IFNAMSIZ);
if (mac_addr != NULL)
- memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+ memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
if (handle == NULL) {
ifp->state = WLC_E_IF_ADD;
@@ -1877,7 +1954,8 @@ void dhd_del_if(dhd_info_t *dhd, int ifidx)
up(&dhd->sysioc_sem);
}
-dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+dhd_pub_t *dhd_attach(struct osl_info *osh, struct dhd_bus *bus,
+ uint bus_hdrlen)
{
dhd_info_t *dhd = NULL;
struct net_device *net;
@@ -2199,19 +2277,11 @@ static struct net_device_ops dhd_ops_pri = {
.ndo_set_multicast_list = dhd_set_multicast_list
};
-static struct net_device_ops dhd_ops_virt = {
- .ndo_get_stats = dhd_get_stats,
- .ndo_do_ioctl = dhd_ioctl_entry,
- .ndo_start_xmit = dhd_start_xmit,
- .ndo_set_mac_address = dhd_set_mac_address,
- .ndo_set_multicast_list = dhd_set_multicast_list
-};
-
int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
{
dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
struct net_device *net;
- u8 temp_addr[ETHER_ADDR_LEN] = {
+ u8 temp_addr[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
DHD_TRACE(("%s: ifidx %d\n", __func__, ifidx));
@@ -2229,7 +2299,7 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ memcpy(temp_addr, dhd->pub.mac.octet, ETH_ALEN);
}
@@ -2257,7 +2327,7 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
- memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+ memcpy(net->dev_addr, temp_addr, ETH_ALEN);
if (register_netdev(net) != 0) {
DHD_ERROR(("%s: couldn't register the net device\n",
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
index bf8df9801030..c66f1c2941e2 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linuxver.h>
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
index cc42fa4a9140..a5309e27b65b 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
@@ -46,15 +46,16 @@ extern int dhd_prot_init(dhd_pub_t *dhdp);
/* Stop protocol: sync w/dongle state. */
extern void dhd_prot_stop(dhd_pub_t *dhdp);
-extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, u8 *fcbits);
+extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, struct sk_buff *pktbuf,
+ u8 *fcbits);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, struct sk_buff *txp);
/* Remove any protocol-specific data header. */
-extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, struct sk_buff *rxp);
/* Use protocol to issue ioctl to dongle */
extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc,
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
index b2281d9dfdcf..3edce44978a1 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <bcmdefs.h>
+#include <linux/netdevice.h>
#include <osl.h>
#include <bcmsdh.h>
@@ -143,7 +144,7 @@
* bufpool was present for gspi bus.
*/
#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
- PKTFREE(bus->dhd->osh, pkt, false);
+ pkt_buf_free_skb(bus->dhd->osh, pkt, false);
DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
uint len);
@@ -202,8 +203,8 @@ typedef struct dhd_bus {
u8 rx_seq; /* Receive sequence number (expected) */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
- void *glomd; /* Packet containing glomming descriptor */
- void *glom; /* Packet chain for glommed superframe */
+ struct sk_buff *glomd; /* Packet containing glomming descriptor */
+ struct sk_buff *glom; /* Packet chain for glommed superframe */
uint glomerr; /* Glom packet read errors */
u8 *rxbuf; /* Buffer for receiving control packets */
@@ -356,16 +357,16 @@ extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
-#define PKTALIGN(osh, p, len, align) \
+#define PKTALIGN(_osh, _p, _len, _align) \
do { \
uint datalign; \
- datalign = (unsigned long)PKTDATA((p)); \
- datalign = roundup(datalign, (align)) - datalign; \
- ASSERT(datalign < (align)); \
- ASSERT(PKTLEN((p)) >= ((len) + datalign)); \
+ datalign = (unsigned long)((_p)->data); \
+ datalign = roundup(datalign, (_align)) - datalign; \
+ ASSERT(datalign < (_align)); \
+ ASSERT((_p)->len >= ((_len) + datalign)); \
if (datalign) \
- PKTPULL((p), datalign); \
- PKTSETLEN((p), (len)); \
+ skb_pull((_p), datalign); \
+ __skb_trim((_p), (_len)); \
} while (0)
/* Limit on rounding up frames */
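The alignment arithmetic of the rewritten PKTALIGN() can also be sketched as a plain function (hypothetical name; assumes <linux/kernel.h> for roundup() and <linux/skbuff.h>):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Illustrative only: advance skb->data to the next 'align' boundary, then
 * set the packet length to 'len', i.e. the same two steps PKTALIGN()
 * performs with skb_pull() and __skb_trim(). The caller must have
 * allocated the buffer with enough slack for the pull.
 */
static void pkt_align(struct sk_buff *skb, unsigned int len,
                      unsigned int align)
{
        unsigned long data = (unsigned long)skb->data;
        unsigned int pad = roundup(data, align) - data;

        if (pad)
                skb_pull(skb, pad);
        __skb_trim(skb, len);
}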
@@ -430,27 +431,30 @@ static int dhdsdio_mem_dump(dhd_bus_t *bus);
#endif /* DHD_DEBUG */
static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
-static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
-static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release(dhd_bus_t *bus, struct osl_info *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, struct osl_info *osh);
static void dhdsdio_disconnect(void *ptr);
static bool dhdsdio_chipmatch(u16 chipid);
-static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
- void *regsva, u16 devid);
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
-static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
-static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t * osh);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, struct osl_info *osh,
+ void *sdh, void *regsva, u16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, struct osl_info *osh,
+ void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, struct osl_info *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, struct osl_info * osh);
static uint process_nvram_vars(char *varbuf, uint len);
static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, void *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle);
+ uint flags, u8 *buf, uint nbytes,
+ struct sk_buff *pkt, bcmsdh_cmplt_fn_t complete,
+ void *handle);
static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, void *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle);
+ uint flags, u8 *buf, uint nbytes,
+ struct sk_buff *pkt, bcmsdh_cmplt_fn_t complete,
+ void *handle);
-static bool dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh,
+static bool dhdsdio_download_firmware(struct dhd_bus *bus, struct osl_info *osh,
void *sdh);
static int _dhdsdio_download_firmware(struct dhd_bus *bus);
@@ -900,16 +904,17 @@ void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
+ bool free_pkt)
{
int ret;
- osl_t *osh;
+ struct osl_info *osh;
u8 *frame;
u16 len, pad = 0;
u32 swheader;
uint retries = 0;
bcmsdh_info_t *sdh;
- void *new;
+ struct sk_buff *new;
int i;
DHD_TRACE(("%s: Enter\n", __func__));
@@ -922,46 +927,46 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
goto done;
}
- frame = (u8 *) PKTDATA(pkt);
+ frame = (u8 *) (pkt->data);
/* Add alignment padding, allocate new packet if needed */
pad = ((unsigned long)frame % DHD_SDALIGN);
if (pad) {
- if (PKTHEADROOM(pkt) < pad) {
+ if (skb_headroom(pkt) < pad) {
DHD_INFO(("%s: insufficient headroom %d for %d pad\n",
- __func__, (int)PKTHEADROOM(pkt), pad));
+ __func__, skb_headroom(pkt), pad));
bus->dhd->tx_realloc++;
- new = PKTGET(osh, (PKTLEN(pkt) + DHD_SDALIGN), true);
+ new = pkt_buf_get_skb(osh, (pkt->len + DHD_SDALIGN));
if (!new) {
DHD_ERROR(("%s: couldn't allocate new %d-byte "
"packet\n",
- __func__, PKTLEN(pkt) + DHD_SDALIGN));
+ __func__, pkt->len + DHD_SDALIGN));
ret = BCME_NOMEM;
goto done;
}
- PKTALIGN(osh, new, PKTLEN(pkt), DHD_SDALIGN);
- bcopy(PKTDATA(pkt), PKTDATA(new), PKTLEN(pkt));
+ PKTALIGN(osh, new, pkt->len, DHD_SDALIGN);
+ bcopy(pkt->data, new->data, pkt->len);
if (free_pkt)
- PKTFREE(osh, pkt, true);
+ pkt_buf_free_skb(osh, pkt, true);
/* free the pkt if canned one is not used */
free_pkt = true;
pkt = new;
- frame = (u8 *) PKTDATA(pkt);
+ frame = (u8 *) (pkt->data);
ASSERT(((unsigned long)frame % DHD_SDALIGN) == 0);
pad = 0;
} else {
- PKTPUSH(pkt, pad);
- frame = (u8 *) PKTDATA(pkt);
+ skb_push(pkt, pad);
+ frame = (u8 *) (pkt->data);
- ASSERT((pad + SDPCM_HDRLEN) <= (int)PKTLEN(pkt));
- bzero(frame, pad + SDPCM_HDRLEN);
+ ASSERT((pad + SDPCM_HDRLEN) <= (int)(pkt->len));
+ memset(frame, 0, pad + SDPCM_HDRLEN);
}
}
ASSERT(pad < DHD_SDALIGN);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- len = (u16) PKTLEN(pkt);
+ len = (u16) (pkt->len);
*(u16 *) frame = htol16(len);
*(((u16 *) frame) + 1) = htol16(~len);
@@ -974,7 +979,7 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
#ifdef DHD_DEBUG
- tx_packets[PKTPRIO(pkt)]++;
+ tx_packets[pkt->priority]++;
if (DHD_BYTES_ON() &&
(((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
(DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
@@ -989,7 +994,7 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
u16 pad = bus->blocksize - (len % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize))
#ifdef NOTUSED
- if (pad <= PKTTAILROOM(pkt))
+ if (pad <= skb_tailroom(pkt))
#endif /* NOTUSED */
len += pad;
} else if (len % DHD_SDALIGN) {
@@ -999,7 +1004,7 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
/* Some controllers have trouble with odd bytes -- round to even */
if (forcealign && (len & (ALIGNMENT - 1))) {
#ifdef NOTUSED
- if (PKTTAILROOM(pkt))
+ if (skb_tailroom(pkt))
#endif
len = roundup(len, ALIGNMENT);
#ifdef NOTUSED
@@ -1050,34 +1055,34 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
done:
/* restore pkt buffer pointer before calling tx complete routine */
- PKTPULL(pkt, SDPCM_HDRLEN + pad);
+ skb_pull(pkt, SDPCM_HDRLEN + pad);
dhd_os_sdunlock(bus->dhd);
dhd_txcomplete(bus->dhd, pkt, ret != 0);
dhd_os_sdlock(bus->dhd);
if (free_pkt)
- PKTFREE(osh, pkt, true);
+ pkt_buf_free_skb(osh, pkt, true);
return ret;
}
-int dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *pkt)
{
int ret = BCME_ERROR;
- osl_t *osh;
+ struct osl_info *osh;
uint datalen, prec;
DHD_TRACE(("%s: Enter\n", __func__));
osh = bus->dhd->osh;
- datalen = PKTLEN(pkt);
+ datalen = pkt->len;
#ifdef SDTEST
/* Push the test header if doing loopback */
if (bus->ext_loop) {
u8 *data;
- PKTPUSH(pkt, SDPCM_TEST_HDRLEN);
- data = PKTDATA(pkt);
+ skb_push(pkt, SDPCM_TEST_HDRLEN);
+ data = pkt->data;
*data++ = SDPCM_TEST_ECHOREQ;
*data++ = (u8) bus->loopid++;
*data++ = (datalen >> 0);
@@ -1087,10 +1092,10 @@ int dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
#endif /* SDTEST */
/* Add space for the header */
- PKTPUSH(pkt, SDPCM_HDRLEN);
- ASSERT(IS_ALIGNED((unsigned long)PKTDATA(pkt), 2));
+ skb_push(pkt, SDPCM_HDRLEN);
+ ASSERT(IS_ALIGNED((unsigned long)(pkt->data), 2));
- prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+ prec = PRIO2PREC((pkt->priority & PRIOMASK));
/* Check for existing queue, current flow-control,
pending event, or pending clock */
@@ -1105,9 +1110,9 @@ int dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
/* Priority based enq */
dhd_os_sdlock_txq(bus->dhd);
if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == false) {
- PKTPULL(pkt, SDPCM_HDRLEN);
+ skb_pull(pkt, SDPCM_HDRLEN);
dhd_txcomplete(bus->dhd, pkt, false);
- PKTFREE(osh, pkt, true);
+ pkt_buf_free_skb(osh, pkt, true);
DHD_ERROR(("%s: out of bus->txq !!!\n", __func__));
ret = BCME_NORESOURCE;
} else {
@@ -1162,7 +1167,7 @@ int dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
{
- void *pkt;
+ struct sk_buff *pkt;
u32 intstatus = 0;
uint retries = 0;
int ret = 0, prec_out;
@@ -1186,7 +1191,7 @@ static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
break;
}
dhd_os_sdunlock_txq(bus->dhd);
- datalen = PKTLEN(pkt) - SDPCM_HDRLEN;
+ datalen = pkt->len - SDPCM_HDRLEN;
#ifndef SDTEST
ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
@@ -1247,7 +1252,7 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
frame -= doff;
len += doff;
msglen += doff;
- bzero(frame, doff + SDPCM_HDRLEN);
+ memset(frame, 0, doff + SDPCM_HDRLEN);
}
ASSERT(doff < DHD_SDALIGN);
}
@@ -2531,7 +2536,7 @@ static int dhdsdio_write_vars(dhd_bus_t *bus)
if (!vbuffer)
return BCME_NOMEM;
- bzero(vbuffer, varsize);
+ memset(vbuffer, 0, varsize);
bcopy(bus->vars, vbuffer, bus->varsz);
/* Write the vars list */
@@ -2823,7 +2828,7 @@ exit:
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
- osl_t *osh = bus->dhd->osh;
+ struct osl_info *osh = bus->dhd->osh;
u32 local_hostintmask;
u8 saveclk;
uint retries;
@@ -2877,10 +2882,10 @@ void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
/* Clear any held glomming stuff */
if (bus->glomd)
- PKTFREE(osh, bus->glomd, false);
+ pkt_buf_free_skb(osh, bus->glomd, false);
if (bus->glom)
- PKTFREE(osh, bus->glom, false);
+ pkt_buf_free_skb(osh, bus->glom, false);
bus->glom = bus->glomd = NULL;
@@ -3178,8 +3183,8 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
u8 *dptr, num = 0;
u16 sublen, check;
- void *pfirst, *plast, *pnext, *save_pfirst;
- osl_t *osh = bus->dhd->osh;
+ struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+ struct osl_info *osh = bus->dhd->osh;
int errcode;
u8 chan, seq, doff, sfdoff;
@@ -3199,8 +3204,8 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
dhd_os_sdlock_rxq(bus->dhd);
pfirst = plast = pnext = NULL;
- dlen = (u16) PKTLEN(bus->glomd);
- dptr = PKTDATA(bus->glomd);
+ dlen = (u16) (bus->glomd->len);
+ dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
DHD_ERROR(("%s: bad glomd len(%d), ignore descriptor\n",
__func__, dlen));
@@ -3235,19 +3240,19 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = PKTGET(osh, sublen + DHD_SDALIGN, false);
+ pnext = pkt_buf_get_skb(osh, sublen + DHD_SDALIGN);
if (pnext == NULL) {
- DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+ DHD_ERROR(("%s: pkt_buf_get_skb failed, num %d len %d\n",
__func__, num, sublen));
break;
}
- ASSERT(!PKTLINK(pnext));
+ ASSERT(!(pnext->prev));
if (!pfirst) {
ASSERT(!plast);
pfirst = plast = pnext;
} else {
ASSERT(plast);
- PKTSETNEXT(plast, pnext);
+ plast->next = pnext;
plast = pnext;
}
@@ -3271,13 +3276,13 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
pfirst = pnext = NULL;
} else {
if (pfirst)
- PKTFREE(osh, pfirst, false);
+ pkt_buf_free_skb(osh, pfirst, false);
bus->glom = NULL;
num = 0;
}
/* Done with descriptor packet */
- PKTFREE(osh, bus->glomd, false);
+ pkt_buf_free_skb(osh, bus->glomd, false);
bus->glomd = NULL;
bus->nextlen = 0;
@@ -3290,10 +3295,10 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
if (DHD_GLOM_ON()) {
DHD_GLOM(("%s: try superframe read, packet chain:\n",
__func__));
- for (pnext = bus->glom; pnext; pnext = PKTNEXT(pnext)) {
+ for (pnext = bus->glom; pnext; pnext = pnext->next) {
DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
- pnext, (u8 *) PKTDATA(pnext),
- PKTLEN(pnext), PKTLEN(pnext)));
+ pnext, (u8 *) (pnext->data),
+ pnext->len, pnext->len));
}
}
@@ -3309,7 +3314,7 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
bcmsdh_cur_sbwad
(bus->sdh), SDIO_FUNC_2,
F2SYNC,
- (u8 *) PKTDATA(pfirst),
+ (u8 *) pfirst->data,
dlen, pfirst, NULL, NULL);
} else if (bus->dataptr) {
errcode = dhd_bcmsdh_recv_buf(bus,
@@ -3346,7 +3351,7 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
bus->glomerr = 0;
dhdsdio_rxfail(bus, true, false);
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(osh, bus->glom, false);
+ pkt_buf_free_skb(osh, bus->glom, false);
dhd_os_sdunlock_rxq(bus->dhd);
bus->rxglomfail++;
bus->glom = NULL;
@@ -3355,13 +3360,13 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
}
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
- prhex("SUPERFRAME", PKTDATA(pfirst),
- min_t(int, PKTLEN(pfirst), 48));
+ prhex("SUPERFRAME", pfirst->data,
+ min_t(int, pfirst->len, 48));
}
#endif
/* Validate the superframe header */
- dptr = (u8 *) PKTDATA(pfirst);
+ dptr = (u8 *) (pfirst->data);
sublen = ltoh16_ua(dptr);
check = ltoh16_ua(dptr + sizeof(u16));
@@ -3399,11 +3404,11 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
__func__));
errcode = -1;
} else if ((doff < SDPCM_HDRLEN) ||
- (doff > (PKTLEN(pfirst) - SDPCM_HDRLEN))) {
+ (doff > (pfirst->len - SDPCM_HDRLEN))) {
DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d "
"pkt %d min %d\n",
__func__, doff, sublen,
- PKTLEN(pfirst), SDPCM_HDRLEN));
+ pfirst->len, SDPCM_HDRLEN));
errcode = -1;
}
@@ -3424,14 +3429,14 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
bus->tx_max = txmax;
/* Remove superframe header, remember offset */
- PKTPULL(pfirst, doff);
+ skb_pull(pfirst, doff);
sfdoff = doff;
/* Validate all the subframe headers */
for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = PKTNEXT(pnext)) {
- dptr = (u8 *) PKTDATA(pnext);
- dlen = (u16) PKTLEN(pnext);
+ num++, pnext = pnext->next) {
+ dptr = (u8 *) (pnext->data);
+ dlen = (u16) (pnext->len);
sublen = ltoh16_ua(dptr);
check = ltoh16_ua(dptr + sizeof(u16));
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -3469,13 +3474,13 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
a couple retries */
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
- PKTPUSH(pfirst, sfdoff);
+ skb_push(pfirst, sfdoff);
dhdsdio_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
dhdsdio_rxfail(bus, true, false);
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(osh, bus->glom, false);
+ pkt_buf_free_skb(osh, bus->glom, false);
dhd_os_sdunlock_rxq(bus->dhd);
bus->rxglomfail++;
bus->glom = NULL;
@@ -3491,10 +3496,10 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
dhd_os_sdlock_rxq(bus->dhd);
for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = PKTNEXT(pfirst);
- PKTSETNEXT(pfirst, NULL);
+ pnext = pfirst->next;
+ pfirst->next = NULL;
- dptr = (u8 *) PKTDATA(pfirst);
+ dptr = (u8 *) (pfirst->data);
sublen = ltoh16_ua(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -3502,8 +3507,8 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d "
"chan %d seq %d\n",
- __func__, num, pfirst, PKTDATA(pfirst),
- PKTLEN(pfirst), sublen, chan, seq));
+ __func__, num, pfirst, pfirst->data,
+ pfirst->len, sublen, chan, seq));
ASSERT((chan == SDPCM_DATA_CHANNEL)
|| (chan == SDPCM_EVENT_CHANNEL));
@@ -3519,13 +3524,13 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
prhex("Rx Subframe Data", dptr, dlen);
#endif
- PKTSETLEN(pfirst, sublen);
- PKTPULL(pfirst, doff);
+ __skb_trim(pfirst, sublen);
+ skb_pull(pfirst, doff);
- if (PKTLEN(pfirst) == 0) {
- PKTFREE(bus->dhd->osh, pfirst, false);
+ if (pfirst->len == 0) {
+ pkt_buf_free_skb(bus->dhd->osh, pfirst, false);
if (plast) {
- PKTSETNEXT(plast, pnext);
+ plast->next = pnext;
} else {
ASSERT(save_pfirst == pfirst);
save_pfirst = pnext;
@@ -3536,9 +3541,9 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
DHD_ERROR(("%s: rx protocol error\n",
__func__));
bus->dhd->rx_errors++;
- PKTFREE(osh, pfirst, false);
+ pkt_buf_free_skb(osh, pfirst, false);
if (plast) {
- PKTSETNEXT(plast, pnext);
+ plast->next = pnext;
} else {
ASSERT(save_pfirst == pfirst);
save_pfirst = pnext;
@@ -3548,7 +3553,7 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
/* this packet will go up, link back into
chain and count it */
- PKTSETNEXT(pfirst, pnext);
+ pfirst->next = pnext;
plast = pfirst;
num++;
@@ -3556,11 +3561,11 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
if (DHD_GLOM_ON()) {
DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) "
"nxt/lnk %p/%p\n",
- __func__, num, pfirst, PKTDATA(pfirst),
- PKTLEN(pfirst), PKTNEXT(pfirst),
- PKTLINK(pfirst)));
- prhex("", (u8 *) PKTDATA(pfirst),
- min_t(int, PKTLEN(pfirst), 32));
+ __func__, num, pfirst, pfirst->data,
+ pfirst->len, pfirst->next,
+ pfirst->prev));
+ prhex("", (u8 *) pfirst->data,
+ min_t(int, pfirst->len, 32));
}
#endif /* DHD_DEBUG */
}
@@ -3580,7 +3585,7 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
/* Return true if there may be more frames to read */
static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
{
- osl_t *osh = bus->dhd->osh;
+ struct osl_info *osh = bus->dhd->osh;
bcmsdh_info_t *sdh = bus->sdh;
u16 len, check; /* Extracted hardware header fields */
@@ -3588,7 +3593,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
u8 fcbits; /* Extracted fcbits from software header */
u8 delta;
- void *pkt; /* Packet for event or data frames */
+ struct sk_buff *pkt; /* Packet for event or data frames */
u16 pad; /* Number of pad bytes to read */
u16 rdlen; /* Total number of bytes to read */
u8 rxseq; /* Next sequence number to expect */
@@ -3675,7 +3680,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
*/
/* Allocate a packet buffer */
dhd_os_sdlock_rxq(bus->dhd);
- pkt = PKTGET(osh, rdlen + DHD_SDALIGN, false);
+ pkt = pkt_buf_get_skb(osh, rdlen + DHD_SDALIGN);
if (!pkt) {
if (bus->bus == SPI_BUS) {
bus->usebufpool = false;
@@ -3721,7 +3726,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
} else {
/* Give up on data,
request rtx of events */
- DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " "expected rxseq %d\n",
+ DHD_ERROR(("%s (nextlen): pkt_buf_get_skb failed: len %d rdlen %d " "expected rxseq %d\n",
__func__, len, rdlen, rxseq));
/* Just go try again w/normal
header read */
@@ -3732,9 +3737,9 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if (bus->bus == SPI_BUS)
bus->usebufpool = true;
- ASSERT(!PKTLINK(pkt));
+ ASSERT(!(pkt->prev));
PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
- rxbuf = (u8 *) PKTDATA(pkt);
+ rxbuf = (u8 *) (pkt->data);
/* Read the entire frame */
sdret =
dhd_bcmsdh_recv_buf(bus,
@@ -3748,7 +3753,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if (sdret < 0) {
DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
__func__, rdlen, sdret));
- PKTFREE(bus->dhd->osh, pkt, false);
+ pkt_buf_free_skb(bus->dhd->osh, pkt, false);
bus->dhd->rx_errors++;
dhd_os_sdunlock_rxq(bus->dhd);
/* Force retry w/normal header read.
@@ -3896,7 +3901,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
doff);
if (bus->usebufpool) {
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(bus->dhd->osh, pkt,
+ pkt_buf_free_skb(bus->dhd->osh, pkt,
false);
dhd_os_sdunlock_rxq(bus->dhd);
}
@@ -4086,10 +4091,10 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
}
dhd_os_sdlock_rxq(bus->dhd);
- pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), false);
+ pkt = pkt_buf_get_skb(osh, (rdlen + firstread + DHD_SDALIGN));
if (!pkt) {
/* Give up on data, request rtx of events */
- DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ DHD_ERROR(("%s: pkt_buf_get_skb failed: rdlen %d chan %d\n",
__func__, rdlen, chan));
bus->dhd->rx_dropped++;
dhd_os_sdunlock_rxq(bus->dhd);
@@ -4098,17 +4103,17 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
}
dhd_os_sdunlock_rxq(bus->dhd);
- ASSERT(!PKTLINK(pkt));
+ ASSERT(!(pkt->prev));
/* Leave room for what we already read, and align remainder */
- ASSERT(firstread < (PKTLEN(pkt)));
- PKTPULL(pkt, firstread);
+ ASSERT(firstread < pkt->len);
+ skb_pull(pkt, firstread);
PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
/* Read the remaining frame data */
sdret =
dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
- F2SYNC, ((u8 *) PKTDATA(pkt)), rdlen,
+ F2SYNC, ((u8 *) (pkt->data)), rdlen,
pkt, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
@@ -4122,7 +4127,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
? "data" : "test")),
sdret));
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(bus->dhd->osh, pkt, false);
+ pkt_buf_free_skb(bus->dhd->osh, pkt, false);
dhd_os_sdunlock_rxq(bus->dhd);
bus->dhd->rx_errors++;
dhdsdio_rxfail(bus, true, RETRYCHAN(chan));
@@ -4130,12 +4135,12 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
}
/* Copy the already-read portion */
- PKTPUSH(pkt, firstread);
- bcopy(bus->rxhdr, PKTDATA(pkt), firstread);
+ skb_push(pkt, firstread);
+ bcopy(bus->rxhdr, pkt->data, firstread);
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON())
- prhex("Rx Data", PKTDATA(pkt), len);
+ prhex("Rx Data", pkt->data, len);
#endif
deliver:
@@ -4146,12 +4151,12 @@ deliver:
__func__, len));
#ifdef DHD_DEBUG
if (DHD_GLOM_ON()) {
- prhex("Glom Data", PKTDATA(pkt), len);
+ prhex("Glom Data", pkt->data, len);
}
#endif
- PKTSETLEN(pkt, len);
+ __skb_trim(pkt, len);
ASSERT(doff == SDPCM_HDRLEN);
- PKTPULL(pkt, SDPCM_HDRLEN);
+ skb_pull(pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
} else {
DHD_ERROR(("%s: glom superframe w/o "
@@ -4162,8 +4167,8 @@ deliver:
}
/* Fill in packet len and prio, deliver upward */
- PKTSETLEN(pkt, len);
- PKTPULL(pkt, doff);
+ __skb_trim(pkt, len);
+ skb_pull(pkt, doff);
#ifdef SDTEST
/* Test channel packets are processed separately */
@@ -4173,15 +4178,15 @@ deliver:
}
#endif /* SDTEST */
- if (PKTLEN(pkt) == 0) {
+ if (pkt->len == 0) {
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(bus->dhd->osh, pkt, false);
+ pkt_buf_free_skb(bus->dhd->osh, pkt, false);
dhd_os_sdunlock_rxq(bus->dhd);
continue;
} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
DHD_ERROR(("%s: rx protocol error\n", __func__));
dhd_os_sdlock_rxq(bus->dhd);
- PKTFREE(bus->dhd->osh, pkt, false);
+ pkt_buf_free_skb(bus->dhd->osh, pkt, false);
dhd_os_sdunlock_rxq(bus->dhd);
bus->dhd->rx_errors++;
continue;
@@ -4626,11 +4631,11 @@ static void dhdsdio_pktgen_init(dhd_bus_t *bus)
static void dhdsdio_pktgen(dhd_bus_t *bus)
{
- void *pkt;
+ struct sk_buff *pkt;
u8 *data;
uint pktcount;
uint fillbyte;
- osl_t *osh = bus->dhd->osh;
+ struct osl_info *osh = bus->dhd->osh;
u16 len;
/* Display current count if appropriate */
@@ -4658,16 +4663,16 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
/* Allocate an appropriate-sized packet */
len = bus->pktgen_len;
- pkt = PKTGET(osh,
+ pkt = pkt_buf_get_skb(osh,
(len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
true);
if (!pkt) {
- DHD_ERROR(("%s: PKTGET failed!\n", __func__));
+ DHD_ERROR(("%s: pkt_buf_get_skb failed!\n", __func__));
break;
}
PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN),
DHD_SDALIGN);
- data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+ data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
/* Write test header cmd and extra based on mode */
switch (bus->pktgen_mode) {
@@ -4689,7 +4694,7 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
default:
DHD_ERROR(("Unrecognized pktgen mode %d\n",
bus->pktgen_mode));
- PKTFREE(osh, pkt, true);
+ pkt_buf_free_skb(osh, pkt, true);
bus->pktgen_count = 0;
return;
}
@@ -4706,9 +4711,9 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
- data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+ data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
prhex("dhdsdio_pktgen: Tx Data", data,
- PKTLEN(pkt) - SDPCM_HDRLEN);
+ pkt->len - SDPCM_HDRLEN);
}
#endif
@@ -4733,19 +4738,19 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
{
- void *pkt;
+ struct sk_buff *pkt;
u8 *data;
- osl_t *osh = bus->dhd->osh;
+ struct osl_info *osh = bus->dhd->osh;
/* Allocate the packet */
- pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN,
+ pkt = pkt_buf_get_skb(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN,
true);
if (!pkt) {
- DHD_ERROR(("%s: PKTGET failed!\n", __func__));
+ DHD_ERROR(("%s: pkt_buf_get_skb failed!\n", __func__));
return;
}
PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
- data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+ data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
/* Fill in the test header */
*data++ = SDPCM_TEST_SEND;
@@ -4758,9 +4763,9 @@ static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
bus->pktgen_fail++;
}
-static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
{
- osl_t *osh = bus->dhd->osh;
+ struct osl_info *osh = bus->dhd->osh;
u8 *data;
uint pktlen;
@@ -4770,16 +4775,16 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
u16 offset;
/* Check for min length */
- pktlen = PKTLEN(pkt);
+ pktlen = pkt->len;
if (pktlen < SDPCM_TEST_HDRLEN) {
DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n",
pktlen));
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
return;
}
/* Extract header fields */
- data = PKTDATA(pkt);
+ data = pkt->data;
cmd = *data++;
extra = *data++;
len = *data++;
@@ -4792,7 +4797,7 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, "
"pktlen %d seq %d" " cmd %d extra %d len %d\n",
pktlen, seq, cmd, extra, len));
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
return;
}
}
@@ -4802,19 +4807,19 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
case SDPCM_TEST_ECHOREQ:
/* Rx->Tx turnaround ok (even on NDIS w/current
implementation) */
- *(u8 *) (PKTDATA(pkt)) = SDPCM_TEST_ECHORSP;
+ *(u8 *) (pkt->data) = SDPCM_TEST_ECHORSP;
if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true) == 0) {
bus->pktgen_sent++;
} else {
bus->pktgen_fail++;
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
}
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_ECHORSP:
if (bus->ext_loop) {
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
bus->pktgen_rcvd++;
break;
}
@@ -4827,12 +4832,12 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
break;
}
}
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_DISCARD:
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
bus->pktgen_rcvd++;
break;
@@ -4842,7 +4847,7 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, "
"pktlen %d seq %d" " cmd %d extra %d len %d\n",
pktlen, seq, cmd, extra, len));
- PKTFREE(osh, pkt, false);
+ pkt_buf_free_skb(osh, pkt, false);
break;
}
@@ -4960,7 +4965,7 @@ extern int dhd_bus_console_in(dhd_pub_t *dhdp, unsigned char *msg, uint msglen)
dhd_bus_t *bus = dhdp->bus;
u32 addr, val;
int rv;
- void *pkt;
+ struct sk_buff *pkt;
/* Address could be zero if CONSOLE := 0 in dongle Makefile */
if (bus->console_addr == 0)
@@ -5003,7 +5008,7 @@ extern int dhd_bus_console_in(dhd_pub_t *dhdp, unsigned char *msg, uint msglen)
/* Bump dongle by sending an empty event pkt.
* sdpcm_sendup (RX) checks for virtual console input.
*/
- pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, true);
+ pkt = pkt_buf_get_skb(bus->dhd->osh, 4 + SDPCM_RESERVE);
if ((pkt != NULL) && bus->clkstate == CLK_AVAIL)
dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, true);
@@ -5061,7 +5066,7 @@ static bool dhdsdio_chipmatch(u16 chipid)
static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
u16 slot, u16 func, uint bustype, void *regsva,
- osl_t *osh, void *sdh)
+ struct osl_info *osh, void *sdh)
{
int ret;
dhd_bus_t *bus;
@@ -5220,8 +5225,8 @@ fail:
}
static bool
-dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
- u16 devid)
+dhdsdio_probe_attach(struct dhd_bus *bus, struct osl_info *osh, void *sdh,
+ void *regsva, u16 devid)
{
u8 clkctl = 0;
int err = 0;
@@ -5280,7 +5285,7 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
"failed\n", fn));
break;
}
- bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ memset(cis[fn], 0, SBSDIO_CIS_SIZE_LIMIT);
err = bcmsdh_cis_read(sdh, fn, cis[fn],
SBSDIO_CIS_SIZE_LIMIT);
@@ -5378,7 +5383,8 @@ fail:
return false;
}
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, struct osl_info *osh,
+ void *sdh)
{
DHD_TRACE(("%s: Enter\n", __func__));
@@ -5419,7 +5425,7 @@ fail:
return false;
}
-static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+static bool dhdsdio_probe_init(dhd_bus_t *bus, struct osl_info *osh, void *sdh)
{
s32 fnum;
@@ -5496,7 +5502,7 @@ static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
}
bool
-dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+dhd_bus_download_firmware(struct dhd_bus *bus, struct osl_info *osh,
char *fw_path, char *nv_path)
{
bool ret;
@@ -5509,7 +5515,7 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
}
static bool
-dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+dhdsdio_download_firmware(struct dhd_bus *bus, struct osl_info *osh, void *sdh)
{
bool ret;
@@ -5524,7 +5530,7 @@ dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
}
/* Detach and free everything */
-static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release(dhd_bus_t *bus, struct osl_info *osh)
{
DHD_TRACE(("%s: Enter\n", __func__));
@@ -5554,7 +5560,7 @@ static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
DHD_TRACE(("%s: Disconnected\n", __func__));
}
-static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release_malloc(dhd_bus_t *bus, struct osl_info *osh)
{
DHD_TRACE(("%s: Enter\n", __func__));
@@ -5573,7 +5579,7 @@ static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
}
}
-static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release_dongle(dhd_bus_t *bus, struct osl_info *osh)
{
DHD_TRACE(("%s: Enter\n", __func__));
@@ -5986,7 +5992,7 @@ err:
static int
dhd_bcmsdh_recv_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
+ u8 *buf, uint nbytes, struct sk_buff *pkt,
bcmsdh_cmplt_fn_t complete, void *handle)
{
int status;
@@ -6000,7 +6006,7 @@ dhd_bcmsdh_recv_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
static int
dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
+ u8 *buf, uint nbytes, struct sk_buff *pkt,
bcmsdh_cmplt_fn_t complete, void *handle)
{
return bcmsdh_send_buf
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index ea0825238d53..991463f4a7f4 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/if_arp.h>
-#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
@@ -30,10 +29,6 @@
#include <dhdioctl.h>
#include <wlioctl.h>
-#include <proto/ethernet.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
@@ -342,7 +337,7 @@ static void wl_debugfs_remove_netdev(struct wl_priv *wl);
struct wl_iface *ci; \
if (unlikely(!(wl_cfg80211_dev && \
(ci = wl_get_drvdata(wl_cfg80211_dev))))) { \
- WL_ERR(("wl_cfg80211_dev is unavailable\n")); \
+ WL_ERR("wl_cfg80211_dev is unavailable\n"); \
BUG(); \
} \
ci_to_wl(ci); \
@@ -352,8 +347,8 @@ static void wl_debugfs_remove_netdev(struct wl_priv *wl);
do { \
struct wl_priv *wl = wiphy_to_wl(wiphy); \
if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) { \
- WL_INFO(("device is not ready : status (%d)\n", \
- (int)wl->status)); \
+ WL_INFO("device is not ready : status (%d)\n", \
+ (int)wl->status); \
return -EIO; \
} \
} while (0)
@@ -618,8 +613,8 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
switch (type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
- WL_ERR(("type (%d) : currently we do not support this type\n",
- type));
+ WL_ERR("type (%d) : currently we do not support this type\n",
+ type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
wl->conf->mode = WL_MODE_IBSS;
@@ -635,15 +630,15 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
ap = htod32(ap);
wdev = ndev->ieee80211_ptr;
wdev->iftype = type;
- WL_DBG(("%s : ap (%d), infra (%d)\n", ndev->name, ap, infra));
+ WL_DBG("%s : ap (%d), infra (%d)\n", ndev->name, ap, infra);
err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ WL_ERR("WLC_SET_INFRA error (%d)\n", err);
return err;
}
err = wl_dev_ioctl(ndev, WLC_SET_AP, &ap, sizeof(ap));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_AP error (%d)\n", err));
+ WL_ERR("WLC_SET_AP error (%d)\n", err);
return err;
}
@@ -653,7 +648,7 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
static void wl_iscan_prep(struct wl_scan_params *params, struct wlc_ssid *ssid)
{
- memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ memcpy(&params->bssid, &ether_bcast, ETH_ALEN);
params->bss_type = DOT11_BSSTYPE_ANY;
params->scan_type = 0;
params->nprobes = -1;
@@ -705,7 +700,7 @@ wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid, u16 action)
if (ssid && ssid->SSID_len)
params_size += sizeof(struct wlc_ssid);
- params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
+ params = kzalloc(params_size, GFP_KERNEL);
if (unlikely(!params))
return -ENOMEM;
memset(params, 0, params_size);
@@ -722,9 +717,9 @@ wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid, u16 action)
iscan->ioctl_buf, WLC_IOCTL_SMLEN);
if (unlikely(err)) {
if (err == -EBUSY) {
- WL_INFO(("system busy : iscan canceled\n"));
+ WL_INFO("system busy : iscan canceled\n");
} else {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
}
}
kfree(params);
@@ -748,7 +743,7 @@ static s32 wl_do_iscan(struct wl_priv *wl)
err = wl_dev_ioctl(wl_to_ndev(wl), WLC_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (unlikely(err)) {
- WL_DBG(("error (%d)\n", err));
+ WL_DBG("error (%d)\n", err);
return err;
}
wl_set_mpc(ndev, 0);
@@ -774,12 +769,12 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
if (unlikely(test_bit(WL_STATUS_SCANNING, &wl->status))) {
- WL_ERR(("Scanning already : status (%d)\n", (int)wl->status));
+ WL_ERR("Scanning already : status (%d)\n", (int)wl->status);
return -EAGAIN;
}
if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &wl->status))) {
- WL_ERR(("Scanning being aborted : status (%d)\n",
- (int)wl->status));
+ WL_ERR("Scanning being aborted : status (%d)\n",
+ (int)wl->status);
return -EAGAIN;
}
@@ -811,26 +806,26 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
else
goto scan_out;
} else {
- WL_DBG(("ssid \"%s\", ssid_len (%d)\n",
- ssids->ssid, ssids->ssid_len));
+ WL_DBG("ssid \"%s\", ssid_len (%d)\n",
+ ssids->ssid, ssids->ssid_len);
memset(&sr->ssid, 0, sizeof(sr->ssid));
sr->ssid.SSID_len =
min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
if (sr->ssid.SSID_len) {
memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
- WL_DBG(("Specific scan ssid=\"%s\" len=%d\n",
- sr->ssid.SSID, sr->ssid.SSID_len));
+ WL_DBG("Specific scan ssid=\"%s\" len=%d\n",
+ sr->ssid.SSID, sr->ssid.SSID_len);
spec_scan = true;
} else {
- WL_DBG(("Broadcast scan\n"));
+ WL_DBG("Broadcast scan\n");
}
- WL_DBG(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
+ WL_DBG("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len);
passive_scan = wl->active_scan ? 0 : 1;
err = wl_dev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
+ WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
goto scan_out;
}
wl_set_mpc(ndev, 0);
@@ -838,10 +833,10 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
sizeof(sr->ssid));
if (err) {
if (err == -EBUSY) {
- WL_INFO(("system busy : scan for \"%s\" "
- "canceled\n", sr->ssid.SSID));
+ WL_INFO("system busy : scan for \"%s\" canceled\n",
+ sr->ssid.SSID);
} else {
- WL_ERR(("WLC_SCAN error (%d)\n", err));
+ WL_ERR("WLC_SCAN error (%d)\n", err);
}
wl_set_mpc(ndev, 1);
goto scan_out;
@@ -865,7 +860,7 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
CHECK_SYS_UP();
err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
if (unlikely(err)) {
- WL_DBG(("scan error (%d)\n", err));
+ WL_DBG("scan error (%d)\n", err);
return err;
}
@@ -884,7 +879,7 @@ static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
err = wl_dev_ioctl(dev, WLC_SET_VAR, buf, len);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
}
return err;
@@ -907,7 +902,7 @@ wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
BUG_ON(unlikely(!len));
err = wl_dev_ioctl(dev, WLC_GET_VAR, &var, len);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
}
*retval = dtoh32(var.val);
@@ -920,7 +915,7 @@ static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
err = wl_dev_intvar_set(dev, "rtsthresh", rts_threshold);
if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
+ WL_ERR("Error (%d)\n", err);
return err;
}
return err;
@@ -932,7 +927,7 @@ static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
err = wl_dev_intvar_set(dev, "fragthresh", frag_threshold);
if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
+ WL_ERR("Error (%d)\n", err);
return err;
}
return err;
@@ -946,7 +941,7 @@ static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
retry = htod32(retry);
err = wl_dev_ioctl(dev, cmd, &retry, sizeof(retry));
if (unlikely(err)) {
- WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
return err;
}
return err;
@@ -1006,7 +1001,7 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
CHECK_SYS_UP();
if (params->bssid) {
- WL_ERR(("Invalid bssid\n"));
+ WL_ERR("Invalid bssid\n");
return -EOPNOTSUPP;
}
bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
@@ -1032,7 +1027,7 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
}
if (bss) {
wl->ibss_starter = false;
- WL_DBG(("Found IBSS\n"));
+ WL_DBG("Found IBSS\n");
} else {
wl->ibss_starter = true;
}
@@ -1049,14 +1044,14 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
join_params.ssid.SSID_len = htod32(params->ssid_len);
if (params->bssid)
memcpy(&join_params.params.bssid, params->bssid,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
else
- memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+ memset(&join_params.params.bssid, 0, ETH_ALEN);
err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params,
sizeof(join_params));
if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
+ WL_ERR("Error (%d)\n", err);
return err;
}
return err;
@@ -1087,10 +1082,10 @@ wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
else
val = WPA_AUTH_DISABLED;
- WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+ WL_DBG("setting wpa_auth to 0x%0x\n", val);
err = wl_dev_intvar_set(dev, "wpa_auth", val);
if (unlikely(err)) {
- WL_ERR(("set wpa_auth failed (%d)\n", err));
+ WL_ERR("set wpa_auth failed (%d)\n", err);
return err;
}
sec = wl_read_prof(wl, WL_PROF_SEC);
@@ -1109,27 +1104,27 @@ wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
switch (sme->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
val = 0;
- WL_DBG(("open system\n"));
+ WL_DBG("open system\n");
break;
case NL80211_AUTHTYPE_SHARED_KEY:
val = 1;
- WL_DBG(("shared key\n"));
+ WL_DBG("shared key\n");
break;
case NL80211_AUTHTYPE_AUTOMATIC:
val = 2;
- WL_DBG(("automatic\n"));
+ WL_DBG("automatic\n");
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_DBG(("network eap\n"));
+ WL_DBG("network eap\n");
default:
val = 2;
- WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+ WL_ERR("invalid auth type (%d)\n", sme->auth_type);
break;
}
err = wl_dev_intvar_set(dev, "auth", val);
if (unlikely(err)) {
- WL_ERR(("set auth failed (%d)\n", err));
+ WL_ERR("set auth failed (%d)\n", err);
return err;
}
sec = wl_read_prof(wl, WL_PROF_SEC);
@@ -1162,8 +1157,8 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
pval = AES_ENABLED;
break;
default:
- WL_ERR(("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]));
+ WL_ERR("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
}
@@ -1183,16 +1178,16 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
gval = AES_ENABLED;
break;
default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
- WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+ WL_DBG("pval (%d) gval (%d)\n", pval, gval);
err = wl_dev_intvar_set(dev, "wsec", pval | gval);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
@@ -1214,7 +1209,7 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
if (sme->crypto.n_akm_suites) {
err = wl_dev_intvar_get(dev, "wpa_auth", &val);
if (unlikely(err)) {
- WL_ERR(("could not get wpa_auth (%d)\n", err));
+ WL_ERR("could not get wpa_auth (%d)\n", err);
return err;
}
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
@@ -1226,8 +1221,8 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1239,16 +1234,16 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
val = WPA2_AUTH_PSK;
break;
default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
+ WL_ERR("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
- WL_DBG(("setting wpa_auth to %d\n", val));
+ WL_DBG("setting wpa_auth to %d\n", val);
err = wl_dev_intvar_set(dev, "wpa_auth", val);
if (unlikely(err)) {
- WL_ERR(("could not set wpa_auth (%d)\n", err));
+ WL_ERR("could not set wpa_auth (%d)\n", err);
return err;
}
}
@@ -1268,11 +1263,11 @@ wl_set_set_sharedkey(struct net_device *dev,
s32 val;
s32 err = 0;
- WL_DBG(("key len (%d)\n", sme->key_len));
+ WL_DBG("key len (%d)\n", sme->key_len);
if (sme->key_len) {
sec = wl_read_prof(wl, WL_PROF_SEC);
- WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
- sec->wpa_versions, sec->cipher_pairwise));
+ WL_DBG("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise);
if (!
(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
NL80211_WPA_VERSION_2))
@@ -1282,7 +1277,7 @@ wl_set_set_sharedkey(struct net_device *dev,
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (unlikely(key.len > sizeof(key.data))) {
- WL_ERR(("Too long key length (%u)\n", key.len));
+ WL_ERR("Too long key length (%u)\n", key.len);
return -EINVAL;
}
memcpy(key.data, sme->key, key.len);
@@ -1295,27 +1290,27 @@ wl_set_set_sharedkey(struct net_device *dev,
key.algo = CRYPTO_ALGO_WEP128;
break;
default:
- WL_ERR(("Invalid algorithm (%d)\n",
- sme->crypto.ciphers_pairwise[0]));
+ WL_ERR("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
/* Set the new key/index */
- WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
- key.len, key.index, key.algo));
- WL_DBG(("key \"%s\"\n", key.data));
+ WL_DBG("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo);
+ WL_DBG("key \"%s\"\n", key.data);
swap_key_from_BE(&key);
err = wl_dev_ioctl(dev, WLC_SET_KEY, &key,
sizeof(key));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
return err;
}
if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
- WL_DBG(("set auth_type to shared key\n"));
+ WL_DBG("set auth_type to shared key\n");
val = 1; /* shared key */
err = wl_dev_intvar_set(dev, "auth", val);
if (unlikely(err)) {
- WL_ERR(("set auth failed (%d)\n", err));
+ WL_ERR("set auth failed (%d)\n", err);
return err;
}
}
@@ -1337,15 +1332,15 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
CHECK_SYS_UP();
if (unlikely(!sme->ssid)) {
- WL_ERR(("Invalid ssid\n"));
+ WL_ERR("Invalid ssid\n");
return -EOPNOTSUPP;
}
if (chan) {
wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
- WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
- chan->center_freq));
+ WL_DBG("channel (%d), center_req (%d)\n",
+ wl->channel, chan->center_freq);
}
- WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+ WL_DBG("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
err = wl_set_wpa_version(dev, sme);
if (unlikely(err))
return err;
@@ -1378,18 +1373,18 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
- memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
- WL_DBG(("join_param_size %d\n", join_params_size));
+ WL_DBG("join_param_size %d\n", join_params_size);
if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_DBG(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
- join_params.ssid.SSID_len));
+ WL_DBG("ssid \"%s\", len (%d)\n",
+ join_params.ssid.SSID, join_params.ssid.SSID_len);
}
err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
set_bit(WL_STATUS_CONNECTING, &wl->status);
@@ -1406,17 +1401,17 @@ wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
bool act = false;
s32 err = 0;
- WL_DBG(("Reason %d\n", reason_code));
+ WL_DBG("Reason %d\n", reason_code);
CHECK_SYS_UP();
act = *(bool *) wl_read_prof(wl, WL_PROF_ACT);
if (likely(act)) {
scbval.val = reason_code;
- memcpy(&scbval.ea, &wl->bssid, ETHER_ADDR_LEN);
+ memcpy(&scbval.ea, &wl->bssid, ETH_ALEN);
scbval.val = htod32(scbval.val);
err = wl_dev_ioctl(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t));
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
}
@@ -1441,13 +1436,13 @@ wl_cfg80211_set_tx_power(struct wiphy *wiphy,
break;
case NL80211_TX_POWER_LIMITED:
if (dbm < 0) {
- WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+ WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
return -EINVAL;
}
break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
- WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+ WL_ERR("TX_POWER_FIXED - dbm is negative\n");
return -EINVAL;
}
break;
@@ -1457,7 +1452,7 @@ wl_cfg80211_set_tx_power(struct wiphy *wiphy,
disable = htod32(disable);
err = wl_dev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ WL_ERR("WLC_SET_RADIO error (%d)\n", err);
return err;
}
@@ -1468,7 +1463,7 @@ wl_cfg80211_set_tx_power(struct wiphy *wiphy,
err = wl_dev_intvar_set(ndev, "qtxpower",
(s32) (bcm_mw_to_qdbm(txpwrmw)));
if (unlikely(err)) {
- WL_ERR(("qtxpower error (%d)\n", err));
+ WL_ERR("qtxpower error (%d)\n", err);
return err;
}
wl->conf->tx_power = dbm;
@@ -1487,7 +1482,7 @@ static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
CHECK_SYS_UP();
err = wl_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
@@ -1504,12 +1499,12 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
s32 wsec;
s32 err = 0;
- WL_DBG(("key index (%d)\n", key_idx));
+ WL_DBG("key index (%d)\n", key_idx);
CHECK_SYS_UP();
err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
if (unlikely(err)) {
- WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ WL_ERR("WLC_GET_WSEC error (%d)\n", err);
return err;
}
wsec = dtoh32(wsec);
@@ -1520,7 +1515,7 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
err = wl_dev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
sizeof(index));
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
}
}
return err;
@@ -1537,8 +1532,8 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
key.index = (u32) key_idx;
/* Instead of bcast for ea address for default wep keys,
driver needs it to be Null */
- if (!ETHER_ISMULTI(mac_addr))
- memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+ if (!is_multicast_ether_addr(mac_addr))
+ memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
key.len = (u32) params->key_len;
/* check for key index change */
if (key.len == 0) {
@@ -1546,16 +1541,16 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
swap_key_from_BE(&key);
err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
- WL_ERR(("key delete error (%d)\n", err));
+ WL_ERR("key delete error (%d)\n", err);
return err;
}
} else {
if (key.len > sizeof(key.data)) {
- WL_ERR(("Invalid key length (%d)\n", key.len));
+ WL_ERR("Invalid key length (%d)\n", key.len);
return -EINVAL;
}
- WL_DBG(("Setting the key index %d\n", key.index));
+ WL_DBG("Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
@@ -1579,26 +1574,26 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
key.algo = CRYPTO_ALGO_TKIP;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
- WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
return -EINVAL;
}
swap_key_from_BE(&key);
@@ -1606,7 +1601,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
dhd_wait_pend8021x(dev);
err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
return err;
}
}
@@ -1623,7 +1618,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
s32 wsec;
s32 err = 0;
- WL_DBG(("key index (%d)\n", key_idx));
+ WL_DBG("key index (%d)\n", key_idx);
CHECK_SYS_UP();
if (mac_addr)
@@ -1634,7 +1629,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
key.index = (u32) key_idx;
if (unlikely(key.len > sizeof(key.data))) {
- WL_ERR(("Too long key length (%u)\n", key.len));
+ WL_ERR("Too long key length (%u)\n", key.len);
return -EINVAL;
}
memcpy(key.data, params->key, key.len);
@@ -1643,26 +1638,26 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
key.algo = CRYPTO_ALGO_TKIP;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
- WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
return -EINVAL;
}
@@ -1670,21 +1665,21 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
swap_key_from_BE(&key);
err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
return err;
}
val = WEP_ENABLED;
err = wl_dev_intvar_get(dev, "wsec", &wsec);
if (unlikely(err)) {
- WL_ERR(("get wsec error (%d)\n", err));
+ WL_ERR("get wsec error (%d)\n", err);
return err;
}
wsec &= ~(WEP_ENABLED);
wsec |= val;
err = wl_dev_intvar_set(dev, "wsec", wsec);
if (unlikely(err)) {
- WL_ERR(("set wsec error (%d)\n", err));
+ WL_ERR("set wsec error (%d)\n", err);
return err;
}
@@ -1692,7 +1687,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
val = htod32(val);
err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+ WL_ERR("WLC_SET_AUTH error (%d)\n", err);
return err;
}
return err;
@@ -1714,7 +1709,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
key.flags = WL_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
- WL_DBG(("key index (%d)\n", key_idx));
+ WL_DBG("key index (%d)\n", key_idx);
/* Set the new key/index */
swap_key_from_BE(&key);
err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
@@ -1722,10 +1717,10 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
if (err == -EINVAL) {
if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
/* we ignore this key index in this case */
- WL_DBG(("invalid key index (%d)\n", key_idx));
+ WL_DBG("invalid key index (%d)\n", key_idx);
}
} else {
- WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ WL_ERR("WLC_SET_KEY error (%d)\n", err);
}
return err;
}
@@ -1733,14 +1728,14 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
val = 0;
err = wl_dev_intvar_get(dev, "wsec", &wsec);
if (unlikely(err)) {
- WL_ERR(("get wsec error (%d)\n", err));
+ WL_ERR("get wsec error (%d)\n", err);
return err;
}
wsec &= ~(WEP_ENABLED);
wsec |= val;
err = wl_dev_intvar_set(dev, "wsec", wsec);
if (unlikely(err)) {
- WL_ERR(("set wsec error (%d)\n", err));
+ WL_ERR("set wsec error (%d)\n", err);
return err;
}
@@ -1748,7 +1743,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
val = htod32(val);
err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+ WL_ERR("WLC_SET_AUTH error (%d)\n", err);
return err;
}
return err;
@@ -1766,7 +1761,7 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
s32 wsec;
s32 err = 0;
- WL_DBG(("key index (%d)\n", key_idx));
+ WL_DBG("key index (%d)\n", key_idx);
CHECK_SYS_UP();
memset(&key, 0, sizeof(key));
@@ -1778,7 +1773,7 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
if (unlikely(err)) {
- WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ WL_ERR("WLC_GET_WSEC error (%d)\n", err);
return err;
}
wsec = dtoh32(wsec);
@@ -1787,22 +1782,22 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
sec = wl_read_prof(wl, WL_PROF_SEC);
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
params.cipher = WLAN_CIPHER_SUITE_WEP104;
- WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
}
break;
case TKIP_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_TKIP;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
break;
case AES_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
- WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
default:
- WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ WL_ERR("Invalid algo (0x%x)\n", wsec);
return -EINVAL;
}
@@ -1814,7 +1809,7 @@ static s32
wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *dev, u8 key_idx)
{
- WL_INFO(("Not supported\n"));
+ WL_INFO("Not supported\n");
CHECK_SYS_UP();
return -EOPNOTSUPP;
}
@@ -1831,20 +1826,20 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
CHECK_SYS_UP();
if (unlikely
- (memcmp(mac, wl_read_prof(wl, WL_PROF_BSSID), ETHER_ADDR_LEN))) {
- WL_ERR(("Wrong Mac address\n"));
+ (memcmp(mac, wl_read_prof(wl, WL_PROF_BSSID), ETH_ALEN))) {
+ WL_ERR("Wrong Mac address\n");
return -ENOENT;
}
/* Report the current tx rate */
err = wl_dev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
if (err) {
- WL_ERR(("Could not get rate (%d)\n", err));
+ WL_ERR("Could not get rate (%d)\n", err);
} else {
rate = dtoh32(rate);
sinfo->filled |= STATION_INFO_TX_BITRATE;
sinfo->txrate.legacy = rate * 5;
- WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ WL_DBG("Rate %d Mbps\n", rate / 2);
}
if (test_bit(WL_STATUS_CONNECTED, &wl->status)) {
@@ -1852,13 +1847,13 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
err = wl_dev_ioctl(dev, WLC_GET_RSSI, &scb_val,
sizeof(scb_val_t));
if (unlikely(err)) {
- WL_ERR(("Could not get rssi (%d)\n", err));
+ WL_ERR("Could not get rssi (%d)\n", err);
return err;
}
rssi = dtoh32(scb_val.val);
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = rssi;
- WL_DBG(("RSSI %d dBm\n", rssi));
+ WL_DBG("RSSI %d dBm\n", rssi);
}
return err;
@@ -1874,13 +1869,13 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
CHECK_SYS_UP();
pm = enabled ? PM_FAST : PM_OFF;
pm = htod32(pm);
- WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+ WL_DBG("power save %s\n", (pm ? "enabled" : "disabled"));
err = wl_dev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
if (unlikely(err)) {
if (err == -ENODEV)
- WL_DBG(("net_device is not ready yet\n"));
+ WL_DBG("net_device is not ready yet\n");
else
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
return err;
@@ -1932,7 +1927,7 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
err = wl_dev_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
sizeof(rateset));
if (unlikely(err)) {
- WL_ERR(("could not get current rateset (%d)\n", err));
+ WL_ERR("could not get current rateset (%d)\n", err);
return err;
}
@@ -1952,7 +1947,7 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
rate = val / 500000;
}
- WL_DBG(("rate %d mbps\n", (rate / 2)));
+ WL_DBG("rate %d mbps\n", rate / 2);
/*
*
@@ -1962,7 +1957,7 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
err_bg = wl_dev_intvar_set(dev, "bg_rate", rate);
err_a = wl_dev_intvar_set(dev, "a_rate", rate);
if (unlikely(err_bg && err_a)) {
- WL_ERR(("could not set fixed rate (%d) (%d)\n", err_bg, err_a));
+ WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
return err_bg | err_a;
}
@@ -2007,12 +2002,12 @@ wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
{
int i, j;
- WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+ WL_DBG("No of elements %d\n", pmk_list->pmkids.npmkid);
for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
- WL_DBG(("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].BSSID));
+ WL_DBG("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID);
for (j = 0; j < WPA2_PMKID_LEN; j++) {
- WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ WL_DBG("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
}
}
if (likely(!err)) {
@@ -2034,11 +2029,11 @@ wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
CHECK_SYS_UP();
for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN))
+ ETH_ALEN))
break;
if (i < WL_NUM_PMKIDS_MAX) {
memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
WPA2_PMKID_LEN);
if (i == wl->pmk_list->pmkids.npmkid)
@@ -2046,12 +2041,12 @@ wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
} else {
err = -EINVAL;
}
- WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID));
+ WL_DBG("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID);
for (i = 0; i < WPA2_PMKID_LEN; i++) {
- WL_DBG(("%02x\n",
- wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
- PMKID[i]));
+ WL_DBG("%02x\n",
+ wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
+ PMKID[i]);
}
err = wl_update_pmklist(dev, wl->pmk_list, err);
@@ -2069,19 +2064,19 @@ wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
int i;
CHECK_SYS_UP();
- memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
- WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
- &pmkid.pmkid[0].BSSID));
+ WL_DBG("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID);
for (i = 0; i < WPA2_PMKID_LEN; i++) {
- WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+ WL_DBG("%02x\n", pmkid.pmkid[0].PMKID[i]);
}
for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
if (!memcmp
(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN))
+ ETH_ALEN))
break;
if ((wl->pmk_list->pmkids.npmkid > 0)
@@ -2090,7 +2085,7 @@ wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
&wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
&wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
WPA2_PMKID_LEN);
@@ -2168,13 +2163,13 @@ static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (unlikely(!wdev)) {
- WL_ERR(("Could not allocate wireless device\n"));
+ WL_ERR("Could not allocate wireless device\n");
return ERR_PTR(-ENOMEM);
}
wdev->wiphy =
wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv) + sizeof_iface);
if (unlikely(!wdev->wiphy)) {
- WL_ERR(("Couldn not allocate wiphy device\n"));
+ WL_ERR("Couldn not allocate wiphy device\n");
err = -ENOMEM;
goto wiphy_new_out;
}
@@ -2204,7 +2199,7 @@ static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
#endif /* !WL_POWERSAVE_DISABLED */
err = wiphy_register(wdev->wiphy);
if (unlikely(err < 0)) {
- WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+ WL_ERR("Couldn not register wiphy device (%d)\n", err);
goto wiphy_register_out;
}
return wdev;
@@ -2223,7 +2218,7 @@ static void wl_free_wdev(struct wl_priv *wl)
struct wireless_dev *wdev = wl_to_wdev(wl);
if (unlikely(!wdev)) {
- WL_ERR(("wdev is invalid\n"));
+ WL_ERR("wdev is invalid\n");
return;
}
wiphy_unregister(wdev->wiphy);
@@ -2241,11 +2236,11 @@ static s32 wl_inform_bss(struct wl_priv *wl)
bss_list = wl->bss_list;
if (unlikely(bss_list->version != WL_BSS_INFO_VERSION)) {
- WL_ERR(("Version %d != WL_BSS_INFO_VERSION\n",
- bss_list->version));
+ WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
+ bss_list->version);
return -EOPNOTSUPP;
}
- WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+ WL_DBG("scanned AP count (%d)\n", bss_list->count);
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
err = wl_inform_single_bss(wl, bi);
@@ -2270,14 +2265,14 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
s32 err = 0;
if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
- WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+ WL_DBG("Beacon is larger than buffer. Discarding\n");
return err;
}
notif_bss_info =
kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) +
WL_BSS_INFO_MAX, GFP_KERNEL);
if (unlikely(!notif_bss_info)) {
- WL_ERR(("notif_bss_info alloc failed\n"));
+ WL_ERR("notif_bss_info alloc failed\n");
return -ENOMEM;
}
mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
@@ -2289,7 +2284,7 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
else
band = wiphy->bands[IEEE80211_BAND_5GHZ];
notif_bss_info->rssi = bi->RSSI;
- memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+ memcpy(mgmt->bssid, &bi->BSSID, ETH_ALEN);
mgmt_type = wl->active_scan ?
IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
@@ -2321,17 +2316,17 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
channel = ieee80211_get_channel(wiphy, freq);
- WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
- bi->SSID,
- notif_bss_info->rssi, notif_bss_info->channel,
- mgmt->u.beacon.capab_info, &bi->BSSID));
+ WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
+ bi->SSID,
+ notif_bss_info->rssi, notif_bss_info->channel,
+ mgmt->u.beacon.capab_info, &bi->BSSID);
signal = notif_bss_info->rssi * 100;
if (unlikely(!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
le16_to_cpu
(notif_bss_info->frame_len),
signal, GFP_KERNEL))) {
- WL_ERR(("cfg80211_inform_bss_frame error\n"));
+ WL_ERR("cfg80211_inform_bss_frame error\n");
kfree(notif_bss_info);
return -EINVAL;
}
@@ -2399,12 +2394,12 @@ wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
if (wl_is_ibssmode(wl)) {
cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
GFP_KERNEL);
- WL_DBG(("joined in IBSS network\n"));
+ WL_DBG("joined in IBSS network\n");
} else {
wl_bss_connect_done(wl, ndev, e, data, true);
- WL_DBG(("joined in BSS network \"%s\"\n",
- ((struct wlc_ssid *)
- wl_read_prof(wl, WL_PROF_SSID))->SSID));
+ WL_DBG("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)
+ wl_read_prof(wl, WL_PROF_SSID))->SSID);
}
act = true;
wl_update_prof(wl, e, &act, WL_PROF_ACT);
@@ -2459,7 +2454,7 @@ wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
err = wl_dev_ioctl(dev, WLC_GET_VAR, (void *)wl->ioctl_buf,
WL_IOCTL_LEN_MAX);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
memcpy(buf, wl->ioctl_buf, buf_len);
@@ -2479,7 +2474,7 @@ static s32 wl_get_assoc_ies(struct wl_priv *wl)
err = wl_dev_bufvar_get(ndev, "assoc_info", wl->extra_buf,
WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
- WL_ERR(("could not get assoc info (%d)\n", err));
+ WL_ERR("could not get assoc info (%d)\n", err);
return err;
}
assoc_info = (struct wl_assoc_ielen *)wl->extra_buf;
@@ -2489,7 +2484,7 @@ static s32 wl_get_assoc_ies(struct wl_priv *wl)
err = wl_dev_bufvar_get(ndev, "assoc_req_ies", wl->extra_buf,
WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
- WL_ERR(("could not get assoc req (%d)\n", err));
+ WL_ERR("could not get assoc req (%d)\n", err);
return err;
}
conn_info->req_ie_len = req_len;
@@ -2503,7 +2498,7 @@ static s32 wl_get_assoc_ies(struct wl_priv *wl)
err = wl_dev_bufvar_get(ndev, "assoc_resp_ies", wl->extra_buf,
WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
- WL_ERR(("could not get assoc resp (%d)\n", err));
+ WL_ERR("could not get assoc resp (%d)\n", err);
return err;
}
conn_info->resp_ie_len = resp_len;
@@ -2513,8 +2508,8 @@ static s32 wl_get_assoc_ies(struct wl_priv *wl)
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
}
- WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
- conn_info->resp_ie_len));
+ WL_DBG("req len (%d) resp len (%d)\n",
+ conn_info->req_ie_len, conn_info->resp_ie_len);
return err;
}
@@ -2547,8 +2542,8 @@ static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
join_params->params.chanspec_num =
htod32(join_params->params.chanspec_num);
- WL_DBG(("join_params->params.chanspec_list[0]= %#X, channel %d, chanspec %#X\n",
- join_params->params.chanspec_list[0], ch, chanspec));
+ WL_DBG("join_params->params.chanspec_list[0]= %#X, channel %d, chanspec %#X\n",
+ join_params->params.chanspec_list[0], ch, chanspec);
}
}
@@ -2575,16 +2570,16 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
rtnl_lock();
if (unlikely(!bss)) {
- WL_DBG(("Could not find the AP\n"));
+ WL_DBG("Could not find the AP\n");
*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_BSS_INFO,
wl->extra_buf, WL_EXTRA_BUF_MAX);
if (unlikely(err)) {
- WL_ERR(("Could not get bss info %d\n", err));
+ WL_ERR("Could not get bss info %d\n", err);
goto update_bss_info_out;
}
bi = (struct wl_bss_info *)(wl->extra_buf + 4);
- if (unlikely(memcmp(&bi->BSSID, &wl->bssid, ETHER_ADDR_LEN))) {
+ if (unlikely(memcmp(&bi->BSSID, &wl->bssid, ETH_ALEN))) {
err = -EIO;
goto update_bss_info_out;
}
@@ -2596,7 +2591,7 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
ie_len = bi->ie_length;
beacon_interval = cpu_to_le16(bi->beacon_period);
} else {
- WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+ WL_DBG("Found the AP in the list - BSSID %pM\n", bss->bssid);
ie = bss->information_elements;
ie_len = bss->len_information_elements;
beacon_interval = bss->beacon_interval;
@@ -2615,7 +2610,7 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_DTIMPRD,
&dtim_period, sizeof(dtim_period));
if (unlikely(err)) {
- WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ WL_ERR("WLC_GET_DTIMPRD error (%d)\n", err);
goto update_bss_info_out;
}
}
@@ -2636,13 +2631,13 @@ wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
s32 err = 0;
wl_get_assoc_ies(wl);
- memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+ memcpy(&wl->bssid, &e->addr, ETH_ALEN);
wl_update_bss_info(wl);
cfg80211_roamed(ndev,
(u8 *)&wl->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
- WL_DBG(("Report roaming result\n"));
+ WL_DBG("Report roaming result\n");
set_bit(WL_STATUS_CONNECTED, &wl->status);
@@ -2657,7 +2652,7 @@ wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
s32 err = 0;
wl_get_assoc_ies(wl);
- memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+ memcpy(&wl->bssid, &e->addr, ETH_ALEN);
wl_update_bss_info(wl);
if (test_and_clear_bit(WL_STATUS_CONNECTING, &wl->status)) {
cfg80211_connect_result(ndev,
@@ -2668,15 +2663,15 @@ wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
conn_info->resp_ie_len,
completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
- WL_DBG(("Report connect result - connection %s\n",
- completed ? "succeeded" : "failed"));
+ WL_DBG("Report connect result - connection %s\n",
+ completed ? "succeeded" : "failed");
} else {
cfg80211_roamed(ndev,
(u8 *)&wl->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len,
GFP_KERNEL);
- WL_DBG(("Report roaming result\n"));
+ WL_DBG("Report roaming result\n");
}
set_bit(WL_STATUS_CONNECTED, &wl->status);
@@ -2716,7 +2711,7 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
return wl_wakeup_iscan(wl_to_iscan(wl));
if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
- WL_ERR(("Scan complete while device not scanning\n"));
+ WL_ERR("Scan complete while device not scanning\n");
return -EINVAL;
}
if (unlikely(!wl->scan_request)) {
@@ -2725,14 +2720,14 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
err = wl_dev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
sizeof(channel_inform));
if (unlikely(err)) {
- WL_ERR(("scan busy (%d)\n", err));
+ WL_ERR("scan busy (%d)\n", err);
goto scan_done_out;
}
channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
if (unlikely(channel_inform.scan_channel)) {
- WL_DBG(("channel_inform.scan_channel (%d)\n",
- channel_inform.scan_channel));
+ WL_DBG("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel);
}
wl->bss_list = wl->scan_results;
bss_list = wl->bss_list;
@@ -2740,7 +2735,7 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
bss_list->buflen = htod32(len);
err = wl_dev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len);
if (unlikely(err)) {
- WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
err = -EINVAL;
goto scan_done_out;
}
@@ -2794,55 +2789,54 @@ static void wl_init_eloop_handler(struct wl_event_loop *el)
static s32 wl_init_priv_mem(struct wl_priv *wl)
{
- wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ wl->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
if (unlikely(!wl->scan_results)) {
- WL_ERR(("Scan results alloc failed\n"));
+ WL_ERR("Scan results alloc failed\n");
goto init_priv_mem_out;
}
- wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+ wl->conf = kzalloc(sizeof(*wl->conf), GFP_KERNEL);
if (unlikely(!wl->conf)) {
- WL_ERR(("wl_conf alloc failed\n"));
+ WL_ERR("wl_conf alloc failed\n");
goto init_priv_mem_out;
}
- wl->profile = (void *)kzalloc(sizeof(*wl->profile), GFP_KERNEL);
+ wl->profile = kzalloc(sizeof(*wl->profile), GFP_KERNEL);
if (unlikely(!wl->profile)) {
- WL_ERR(("wl_profile alloc failed\n"));
+ WL_ERR("wl_profile alloc failed\n");
goto init_priv_mem_out;
}
- wl->bss_info = (void *)kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ wl->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (unlikely(!wl->bss_info)) {
- WL_ERR(("Bss information alloc failed\n"));
+ WL_ERR("Bss information alloc failed\n");
goto init_priv_mem_out;
}
- wl->scan_req_int =
- (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+ wl->scan_req_int = kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
if (unlikely(!wl->scan_req_int)) {
- WL_ERR(("Scan req alloc failed\n"));
+ WL_ERR("Scan req alloc failed\n");
goto init_priv_mem_out;
}
- wl->ioctl_buf = (void *)kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+ wl->ioctl_buf = kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
if (unlikely(!wl->ioctl_buf)) {
- WL_ERR(("Ioctl buf alloc failed\n"));
+ WL_ERR("Ioctl buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ wl->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (unlikely(!wl->extra_buf)) {
- WL_ERR(("Extra buf alloc failed\n"));
+ WL_ERR("Extra buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+ wl->iscan = kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
if (unlikely(!wl->iscan)) {
- WL_ERR(("Iscan buf alloc failed\n"));
+ WL_ERR("Iscan buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->fw = (void *)kzalloc(sizeof(*wl->fw), GFP_KERNEL);
+ wl->fw = kzalloc(sizeof(*wl->fw), GFP_KERNEL);
if (unlikely(!wl->fw)) {
- WL_ERR(("fw object alloc failed\n"));
+ WL_ERR("fw object alloc failed\n");
goto init_priv_mem_out;
}
- wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+ wl->pmk_list = kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
if (unlikely(!wl->pmk_list)) {
- WL_ERR(("pmk list alloc failed\n"));
+ WL_ERR("pmk list alloc failed\n");
goto init_priv_mem_out;
}
@@ -2884,7 +2878,7 @@ static s32 wl_create_event_handler(struct wl_priv *wl)
wl->event_tsk = kthread_run(wl_event_handler, wl, "wl_event_handler");
if (IS_ERR(wl->event_tsk)) {
wl->event_tsk = NULL;
- WL_ERR(("failed to create event thread\n"));
+ WL_ERR("failed to create event thread\n");
return -ENOMEM;
}
return 0;
@@ -2917,7 +2911,7 @@ static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
struct net_device *ndev = wl_to_ndev(wl);
if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
- WL_ERR(("Scan complete while device not scanning\n"));
+ WL_ERR("Scan complete while device not scanning\n");
return;
}
if (likely(wl->scan_request)) {
@@ -2931,7 +2925,7 @@ static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
{
if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
- WL_DBG(("wake up iscan\n"));
+ WL_DBG("wake up iscan\n");
up(&iscan->sync);
return 0;
}
@@ -2961,14 +2955,14 @@ wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
WL_ISCAN_BUF_MAX);
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
results->buflen = dtoh32(results->buflen);
results->version = dtoh32(results->version);
results->count = dtoh32(results->count);
- WL_DBG(("results->count = %d\n", results->count));
- WL_DBG(("results->buflen = %d\n", results->buflen));
+ WL_DBG("results->count = %d\n", results->count);
+ WL_DBG("results->buflen = %d\n", results->buflen);
*status = dtoh32(list_buf->status);
*bss_list = results;
@@ -3053,7 +3047,7 @@ static s32 wl_iscan_thread(void *data)
err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
if (unlikely(err)) {
status = WL_SCAN_RESULTS_ABORTED;
- WL_ERR(("Abort iscan\n"));
+ WL_ERR("Abort iscan\n");
}
rtnl_unlock();
el->handler[status] (wl);
@@ -3062,7 +3056,7 @@ static s32 wl_iscan_thread(void *data)
del_timer_sync(&iscan->timer);
iscan->timer_on = 0;
}
- WL_DBG(("%s was terminated\n", __func__));
+ WL_DBG("%s was terminated\n", __func__);
return 0;
}
@@ -3073,7 +3067,7 @@ static void wl_iscan_timer(unsigned long data)
if (iscan) {
iscan->timer_on = 0;
- WL_DBG(("timer expired\n"));
+ WL_DBG("timer expired\n");
wl_wakeup_iscan(iscan);
}
}
@@ -3088,7 +3082,7 @@ static s32 wl_invoke_iscan(struct wl_priv *wl)
sema_init(&iscan->sync, 0);
iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
if (IS_ERR(iscan->tsk)) {
- WL_ERR(("Could not create iscan thread\n"));
+ WL_ERR("Could not create iscan thread\n");
iscan->tsk = NULL;
return -ENOMEM;
}
@@ -3123,7 +3117,7 @@ static s32 wl_init_iscan(struct wl_priv *wl)
sema_init(&iscan->sync, 0);
iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
if (IS_ERR(iscan->tsk)) {
- WL_ERR(("Could not create iscan thread\n"));
+ WL_ERR("Could not create iscan thread\n");
iscan->tsk = NULL;
return -ENOMEM;
}
@@ -3192,17 +3186,17 @@ s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
s32 err = 0;
if (unlikely(!ndev)) {
- WL_ERR(("ndev is invaild\n"));
+ WL_ERR("ndev is invalid\n");
return -ENODEV;
}
wl_cfg80211_dev = kzalloc(sizeof(struct wl_dev), GFP_KERNEL);
if (unlikely(!wl_cfg80211_dev)) {
- WL_ERR(("wl_cfg80211_dev is invalid\n"));
+ WL_ERR("wl_cfg80211_dev is invalid\n");
return -ENOMEM;
}
- WL_DBG(("func %p\n", wl_cfg80211_get_sdio_func()));
+ WL_DBG("func %p\n", wl_cfg80211_get_sdio_func());
wdev = wl_alloc_wdev(sizeof(struct wl_iface), &wl_cfg80211_get_sdio_func()->dev);
- if (unlikely(IS_ERR(wdev)))
+ if (IS_ERR(wdev))
return -ENOMEM;
wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
@@ -3216,7 +3210,7 @@ s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
wdev->netdev = ndev;
err = wl_init_priv(wl);
if (unlikely(err)) {
- WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+ WL_ERR("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
wl_set_drvdata(wl_cfg80211_dev, ci);
@@ -3261,19 +3255,19 @@ static s32 wl_event_handler(void *data)
break;
e = wl_deq_event(wl);
if (unlikely(!e)) {
- WL_ERR(("eqeue empty..\n"));
+ WL_ERR("event queue empty...\n");
BUG();
}
- WL_DBG(("event type (%d)\n", e->etype));
+ WL_DBG("event type (%d)\n", e->etype);
if (wl->el.handler[e->etype]) {
wl->el.handler[e->etype] (wl, wl_to_ndev(wl), &e->emsg,
e->edata);
} else {
- WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ WL_DBG("Unknown Event (%d): ignoring\n", e->etype);
}
wl_put_event(e);
}
- WL_DBG(("%s was terminated\n", __func__));
+ WL_DBG("%s was terminated\n", __func__);
return 0;
}
@@ -3286,7 +3280,7 @@ wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
wl_dbg_estr[event_type] : (s8 *) "Unknown";
#endif /* (WL_DBG_LEVEL > 0) */
- WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+ WL_DBG("event_type (%d):" "WLC_E_" "%s\n", event_type, estr);
if (likely(!wl_enq_event(wl, event_type, e, data)))
wl_wakeup_event(wl);
}
@@ -3341,7 +3335,7 @@ wl_enq_event(struct wl_priv *wl, u32 event, const wl_event_msg_t *msg,
e = kzalloc(sizeof(struct wl_event_q), GFP_KERNEL);
if (unlikely(!e)) {
- WL_ERR(("event alloc failed\n"));
+ WL_ERR("event alloc failed\n");
return -ENOMEM;
}
@@ -3385,8 +3379,8 @@ static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype)
switch (iftype) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
- WL_ERR(("type (%d) : currently we do not support this mode\n",
- iftype));
+ WL_ERR("type (%d) : currently we do not support this mode\n",
+ iftype);
err = -EINVAL;
return err;
case NL80211_IFTYPE_ADHOC:
@@ -3396,20 +3390,20 @@ static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype)
break;
default:
err = -EINVAL;
- WL_ERR(("invalid type (%d)\n", iftype));
+ WL_ERR("invalid type (%d)\n", iftype);
return err;
}
infra = htod32(infra);
ap = htod32(ap);
- WL_DBG(("%s ap (%d), infra (%d)\n", ndev->name, ap, infra));
+ WL_DBG("%s ap (%d), infra (%d)\n", ndev->name, ap, infra);
err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ WL_ERR("WLC_SET_INFRA error (%d)\n", err);
return err;
}
err = wl_dev_ioctl(ndev, WLC_SET_AP, &ap, sizeof(ap));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_AP error (%d)\n", err));
+ WL_ERR("WLC_SET_AP error (%d)\n", err);
return err;
}
@@ -3431,7 +3425,7 @@ static s32 wl_dongle_up(struct net_device *ndev, u32 up)
err = wl_dev_ioctl(ndev, WLC_UP, &up, sizeof(up));
if (unlikely(err)) {
- WL_ERR(("WLC_UP error (%d)\n", err));
+ WL_ERR("WLC_UP error (%d)\n", err);
}
return err;
}
@@ -3442,7 +3436,7 @@ static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode)
err = wl_dev_ioctl(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode));
if (unlikely(err)) {
- WL_ERR(("WLC_SET_PM error (%d)\n", err));
+ WL_ERR("WLC_SET_PM error (%d)\n", err);
}
return err;
}
@@ -3459,14 +3453,14 @@ wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align)
sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("txglomalign error (%d)\n", err));
+ WL_ERR("txglomalign error (%d)\n", err);
goto dongle_glom_out;
}
/* disable glom option per default */
bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("txglom error (%d)\n", err));
+ WL_ERR("txglom error (%d)\n", err);
goto dongle_glom_out;
}
dongle_glom_out:
@@ -3487,7 +3481,7 @@ wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("bcn_timeout error (%d)\n", err));
+ WL_ERR("bcn_timeout error (%d)\n", err);
goto dongle_rom_out;
}
}
@@ -3496,7 +3490,7 @@ wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("roam_off error (%d)\n", err));
+ WL_ERR("roam_off error (%d)\n", err);
goto dongle_rom_out;
}
dongle_rom_out:
@@ -3516,7 +3510,7 @@ static s32 wl_dongle_eventmsg(struct net_device *ndev)
sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("Get event_msgs error (%d)\n", err));
+ WL_ERR("Get event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
}
memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
@@ -3544,7 +3538,7 @@ static s32 wl_dongle_eventmsg(struct net_device *ndev)
sizeof(iovbuf));
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
- WL_ERR(("Set event_msgs error (%d)\n", err));
+ WL_ERR("Set event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
}
@@ -3562,9 +3556,9 @@ wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
sizeof(scan_assoc_time));
if (err) {
if (err == -EOPNOTSUPP) {
- WL_INFO(("Scan assoc time is not supported\n"));
+ WL_INFO("Scan assoc time is not supported\n");
} else {
- WL_ERR(("Scan assoc time error (%d)\n", err));
+ WL_ERR("Scan assoc time error (%d)\n", err);
}
goto dongle_scantime_out;
}
@@ -3572,9 +3566,9 @@ wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
sizeof(scan_unassoc_time));
if (err) {
if (err == -EOPNOTSUPP) {
- WL_INFO(("Scan unassoc time is not supported\n"));
+ WL_INFO("Scan unassoc time is not supported\n");
} else {
- WL_ERR(("Scan unassoc time error (%d)\n", err));
+ WL_ERR("Scan unassoc time error (%d)\n", err);
}
goto dongle_scantime_out;
}
@@ -3595,9 +3589,9 @@ wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (err) {
if (err == -EOPNOTSUPP)
- WL_INFO(("arpoe is not supported\n"));
+ WL_INFO("arpoe is not supported\n");
else
- WL_ERR(("arpoe error (%d)\n", err));
+ WL_ERR("arpoe error (%d)\n", err);
goto dongle_offload_out;
}
@@ -3605,9 +3599,9 @@ wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (err) {
if (err == -EOPNOTSUPP)
- WL_INFO(("arp_ol is not supported\n"));
+ WL_INFO("arp_ol is not supported\n");
else
- WL_ERR(("arp_ol error (%d)\n", err));
+ WL_ERR("arp_ol error (%d)\n", err);
goto dongle_offload_out;
}
@@ -3620,12 +3614,12 @@ static s32 wl_pattern_atoh(s8 *src, s8 *dst)
{
int i;
if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- WL_ERR(("Mask invalid format. Needs to start with 0x\n"));
+ WL_ERR("Mask invalid format. Needs to start with 0x\n");
return -1;
}
src = src + 2; /* Skip past 0x */
if (strlen(src) % 2 != 0) {
- WL_ERR(("Mask invalid format. Needs to be of even length\n"));
+ WL_ERR("Mask invalid format. Needs to be of even length\n");
return -1;
}
for (i = 0; *src != '\0'; i++) {
@@ -3684,7 +3678,7 @@ static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
mask_and_pattern[mask_size]));
if (mask_size != pattern_size) {
- WL_ERR(("Mask and pattern not the same size\n"));
+ WL_ERR("Mask and pattern not the same size\n");
err = -EINVAL;
goto dongle_filter_out;
}
@@ -3704,9 +3698,9 @@ static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
err = wl_dev_ioctl(ndev, WLC_SET_VAR, buf, buf_len);
if (err) {
if (err == -EOPNOTSUPP) {
- WL_INFO(("filter not supported\n"));
+ WL_INFO("filter not supported\n");
} else {
- WL_ERR(("filter (%d)\n", err));
+ WL_ERR("filter (%d)\n", err);
}
goto dongle_filter_out;
}
@@ -3717,9 +3711,9 @@ static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
if (err) {
if (err == -EOPNOTSUPP) {
- WL_INFO(("filter_mode not supported\n"));
+ WL_INFO("filter_mode not supported\n");
} else {
- WL_ERR(("filter_mode (%d)\n", err));
+ WL_ERR("filter_mode (%d)\n", err);
}
goto dongle_filter_out;
}
@@ -3800,12 +3794,12 @@ static s32 wl_update_wiphybands(struct wl_priv *wl)
err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_PHYLIST, &phy_list,
sizeof(phy_list));
if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
+ WL_ERR("error (%d)\n", err);
return err;
}
phy = ((char *)&phy_list)[1];
- WL_DBG(("%c phy\n", phy));
+ WL_DBG("%c phy\n", phy);
if (phy == 'n' || phy == 'a') {
wiphy = wl_to_wiphy(wl);
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
@@ -3911,7 +3905,7 @@ static void *wl_read_prof(struct wl_priv *wl, s32 item)
case WL_PROF_SSID:
return &wl->profile->ssid;
}
- WL_ERR(("invalid item (%d)\n", item));
+ WL_ERR("invalid item (%d)\n", item);
return NULL;
}
@@ -3932,9 +3926,9 @@ wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
break;
case WL_PROF_BSSID:
if (data)
- memcpy(wl->profile->bssid, data, ETHER_ADDR_LEN);
+ memcpy(wl->profile->bssid, data, ETH_ALEN);
else
- memset(wl->profile->bssid, 0, ETHER_ADDR_LEN);
+ memset(wl->profile->bssid, 0, ETH_ALEN);
break;
case WL_PROF_SEC:
memcpy(&wl->profile->sec, data, sizeof(wl->profile->sec));
@@ -3949,7 +3943,7 @@ wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
wl->profile->dtim_period = *(u8 *)data;
break;
default:
- WL_ERR(("unsupported item (%d)\n", item));
+ WL_ERR("unsupported item (%d)\n", item);
err = -EOPNOTSUPP;
break;
}
@@ -3991,7 +3985,7 @@ static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
s32 err = 0;
if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
- WL_ERR(("ei crosses buffer boundary\n"));
+ WL_ERR("ei crosses buffer boundary\n");
return -ENOSPC;
}
ie->buf[ie->offset] = t;
@@ -4008,7 +4002,7 @@ static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
s32 err = 0;
if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
- WL_ERR(("ei_stream crosses buffer boundary\n"));
+ WL_ERR("ei_stream crosses buffer boundary\n");
return -ENOSPC;
}
memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
@@ -4023,7 +4017,7 @@ static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
s32 err = 0;
if (unlikely(ie->offset > dst_size)) {
- WL_ERR(("dst_size is not enough\n"));
+ WL_ERR("dst_size is not enough\n");
return -ENOSPC;
}
memcpy(dst, &ie->buf[0], ie->offset);
@@ -4123,37 +4117,37 @@ void *wl_cfg80211_request_fw(s8 *file_name)
const struct firmware *fw_entry = NULL;
s32 err = 0;
- WL_DBG(("file name : \"%s\"\n", file_name));
+ WL_DBG("file name : \"%s\"\n", file_name);
wl = WL_PRIV_GET();
if (!test_bit(WL_FW_LOADING_DONE, &wl->fw->status)) {
err = request_firmware(&wl->fw->fw_entry, file_name,
&wl_cfg80211_get_sdio_func()->dev);
if (unlikely(err)) {
- WL_ERR(("Could not download fw (%d)\n", err));
+ WL_ERR("Could not download fw (%d)\n", err);
goto req_fw_out;
}
set_bit(WL_FW_LOADING_DONE, &wl->fw->status);
fw_entry = wl->fw->fw_entry;
if (fw_entry) {
- WL_DBG(("fw size (%zd), data (%p)\n", fw_entry->size,
- fw_entry->data));
+ WL_DBG("fw size (%zd), data (%p)\n",
+ fw_entry->size, fw_entry->data);
}
} else if (!test_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status)) {
err = request_firmware(&wl->fw->fw_entry, file_name,
&wl_cfg80211_get_sdio_func()->dev);
if (unlikely(err)) {
- WL_ERR(("Could not download nvram (%d)\n", err));
+ WL_ERR("Could not download nvram (%d)\n", err);
goto req_fw_out;
}
set_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status);
fw_entry = wl->fw->fw_entry;
if (fw_entry) {
- WL_DBG(("nvram size (%zd), data (%p)\n", fw_entry->size,
- fw_entry->data));
+ WL_DBG("nvram size (%zd), data (%p)\n",
+ fw_entry->size, fw_entry->data);
}
} else {
- WL_DBG(("Downloading already done. Nothing to do more\n"));
+ WL_DBG("Downloading already done. Nothing to do more\n");
err = -EPERM;
}
@@ -4189,10 +4183,10 @@ static void wl_set_mpc(struct net_device *ndev, int mpc)
err = wl_dev_intvar_set(ndev, "mpc", mpc);
if (unlikely(err)) {
- WL_ERR(("fail to set mpc\n"));
+ WL_ERR("fail to set mpc\n");
return;
}
- WL_DBG(("MPC : %d\n", mpc));
+ WL_DBG("MPC : %d\n", mpc);
}
static int wl_debugfs_add_netdev_params(struct wl_priv *wl)
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
index 770e63f0c8ef..482691be210a 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
@@ -54,34 +54,36 @@ struct wl_ibss;
#define WL_DBG_LEVEL 1 /* 0 invalidates all debug messages.
default is 1 */
-#define WL_ERR(args) \
-do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
- if (net_ratelimit()) { \
- printk(KERN_ERR "ERROR @%s : ", __func__); \
- printk args; \
- } \
- } \
+#define WL_ERR(fmt, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ if (net_ratelimit()) { \
+ printk(KERN_ERR "ERROR @%s : " fmt, \
+ __func__, ##args); \
+ } \
+ } \
} while (0)
-#define WL_INFO(args) \
-do { \
- if (wl_dbg_level & WL_DBG_INFO) { \
- if (net_ratelimit()) { \
- printk(KERN_ERR "INFO @%s : ", __func__); \
- printk args; \
- } \
- } \
+
+#define WL_INFO(fmt, args...) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ if (net_ratelimit()) { \
+ printk(KERN_ERR "INFO @%s : " fmt, \
+ __func__, ##args); \
+ } \
+ } \
} while (0)
+
#if (WL_DBG_LEVEL > 0)
-#define WL_DBG(args) \
-do { \
+#define WL_DBG(fmt, args...) \
+do { \
if (wl_dbg_level & WL_DBG_DBG) { \
- printk(KERN_ERR "DEBUG @%s :", __func__); \
- printk args; \
- } \
+ printk(KERN_ERR "DEBUG @%s :" fmt, \
+ __func__, ##args); \
+ } \
} while (0)
#else /* !(WL_DBG_LEVEL > 0) */
-#define WL_DBG(args)
+#define WL_DBG(fmt, args...) no_printk(fmt, ##args)
#endif /* (WL_DBG_LEVEL > 0) */
#define WL_SCAN_RETRY_MAX 3 /* used for ibss scan */
@@ -237,7 +239,7 @@ struct wl_ibss {
struct wl_profile {
u32 mode;
struct wlc_ssid ssid;
- u8 bssid[ETHER_ADDR_LEN];
+ u8 bssid[ETH_ALEN];
u16 beacon_interval;
u8 dtim_period;
struct wl_security sec;
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 979a494fda59..db6e68eab290 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -15,8 +15,9 @@
*/
#include <linux/kthread.h>
+#include <linux/semaphore.h>
#include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/netdevice.h>
#include <osl.h>
#include <wlioctl.h>
@@ -31,20 +32,18 @@
#include <dhd.h>
#include <dhdioctl.h>
-typedef void wlc_info_t;
-typedef void wl_info_t;
typedef const struct si_pub si_t;
#include <wlioctl.h>
#include <proto/ethernet.h>
#include <dngl_stats.h>
#include <dhd.h>
-#define WL_ERROR(x) printf x
-#define WL_TRACE(x)
-#define WL_ASSOC(x)
-#define WL_INFORM(x)
-#define WL_WSEC(x)
-#define WL_SCAN(x)
+
+#define WL_ERROR(fmt, args...) printk(fmt, ##args)
+#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
+#define WL_INFORM(fmt, args...) no_printk(fmt, ##args)
+#define WL_WSEC(fmt, args...) no_printk(fmt, ##args)
+#define WL_SCAN(fmt, args...) no_printk(fmt, ##args)
#include <wl_iw.h>
@@ -187,12 +186,12 @@ static int dev_wlc_ioctl(struct net_device *dev, int cmd, void *arg, int len)
int ret = -EINVAL;
if (!dev) {
- WL_ERROR(("%s: dev is null\n", __func__));
+ WL_ERROR("%s: dev is null\n", __func__);
return ret;
}
- WL_INFORM(("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, "
- "len:%d ,\n", __func__, current->pid, cmd, arg, len));
+ WL_INFORM("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d\n",
+ __func__, current->pid, cmd, arg, len);
if (g_onoff == G_WLAN_SET_ON) {
memset(&ioc, 0, sizeof(ioc));
@@ -205,7 +204,7 @@ static int dev_wlc_ioctl(struct net_device *dev, int cmd, void *arg, int len)
ret = dev_open(dev);
if (ret) {
- WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+ WL_ERROR("%s: Error dev_open: %d\n", __func__, ret);
return ret;
}
@@ -214,7 +213,7 @@ static int dev_wlc_ioctl(struct net_device *dev, int cmd, void *arg, int len)
ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
set_fs(fs);
} else {
- WL_TRACE(("%s: call after driver stop : ignored\n", __func__));
+ WL_TRACE("%s: call after driver stop : ignored\n", __func__);
}
return ret;
}
@@ -335,7 +334,7 @@ wl_iw_config_commit(struct net_device *dev,
int error;
struct sockaddr bssid;
- WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+ WL_TRACE("%s: SIOCSIWCOMMIT\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
if (error)
@@ -346,11 +345,11 @@ wl_iw_config_commit(struct net_device *dev,
if (!ssid.SSID_len)
return 0;
- bzero(&bssid, sizeof(struct sockaddr));
- error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN);
+ memset(&bssid, 0, sizeof(struct sockaddr));
+ error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETH_ALEN);
if (error) {
- WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __func__,
- ssid.SSID));
+ WL_ERROR("%s: WLC_REASSOC to %s failed\n",
+ __func__, ssid.SSID);
return error;
}
@@ -361,7 +360,7 @@ static int
wl_iw_get_name(struct net_device *dev,
struct iw_request_info *info, char *cwrq, char *extra)
{
- WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+ WL_TRACE("%s: SIOCGIWNAME\n", dev->name);
strcpy(cwrq, "IEEE 802.11-DS");
@@ -375,7 +374,7 @@ wl_iw_set_freq(struct net_device *dev,
int error, chan;
uint sf = 0;
- WL_TRACE(("\n %s %s: SIOCSIWFREQ\n", __func__, dev->name));
+ WL_TRACE("\n %s %s: SIOCSIWFREQ\n", __func__, dev->name);
if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
chan = fwrq->m;
@@ -410,7 +409,7 @@ wl_iw_get_freq(struct net_device *dev,
channel_info_t ci;
int error;
- WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+ WL_TRACE("%s: SIOCGIWFREQ\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
if (error)
@@ -427,7 +426,7 @@ wl_iw_set_mode(struct net_device *dev,
{
int infra = 0, ap = 0, error = 0;
- WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+ WL_TRACE("%s: SIOCSIWMODE\n", dev->name);
switch (*uwrq) {
case IW_MODE_MASTER:
@@ -462,7 +461,7 @@ wl_iw_get_mode(struct net_device *dev,
{
int error, infra = 0, ap = 0;
- WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+ WL_TRACE("%s: SIOCGIWMODE\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra));
if (error)
@@ -501,14 +500,14 @@ wl_iw_get_range(struct net_device *dev,
{30, 60, 90, 120, 180, 240, 270, 300}
};
- WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+ WL_TRACE("%s: SIOCGIWRANGE\n", dev->name);
if (!extra)
return -EINVAL;
channels = kmalloc((MAXCHANNEL + 1) * 4, GFP_KERNEL);
if (!channels) {
- WL_ERROR(("Could not alloc channels\n"));
+ WL_ERROR("Could not alloc channels\n");
return -ENOMEM;
}
list = (wl_u32_list_t *) channels;
@@ -684,14 +683,14 @@ wl_iw_set_spy(struct net_device *dev,
struct sockaddr *addr = (struct sockaddr *)extra;
int i;
- WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+ WL_TRACE("%s: SIOCSIWSPY\n", dev->name);
if (!extra)
return -EINVAL;
iw->spy_num = min_t(int, ARRAY_SIZE(iw->spy_addr), dwrq->length);
for (i = 0; i < iw->spy_num; i++)
- memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+ memcpy(&iw->spy_addr[i], addr[i].sa_data, ETH_ALEN);
memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
return 0;
@@ -706,14 +705,14 @@ wl_iw_get_spy(struct net_device *dev,
struct iw_quality *qual = (struct iw_quality *)&addr[iw->spy_num];
int i;
- WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+ WL_TRACE("%s: SIOCGIWSPY\n", dev->name);
if (!extra)
return -EINVAL;
dwrq->length = iw->spy_num;
for (i = 0; i < iw->spy_num; i++) {
- memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+ memcpy(addr[i].sa_data, &iw->spy_addr[i], ETH_ALEN);
addr[i].sa_family = AF_UNIX;
memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
iw->spy_qual[i].updated = 0;
@@ -751,8 +750,8 @@ wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params,
join_params->params.chanspec_num =
htod32(join_params->params.chanspec_num);
- WL_TRACE(("%s join_params->params.chanspec_list[0]= %X\n",
- __func__, join_params->params.chanspec_list[0]));
+ WL_TRACE("%s join_params->params.chanspec_list[0]= %X\n",
+ __func__, join_params->params.chanspec_list[0]);
}
return 1;
}
@@ -765,16 +764,17 @@ wl_iw_set_wap(struct net_device *dev,
wl_join_params_t join_params;
int join_params_size;
- WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+ WL_TRACE("%s: SIOCSIWAP\n", dev->name);
if (awrq->sa_family != ARPHRD_ETHER) {
- WL_ERROR(("Invalid Header...sa_family\n"));
+ WL_ERROR("Invalid Header...sa_family\n");
return -EINVAL;
}
- if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+ if (is_broadcast_ether_addr(awrq->sa_data) ||
+ is_zero_ether_addr(awrq->sa_data)) {
scb_val_t scbval;
- bzero(&scbval, sizeof(scb_val_t));
+ memset(&scbval, 0, sizeof(scb_val_t));
(void)dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t));
return 0;
@@ -785,23 +785,23 @@ wl_iw_set_wap(struct net_device *dev,
memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
- memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+ memcpy(&join_params.params.bssid, awrq->sa_data, ETH_ALEN);
- WL_TRACE(("%s target_channel=%d\n", __func__,
- g_wl_iw_params.target_channel));
+ WL_TRACE("%s target_channel=%d\n",
+ __func__, g_wl_iw_params.target_channel);
wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
&join_params_size);
error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
join_params_size);
if (error) {
- WL_ERROR(("%s Invalid ioctl data=%d\n", __func__, error));
+ WL_ERROR("%s Invalid ioctl data=%d\n", __func__, error);
}
if (g_ssid.SSID_len) {
- WL_TRACE(("%s: join SSID=%s BSSID=%pM ch=%d\n",
- __func__, g_ssid.SSID, awrq->sa_data,
- g_wl_iw_params.target_channel));
+ WL_TRACE("%s: join SSID=%s BSSID=%pM ch=%d\n",
+ __func__, g_ssid.SSID, awrq->sa_data,
+ g_wl_iw_params.target_channel);
}
memset(&g_ssid, 0, sizeof(g_ssid));
@@ -812,12 +812,12 @@ static int
wl_iw_get_wap(struct net_device *dev,
struct iw_request_info *info, struct sockaddr *awrq, char *extra)
{
- WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+ WL_TRACE("%s: SIOCGIWAP\n", dev->name);
awrq->sa_family = ARPHRD_ETHER;
- memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+ memset(awrq->sa_data, 0, ETH_ALEN);
- (void)dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+ (void)dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETH_ALEN);
return 0;
}
@@ -831,16 +831,16 @@ wl_iw_mlme(struct net_device *dev,
scb_val_t scbval;
int error = -EINVAL;
- WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+ WL_TRACE("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name);
mlme = (struct iw_mlme *)extra;
if (mlme == NULL) {
- WL_ERROR(("Invalid ioctl data.\n"));
+ WL_ERROR("Invalid ioctl data\n");
return error;
}
scbval.val = mlme->reason_code;
- bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+ bcopy(&mlme->addr.sa_data, &scbval.ea, ETH_ALEN);
if (mlme->cmd == IW_MLME_DISASSOC) {
scbval.val = htod32(scbval.val);
@@ -853,7 +853,7 @@ wl_iw_mlme(struct net_device *dev,
dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scb_val_t));
} else {
- WL_ERROR(("Invalid ioctl data.\n"));
+ WL_ERROR("Invalid ioctl data\n");
return error;
}
@@ -874,7 +874,7 @@ wl_iw_get_aplist(struct net_device *dev,
int error, i;
uint buflen = dwrq->length;
- WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+ WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
if (!extra)
return -EINVAL;
@@ -886,7 +886,7 @@ wl_iw_get_aplist(struct net_device *dev,
list->buflen = htod32(buflen);
error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen);
if (error) {
- WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+ WL_ERROR("%d: Scan results error %d\n", __LINE__, error);
kfree(list);
return error;
}
@@ -894,8 +894,8 @@ wl_iw_get_aplist(struct net_device *dev,
list->version = dtoh32(list->version);
list->count = dtoh32(list->count);
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version));
+ WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __func__, list->version);
kfree(list);
return -EINVAL;
}
@@ -911,7 +911,7 @@ wl_iw_get_aplist(struct net_device *dev,
if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
continue;
- memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETH_ALEN);
addr[dwrq->length].sa_family = ARPHRD_ETHER;
qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
@@ -952,13 +952,13 @@ wl_iw_iscan_get_aplist(struct net_device *dev,
wl_bss_info_t *bi = NULL;
int i;
- WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+ WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
if (!extra)
return -EINVAL;
if ((!iscan) || (!iscan->sysioc_tsk)) {
- WL_ERROR(("%s error\n", __func__));
+ WL_ERROR("%s error\n", __func__);
return 0;
}
@@ -966,9 +966,8 @@ wl_iw_iscan_get_aplist(struct net_device *dev,
while (buf) {
list = &((wl_iscan_results_t *) buf->iscan_buf)->results;
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != "
- "WL_BSS_INFO_VERSION\n",
- __func__, list->version));
+ WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __func__, list->version);
return -EINVAL;
}
@@ -985,7 +984,7 @@ wl_iw_iscan_get_aplist(struct net_device *dev,
continue;
memcpy(addr[dwrq->length].sa_data, &bi->BSSID,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
addr[dwrq->length].sa_family = ARPHRD_ETHER;
qual[dwrq->length].qual =
rssi_to_qual(dtoh16(bi->RSSI));
@@ -1016,7 +1015,7 @@ static int wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
{
int err = 0;
- memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ memcpy(&params->bssid, &ether_bcast, ETH_ALEN);
params->bss_type = DOT11_BSSTYPE_ANY;
params->scan_type = 0;
params->nprobes = -1;
@@ -1043,15 +1042,15 @@ static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, u16 action)
iscan->iscan_ex_params_p->action = htod16(action);
iscan->iscan_ex_params_p->scan_duration = htod16(0);
- WL_SCAN(("%s : nprobes=%d\n", __func__,
- iscan->iscan_ex_params_p->params.nprobes));
- WL_SCAN(("active_time=%d\n",
- iscan->iscan_ex_params_p->params.active_time));
- WL_SCAN(("passive_time=%d\n",
- iscan->iscan_ex_params_p->params.passive_time));
- WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
- WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
- WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+ WL_SCAN("%s : nprobes=%d\n",
+ __func__, iscan->iscan_ex_params_p->params.nprobes);
+ WL_SCAN("active_time=%d\n",
+ iscan->iscan_ex_params_p->params.active_time);
+ WL_SCAN("passive_time=%d\n",
+ iscan->iscan_ex_params_p->params.passive_time);
+ WL_SCAN("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time);
+ WL_SCAN("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type);
+ WL_SCAN("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type);
(void)dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
iscan->iscan_ex_param_size, iscan->ioctlbuf,
@@ -1066,7 +1065,7 @@ static void wl_iw_timerfunc(unsigned long data)
if (iscan) {
iscan->timer_on = 0;
if (iscan->iscan_state != ISCAN_STATE_IDLE) {
- WL_TRACE(("timer trigger\n"));
+ WL_TRACE("timer trigger\n");
up(&iscan->sysioc_sem);
}
}
@@ -1101,8 +1100,8 @@ static u32 wl_iw_iscan_get(iscan_info_t *iscan)
} else {
buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
if (!buf) {
- WL_ERROR(("%s can't alloc iscan_buf_t : going to abort "
- "currect iscan\n", __func__));
+ WL_ERROR("%s can't alloc iscan_buf_t : going to abort current iscan\n",
+ __func__);
MUTEX_UNLOCK_WL_SCAN_SET();
return WL_SCAN_RESULTS_NO_MEM;
}
@@ -1135,11 +1134,11 @@ static u32 wl_iw_iscan_get(iscan_info_t *iscan)
results->buflen = dtoh32(results->buflen);
results->version = dtoh32(results->version);
results->count = dtoh32(results->count);
- WL_TRACE(("results->count = %d\n", results->count));
- WL_TRACE(("results->buflen = %d\n", results->buflen));
+ WL_TRACE("results->count = %d\n", results->count);
+ WL_TRACE("results->buflen = %d\n", results->buflen);
status = dtoh32(list_buf->status);
} else {
- WL_ERROR(("%s returns error %d\n", __func__, res));
+ WL_ERROR("%s returns error %d\n", __func__, res);
status = WL_SCAN_RESULTS_NO_MEM;
}
MUTEX_UNLOCK_WL_SCAN_SET();
@@ -1148,8 +1147,8 @@ static u32 wl_iw_iscan_get(iscan_info_t *iscan)
static void wl_iw_force_specific_scan(iscan_info_t *iscan)
{
- WL_TRACE(("%s force Specific SCAN for %s\n", __func__,
- g_specific_ssid.SSID));
+ WL_TRACE("%s force Specific SCAN for %s\n",
+ __func__, g_specific_ssid.SSID);
rtnl_lock();
(void)dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid,
@@ -1166,7 +1165,7 @@ static void wl_iw_send_scan_complete(iscan_info_t *iscan)
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
- WL_TRACE(("Send Event ISCAN complete\n"));
+ WL_TRACE("Send Event ISCAN complete\n");
#endif
}
@@ -1190,8 +1189,8 @@ static int _iscan_sysioc_thread(void *data)
status = wl_iw_iscan_get(iscan);
rtnl_unlock();
if (g_scan_specified_ssid && (iscan_pass_abort == true)) {
- WL_TRACE(("%s Get results from specific scan "
- "status = %d\n", __func__, status));
+ WL_TRACE("%s Get results from specific scan status = %d\n",
+ __func__, status);
wl_iw_send_scan_complete(iscan);
iscan_pass_abort = false;
status = -1;
@@ -1199,7 +1198,7 @@ static int _iscan_sysioc_thread(void *data)
switch (status) {
case WL_SCAN_RESULTS_PARTIAL:
- WL_TRACE(("iscanresults incomplete\n"));
+ WL_TRACE("iscanresults incomplete\n");
rtnl_lock();
wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
rtnl_unlock();
@@ -1208,18 +1207,18 @@ static int _iscan_sysioc_thread(void *data)
iscan->timer_on = 1;
break;
case WL_SCAN_RESULTS_SUCCESS:
- WL_TRACE(("iscanresults complete\n"));
+ WL_TRACE("iscanresults complete\n");
iscan->iscan_state = ISCAN_STATE_IDLE;
wl_iw_send_scan_complete(iscan);
break;
case WL_SCAN_RESULTS_PENDING:
- WL_TRACE(("iscanresults pending\n"));
+ WL_TRACE("iscanresults pending\n");
mod_timer(&iscan->timer,
jiffies + iscan->timer_ms * HZ / 1000);
iscan->timer_on = 1;
break;
case WL_SCAN_RESULTS_ABORTED:
- WL_TRACE(("iscanresults aborted\n"));
+ WL_TRACE("iscanresults aborted\n");
iscan->iscan_state = ISCAN_STATE_IDLE;
if (g_scan_specified_ssid == 0)
wl_iw_send_scan_complete(iscan);
@@ -1229,12 +1228,12 @@ static int _iscan_sysioc_thread(void *data)
}
break;
case WL_SCAN_RESULTS_NO_MEM:
- WL_TRACE(("iscanresults can't alloc memory: skip\n"));
+ WL_TRACE("iscanresults can't alloc memory: skip\n");
iscan->iscan_state = ISCAN_STATE_IDLE;
break;
default:
- WL_TRACE(("iscanresults returned unknown status %d\n",
- status));
+ WL_TRACE("iscanresults returned unknown status %d\n",
+ status);
break;
}
}
@@ -1253,11 +1252,11 @@ wl_iw_set_scan(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
int error;
- WL_TRACE(("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __func__, dev->name));
+ WL_TRACE("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __func__, dev->name);
g_set_essid_before_scan = false;
#if defined(CSCAN)
- WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __func__));
+ WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
return -EINVAL;
#endif
@@ -1274,9 +1273,8 @@ wl_iw_set_scan(struct net_device *dev,
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
struct iw_scan_req *req = (struct iw_scan_req *)extra;
if (g_scan_specified_ssid) {
- WL_TRACE(("%s Specific SCAN is not done ignore "
- "scan for = %s\n",
- __func__, req->essid));
+ WL_TRACE("%s Specific SCAN is not done ignore scan for = %s\n",
+ __func__, req->essid);
return -EBUSY;
} else {
g_specific_ssid.SSID_len = min_t(size_t,
@@ -1287,9 +1285,9 @@ wl_iw_set_scan(struct net_device *dev,
g_specific_ssid.SSID_len =
htod32(g_specific_ssid.SSID_len);
g_scan_specified_ssid = 1;
- WL_TRACE(("### Specific scan ssid=%s len=%d\n",
- g_specific_ssid.SSID,
- g_specific_ssid.SSID_len));
+ WL_TRACE("### Specific scan ssid=%s len=%d\n",
+ g_specific_ssid.SSID,
+ g_specific_ssid.SSID_len);
}
}
}
@@ -1297,8 +1295,8 @@ wl_iw_set_scan(struct net_device *dev,
error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid,
sizeof(g_specific_ssid));
if (error) {
- WL_TRACE(("#### Set SCAN for %s failed with %d\n",
- g_specific_ssid.SSID, error));
+ WL_TRACE("#### Set SCAN for %s failed with %d\n",
+ g_specific_ssid.SSID, error);
g_scan_specified_ssid = 0;
return -EBUSY;
}
@@ -1317,7 +1315,7 @@ int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
wl_iw_set_event_mask(dev);
- WL_TRACE(("+++: Set Broadcast ISCAN\n"));
+ WL_TRACE("+++: Set Broadcast ISCAN\n");
memset(&ssid, 0, sizeof(ssid));
iscan->list_cur = iscan->list_hdr;
@@ -1346,20 +1344,20 @@ wl_iw_iscan_set_scan(struct net_device *dev,
wlc_ssid_t ssid;
iscan_info_t *iscan = g_iscan;
- WL_TRACE(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+ WL_TRACE("%s: SIOCSIWSCAN : ISCAN\n", dev->name);
#if defined(CSCAN)
- WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __func__));
+ WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
return -EINVAL;
#endif
if (g_onoff == G_WLAN_SET_OFF) {
- WL_TRACE(("%s: driver is not up yet after START\n", __func__));
+ WL_TRACE("%s: driver is not up yet after START\n", __func__);
return 0;
}
#ifdef PNO_SUPPORT
if (dhd_dev_get_pno_status(dev)) {
- WL_ERROR(("%s: Scan called when PNO is active\n", __func__));
+ WL_ERROR("%s: Scan called when PNO is active\n", __func__);
}
#endif
@@ -1367,8 +1365,8 @@ wl_iw_iscan_set_scan(struct net_device *dev,
return wl_iw_set_scan(dev, info, wrqu, extra);
if (g_scan_specified_ssid) {
- WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n",
- __func__));
+ WL_TRACE("%s Specific SCAN already running ignoring BC scan\n",
+ __func__);
return EBUSY;
}
@@ -1386,8 +1384,8 @@ wl_iw_iscan_set_scan(struct net_device *dev,
g_scan_specified_ssid = 0;
if (iscan->iscan_state == ISCAN_STATE_SCANING) {
- WL_TRACE(("%s ISCAN already in progress \n",
- __func__));
+ WL_TRACE("%s ISCAN already in progress\n",
+ __func__);
return 0;
}
}
@@ -1406,7 +1404,7 @@ static bool ie_is_wpa_ie(u8 **wpaie, u8 **tlvs, int *tlvs_len)
u8 *ie = *wpaie;
if ((ie[1] >= 6) &&
- !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+ !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
return true;
}
@@ -1422,7 +1420,7 @@ static bool ie_is_wps_ie(u8 **wpsie, u8 **tlvs, int *tlvs_len)
u8 *ie = *wpsie;
if ((ie[1] >= 4) &&
- !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+ !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
return true;
}
@@ -1501,9 +1499,8 @@ wl_iw_get_scan_prep(wl_scan_results_t *list,
for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != "
- "WL_BSS_INFO_VERSION\n",
- __func__, list->version));
+ WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __func__, list->version);
return ret;
}
@@ -1511,11 +1508,11 @@ wl_iw_get_scan_prep(wl_scan_results_t *list,
dtoh32(bi->length)) : list->
bss_info;
- WL_TRACE(("%s : %s\n", __func__, bi->SSID));
+ WL_TRACE("%s : %s\n", __func__, bi->SSID);
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETH_ALEN);
event =
IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
IW_EV_ADDR_LEN);
@@ -1590,11 +1587,11 @@ wl_iw_get_scan_prep(wl_scan_results_t *list,
ret = event - extra;
if (ret < 0) {
- WL_ERROR(("==> Wrong size\n"));
+ WL_ERROR("==> Wrong size\n");
ret = 0;
}
- WL_TRACE(("%s: size=%d bytes prepared\n", __func__,
- (unsigned int)(event - extra)));
+ WL_TRACE("%s: size=%d bytes prepared\n",
+ __func__, (unsigned int)(event - extra));
return (uint)ret;
}
@@ -1614,10 +1611,10 @@ wl_iw_get_scan(struct net_device *dev,
iscan_buf_t *p_buf;
#endif
- WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+ WL_TRACE("%s: buflen_from_user %d:\n", dev->name, buflen_from_user);
if (!extra) {
- WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+ WL_TRACE("%s: wl_iw_get_scan return -EINVAL\n", dev->name);
return -EINVAL;
}
@@ -1631,8 +1628,8 @@ wl_iw_get_scan(struct net_device *dev,
if (g_scan_specified_ssid) {
list = kmalloc(len, GFP_KERNEL);
if (!list) {
- WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n",
- dev->name));
+ WL_TRACE("%s: wl_iw_get_scan return -ENOMEM\n",
+ dev->name);
g_scan_specified_ssid = 0;
return -ENOMEM;
}
@@ -1642,8 +1639,8 @@ wl_iw_get_scan(struct net_device *dev,
list->buflen = htod32(len);
error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len);
if (error) {
- WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name,
- __func__, error));
+ WL_ERROR("%s: %s : Scan_results ERROR %d\n",
+ dev->name, __func__, error);
dwrq->length = len;
if (g_scan_specified_ssid) {
g_scan_specified_ssid = 0;
@@ -1656,8 +1653,8 @@ wl_iw_get_scan(struct net_device *dev,
list->count = dtoh32(list->count);
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version));
+ WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __func__, list->version);
if (g_scan_specified_ssid) {
g_scan_specified_ssid = 0;
kfree(list);
@@ -1666,8 +1663,8 @@ wl_iw_get_scan(struct net_device *dev,
}
if (g_scan_specified_ssid) {
- WL_TRACE(("%s: Specified scan APs in the list =%d\n",
- __func__, list->count));
+ WL_TRACE("%s: Specified scan APs in the list =%d\n",
+ __func__, list->count);
len_ret =
(__u16) wl_iw_get_scan_prep(list, info, extra,
buflen_from_user);
@@ -1678,8 +1675,8 @@ wl_iw_get_scan(struct net_device *dev,
while (p_buf != iscan->list_cur) {
list_merge =
&((wl_iscan_results_t *) p_buf->iscan_buf)->results;
- WL_TRACE(("%s: Bcast APs list=%d\n", __func__,
- list_merge->count));
+ WL_TRACE("%s: Bcast APs list=%d\n",
+ __func__, list_merge->count);
if (list_merge->count > 0)
len_ret +=
(__u16) wl_iw_get_scan_prep(list_merge,
@@ -1689,8 +1686,8 @@ wl_iw_get_scan(struct net_device *dev,
}
#else
list_merge = (wl_scan_results_t *) g_scan;
- WL_TRACE(("%s: Bcast APs list=%d\n", __func__,
- list_merge->count));
+ WL_TRACE("%s: Bcast APs list=%d\n",
+ __func__, list_merge->count);
if (list_merge->count > 0)
len_ret +=
(__u16) wl_iw_get_scan_prep(list_merge, info,
@@ -1714,8 +1711,8 @@ wl_iw_get_scan(struct net_device *dev,
dwrq->length = len;
dwrq->flags = 0;
- WL_TRACE(("%s return to WE %d bytes APs=%d\n", __func__,
- dwrq->length, list->count));
+ WL_TRACE("%s return to WE %d bytes APs=%d\n",
+ __func__, dwrq->length, list->count);
return 0;
}
@@ -1736,26 +1733,26 @@ wl_iw_iscan_get_scan(struct net_device *dev,
u32 counter = 0;
u8 channel;
- WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __func__,
- dwrq->length));
+ WL_TRACE("%s %s buflen_from_user %d:\n",
+ dev->name, __func__, dwrq->length);
if (!extra) {
- WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n",
- dev->name));
+ WL_TRACE("%s: INVALID SIOCGIWSCAN GET bad parameter\n",
+ dev->name);
return -EINVAL;
}
if ((!iscan) || (!iscan->sysioc_tsk)) {
- WL_ERROR(("%ssysioc_tsk\n", __func__));
+ WL_ERROR("%ssysioc_tsk\n", __func__);
return wl_iw_get_scan(dev, info, dwrq, extra);
}
if (iscan->iscan_state == ISCAN_STATE_SCANING) {
- WL_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+ WL_TRACE("%s: SIOCGIWSCAN GET still scanning\n", dev->name);
return -EAGAIN;
}
- WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+ WL_TRACE("%s: SIOCGIWSCAN GET broadcast results\n", dev->name);
apcnt = 0;
p_buf = iscan->list_hdr;
while (p_buf != iscan->list_cur) {
@@ -1764,9 +1761,8 @@ wl_iw_iscan_get_scan(struct net_device *dev,
counter += list->count;
if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR(("%s : list->version %d != "
- "WL_BSS_INFO_VERSION\n",
- __func__, list->version));
+ WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+ __func__, list->version);
return -EINVAL;
}
@@ -1779,14 +1775,14 @@ wl_iw_iscan_get_scan(struct net_device *dev,
ASSERT(((unsigned long)bi + dtoh32(bi->length)) <=
((unsigned long)list + WLC_IW_ISCAN_MAXLEN));
- if (event + ETHER_ADDR_LEN + bi->SSID_len +
+ if (event + ETH_ALEN + bi->SSID_len +
IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >=
end)
return -E2BIG;
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
event =
IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
IW_EV_ADDR_LEN);
@@ -1876,8 +1872,8 @@ wl_iw_iscan_get_scan(struct net_device *dev,
dwrq->length = event - extra;
dwrq->flags = 0;
- WL_TRACE(("%s return to WE %d bytes APs=%d\n", __func__,
- dwrq->length, counter));
+ WL_TRACE("%s return to WE %d bytes APs=%d\n",
+ __func__, dwrq->length, counter);
if (!dwrq->length)
return -EAGAIN;
@@ -1895,7 +1891,7 @@ wl_iw_set_essid(struct net_device *dev,
wl_join_params_t join_params;
int join_params_size;
- WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+ WL_TRACE("%s: SIOCSIWESSID\n", dev->name);
if (g_set_essid_before_scan)
return -EAGAIN;
@@ -1923,7 +1919,7 @@ wl_iw_set_essid(struct net_device *dev,
memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
- memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
&join_params_size);
@@ -1931,11 +1927,11 @@ wl_iw_set_essid(struct net_device *dev,
error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
join_params_size);
if (error)
- WL_ERROR(("Invalid ioctl data=%d\n", error));
+ WL_ERROR("Invalid ioctl data=%d\n", error);
if (g_ssid.SSID_len) {
- WL_TRACE(("%s: join SSID=%s ch=%d\n", __func__,
- g_ssid.SSID, g_wl_iw_params.target_channel));
+ WL_TRACE("%s: join SSID=%s ch=%d\n",
+ __func__, g_ssid.SSID, g_wl_iw_params.target_channel);
}
return 0;
}
@@ -1948,14 +1944,14 @@ wl_iw_get_essid(struct net_device *dev,
wlc_ssid_t ssid;
int error;
- WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+ WL_TRACE("%s: SIOCGIWESSID\n", dev->name);
if (!extra)
return -EINVAL;
error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
if (error) {
- WL_ERROR(("Error getting the SSID\n"));
+ WL_ERROR("Error getting the SSID\n");
return error;
}
@@ -1976,7 +1972,7 @@ wl_iw_set_nick(struct net_device *dev,
{
wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+ WL_TRACE("%s: SIOCSIWNICKN\n", dev->name);
if (!extra)
return -EINVAL;
@@ -1996,7 +1992,7 @@ wl_iw_get_nick(struct net_device *dev,
{
wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+ WL_TRACE("%s: SIOCGIWNICKN\n", dev->name);
if (!extra)
return -EINVAL;
@@ -2014,7 +2010,7 @@ wl_iw_set_rate(struct net_device *dev,
wl_rateset_t rateset;
int error, rate, i, error_bg, error_a;
- WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+ WL_TRACE("%s: SIOCSIWRATE\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
sizeof(rateset));
@@ -2063,7 +2059,7 @@ wl_iw_get_rate(struct net_device *dev,
{
int error, rate;
- WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+ WL_TRACE("%s: SIOCGIWRATE\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
if (error)
@@ -2080,7 +2076,7 @@ wl_iw_set_rts(struct net_device *dev,
{
int error, rts;
- WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+ WL_TRACE("%s: SIOCSIWRTS\n", dev->name);
if (vwrq->disabled)
rts = DOT11_DEFAULT_RTS_LEN;
@@ -2102,7 +2098,7 @@ wl_iw_get_rts(struct net_device *dev,
{
int error, rts;
- WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+ WL_TRACE("%s: SIOCGIWRTS\n", dev->name);
error = dev_wlc_intvar_get(dev, "rtsthresh", &rts);
if (error)
@@ -2121,7 +2117,7 @@ wl_iw_set_frag(struct net_device *dev,
{
int error, frag;
- WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+ WL_TRACE("%s: SIOCSIWFRAG\n", dev->name);
if (vwrq->disabled)
frag = DOT11_DEFAULT_FRAG_LEN;
@@ -2143,7 +2139,7 @@ wl_iw_get_frag(struct net_device *dev,
{
int error, fragthreshold;
- WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+ WL_TRACE("%s: SIOCGIWFRAG\n", dev->name);
error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold);
if (error)
@@ -2163,7 +2159,7 @@ wl_iw_set_txpow(struct net_device *dev,
{
int error, disable;
u16 txpwrmw;
- WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+ WL_TRACE("%s: SIOCSIWTXPOW\n", dev->name);
disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
disable += WL_RADIO_SW_DISABLE << 16;
@@ -2200,7 +2196,7 @@ wl_iw_get_txpow(struct net_device *dev,
int error, disable, txpwrdbm;
u8 result;
- WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+ WL_TRACE("%s: SIOCGIWTXPOW\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable));
if (error)
@@ -2229,7 +2225,7 @@ wl_iw_set_retry(struct net_device *dev,
{
int error, lrl, srl;
- WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+ WL_TRACE("%s: SIOCSIWRETRY\n", dev->name);
if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
return -EINVAL;
@@ -2277,7 +2273,7 @@ wl_iw_get_retry(struct net_device *dev,
{
int error, lrl, srl;
- WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+ WL_TRACE("%s: SIOCGIWRETRY\n", dev->name);
vwrq->disabled = 0;
@@ -2317,7 +2313,7 @@ wl_iw_set_encode(struct net_device *dev,
wl_wsec_key_t key;
int error, val, wsec;
- WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+ WL_TRACE("%s: SIOCSIWENCODE\n", dev->name);
memset(&key, 0, sizeof(key));
@@ -2409,9 +2405,9 @@ wl_iw_get_encode(struct net_device *dev,
wl_wsec_key_t key;
int error, val, wsec, auth;
- WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+ WL_TRACE("%s: SIOCGIWENCODE\n", dev->name);
- bzero(&key, sizeof(wl_wsec_key_t));
+ memset(&key, 0, sizeof(wl_wsec_key_t));
if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS;
@@ -2465,7 +2461,7 @@ wl_iw_set_power(struct net_device *dev,
{
int error, pm;
- WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+ WL_TRACE("%s: SIOCSIWPOWER\n", dev->name);
pm = vwrq->disabled ? PM_OFF : PM_MAX;
@@ -2484,7 +2480,7 @@ wl_iw_get_power(struct net_device *dev,
{
int error, pm;
- WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+ WL_TRACE("%s: SIOCGIWPOWER\n", dev->name);
error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
if (error)
@@ -2503,7 +2499,7 @@ wl_iw_set_wpaie(struct net_device *dev,
struct iw_request_info *info, struct iw_point *iwp, char *extra)
{
- WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+ WL_TRACE("%s: SIOCSIWGENIE\n", dev->name);
CHECK_EXTRA_FOR_NULL(extra);
@@ -2516,7 +2512,7 @@ static int
wl_iw_get_wpaie(struct net_device *dev,
struct iw_request_info *info, struct iw_point *iwp, char *extra)
{
- WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ WL_TRACE("%s: SIOCGIWGENIE\n", dev->name);
iwp->length = 64;
dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
return 0;
@@ -2531,7 +2527,7 @@ wl_iw_set_encodeext(struct net_device *dev,
int error;
struct iw_encode_ext *iwe;
- WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+ WL_TRACE("%s: SIOCSIWENCODEEXT\n", dev->name);
CHECK_EXTRA_FOR_NULL(extra);
@@ -2548,14 +2544,14 @@ wl_iw_set_encodeext(struct net_device *dev,
key.len = iwe->key_len;
- if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ if (!is_multicast_ether_addr(iwe->addr.sa_data))
bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
if (key.len == 0) {
if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- WL_WSEC(("Changing the the primary Key to %d\n",
- key.index));
+ WL_WSEC("Changing the the primary Key to %d\n",
+ key.index);
key.index = htod32(key.index);
error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
&key.index, sizeof(key.index));
@@ -2569,9 +2565,9 @@ wl_iw_set_encodeext(struct net_device *dev,
if (iwe->key_len > sizeof(key.data))
return -EINVAL;
- WL_WSEC(("Setting the key index %d\n", key.index));
+ WL_WSEC("Setting the key index %d\n", key.index);
if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- WL_WSEC(("key is a Primary Key\n"));
+ WL_WSEC("key is a Primary Key\n");
key.flags = WL_PRIMARY_KEY;
}
@@ -2638,15 +2634,15 @@ wl_iw_set_pmksa(struct net_device *dev,
uint i;
int ret = 0;
- WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+ WL_WSEC("%s: SIOCSIWPMKSA\n", dev->name);
CHECK_EXTRA_FOR_NULL(extra);
iwpmksa = (struct iw_pmksa *)extra;
if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
- WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
- bzero((char *)&pmkid_list, sizeof(pmkid_list));
+ WL_WSEC("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n");
+ memset((char *)&pmkid_list, 0, sizeof(pmkid_list));
}
else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
@@ -2656,30 +2652,30 @@ wl_iw_set_pmksa(struct net_device *dev,
pmkidptr = &pmkid;
bcopy(&iwpmksa->bssid.sa_data[0],
- &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+ &pmkidptr->pmkid[0].BSSID, ETH_ALEN);
bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID,
WPA2_PMKID_LEN);
- WL_WSEC(("wl_iw_set_pmksa:IW_PMKSA_REMOVE:PMKID: "
- "%pM = ", &pmkidptr->pmkid[0].BSSID));
+ WL_WSEC("wl_iw_set_pmksa:IW_PMKSA_REMOVE:PMKID: %pM = ",
+ &pmkidptr->pmkid[0].BSSID);
for (j = 0; j < WPA2_PMKID_LEN; j++)
- WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
- WL_WSEC(("\n"));
+ WL_WSEC("%02x ", pmkidptr->pmkid[0].PMKID[j]);
+ WL_WSEC("\n");
}
for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
- if (!bcmp
+ if (!memcmp
(&iwpmksa->bssid.sa_data[0],
- &pmkid_list.pmkids.pmkid[i].BSSID, ETHER_ADDR_LEN))
+ &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
break;
if ((pmkid_list.pmkids.npmkid > 0)
&& (i < pmkid_list.pmkids.npmkid)) {
- bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t));
+ memset(&pmkid_list.pmkids.pmkid[i], 0, sizeof(pmkid_t));
for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
bcopy(&pmkid_list.pmkids.pmkid[i + 1].BSSID,
&pmkid_list.pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
bcopy(&pmkid_list.pmkids.pmkid[i + 1].PMKID,
&pmkid_list.pmkids.pmkid[i].PMKID,
WPA2_PMKID_LEN);
@@ -2691,14 +2687,14 @@ wl_iw_set_pmksa(struct net_device *dev,
else if (iwpmksa->cmd == IW_PMKSA_ADD) {
for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
- if (!bcmp
+ if (!memcmp
(&iwpmksa->bssid.sa_data[0],
- &pmkid_list.pmkids.pmkid[i].BSSID, ETHER_ADDR_LEN))
+ &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
break;
if (i < MAXPMKID) {
bcopy(&iwpmksa->bssid.sa_data[0],
&pmkid_list.pmkids.pmkid[i].BSSID,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
bcopy(&iwpmksa->pmkid[0],
&pmkid_list.pmkids.pmkid[i].PMKID,
WPA2_PMKID_LEN);
@@ -2710,25 +2706,25 @@ wl_iw_set_pmksa(struct net_device *dev,
uint j;
uint k;
k = pmkid_list.pmkids.npmkid;
- WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %pM = ",
- &pmkid_list.pmkids.pmkid[k].BSSID));
+ WL_WSEC("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %pM = ",
+ &pmkid_list.pmkids.pmkid[k].BSSID);
for (j = 0; j < WPA2_PMKID_LEN; j++)
- WL_WSEC(("%02x ",
- pmkid_list.pmkids.pmkid[k].PMKID[j]));
- WL_WSEC(("\n"));
+ WL_WSEC("%02x ",
+ pmkid_list.pmkids.pmkid[k].PMKID[j]);
+ WL_WSEC("\n");
}
}
- WL_WSEC(("PRINTING pmkid LIST - No of elements %d\n",
- pmkid_list.pmkids.npmkid));
+ WL_WSEC("PRINTING pmkid LIST - No of elements %d\n",
+ pmkid_list.pmkids.npmkid);
for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
uint j;
- WL_WSEC(("PMKID[%d]: %pM = ", i,
- &pmkid_list.pmkids.pmkid[i].BSSID));
+ WL_WSEC("PMKID[%d]: %pM = ",
+ i, &pmkid_list.pmkids.pmkid[i].BSSID);
for (j = 0; j < WPA2_PMKID_LEN; j++)
- WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
- WL_WSEC(("\n"));
+ WL_WSEC("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]);
+ WL_WSEC("\n");
}
- WL_WSEC(("\n"));
+ WL_WSEC("\n");
if (!ret)
ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list,
@@ -2742,7 +2738,7 @@ wl_iw_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ WL_TRACE("%s: SIOCGIWENCODEEXT\n", dev->name);
return 0;
}
@@ -2757,13 +2753,13 @@ wl_iw_set_wpaauth(struct net_device *dev,
int val = 0;
wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+ WL_TRACE("%s: SIOCSIWAUTH\n", dev->name);
paramid = vwrq->flags & IW_AUTH_INDEX;
paramval = vwrq->value;
- WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
- dev->name, paramid, paramval));
+ WL_TRACE("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval);
switch (paramid) {
case IW_AUTH_WPA_VERSION:
@@ -2773,8 +2769,8 @@ wl_iw_set_wpaauth(struct net_device *dev,
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- WL_INFORM(("%s: %d: setting wpa_auth to 0x%0x\n", __func__,
- __LINE__, val));
+ WL_INFORM("%s: %d: setting wpa_auth to 0x%0x\n",
+ __func__, __LINE__, val);
error = dev_wlc_intvar_set(dev, "wpa_auth", val);
if (error)
return error;
@@ -2797,20 +2793,19 @@ wl_iw_set_wpaauth(struct net_device *dev,
}
if (iw->privacy_invoked && !val) {
- WL_WSEC(("%s: %s: 'Privacy invoked' true but clearing "
- "wsec, assuming " "we're a WPS enrollee\n",
- dev->name, __func__));
+ WL_WSEC("%s: %s: 'Privacy invoked' true but clearing wsec, assuming we're a WPS enrollee\n",
+ dev->name, __func__);
error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
true);
if (error) {
- WL_WSEC(("Failed to set is_WPS_enrollee\n"));
+ WL_WSEC("Failed to set is_WPS_enrollee\n");
return error;
}
} else if (val) {
error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
false);
if (error) {
- WL_WSEC(("Failed to clear is_WPS_enrollee\n"));
+ WL_WSEC("Failed to clear is_WPS_enrollee\n");
return error;
}
}
@@ -2837,8 +2832,8 @@ wl_iw_set_wpaauth(struct net_device *dev,
else
val = WPA2_AUTH_UNSPECIFIED;
}
- WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __func__,
- __LINE__, val));
+ WL_INFORM("%s: %d: setting wpa_auth to %d\n",
+ __func__, __LINE__, val);
error = dev_wlc_intvar_set(dev, "wpa_auth", val);
if (error)
return error;
@@ -2850,7 +2845,7 @@ wl_iw_set_wpaauth(struct net_device *dev,
break;
case IW_AUTH_80211_AUTH_ALG:
- WL_INFORM(("Setting the D11auth %d\n", paramval));
+ WL_INFORM("Setting the D11auth %d\n", paramval);
if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
val = 0;
else if (paramval == IW_AUTH_ALG_SHARED_KEY)
@@ -2879,8 +2874,8 @@ wl_iw_set_wpaauth(struct net_device *dev,
dev_wlc_intvar_set(dev, "wsec", val);
}
val = 0;
- WL_INFORM(("%s: %d: setting wpa_auth to %d\n",
- __func__, __LINE__, val));
+ WL_INFORM("%s: %d: setting wpa_auth to %d\n",
+ __func__, __LINE__, val);
dev_wlc_intvar_set(dev, "wpa_auth", 0);
return error;
}
@@ -2897,7 +2892,7 @@ wl_iw_set_wpaauth(struct net_device *dev,
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __func__));
+ WL_INFORM("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
break;
case IW_AUTH_PRIVACY_INVOKED:
{
@@ -2908,8 +2903,7 @@ wl_iw_set_wpaauth(struct net_device *dev,
error = dev_wlc_intvar_set(dev,
"is_WPS_enrollee", false);
if (error) {
- WL_WSEC(("Failed to clear iovar "
- "is_WPS_enrollee\n"));
+ WL_WSEC("Failed to clear iovar is_WPS_enrollee\n");
return error;
}
} else {
@@ -2923,8 +2917,7 @@ wl_iw_set_wpaauth(struct net_device *dev,
"is_WPS_enrollee",
true);
if (error) {
- WL_WSEC(("Failed to set iovar "
- "is_WPS_enrollee\n"));
+ WL_WSEC("Failed to set iovar is_WPS_enrollee\n");
return error;
}
} else {
@@ -2932,8 +2925,7 @@ wl_iw_set_wpaauth(struct net_device *dev,
"is_WPS_enrollee",
false);
if (error) {
- WL_WSEC(("Failed to clear "
- "is_WPS_enrollee\n"));
+ WL_WSEC("Failed to clear is_WPS_enrollee\n");
return error;
}
}
@@ -2960,7 +2952,7 @@ wl_iw_get_wpaauth(struct net_device *dev,
int val;
wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+ WL_TRACE("%s: SIOCGIWAUTH\n", dev->name);
paramid = vwrq->flags & IW_AUTH_INDEX;
@@ -3040,7 +3032,7 @@ wl_iw_get_wpaauth(struct net_device *dev,
break;
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __func__));
+ WL_ERROR("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
break;
case IW_AUTH_PRIVACY_INVOKED:
paramval = iw->privacy_invoked;
@@ -3157,19 +3149,19 @@ int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
char *extra = NULL;
int token_size = 1, max_tokens = 0, ret = 0;
- WL_TRACE(("\n%s, cmd:%x alled via dhd->do_ioctl()entry point\n",
- __func__, cmd));
+ WL_TRACE("\n%s, cmd:%x alled via dhd->do_ioctl()entry point\n",
+ __func__, cmd);
if (cmd < SIOCIWFIRST ||
IW_IOCTL_IDX(cmd) >= ARRAY_SIZE(wl_iw_handler)) {
- WL_ERROR(("%s: error in cmd=%x : out of range\n", __func__,
- cmd));
+ WL_ERROR("%s: error in cmd=%x : out of range\n",
+ __func__, cmd);
return -EOPNOTSUPP;
}
handler = wl_iw_handler[IW_IOCTL_IDX(cmd)];
if (!handler) {
- WL_ERROR(("%s: error in cmd=%x : not supported\n",
- __func__, cmd));
+ WL_ERROR("%s: error in cmd=%x : not supported\n",
+ __func__, cmd);
return -EOPNOTSUPP;
}
@@ -3234,9 +3226,8 @@ int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (max_tokens && wrq->u.data.pointer) {
if (wrq->u.data.length > max_tokens) {
- WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d "
- "> max_tokens=%d\n",
- __func__, cmd, wrq->u.data.length, max_tokens));
+ WL_ERROR("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n",
+ __func__, cmd, wrq->u.data.length, max_tokens);
return -E2BIG;
}
extra = kmalloc(max_tokens * token_size, GFP_KERNEL);
@@ -3339,7 +3330,7 @@ wl_iw_conn_status_str(u32 event_type, u32 status, u32 reason,
memset(stringBuf, 0, buflen);
snprintf(stringBuf, buflen, "%s %s %02d %02d",
name, cause, status, reason);
- WL_INFORM(("Connection status: %s\n", stringBuf));
+ WL_INFORM("Connection status: %s\n", stringBuf);
return true;
} else {
return false;
@@ -3383,46 +3374,46 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
iw = 0;
if (!dev) {
- WL_ERROR(("%s: dev is null\n", __func__));
+ WL_ERROR("%s: dev is null\n", __func__);
return;
}
iw = *(wl_iw_t **) netdev_priv(dev);
- WL_TRACE(("%s: dev=%s event=%d\n", __func__, dev->name, event_type));
+ WL_TRACE("%s: dev=%s event=%d\n", __func__, dev->name, event_type);
switch (event_type) {
case WLC_E_TXFAIL:
cmd = IWEVTXDROP;
- memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
break;
#if WIRELESS_EXT > 14
case WLC_E_JOIN:
case WLC_E_ASSOC_IND:
case WLC_E_REASSOC_IND:
- memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
cmd = IWEVREGISTERED;
break;
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC_IND:
cmd = SIOCGIWAP;
- bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+ memset(wrqu.addr.sa_data, 0, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
- bzero(&extra, ETHER_ADDR_LEN);
+ memset(&extra, 0, ETH_ALEN);
break;
case WLC_E_LINK:
case WLC_E_NDIS_LINK:
cmd = SIOCGIWAP;
if (!(flags & WLC_EVENT_MSG_LINK)) {
- bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
- bzero(&extra, ETHER_ADDR_LEN);
+ memset(wrqu.addr.sa_data, 0, ETH_ALEN);
+ memset(&extra, 0, ETH_ALEN);
WAKE_LOCK_TIMEOUT(iw->pub, WAKE_LOCK_LINK_DOWN_TMOUT,
20 * HZ);
} else {
- memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
- WL_TRACE(("Link UP\n"));
+ memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
+ WL_TRACE("Link UP\n");
}
wrqu.addr.sa_family = ARPHRD_ETHER;
@@ -3433,8 +3424,8 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
wrqu.data.length = datalen + 1;
extra[0] = WLC_E_ACTION_FRAME;
memcpy(&extra[1], data, datalen);
- WL_TRACE(("WLC_E_ACTION_FRAME len %d \n",
- wrqu.data.length));
+ WL_TRACE("WLC_E_ACTION_FRAME len %d\n",
+ wrqu.data.length);
}
break;
@@ -3464,7 +3455,7 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
else
micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
memcpy(micerrevt->src_addr.sa_data, &e->addr,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
micerrevt->src_addr.sa_family = ARPHRD_ETHER;
break;
@@ -3487,14 +3478,14 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
wrqu.data.length = sizeof(struct iw_pmkid_cand);
pmkidcand = pmkcandlist->pmkid_cand;
while (count) {
- bzero(iwpmkidcand,
+ memset(iwpmkidcand, 0,
sizeof(struct iw_pmkid_cand));
if (pmkidcand->preauth)
iwpmkidcand->flags |=
IW_PMKID_CAND_PREAUTH;
bcopy(&pmkidcand->BSSID,
&iwpmkidcand->bssid.sa_data,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
#ifndef SANDGATE2G
wireless_send_event(dev, cmd, &wrqu,
extra);
@@ -3515,13 +3506,13 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
} else {
cmd = SIOCGIWSCAN;
wrqu.data.length = strlen(extra);
- WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific "
- "scan %d\n", g_iscan->iscan_state));
+ WL_TRACE("Event WLC_E_SCAN_COMPLETE from specific scan %d\n",
+ g_iscan->iscan_state);
}
#else
cmd = SIOCGIWSCAN;
wrqu.data.length = strlen(extra);
- WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+ WL_TRACE("Event WLC_E_SCAN_COMPLETE\n");
#endif
break;
@@ -3529,9 +3520,9 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
{
wlc_ssid_t *ssid;
ssid = (wlc_ssid_t *) data;
- WL_ERROR(("%s Event WLC_E_PFN_NET_FOUND, send %s up : "
- "find %s len=%d\n", __func__, PNO_EVENT_UP,
- ssid->SSID, ssid->SSID_len));
+ WL_ERROR("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n",
+ __func__, PNO_EVENT_UP,
+ ssid->SSID, ssid->SSID_len);
WAKE_LOCK_TIMEOUT(iw->pub, WAKE_LOCK_PNO_FIND_TMOUT,
20 * HZ);
cmd = IWEVCUSTOM;
@@ -3542,7 +3533,7 @@ void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
break;
default:
- WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+ WL_TRACE("Unknown Event %d: ignoring\n", event_type);
break;
}
#ifndef SANDGATE2G
@@ -3583,15 +3574,15 @@ wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
goto done;
phy_noise = dtoh32(phy_noise);
- WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+ WL_TRACE("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise);
- bzero(&scb_val, sizeof(scb_val_t));
+ memset(&scb_val, 0, sizeof(scb_val_t));
res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
if (res)
goto done;
rssi = dtoh32(scb_val.val);
- WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+ WL_TRACE("wl_iw_get_wireless_stats rssi=%d\n", rssi);
if (rssi <= WL_IW_RSSI_NO_SIGNAL)
wstats->qual.qual = 0;
else if (rssi <= WL_IW_RSSI_VERY_LOW)
@@ -3614,23 +3605,21 @@ wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
#endif
#if WIRELESS_EXT > 11
- WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n",
- (int)sizeof(wl_cnt_t)));
+ WL_TRACE("wl_iw_get_wireless_stats counters=%zu\n", sizeof(wl_cnt_t));
memset(&cnt, 0, sizeof(wl_cnt_t));
res =
dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
if (res) {
- WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n",
- res));
+ WL_ERROR("wl_iw_get_wireless_stats counters failed error=%d\n",
+ res);
goto done;
}
cnt.version = dtoh16(cnt.version);
if (cnt.version != WL_CNT_T_VERSION) {
- WL_TRACE(("\tIncorrect version of counters struct: expected "
- "%d; got %d\n",
- WL_CNT_T_VERSION, cnt.version));
+ WL_TRACE("\tIncorrect version of counters struct: expected %d; got %d\n",
+ WL_CNT_T_VERSION, cnt.version);
goto done;
}
@@ -3641,22 +3630,22 @@ wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
wstats->miss.beacon = 0;
- WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
- dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
- dtoh32(cnt.rxfrmtoolong)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
- dtoh32(cnt.rxbadplcp)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n",
- dtoh32(cnt.rxundec)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
- dtoh32(cnt.rxfragerr)));
- WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n",
- dtoh32(cnt.txfail)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n",
- dtoh32(cnt.rxrunt)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n",
- dtoh32(cnt.rxgiant)));
+ WL_TRACE("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt.txframe), dtoh32(cnt.txbyte));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
+ dtoh32(cnt.rxfrmtoolong));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
+ dtoh32(cnt.rxbadplcp));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxundec=%d\n",
+ dtoh32(cnt.rxundec));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
+ dtoh32(cnt.rxfragerr));
+ WL_TRACE("wl_iw_get_wireless_stats counters txfail=%d\n",
+ dtoh32(cnt.txfail));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxrunt=%d\n",
+ dtoh32(cnt.rxrunt));
+ WL_TRACE("wl_iw_get_wireless_stats counters rxgiant=%d\n",
+ dtoh32(cnt.rxgiant));
#endif /* WIRELESS_EXT > 11 */
done:
@@ -3690,8 +3679,7 @@ int wl_iw_attach(struct net_device *dev, void *dhdp)
return -ENOMEM;
memset(iscan, 0, sizeof(iscan_info_t));
- iscan->iscan_ex_params_p =
- (wl_iscan_params_t *) kmalloc(params_size, GFP_KERNEL);
+ iscan->iscan_ex_params_p = kmalloc(params_size, GFP_KERNEL);
if (!iscan->iscan_ex_params_p)
return -ENOMEM;
iscan->iscan_ex_param_size = params_size;
@@ -3723,9 +3711,7 @@ int wl_iw_attach(struct net_device *dev, void *dhdp)
priv_dev = dev;
MUTEX_LOCK_SOFTAP_SET_INIT(iw->pub);
#endif
- g_scan = NULL;
-
- g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+ g_scan = kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
if (!g_scan)
return -ENOMEM;
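/*
 * Illustrative sketch, not part of this patch: the ETHER_ADDR_LEN/bzero/bcmp
 * replacements above lean on standard kernel helpers rather than the
 * Broadcom-private ones. The helper name below (copy_unicast_bssid) is
 * hypothetical and shown only to demonstrate the pattern.
 */
#include <linux/etherdevice.h>	/* ETH_ALEN, is_multicast_ether_addr() */
#include <linux/string.h>	/* memcpy(), memset(), memcmp() */

static bool copy_unicast_bssid(u8 *dst, const u8 *src)
{
	if (is_multicast_ether_addr(src))	/* ignore group addresses */
		return false;
	memcpy(dst, src, ETH_ALEN);		/* ETH_ALEN == 6 */
	return true;
}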
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.h b/drivers/staging/brcm80211/brcmfmac/wl_iw.h
index edbf61f30b47..c8637c50dc17 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.h
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.h
@@ -78,7 +78,7 @@ typedef struct wl_iw_extra_params {
#define CHECK_EXTRA_FOR_NULL(extra) \
if (!extra) { \
- WL_ERROR(("%s: error : extra is null pointer\n", __func__)); \
+ WL_ERROR("%s: error : extra is null pointer\n", __func__); \
return -EINVAL; \
}
diff --git a/drivers/staging/brcm80211/include/bcm_rpc.h b/drivers/staging/brcm80211/include/bcm_rpc.h
deleted file mode 100644
index 77e5d8f71966..000000000000
--- a/drivers/staging/brcm80211/include/bcm_rpc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCM_RPC_H_
-#define _BCM_RPC_H_
-
-#include <rpc_osl.h>
-
-typedef struct rpc_info rpc_info_t;
-typedef struct rpc_buf rpc_buf_t;
-struct rpc_transport_info;
-typedef void (*rpc_dispatch_cb_t) (void *ctx, struct rpc_buf *buf);
-typedef void (*rpc_resync_cb_t) (void *ctx);
-typedef void (*rpc_down_cb_t) (void *ctx);
-typedef void (*rpc_txdone_cb_t) (void *ctx, struct rpc_buf *buf);
-extern struct rpc_info *bcm_rpc_attach(void *pdev, osl_t *osh,
- struct rpc_transport_info *rpc_th);
-
-extern void bcm_rpc_detach(struct rpc_info *rpc);
-extern void bcm_rpc_down(struct rpc_info *rpc);
-extern void bcm_rpc_watchdog(struct rpc_info *rpc);
-
-extern struct rpc_buf *bcm_rpc_buf_alloc(struct rpc_info *rpc, int len);
-extern void bcm_rpc_buf_free(struct rpc_info *rpc, struct rpc_buf *b);
-/* get rpc transport handle */
-extern struct rpc_transport_info *bcm_rpc_tp_get(struct rpc_info *rpc);
-
-/* callback for: data_rx, down, resync */
-extern void bcm_rpc_rxcb_init(struct rpc_info *rpc, void *ctx,
- rpc_dispatch_cb_t cb, void *dnctx,
- rpc_down_cb_t dncb, rpc_resync_cb_t resync_cb,
- rpc_txdone_cb_t);
-extern void bcm_rpc_rxcb_deinit(struct rpc_info *rpci);
-
-/* HOST or CLIENT rpc call, requiring no return value */
-extern int bcm_rpc_call(struct rpc_info *rpc, struct rpc_buf *b);
-
-/* HOST rpc call, demanding return.
- * The thread may be suspended and control returns back to OS
- * The thread will resume(waked up) on either the return signal received or timeout
- * The implementation details depend on OS
- */
-extern struct rpc_buf *bcm_rpc_call_with_return(struct rpc_info *rpc,
- struct rpc_buf *b);
-
-/* CLIENT rpc call to respond to bcm_rpc_call_with_return, requiring no return value */
-extern int bcm_rpc_call_return(struct rpc_info *rpc, struct rpc_buf *retb);
-
-extern uint bcm_rpc_buf_header_len(struct rpc_info *rpci);
-
-#define RPC_PKTLOG_SIZE 50 /* Depth of the history */
-#define RPC_PKTLOG_RD_LEN 3
-#define RPC_PKTLOG_DUMP_SIZE 150 /* dump size should be more than the product of above two */
-extern int bcm_rpc_pktlog_get(struct rpc_info *rpci, u32 *buf,
- uint buf_size, bool send);
-extern int bcm_rpc_dump(rpc_info_t *rpci, struct bcmstrbuf *b);
-
-/* HIGH/BMAC: bit 15-8: RPC module, bit 7-0: TP module */
-#define RPC_ERROR_VAL 0x0001
-#define RPC_TRACE_VAL 0x0002
-#define RPC_PKTTRACE_VAL 0x0004
-#define RPC_PKTLOG_VAL 0x0008
-extern void bcm_rpc_msglevel_set(struct rpc_info *rpci, u16 msglevel,
- bool high_low);
-
-#endif /* _BCM_RPC_H_ */
diff --git a/drivers/staging/brcm80211/include/bcm_rpc_tp.h b/drivers/staging/brcm80211/include/bcm_rpc_tp.h
deleted file mode 100644
index bb8dc6dd6f44..000000000000
--- a/drivers/staging/brcm80211/include/bcm_rpc_tp.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcm_rpc_tp_h_
-#define _bcm_rpc_tp_h_
-#include <bcm_rpc.h>
-
-#define DBUS_RX_BUFFER_SIZE_RPC (2100) /* rxbufsize for dbus_attach, linux only for now */
-
-#define BCM_RPC_TP_ENCAP_LEN 4 /* TP header is 4 bytes */
-
-#define BCM_RPC_TP_HOST_AGG_MASK 0xffff0000
-#define BCM_RPC_TP_HOST_AGG_SHIFT 16
-#define BCM_RPC_TP_HOST_AGG_AMPDU 0x00010000 /* HOST->DNGL ampdu aggregation */
-#define BCM_RPC_TP_HOST_AGG_TEST 0x00100000 /* HOST->DNGL test aggregation */
-#define BCM_RPC_TP_DNGL_AGG_MASK 0x0000ffff
-#define BCM_RPC_TP_DNGL_AGG_DPC 0x00000001 /* DNGL->HOST data aggregation */
-#define BCM_RPC_TP_DNGL_AGG_FLOWCTL 0x00000002 /* DNGL->HOST tx flowcontrol agg */
-#define BCM_RPC_TP_DNGL_AGG_TEST 0x00000010 /* DNGL->HOST test agg */
-
-#define BCM_RPC_TP_DNGL_AGG_MAX_SFRAME 3 /* max agg subframes, must be <= USB_NTXD */
-#define BCM_RPC_TP_DNGL_AGG_MAX_BYTE 4000 /* max agg bytes */
-
-#define BCM_RPC_TP_HOST_AGG_MAX_SFRAME 3 /* max agg subframes, AMPDU only, 3 is enough */
-#define BCM_RPC_TP_HOST_AGG_MAX_BYTE 3400 /* max agg bytes; to fit 2+ tcp/udp pkts. Each one:
- * 802.3pkt + 802.11 hdr + rpc hdr + tp hdr < 1700B
- * Need to be in sync with dongle usb rx dma
- * rxbufsize(USBBULK_RXBUF_GIANT in usbdev_sb.c)
- */
-/* TP-DBUS pkts flowcontrol */
-#define BCM_RPC_TP_DBUS_NTXQ 50 /* queue size for TX on bulk OUT, aggregation possible */
-#define BCM_RPC_TP_DBUS_NRXQ 50 /* queue size for RX on bulk IN, aggregation possible */
-#define BCM_RPC_TP_DBUS_NRXQ_CTRL 1 /* queue size for RX on ctl EP0 */
-
-#define BCM_RPC_TP_DBUS_NRXQ_PKT (BCM_RPC_TP_DBUS_NRXQ * BCM_RPC_TP_DNGL_AGG_MAX_SFRAME)
-#define BCM_RPC_TP_DBUS_NTXQ_PKT (BCM_RPC_TP_DBUS_NTXQ * BCM_RPC_TP_HOST_AGG_MAX_SFRAME)
-
-typedef struct rpc_transport_info rpc_tp_info_t;
-
-typedef void (*rpc_tx_complete_fn_t) (void *, rpc_buf_t *, int status);
-typedef void (*rpc_rx_fn_t) (void *, rpc_buf_t *);
-
-#ifdef WLC_LOW
-typedef void (*rpc_txflowctl_cb_t) (void *ctx, bool on);
-#endif
-
-extern rpc_tp_info_t *bcm_rpc_tp_attach(osl_t *osh, void *bus);
-extern void bcm_rpc_tp_detach(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_down(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_watchdog(rpc_tp_info_t *rpcb);
-
-extern int bcm_rpc_tp_buf_send(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-
-/* callback for tx_complete, rx_pkt */
-extern void bcm_rpc_tp_register_cb(rpc_tp_info_t *rpcb,
- rpc_tx_complete_fn_t txcmplt,
- void *tx_context, rpc_rx_fn_t rxpkt,
- void *rx_context, rpc_osl_t *rpc_osh);
-extern void bcm_rpc_tp_deregister_cb(rpc_tp_info_t *rpcb);
-
-/* Buffer manipulation */
-extern uint bcm_rpc_buf_tp_header_len(rpc_tp_info_t *rpcb);
-extern rpc_buf_t *bcm_rpc_tp_buf_alloc(rpc_tp_info_t *rpcb, int len);
-extern void bcm_rpc_tp_buf_free(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-extern int bcm_rpc_buf_len_get(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern int bcm_rpc_buf_len_set(rpc_tp_info_t *rpcb, rpc_buf_t *b, uint len);
-extern rpc_buf_t *bcm_rpc_buf_next_get(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern void bcm_rpc_buf_next_set(rpc_tp_info_t *rpcb, rpc_buf_t *b,
- rpc_buf_t *nextb);
-extern unsigned char *bcm_rpc_buf_data(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern unsigned char *bcm_rpc_buf_push(rpc_tp_info_t *rpcb, rpc_buf_t *b,
- uint delta);
-extern unsigned char *bcm_rpc_buf_pull(rpc_tp_info_t *rpcb, rpc_buf_t *b,
- uint delta);
-extern void bcm_rpc_tp_buf_release(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-extern void bcm_rpc_tp_buf_cnt_adjust(rpc_tp_info_t *rpcb, int adjust);
-/* RPC call_with_return */
-extern int bcm_rpc_tp_recv_rtn(rpc_tp_info_t *rpcb);
-extern int bcm_rpc_tp_get_device_speed(rpc_tp_info_t *rpc_th);
-#ifdef BCMDBG
-extern int bcm_rpc_tp_dump(rpc_tp_info_t *rpcb, struct bcmstrbuf *b);
-#endif
-
-#ifdef WLC_LOW
-/* intercept USB pkt to parse RPC header: USB driver rx-> wl_send -> this -> wl driver */
-extern void bcm_rpc_tp_rx_from_dnglbus(rpc_tp_info_t *rpc_th, struct lbuf *lb);
-
-/* RPC callreturn pkt, go to USB driver tx */
-extern int bcm_rpc_tp_send_callreturn(rpc_tp_info_t *rpc_th, rpc_buf_t *b);
-
-extern void bcm_rpc_tp_dump(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_txflowctl(rpc_tp_info_t *rpcb, bool state, int prio);
-extern void bcm_rpc_tp_txflowctlcb_init(rpc_tp_info_t *rpc_th, void *ctx,
- rpc_txflowctl_cb_t cb);
-extern void bcm_rpc_tp_txflowctlcb_deinit(rpc_tp_info_t *rpc_th);
-extern void bcm_rpc_tp_txq_wm_set(rpc_tp_info_t *rpc_th, u8 hiwm,
- u8 lowm);
-extern void bcm_rpc_tp_txq_wm_get(rpc_tp_info_t *rpc_th, u8 *hiwm,
- u8 *lowm);
-#endif /* WLC_LOW */
-
-extern void bcm_rpc_tp_agg_set(rpc_tp_info_t *rpcb, u32 reason, bool set);
-extern void bcm_rpc_tp_agg_limit_set(rpc_tp_info_t *rpc_th, u8 sf,
- u16 bytes);
-extern void bcm_rpc_tp_agg_limit_get(rpc_tp_info_t *rpc_th, u8 *sf,
- u16 *bytes);
-
-#define BCM_RPC_TP_MSG_LEVEL_MASK 0x00ff
-/* dongle msg level */
-#define RPC_TP_MSG_DNGL_ERR_VAL 0x0001 /* DNGL TP error msg */
-#define RPC_TP_MSG_DNGL_DBG_VAL 0x0002 /* DNGL TP dbg msg */
-#define RPC_TP_MSG_DNGL_AGG_VAL 0x0004 /* DNGL TP agg msg */
-#define RPC_TP_MSG_DNGL_DEA_VAL 0x0008 /* DNGL TP deag msg */
-
-/* host msg level */
-#define RPC_TP_MSG_HOST_ERR_VAL 0x0001 /* DNGL TP error msg */
-#define RPC_TP_MSG_HOST_DBG_VAL 0x0002 /* DNGL TP dbg msg */
-#define RPC_TP_MSG_HOST_AGG_VAL 0x0004 /* DNGL TP agg msg */
-#define RPC_TP_MSG_HOST_DEA_VAL 0x0008 /* DNGL TP deag msg */
-
-extern void bcm_rpc_tp_msglevel_set(rpc_tp_info_t *rpc_th, u8 msglevel,
- bool high_low);
-
-#endif /* _bcm_rpc_tp_h_ */
diff --git a/drivers/staging/brcm80211/include/bcm_xdr.h b/drivers/staging/brcm80211/include/bcm_xdr.h
deleted file mode 100644
index 50fbd78a8804..000000000000
--- a/drivers/staging/brcm80211/include/bcm_xdr.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCM_XDR_H
-#define _BCM_XDR_H
-
-/*
- * bcm_xdr_buf_t
- * Structure used for bookkeeping of a buffer being packed or unpacked.
- * Keeps a current read/write pointer and size as well as
- * the original buffer pointer and size.
- *
- */
-typedef struct {
- u8 *buf; /* pointer to current position in origbuf */
- uint size; /* current (residual) size in bytes */
- u8 *origbuf; /* unmodified pointer to orignal buffer */
- uint origsize; /* unmodified orignal buffer size in bytes */
-} bcm_xdr_buf_t;
-
-void bcm_xdr_buf_init(bcm_xdr_buf_t *b, void *buf, size_t len);
-
-int bcm_xdr_pack_u32(bcm_xdr_buf_t *b, u32 val);
-int bcm_xdr_unpack_u32(bcm_xdr_buf_t *b, u32 *pval);
-int bcm_xdr_pack_s32(bcm_xdr_buf_t *b, s32 val);
-int bcm_xdr_unpack_s32(bcm_xdr_buf_t *b, s32 *pval);
-int bcm_xdr_pack_s8(bcm_xdr_buf_t *b, s8 val);
-int bcm_xdr_unpack_s8(bcm_xdr_buf_t *b, s8 *pval);
-int bcm_xdr_pack_opaque(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_unpack_opaque(bcm_xdr_buf_t *b, uint len, void **pdata);
-int bcm_xdr_unpack_opaque_cpy(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_pack_opaque_varlen(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_unpack_opaque_varlen(bcm_xdr_buf_t *b, uint *plen, void **pdata);
-int bcm_xdr_pack_string(bcm_xdr_buf_t *b, char *str);
-int bcm_xdr_unpack_string(bcm_xdr_buf_t *b, uint *plen, char **pstr);
-
-int bcm_xdr_pack_u8_vec(bcm_xdr_buf_t *, u8 *vec, u32 elems);
-int bcm_xdr_unpack_u8_vec(bcm_xdr_buf_t *, u8 *vec, u32 elems);
-int bcm_xdr_pack_u16_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_unpack_u16_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_pack_u32_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_unpack_u32_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-
-int bcm_xdr_pack_opaque_raw(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_pack_opaque_pad(bcm_xdr_buf_t *b);
-
-#endif /* _BCM_XDR_H */
diff --git a/drivers/staging/brcm80211/include/bcmdefs.h b/drivers/staging/brcm80211/include/bcmdefs.h
index dc52e9dbb8b5..74601fc971c9 100644
--- a/drivers/staging/brcm80211/include/bcmdefs.h
+++ b/drivers/staging/brcm80211/include/bcmdefs.h
@@ -42,9 +42,6 @@
#define BCMFASTPATH
#endif
-/* Put some library data/code into ROM to reduce RAM requirements */
-#define BCMROMFN(_fn) _fn
-
/* Bus types */
#define SI_BUS 0 /* SOC Interconnect */
#define PCI_BUS 1 /* PCI target */
@@ -54,35 +51,6 @@
#define SPI_BUS 6 /* gSPI target */
#define RPC_BUS 7 /* RPC target */
-/* Allows size optimization for single-bus image */
-#ifdef BCMBUSTYPE
-#define BUSTYPE(bus) (BCMBUSTYPE)
-#else
-#define BUSTYPE(bus) (bus)
-#endif
-
-/* Allows size optimization for single-backplane image */
-#ifdef BCMCHIPTYPE
-#define CHIPTYPE(bus) (BCMCHIPTYPE)
-#else
-#define CHIPTYPE(bus) (bus)
-#endif
-
-/* Allows size optimization for SPROM support */
-#define SPROMBUS (PCI_BUS)
-
-/* Allows size optimization for single-chip image */
-#ifdef BCMCHIPID
-#define CHIPID(chip) (BCMCHIPID)
-#else
-#define CHIPID(chip) (chip)
-#endif
-
-#ifdef BCMCHIPREV
-#define CHIPREV(rev) (BCMCHIPREV)
-#else
-#define CHIPREV(rev) (rev)
-#endif
/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
@@ -146,31 +114,11 @@ typedef struct {
#define BCMEXTRAHDROOM 172
-/* Headroom required for dongle-to-host communication. Packets allocated
- * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
- * leave this much room in front for low-level message headers which may
- * be needed to get across the dongle bus to the host. (These messages
- * don't go over the network, so room for the full WL header above would
- * be a waste.).
-*/
-#define BCMDONGLEHDRSZ 12
-#define BCMDONGLEPADSZ 16
-
-#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
-
#ifdef BCMDBG
-
-#define BCMDBG_ERR
-
#ifndef BCMDBG_ASSERT
#define BCMDBG_ASSERT
-#endif /* BCMDBG_ASSERT */
-
-#endif /* BCMDBG */
-
-#if defined(BCMDBG_ASSERT)
-#define BCMASSERT_SUPPORT
-#endif
+#endif /* BCMDBG_ASSERT */
+#endif /* BCMDBG */
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
@@ -190,11 +138,10 @@ typedef struct {
(((val) & (~(field ## _M << field ## _S))) | \
((unsigned)(bits) << field ## _S))
-/* define BCMSMALL to remove misc features for memory-constrained environments */
-#define BCMSPACE
-#define bcmspace true /* if (bcmspace) code is retained */
-
/* Max. nvram variable table size */
#define MAXSZ_NVRAM_VARS 4096
+/* handle forward declaration */
+struct wl_info;
+
#endif /* _bcmdefs_h_ */
diff --git a/drivers/staging/brcm80211/include/bcmsdbus.h b/drivers/staging/brcm80211/include/bcmsdbus.h
index ca99495eaa89..89059dd8088b 100644
--- a/drivers/staging/brcm80211/include/bcmsdbus.h
+++ b/drivers/staging/brcm80211/include/bcmsdbus.h
@@ -46,8 +46,8 @@ typedef void (*sdioh_cb_fn_t) (void *);
* The handler shall be provided by all subsequent calls. No local cache
* cfghdl points to the starting address of pci device mapped memory
*/
-extern sdioh_info_t *sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern sdioh_info_t *sdioh_attach(struct osl_info *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(struct osl_info *osh, sdioh_info_t *si);
extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si,
sdioh_cb_fn_t fn, void *argh);
extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
@@ -79,7 +79,7 @@ extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma,
uint fix_inc, uint rw, uint fnc_num,
u32 addr, uint regwidth,
u32 buflen, u8 *buffer,
- void *pkt);
+ struct sk_buff *pkt);
/* get cis data */
extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, u8 *cis,
diff --git a/drivers/staging/brcm80211/include/bcmsdh.h b/drivers/staging/brcm80211/include/bcmsdh.h
index 6b80983d43c9..0e1f79919c9c 100644
--- a/drivers/staging/brcm80211/include/bcmsdh.h
+++ b/drivers/staging/brcm80211/include/bcmsdh.h
@@ -40,11 +40,11 @@ typedef void (*bcmsdh_cb_fn_t) (void *);
* implementation may maintain a single "default" handle (e.g. the first or
* most recent one) to enable single-instance implementations to pass NULL.
*/
-extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva,
- uint irq);
+extern bcmsdh_info_t *bcmsdh_attach(struct osl_info *osh, void *cfghdl,
+ void **regsva, uint irq);
/* Detach - freeup resources allocated in attach */
-extern int bcmsdh_detach(osl_t *osh, void *sdh);
+extern int bcmsdh_detach(struct osl_info *osh, void *sdh);
/* Query if SD device interrupts are enabled */
extern bool bcmsdh_intr_query(void *sdh);
@@ -122,7 +122,7 @@ extern int bcmsdh_send_buf(void *sdh, u32 addr, uint fn, uint flags,
u8 *buf, uint nbytes, void *pkt,
bcmsdh_cmplt_fn_t complete, void *handle);
extern int bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
+ u8 *buf, uint nbytes, struct sk_buff *pkt,
bcmsdh_cmplt_fn_t complete, void *handle);
/* Flags bits */
@@ -174,8 +174,8 @@ extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
typedef struct {
/* attach to device */
void *(*attach) (u16 vend_id, u16 dev_id, u16 bus, u16 slot,
- u16 func, uint bustype, void *regsva, osl_t *osh,
- void *param);
+ u16 func, uint bustype, void *regsva,
+ struct osl_info *osh, void *param);
/* detach from device */
void (*detach) (void *ch);
} bcmsdh_driver_t;
diff --git a/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h b/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
index 7d5aa71a7dc7..4d671ddb3af1 100644
--- a/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
+++ b/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
@@ -51,7 +51,7 @@ extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
#define CLIENT_INTR 0x100 /* Get rid of this! */
struct sdioh_info {
- osl_t *osh; /* osh handler */
+ struct osl_info *osh; /* osh handler */
bool client_intr_enabled; /* interrupt connnected flag */
bool intr_handler_valid; /* client driver interrupt handler valid */
sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
@@ -94,8 +94,8 @@ extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
*/
/* Register mapping routines */
-extern u32 *sdioh_sdmmc_reg_map(osl_t *osh, s32 addr, int size);
-extern void sdioh_sdmmc_reg_unmap(osl_t *osh, s32 addr, int size);
+extern u32 *sdioh_sdmmc_reg_map(struct osl_info *osh, s32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(struct osl_info *osh, s32 addr, int size);
/* Interrupt (de)registration routines */
extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
diff --git a/drivers/staging/brcm80211/include/bcmsrom.h b/drivers/staging/brcm80211/include/bcmsrom.h
index 9d53657fdaa1..cdcef746284f 100644
--- a/drivers/staging/brcm80211/include/bcmsrom.h
+++ b/drivers/staging/brcm80211/include/bcmsrom.h
@@ -20,15 +20,15 @@
#include <bcmsrom_fmt.h>
/* Prototypes */
-extern int srom_var_init(si_t *sih, uint bus, void *curmap, osl_t *osh,
- char **vars, uint *count);
+extern int srom_var_init(si_t *sih, uint bus, void *curmap,
+ struct osl_info *osh, char **vars, uint *count);
-extern int srom_read(si_t *sih, uint bus, void *curmap, osl_t *osh,
+extern int srom_read(si_t *sih, uint bus, void *curmap, struct osl_info *osh,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
* and extract from it into name=value pairs
*/
-extern int srom_parsecis(osl_t *osh, u8 **pcis, uint ciscnt,
+extern int srom_parsecis(struct osl_info *osh, u8 **pcis, uint ciscnt,
char **vars, uint *count);
#endif /* _bcmsrom_h_ */
diff --git a/drivers/staging/brcm80211/include/bcmutils.h b/drivers/staging/brcm80211/include/bcmutils.h
index b53315981be0..a8f76d8199ff 100644
--- a/drivers/staging/brcm80211/include/bcmutils.h
+++ b/drivers/staging/brcm80211/include/bcmutils.h
@@ -30,7 +30,6 @@
};
/* ** driver-only section ** */
-#include <osl.h>
#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */
@@ -56,10 +55,10 @@
#endif
typedef struct pktq_prec {
- void *head; /* first packet to dequeue */
- void *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
- u16 max; /* maximum number of queued packets */
+ struct sk_buff *head; /* first packet to dequeue */
+ struct sk_buff *tail; /* last packet to dequeue */
+ u16 len; /* number of queued packets */
+ u16 max; /* maximum number of queued packets */
} pktq_prec_t;
/* multi-priority pkt queue */
@@ -105,23 +104,26 @@
#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
- extern void *pktq_penq(struct pktq *pq, int prec, void *p);
- extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
- extern void *pktq_pdeq(struct pktq *pq, int prec);
- extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+extern struct sk_buff *pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *pktq_pdeq(struct pktq *pq, int prec);
+extern struct sk_buff *pktq_pdeq_tail(struct pktq *pq, int prec);
+
/* Empty the queue at particular precedence level */
#ifdef BRCM_FULLMAC
- extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec,
+ extern void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec,
bool dir);
#else
- extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec,
+ extern void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec,
bool dir, ifpkt_cb_t fn, int arg);
#endif /* BRCM_FULLMAC */
/* operations on a set of precedences in packet queue */
- extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
- extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern struct sk_buff *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
/* operations on packet queue as a whole */
@@ -140,20 +142,19 @@
extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
- extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+ extern struct sk_buff *pktq_peek_tail(struct pktq *pq, int *prec_out);
#ifdef BRCM_FULLMAC
- extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+ extern void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir);
#else
- extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir,
+ extern void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir,
ifpkt_cb_t fn, int arg);
#endif
/* externs */
/* packet */
- extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len,
- unsigned char *buf);
- extern uint pktsegcnt(osl_t *osh, void *p);
- extern uint pkttotlen(osl_t *osh, void *p);
+ extern uint pktfrombuf(struct osl_info *osh, struct sk_buff *p,
+ uint offset, int len, unsigned char *buf);
+ extern uint pkttotlen(struct osl_info *osh, struct sk_buff *p);
/* ethernet address */
extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
@@ -166,7 +167,8 @@
extern char *getvar(char *vars, const char *name);
extern int getintvar(char *vars, const char *name);
#ifdef BCMDBG
- extern void prpkt(const char *msg, osl_t *osh, void *p0);
+ extern void prpkt(const char *msg, struct osl_info *osh,
+ struct sk_buff *p0);
#endif /* BCMDBG */
#define bcm_perf_enable()
#define bcmstats(fmt)
@@ -359,7 +361,21 @@
#define CEIL(x, y) (((x) + ((y)-1)) / (y))
#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
-/* bit map related macros */
+/* map physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), \
+ (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif
+
+/* Register operations */
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+
+#define SET_REG(osh, r, mask, val) \
+ W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
#ifndef setbit
#ifndef NBBY /* the BSD family defines NBBY */
#define NBBY 8 /* 8 bits per byte */
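/*
 * Illustrative sketch, not part of this patch: typical use of the register
 * helpers added to bcmutils.h above. The function and parameter names are
 * hypothetical; W_REG()/R_REG() come from the OSL layer, matching the
 * AND_REG/OR_REG definitions in the hunk.
 */
static void example_enable_ints(struct osl_info *osh, u32 *intmask_reg, u32 bits)
{
	OR_REG(osh, intmask_reg, bits);		/* set interrupt bits */
}

static void example_disable_ints(struct osl_info *osh, u32 *intmask_reg, u32 bits)
{
	AND_REG(osh, intmask_reg, ~bits);	/* clear interrupt bits */
}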
diff --git a/drivers/staging/brcm80211/include/d11.h b/drivers/staging/brcm80211/include/d11.h
index c07548c70e30..be2d4970407c 100644
--- a/drivers/staging/brcm80211/include/d11.h
+++ b/drivers/staging/brcm80211/include/d11.h
@@ -17,13 +17,6 @@
#ifndef _D11_H
#define _D11_H
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <hndsoc.h>
-#include <sbhndpio.h>
-#include <sbhnddma.h>
-#include <proto/802.11.h>
-
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
diff --git a/drivers/staging/brcm80211/include/dbus.h b/drivers/staging/brcm80211/include/dbus.h
deleted file mode 100644
index 81ffea79d008..000000000000
--- a/drivers/staging/brcm80211/include/dbus.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __DBUS_H__
-#define __DBUS_H__
-
-#ifdef BCMDBG
-#define DBUSERR(args) do { if (net_ratelimit()) printf args; } while (0)
-#define DBUSTRACE(args)
-#define DBUSDBGLOCK(args)
-
-#else
-#define DBUSTRACE(args)
-#define DBUSERR(args)
-#define DBUSDBGLOCK(args)
-#endif
-
-enum {
- DBUS_OK = 0,
- DBUS_ERR = -200,
- DBUS_ERR_TIMEOUT,
- DBUS_ERR_DISCONNECT,
- DBUS_ERR_NODEVICE,
- DBUS_ERR_UNSUPPORTED,
- DBUS_ERR_PENDING,
- DBUS_ERR_NOMEM,
- DBUS_ERR_TXFAIL,
- DBUS_ERR_TXTIMEOUT,
- DBUS_ERR_TXDROP,
- DBUS_ERR_RXFAIL,
- DBUS_ERR_RXDROP,
- DBUS_ERR_TXCTLFAIL,
- DBUS_ERR_RXCTLFAIL,
- DBUS_ERR_REG_PARAM,
- DBUS_STATUS_CANCELLED
-};
-
-#define ERR_CBMASK_TXFAIL 0x00000001
-#define ERR_CBMASK_RXFAIL 0x00000002
-#define ERR_CBMASK_ALL 0xFFFFFFFF
-
-#define DBUS_CBCTL_WRITE 0
-#define DBUS_CBCTL_READ 1
-
-#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
-#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
-
-#define DBUS_BUFFER_SIZE_TX 5000
-#define DBUS_BUFFER_SIZE_RX 5000
-
-#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
-#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
-
-/* DBUS types */
-enum {
- DBUS_USB,
- DBUS_SDIO,
- DBUS_SPI,
- DBUS_UNKNOWN
-};
-
-enum dbus_state {
- DBUS_STATE_DL_PENDING,
- DBUS_STATE_DL_DONE,
- DBUS_STATE_UP,
- DBUS_STATE_DOWN,
- DBUS_STATE_PNP_FWDL,
- DBUS_STATE_DISCONNECT
-};
-
-enum dbus_pnp_state {
- DBUS_PNP_DISCONNECT,
- DBUS_PNP_SLEEP,
- DBUS_PNP_RESUME
-};
-
-typedef enum _DEVICE_SPEED {
- INVALID_SPEED = -1,
- LOW_SPEED = 1, /* USB 1.1: 1.5 Mbps */
- FULL_SPEED, /* USB 1.1: 12 Mbps */
- HIGH_SPEED, /* USB 2.0: 480 Mbps */
- SUPER_SPEED, /* USB 3.0: 4.8 Gbps */
-} DEVICE_SPEED;
-
-typedef struct {
- int bustype;
- int vid;
- int pid;
- int devid;
- int chiprev; /* chip revsion number */
- int mtu;
- int nchan; /* Data Channels */
-} dbus_attrib_t;
-
-/* FIX: Account for errors related to DBUS;
- * Let upper layer account for packets/bytes
- */
-typedef struct {
- u32 rx_errors;
- u32 tx_errors;
- u32 rx_dropped;
- u32 tx_dropped;
-} dbus_stats_t;
-
-/*
- * Configurable BUS parameters
- */
-typedef struct {
- bool rxctl_deferrespok;
-} dbus_config_t;
-
-struct dbus_callbacks;
-struct exec_parms;
-
-typedef void *(*probe_cb_t) (void *arg, const char *desc, u32 bustype,
- u32 hdrlen);
-typedef void (*disconnect_cb_t) (void *arg);
-typedef void *(*exec_cb_t) (struct exec_parms *args);
-
-/* Client callbacks registered during dbus_attach() */
-typedef struct dbus_callbacks {
- void (*send_complete) (void *cbarg, void *info, int status);
- void (*recv_buf) (void *cbarg, u8 *buf, int len);
- void (*recv_pkt) (void *cbarg, void *pkt);
- void (*txflowcontrol) (void *cbarg, bool onoff);
- void (*errhandler) (void *cbarg, int err);
- void (*ctl_complete) (void *cbarg, int type, int status);
- void (*state_change) (void *cbarg, int state);
- void *(*pktget) (void *cbarg, uint len, bool send);
- void (*pktfree) (void *cbarg, void *p, bool send);
-} dbus_callbacks_t;
-
-struct dbus_pub;
-struct bcmstrbuf;
-struct dbus_irb;
-struct dbus_irb_rx;
-struct dbus_irb_tx;
-struct dbus_intf_callbacks;
-
-typedef struct {
- void *(*attach) (struct dbus_pub *pub, void *cbarg,
- struct dbus_intf_callbacks *cbs);
- void (*detach) (struct dbus_pub *pub, void *bus);
-
- int (*up) (void *bus);
- int (*down) (void *bus);
- int (*send_irb) (void *bus, struct dbus_irb_tx *txirb);
- int (*recv_irb) (void *bus, struct dbus_irb_rx *rxirb);
- int (*cancel_irb) (void *bus, struct dbus_irb_tx *txirb);
- int (*send_ctl) (void *bus, u8 *buf, int len);
- int (*recv_ctl) (void *bus, u8 *buf, int len);
- int (*get_stats) (void *bus, dbus_stats_t *stats);
- int (*get_attrib) (void *bus, dbus_attrib_t *attrib);
-
- int (*pnp) (void *bus, int event);
- int (*remove) (void *bus);
- int (*resume) (void *bus);
- int (*suspend) (void *bus);
- int (*stop) (void *bus);
- int (*reset) (void *bus);
-
- /* Access to bus buffers directly */
- void *(*pktget) (void *bus, int len);
- void (*pktfree) (void *bus, void *pkt);
-
- int (*iovar_op) (void *bus, const char *name, void *params, int plen,
- void *arg, int len, bool set);
- void (*dump) (void *bus, struct bcmstrbuf *strbuf);
- int (*set_config) (void *bus, dbus_config_t *config);
- int (*get_config) (void *bus, dbus_config_t *config);
-
- bool(*device_exists) (void *bus);
- bool(*dlneeded) (void *bus);
- int (*dlstart) (void *bus, u8 *fw, int len);
- int (*dlrun) (void *bus);
- bool(*recv_needed) (void *bus);
-
- void *(*exec_rxlock) (void *bus, exec_cb_t func,
- struct exec_parms *args);
- void *(*exec_txlock) (void *bus, exec_cb_t func,
- struct exec_parms *args);
-
- int (*tx_timer_init) (void *bus);
- int (*tx_timer_start) (void *bus, uint timeout);
- int (*tx_timer_stop) (void *bus);
-
- int (*sched_dpc) (void *bus);
- int (*lock) (void *bus);
- int (*unlock) (void *bus);
- int (*sched_probe_cb) (void *bus);
-
- int (*shutdown) (void *bus);
-
- int (*recv_stop) (void *bus);
- int (*recv_resume) (void *bus);
-
- /* Add from the bottom */
-} dbus_intf_t;
-
-typedef struct dbus_pub {
- struct osl_info *osh;
- dbus_stats_t stats;
- dbus_attrib_t attrib;
- enum dbus_state busstate;
- DEVICE_SPEED device_speed;
- int ntxq, nrxq, rxsize;
- void *bus;
- struct shared_info *sh;
-} dbus_pub_t;
-
-#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
-
-/*
- * Public Bus Function Interface
- */
-extern int dbus_register(int vid, int pid, probe_cb_t prcb,
- disconnect_cb_t discb, void *prarg, void *param1,
- void *param2);
-extern int dbus_deregister(void);
-
-extern const dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq,
- int ntxq, void *cbarg,
- dbus_callbacks_t *cbs,
- struct shared_info *sh);
-extern void dbus_detach(const dbus_pub_t *pub);
-
-extern int dbus_up(const dbus_pub_t *pub);
-extern int dbus_down(const dbus_pub_t *pub);
-extern int dbus_stop(const dbus_pub_t *pub);
-extern int dbus_shutdown(const dbus_pub_t *pub);
-extern void dbus_flowctrl_rx(const dbus_pub_t *pub, bool on);
-
-extern int dbus_send_buf(const dbus_pub_t *pub, u8 *buf, int len,
- void *info);
-extern int dbus_send_pkt(const dbus_pub_t *pub, void *pkt, void *info);
-extern int dbus_send_ctl(const dbus_pub_t *pub, u8 *buf, int len);
-extern int dbus_recv_ctl(const dbus_pub_t *pub, u8 *buf, int len);
-
-extern int dbus_get_stats(const dbus_pub_t *pub, dbus_stats_t *stats);
-extern int dbus_get_attrib(const dbus_pub_t *pub, dbus_attrib_t *attrib);
-extern int dbus_get_device_speed(const dbus_pub_t *pub);
-extern int dbus_set_config(const dbus_pub_t *pub, dbus_config_t *config);
-extern int dbus_get_config(const dbus_pub_t *pub, dbus_config_t *config);
-
-extern void *dbus_pktget(const dbus_pub_t *pub, int len);
-extern void dbus_pktfree(const dbus_pub_t *pub, void *pkt);
-
-extern int dbus_set_errmask(const dbus_pub_t *pub, u32 mask);
-extern int dbus_pnp_sleep(const dbus_pub_t *pub);
-extern int dbus_pnp_resume(const dbus_pub_t *pub, int *fw_reload);
-extern int dbus_pnp_disconnect(const dbus_pub_t *pub);
-
-extern int dbus_iovar_op(const dbus_pub_t *pub, const char *name,
- void *params, int plen, void *arg, int len, bool set);
-#ifdef BCMDBG
-extern void dbus_hist_dump(const dbus_pub_t *pub, struct bcmstrbuf *b);
-#endif /* BCMDBG */
-/*
- * Private Common Bus Interface
- */
-
-/* IO Request Block (IRB) */
-typedef struct dbus_irb {
- struct dbus_irb *next; /* it's casted from dbus_irb_tx or dbus_irb_rx struct */
-} dbus_irb_t;
-
-typedef struct dbus_irb_rx {
- struct dbus_irb irb; /* Must be first */
- u8 *buf;
- int buf_len;
- int actual_len;
- void *pkt;
- void *info;
- void *arg;
-} dbus_irb_rx_t;
-
-typedef struct dbus_irb_tx {
- struct dbus_irb irb; /* Must be first */
- u8 *buf;
- int len;
- void *pkt;
- int retry_count;
- void *info;
- void *arg;
-} dbus_irb_tx_t;
-
-/* DBUS interface callbacks are different from user callbacks
- * so, internally, different info can be passed to upper layer
- */
-typedef struct dbus_intf_callbacks {
- void (*send_irb_timeout) (void *cbarg, dbus_irb_tx_t *txirb);
- void (*send_irb_complete) (void *cbarg, dbus_irb_tx_t *txirb,
- int status);
- void (*recv_irb_complete) (void *cbarg, dbus_irb_rx_t *rxirb,
- int status);
- void (*errhandler) (void *cbarg, int err);
- void (*ctl_complete) (void *cbarg, int type, int status);
- void (*state_change) (void *cbarg, int state);
- bool(*isr) (void *cbarg, bool *wantdpc);
- bool(*dpc) (void *cbarg, bool bounded);
- void (*watchdog) (void *cbarg);
- void *(*pktget) (void *cbarg, uint len, bool send);
- void (*pktfree) (void *cbarg, void *p, bool send);
- struct dbus_irb *(*getirb) (void *cbarg, bool send);
- void (*rxerr_indicate) (void *cbarg, bool on);
-} dbus_intf_callbacks_t;
-
-/*
- * Porting: To support new bus, port these functions below
- */
-
-/*
- * Bus specific Interface
- * Implemented by dbus_usb.c/dbus_sdio.c
- */
-extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb,
- disconnect_cb_t discb, void *prarg,
- dbus_intf_t **intf, void *param1, void *param2);
-extern int dbus_bus_deregister(void);
-
-/*
- * Bus-specific and OS-specific Interface
- * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
- */
-extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb,
- disconnect_cb_t discb, void *prarg,
- dbus_intf_t **intf, void *param1,
- void *param2);
-extern int dbus_bus_osl_deregister(void);
-
-/*
- * Bus-specific, OS-specific, HW-specific Interface
- * Mainly for SDIO Host HW controller
- */
-extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb,
- disconnect_cb_t discb, void *prarg,
- dbus_intf_t **intf);
-extern int dbus_bus_osl_hw_deregister(void);
-
-#endif /* __DBUS_H__ */
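A minimal sketch of how a client of the public bus interface above could bring a link up and push a buffer; my_cbs and my_bring_up_and_send are invented names, the queue depths are arbitrary, and the callback table is assumed to be populated elsewhere:

static dbus_callbacks_t my_cbs;		/* filled in elsewhere */

static int my_bring_up_and_send(struct osl_info *osh, u8 *buf, int len)
{
	const dbus_pub_t *pub;

	/* rxsize 2048, 8 rx queues, 8 tx queues -- illustrative values */
	pub = dbus_attach(osh, 2048, 8, 8, NULL /* cbarg */, &my_cbs, NULL);
	if (pub == NULL)
		return -1;

	if (dbus_up(pub) != 0) {
		dbus_detach(pub);
		return -1;
	}

	return dbus_send_buf(pub, buf, len, NULL /* info */);
}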
diff --git a/drivers/staging/brcm80211/include/epivers.h b/drivers/staging/brcm80211/include/epivers.h
deleted file mode 100644
index 2e6b5190ad6c..000000000000
--- a/drivers/staging/brcm80211/include/epivers.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _epivers_h_
-#define _epivers_h_
-
-#define EPI_MAJOR_VERSION 5
-
-#define EPI_MINOR_VERSION 75
-
-#define EPI_RC_NUMBER 11
-
-#define EPI_INCREMENTAL_NUMBER 0
-
-#define EPI_BUILD_NUMBER 1
-
-#define EPI_VERSION { 5, 75, 11, 0 }
-
-#ifdef BCMSDIO
-/* EPI_VERSION_NUM must match FW version */
-#define EPI_VERSION_NUM 0x054b0c00
-#else
-#define EPI_VERSION_NUM 0x054b0b00
-#endif
-
-#define EPI_VERSION_DEV 5.75.11
-
-/* Driver Version String, ASCII, 32 chars max */
-#define EPI_VERSION_STR "5.75.11"
-
-#endif /* _epivers_h_ */
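An aside on the numeric form: EPI_VERSION_NUM appears to pack one byte per version component, most significant first, which is how 5.75.11.0 yields 0x054b0b00 (the BCMSDIO value is pinned to the firmware build instead, per the comment above). A quick stand-alone check, with epi_pack() being an invented helper:

#include <stdio.h>

static unsigned int epi_pack(unsigned int maj, unsigned int min,
			     unsigned int rc, unsigned int inc)
{
	return (maj << 24) | (min << 16) | (rc << 8) | inc;
}

int main(void)
{
	/* 5 -> 0x05, 75 -> 0x4b, 11 -> 0x0b, 0 -> 0x00 */
	printf("0x%08x\n", epi_pack(5, 75, 11, 0));	/* prints 0x054b0b00 */
	return 0;
}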
diff --git a/drivers/staging/brcm80211/include/hnddma.h b/drivers/staging/brcm80211/include/hnddma.h
index bee4c89be23d..4c5462baf11e 100644
--- a/drivers/staging/brcm80211/include/hnddma.h
+++ b/drivers/staging/brcm80211/include/hnddma.h
@@ -19,7 +19,7 @@
#ifndef _hnddma_pub_
#define _hnddma_pub_
-typedef const struct hnddma_pub hnddma_t;
+struct hnddma_pub;
#endif /* _hnddma_pub_ */
/* range param for dma_getnexttxp() and dma_txreclaim */
@@ -30,52 +30,54 @@ typedef enum txd_range {
} txd_range_t;
/* dma function type */
-typedef void (*di_detach_t) (hnddma_t *dmah);
-typedef bool(*di_txreset_t) (hnddma_t *dmah);
-typedef bool(*di_rxreset_t) (hnddma_t *dmah);
-typedef bool(*di_rxidle_t) (hnddma_t *dmah);
-typedef void (*di_txinit_t) (hnddma_t *dmah);
-typedef bool(*di_txenabled_t) (hnddma_t *dmah);
-typedef void (*di_rxinit_t) (hnddma_t *dmah);
-typedef void (*di_txsuspend_t) (hnddma_t *dmah);
-typedef void (*di_txresume_t) (hnddma_t *dmah);
-typedef bool(*di_txsuspended_t) (hnddma_t *dmah);
-typedef bool(*di_txsuspendedidle_t) (hnddma_t *dmah);
-typedef int (*di_txfast_t) (hnddma_t *dmah, void *p, bool commit);
-typedef int (*di_txunframed_t) (hnddma_t *dmah, void *p, uint len,
+typedef void (*di_detach_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txreset_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxreset_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxidle_t) (struct hnddma_pub *dmah);
+typedef void (*di_txinit_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txenabled_t) (struct hnddma_pub *dmah);
+typedef void (*di_rxinit_t) (struct hnddma_pub *dmah);
+typedef void (*di_txsuspend_t) (struct hnddma_pub *dmah);
+typedef void (*di_txresume_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txsuspended_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txsuspendedidle_t) (struct hnddma_pub *dmah);
+typedef int (*di_txfast_t) (struct hnddma_pub *dmah, struct sk_buff *p,
+ bool commit);
+typedef int (*di_txunframed_t) (struct hnddma_pub *dmah, void *p, uint len,
bool commit);
-typedef void *(*di_getpos_t) (hnddma_t *di, bool direction);
-typedef void (*di_fifoloopbackenable_t) (hnddma_t *dmah);
-typedef bool(*di_txstopped_t) (hnddma_t *dmah);
-typedef bool(*di_rxstopped_t) (hnddma_t *dmah);
-typedef bool(*di_rxenable_t) (hnddma_t *dmah);
-typedef bool(*di_rxenabled_t) (hnddma_t *dmah);
-typedef void *(*di_rx_t) (hnddma_t *dmah);
-typedef bool(*di_rxfill_t) (hnddma_t *dmah);
-typedef void (*di_txreclaim_t) (hnddma_t *dmah, txd_range_t range);
-typedef void (*di_rxreclaim_t) (hnddma_t *dmah);
-typedef unsigned long (*di_getvar_t) (hnddma_t *dmah, const char *name);
-typedef void *(*di_getnexttxp_t) (hnddma_t *dmah, txd_range_t range);
-typedef void *(*di_getnextrxp_t) (hnddma_t *dmah, bool forceall);
-typedef void *(*di_peeknexttxp_t) (hnddma_t *dmah);
-typedef void *(*di_peeknextrxp_t) (hnddma_t *dmah);
-typedef void (*di_rxparam_get_t) (hnddma_t *dmah, u16 *rxoffset,
+typedef void *(*di_getpos_t) (struct hnddma_pub *di, bool direction);
+typedef void (*di_fifoloopbackenable_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txstopped_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxstopped_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxenable_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxenabled_t) (struct hnddma_pub *dmah);
+typedef void *(*di_rx_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxfill_t) (struct hnddma_pub *dmah);
+typedef void (*di_txreclaim_t) (struct hnddma_pub *dmah, txd_range_t range);
+typedef void (*di_rxreclaim_t) (struct hnddma_pub *dmah);
+typedef unsigned long (*di_getvar_t) (struct hnddma_pub *dmah,
+ const char *name);
+typedef void *(*di_getnexttxp_t) (struct hnddma_pub *dmah, txd_range_t range);
+typedef void *(*di_getnextrxp_t) (struct hnddma_pub *dmah, bool forceall);
+typedef void *(*di_peeknexttxp_t) (struct hnddma_pub *dmah);
+typedef void *(*di_peeknextrxp_t) (struct hnddma_pub *dmah);
+typedef void (*di_rxparam_get_t) (struct hnddma_pub *dmah, u16 *rxoffset,
u16 *rxbufsize);
-typedef void (*di_txblock_t) (hnddma_t *dmah);
-typedef void (*di_txunblock_t) (hnddma_t *dmah);
-typedef uint(*di_txactive_t) (hnddma_t *dmah);
-typedef void (*di_txrotate_t) (hnddma_t *dmah);
-typedef void (*di_counterreset_t) (hnddma_t *dmah);
-typedef uint(*di_ctrlflags_t) (hnddma_t *dmah, uint mask, uint flags);
-typedef char *(*di_dump_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef void (*di_txblock_t) (struct hnddma_pub *dmah);
+typedef void (*di_txunblock_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txactive_t) (struct hnddma_pub *dmah);
+typedef void (*di_txrotate_t) (struct hnddma_pub *dmah);
+typedef void (*di_counterreset_t) (struct hnddma_pub *dmah);
+typedef uint(*di_ctrlflags_t) (struct hnddma_pub *dmah, uint mask, uint flags);
+typedef char *(*di_dump_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
bool dumpring);
-typedef char *(*di_dumptx_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumptx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
bool dumpring);
-typedef char *(*di_dumprx_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumprx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
bool dumpring);
-typedef uint(*di_rxactive_t) (hnddma_t *dmah);
-typedef uint(*di_txpending_t) (hnddma_t *dmah);
-typedef uint(*di_txcommitted_t) (hnddma_t *dmah);
+typedef uint(*di_rxactive_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txpending_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txcommitted_t) (struct hnddma_pub *dmah);
/* dma opsvec */
typedef struct di_fcn_s {
@@ -141,7 +143,8 @@ struct hnddma_pub {
uint txnobuf; /* tx out of dma descriptors */
};
-extern hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih,
+extern struct hnddma_pub *dma_attach(struct osl_info *osh, char *name,
+ si_t *sih,
void *dmaregstx, void *dmaregsrx, uint ntxd,
uint nrxd, uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level);
@@ -238,6 +241,6 @@ extern const di_fcn_t dma64proc;
extern uint dma_addrwidth(si_t *sih, void *dmaregs);
/* pio helpers */
-extern void dma_txpioloopback(osl_t *osh, dma32regs_t *);
+extern void dma_txpioloopback(struct osl_info *osh, dma32regs_t *);
#endif /* _hnddma_h_ */
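The effect of dropping the hnddma_t typedef, seen from a hypothetical caller (wlc_dma_attach_example and the ring sizes are invented; note that di_txfast_t now also takes a struct sk_buff * rather than a void *):

/* was: hnddma_t *di = dma_attach(osh, "wl0", sih, ...); */
static struct hnddma_pub *wlc_dma_attach_example(struct osl_info *osh,
						 si_t *sih, void *txregs,
						 void *rxregs,
						 uint *msg_level)
{
	return dma_attach(osh, "wl0", sih, txregs, rxregs,
			  256, 256, 2048, -1, 32, 0, msg_level);
}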
diff --git a/drivers/staging/brcm80211/include/hndpmu.h b/drivers/staging/brcm80211/include/hndpmu.h
index bbcf0eecd212..a0110e4c9ac4 100644
--- a/drivers/staging/brcm80211/include/hndpmu.h
+++ b/drivers/staging/brcm80211/include/hndpmu.h
@@ -28,44 +28,44 @@
#define SET_LDO_VOLTAGE_LNLDO1 9
#define SET_LDO_VOLTAGE_LNLDO2_SEL 10
-extern void si_pmu_init(si_t *sih, osl_t *osh);
-extern void si_pmu_chip_init(si_t *sih, osl_t *osh);
-extern void si_pmu_pll_init(si_t *sih, osl_t *osh, u32 xtalfreq);
-extern void si_pmu_res_init(si_t *sih, osl_t *osh);
-extern void si_pmu_swreg_init(si_t *sih, osl_t *osh);
+extern void si_pmu_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_chip_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_pll_init(si_t *sih, struct osl_info *osh, u32 xtalfreq);
+extern void si_pmu_res_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_swreg_init(si_t *sih, struct osl_info *osh);
-extern u32 si_pmu_force_ilp(si_t *sih, osl_t *osh, bool force);
+extern u32 si_pmu_force_ilp(si_t *sih, struct osl_info *osh, bool force);
-extern u32 si_pmu_si_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_mem_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_alp_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_ilp_clock(si_t *sih, osl_t *osh);
+extern u32 si_pmu_si_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_cpu_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_mem_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_alp_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_ilp_clock(si_t *sih, struct osl_info *osh);
-extern void si_pmu_set_switcher_voltage(si_t *sih, osl_t *osh,
+extern void si_pmu_set_switcher_voltage(si_t *sih, struct osl_info *osh,
u8 bb_voltage, u8 rf_voltage);
-extern void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, u8 ldo,
+extern void si_pmu_set_ldo_voltage(si_t *sih, struct osl_info *osh, u8 ldo,
u8 voltage);
-extern u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh);
-extern void si_pmu_rcal(si_t *sih, osl_t *osh);
+extern u16 si_pmu_fast_pwrup_delay(si_t *sih, struct osl_info *osh);
+extern void si_pmu_rcal(si_t *sih, struct osl_info *osh);
extern void si_pmu_pllupd(si_t *sih);
-extern void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid);
+extern void si_pmu_spuravoid(si_t *sih, struct osl_info *osh, u8 spuravoid);
-extern bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh);
-extern u32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh);
+extern bool si_pmu_is_otp_powered(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_measure_alpclk(si_t *sih, struct osl_info *osh);
extern u32 si_pmu_chipcontrol(si_t *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(si_t *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_pllcontrol(si_t *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_pllupd(si_t *sih);
-extern void si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable);
+extern void si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable);
extern void si_pmu_radio_enable(si_t *sih, bool enable);
-extern u32 si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh,
+extern u32 si_pmu_waitforclk_on_backplane(si_t *sih, struct osl_info *osh,
u32 clk, u32 delay);
-extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
-extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh,
+extern void si_pmu_otp_power(si_t *sih, struct osl_info *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, struct osl_info *osh,
u32 drivestrength);
#endif /* _hndpmu_h_ */
diff --git a/drivers/staging/brcm80211/include/linux_osl.h b/drivers/staging/brcm80211/include/linux_osl.h
deleted file mode 100644
index c9c860b6e474..000000000000
--- a/drivers/staging/brcm80211/include/linux_osl.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _linux_osl_h_
-#define _linux_osl_h_
-
-
-/* Linux Kernel: File Operations: start */
-extern void *osl_os_open_image(char *filename);
-extern int osl_os_get_image_block(char *buf, int len, void *image);
-extern void osl_os_close_image(void *image);
-/* Linux Kernel: File Operations: end */
-
-extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
-extern void osl_detach(osl_t *osh);
-
-extern u32 g_assert_type;
-
-#if defined(BCMDBG_ASSERT)
-#define ASSERT(exp) \
- do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
-extern void osl_assert(char *exp, char *file, int line);
-#else
-#ifdef __GNUC__
-#define GCC_VERSION \
- (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION > 30100
-#define ASSERT(exp) do {} while (0)
-#else
- /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
-#define ASSERT(exp)
-#endif /* GCC_VERSION > 30100 */
-#endif /* __GNUC__ */
-#endif /* defined(BCMDBG_ASSERT) */
-
-/* PCI configuration space access macros */
-#define OSL_PCI_READ_CONFIG(osh, offset, size) \
- osl_pci_read_config((osh), (offset), (size))
-#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
- osl_pci_write_config((osh), (offset), (size), (val))
-extern u32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
-extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
-
-/* PCI device bus # and slot # */
-#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
-#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
-extern uint osl_pci_bus(osl_t *osh);
-extern uint osl_pci_slot(osl_t *osh);
-
-/* Pkttag flag should be part of public information */
-typedef struct {
- bool pkttag;
- uint pktalloced; /* Number of allocated packet buffers */
- bool mmbus; /* Bus supports memory-mapped register accesses */
- pktfree_cb_fn_t tx_fn; /* Callback function for PKTFREE */
- void *tx_ctx; /* Context to the callback function */
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
- osl_rreg_fn_t rreg_fn; /* Read Register function */
- osl_wreg_fn_t wreg_fn; /* Write Register function */
- void *reg_ctx; /* Context to the reg callback functions */
-#endif
-} osl_pubinfo_t;
-
-#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
- do { \
- ((osl_pubinfo_t *)osh)->tx_fn = _tx_fn; \
- ((osl_pubinfo_t *)osh)->tx_ctx = _tx_ctx; \
- } while (0)
-
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-#define REGOPSSET(osh, rreg, wreg, ctx) \
- do { \
- ((osl_pubinfo_t *)osh)->rreg_fn = rreg; \
- ((osl_pubinfo_t *)osh)->wreg_fn = wreg; \
- ((osl_pubinfo_t *)osh)->reg_ctx = ctx; \
- } while (0)
-#endif
-
-#define BUS_SWAP32(v) (v)
-
-#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
-extern uint osl_dma_consistent_align(void);
-extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, u16 align,
- uint *tot, unsigned long *pap);
-
-#ifdef BRCM_FULLMAC
-#define DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
- osl_dma_alloc_consistent((osh), (size), (0), (tot), (pap))
-#else
-#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
- osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
-#endif /* BRCM_FULLMAC */
-
-#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
- osl_dma_free_consistent((osh), (void *)(va), (size), (pa))
-extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, unsigned long pa);
-
-/* map/unmap direction */
-#define DMA_TX 1 /* TX direction for DMA */
-#define DMA_RX 2 /* RX direction for DMA */
-
-/* map/unmap shared (dma-able) memory */
-#define DMA_MAP(osh, va, size, direction, p, dmah) \
- osl_dma_map((osh), (va), (size), (direction))
-#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
- osl_dma_unmap((osh), (pa), (size), (direction))
-extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
-extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
-
-/* API for DMA addressing capability */
-#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
-
-/* register access macros */
-#if defined(BCMSDIO)
-#ifdef BRCM_FULLMAC
-#include <bcmsdh.h>
-#endif
-#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (unsigned long)(r), sizeof(*(r)), (v)))
-#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (unsigned long)(r), sizeof(*(r))))
-#endif
-
-#if defined(BCMSDIO)
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t *)(osh))->mmbus) \
- mmap_op else bus_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t *)(osh))->mmbus) ? \
- mmap_op : bus_op
-#else
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op
-#endif
-
-#define OSL_ERROR(bcmerror) osl_error(bcmerror)
-extern int osl_error(int bcmerror);
-
-/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
-#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */
-
-#define OSL_SYSUPTIME() ((u32)jiffies * (1000 / HZ))
-#define printf(fmt, args...) printk(fmt , ## args)
-#ifdef BRCM_FULLMAC
-#include <linux/kernel.h> /* for vsn/printf's */
-#include <linux/string.h> /* for mem*, str* */
-#endif
-/* bcopy's: Linux kernel doesn't provide these (anymore) */
-#define bcopy(src, dst, len) memcpy((dst), (src), (len))
-#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
-#define bzero(b, len) memset((b), '\0', (len))
-
-/* register access macros */
-#if defined(OSLREGOPS)
-#else
-#ifndef IL_BIGENDIAN
-#ifndef __mips__
-#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(u8) ? readb((volatile u8*)(r)) : \
- sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
- readl((volatile u32*)(r)), OSL_READ_REG(osh, r)) \
-)
-#else /* __mips__ */
-#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((volatile u8*)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((volatile u16*)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = \
- readl((volatile u32*)(r)); \
- break; \
- } \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- }), \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- __osl_v = OSL_READ_REG(osh, r); \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- })) \
-)
-#endif /* __mips__ */
-
-#define W_REG(osh, r, v) do { \
- SELECT_BUS_WRITE(osh, \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), (volatile u8*)(r)); break; \
- case sizeof(u16): \
- writew((u16)(v), (volatile u16*)(r)); break; \
- case sizeof(u32): \
- writel((u32)(v), (volatile u32*)(r)); break; \
- }, \
- (OSL_WRITE_REG(osh, r, v))); \
- } while (0)
-#else /* IL_BIGENDIAN */
-#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = \
- readb((volatile u8*)((r)^3)); \
- break; \
- case sizeof(u16): \
- __osl_v = \
- readw((volatile u16*)((r)^2)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((volatile u32*)(r)); \
- break; \
- } \
- __osl_v; \
- }), \
- OSL_READ_REG(osh, r)) \
-)
-#define W_REG(osh, r, v) do { \
- SELECT_BUS_WRITE(osh, \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), \
- (volatile u8*)((r)^3)); break; \
- case sizeof(u16): \
- writew((u16)(v), \
- (volatile u16*)((r)^2)); break; \
- case sizeof(u32): \
- writel((u32)(v), \
- (volatile u32*)(r)); break; \
- }, \
- (OSL_WRITE_REG(osh, r, v))); \
- } while (0)
-#endif /* IL_BIGENDIAN */
-
-#endif /* OSLREGOPS */
-
-#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
-#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
-
-/* bcopy, bcmp, and bzero functions */
-#define bcopy(src, dst, len) memcpy((dst), (src), (len))
-#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
-#define bzero(b, len) memset((b), '\0', (len))
-
-/* uncached/cached virtual address */
-#ifdef __mips__
-#include <asm/addrspace.h>
-#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va)))
-#define OSL_CACHED(va) ((void *)KSEG0ADDR((va)))
-#else
-#define OSL_UNCACHED(va) ((void *)va)
-#define OSL_CACHED(va) ((void *)va)
-#endif /* mips */
-
-#if defined(mips)
-#define OSL_GETCYCLES(x) ((x) = read_c0_count() * 2)
-#elif defined(__i386__)
-#define OSL_GETCYCLES(x) rdtscl((x))
-#else
-#define OSL_GETCYCLES(x) ((x) = 0)
-#endif /* defined(mips) */
-
-/* dereference an address that may cause a bus exception */
-#ifdef mips
-#define BUSPROBE(val, addr) get_dbe((val), (addr))
-#include <asm/paccess.h>
-#else
-#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
-#endif /* mips */
-
-/* map/unmap physical to virtual I/O */
-#if !defined(CONFIG_MMC_MSM7X00A)
-#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
-#else
-#define REG_MAP(pa, size) (void *)(0)
-#endif /* !defined(CONFIG_MMC_MSM7X00A */
-#define REG_UNMAP(va) iounmap((va))
-
-#define R_SM(r) (*(r))
-#define W_SM(r, v) (*(r) = (v))
-#define BZERO_SM(r, len) memset((r), '\0', (len))
-
-#ifdef BRCM_FULLMAC
-#include <linuxver.h> /* use current 2.4.x calling conventions */
-#endif
-
-/* packet primitives */
-#define PKTGET(osh, len, send) osl_pktget((osh), (len))
-#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
-#define PKTDATA(skb) (((struct sk_buff *)(skb))->data)
-#define PKTLEN(skb) (((struct sk_buff *)(skb))->len)
-#define PKTHEADROOM(skb) (PKTDATA(skb)-(((struct sk_buff *)(skb))->head))
-#define PKTTAILROOM(skb) ((((struct sk_buff *)(skb))->end)-(((struct sk_buff *)(skb))->tail))
-#define PKTNEXT(skb) (((struct sk_buff *)(skb))->next)
-#define PKTSETNEXT(skb, x) \
- (((struct sk_buff *)(skb))->next = (struct sk_buff *)(x))
-#define PKTSETLEN(skb, len) __skb_trim((struct sk_buff *)(skb), (len))
-#define PKTPUSH(skb, bytes) skb_push((struct sk_buff *)(skb), (bytes))
-#define PKTPULL(skb, bytes) skb_pull((struct sk_buff *)(skb), (bytes))
-#define PKTTAG(skb) ((void *)(((struct sk_buff *)(skb))->cb))
-#define PKTALLOCED(osh) (((osl_pubinfo_t *)(osh))->pktalloced)
-#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
-#define PKTPOOL(osh, skb) false
-extern void *osl_pktget(osl_t *osh, uint len);
-extern void osl_pktfree(osl_t *osh, void *skb, bool send);
-
-#ifdef BRCM_FULLMAC
-extern void *osl_pktget_static(osl_t *osh, uint len);
-extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
-
-static inline void *
-osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb)
-{
- struct sk_buff *nskb;
-
- if (osh->pkttag)
- bzero((void *)skb->cb, OSL_PKTTAG_SZ);
-
- for (nskb = skb; nskb; nskb = nskb->next)
- osh->pktalloced++;
-
- return (void *)skb;
-}
-#define PKTFRMNATIVE(osh, skb) \
- osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
-
-static inline struct sk_buff *
-osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
-{
- struct sk_buff *nskb;
-
- if (osh->pkttag)
- bzero(((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
-
- for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next)
- osh->pktalloced--;
-
- return (struct sk_buff *)pkt;
-}
-#define PKTTONATIVE(osh, pkt) \
- osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
-#else /* !BRCM_FULLMAC */
-#define PKTUNALLOC(osh) (((osl_pubinfo_t *)(osh))->pktalloced--)
-
-#define PKTSETSKIPCT(osh, skb)
-#define PKTCLRSKIPCT(osh, skb)
-#define PKTSKIPCT(osh, skb)
-#endif /* BRCM_FULLMAC */
-
-#define PKTLINK(skb) (((struct sk_buff *)(skb))->prev)
-#define PKTSETLINK(skb, x) (((struct sk_buff *)(skb))->prev = (struct sk_buff*)(x))
-#define PKTPRIO(skb) (((struct sk_buff *)(skb))->priority)
-#define PKTSETPRIO(skb, x) (((struct sk_buff *)(skb))->priority = (x))
-#define PKTSUMNEEDED(skb) (((struct sk_buff *)(skb))->ip_summed == CHECKSUM_PARTIAL)
-#define PKTSETSUMGOOD(skb, x) (((struct sk_buff *)(skb))->ip_summed = \
- ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
-#define PKTSHARED(skb) (((struct sk_buff *)(skb))->cloned)
-
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-#define RPC_READ_REG(osh, r) (\
- sizeof(*(r)) == sizeof(u8) ? osl_readb((osh), (volatile u8*)(r)) : \
- sizeof(*(r)) == sizeof(u16) ? osl_readw((osh), (volatile u16*)(r)) : \
- osl_readl((osh), (volatile u32*)(r)) \
-)
-#define RPC_WRITE_REG(osh, r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- osl_writeb((osh), (volatile u8*)(r), (u8)(v)); \
- break; \
- case sizeof(u16): \
- osl_writew((osh), (volatile u16*)(r), (u16)(v)); \
- break; \
- case sizeof(u32): \
- osl_writel((osh), (volatile u32*)(r), (u32)(v)); \
- break; \
- } \
-} while (0)
-
-extern u8 osl_readb(osl_t *osh, volatile u8 *r);
-extern u16 osl_readw(osl_t *osh, volatile u16 *r);
-extern u32 osl_readl(osl_t *osh, volatile u32 *r);
-extern void osl_writeb(osl_t *osh, volatile u8 *r, u8 v);
-extern void osl_writew(osl_t *osh, volatile u16 *r, u16 v);
-extern void osl_writel(osl_t *osh, volatile u32 *r, u32 v);
-#endif /* BCMSDIO */
-
-#endif /* _linux_osl_h_ */
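How the register macros above work, reduced to a sketch (brcm_fake_regs and brcm_fake_poke are invented; the same R_REG/W_REG pair now lives in osl.h): sizeof(*(r)) selects readb/readw/readl or writeb/writew/writel at compile time, so one macro covers 8-, 16- and 32-bit registers.

#include <linux/types.h>

struct brcm_fake_regs {
	u32 intstatus;
	u16 chanspec;
	u8  gpio;
};

static void brcm_fake_poke(struct osl_info *osh, struct brcm_fake_regs *regs)
{
	u32 st = R_REG(osh, &regs->intstatus);		/* expands to readl() */

	W_REG(osh, &regs->gpio, (u8)(st & 0xff));	/* expands to writeb() */
}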
diff --git a/drivers/staging/brcm80211/include/linuxver.h b/drivers/staging/brcm80211/include/linuxver.h
deleted file mode 100644
index dc721413ee29..000000000000
--- a/drivers/staging/brcm80211/include/linuxver.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _linuxver_h_
-#define _linuxver_h_
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/ieee80211.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-
-#undef IP_TOS
-#include <asm/io.h>
-
-#endif /* _linuxver_h_ */
diff --git a/drivers/staging/brcm80211/include/nicpci.h b/drivers/staging/brcm80211/include/nicpci.h
index ce146e88ffdf..928818daedd7 100644
--- a/drivers/staging/brcm80211/include/nicpci.h
+++ b/drivers/staging/brcm80211/include/nicpci.h
@@ -45,17 +45,17 @@
#else
struct sbpcieregs;
-extern u8 pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id,
+extern u8 pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
-extern uint pcie_readreg(osl_t *osh, struct sbpcieregs *pcieregs,
+extern uint pcie_readreg(struct osl_info *osh, struct sbpcieregs *pcieregs,
uint addrtype, uint offset);
-extern uint pcie_writereg(osl_t *osh, struct sbpcieregs *pcieregs,
+extern uint pcie_writereg(struct osl_info *osh, struct sbpcieregs *pcieregs,
uint addrtype, uint offset, uint val);
extern u8 pcie_clkreq(void *pch, u32 mask, u32 val);
extern u32 pcie_lcreg(void *pch, u32 mask, u32 val);
-extern void *pcicore_init(si_t *sih, osl_t *osh, void *regs);
+extern void *pcicore_init(si_t *sih, struct osl_info *osh, void *regs);
extern void pcicore_deinit(void *pch);
extern void pcicore_attach(void *pch, char *pvars, int state);
extern void pcicore_hwup(void *pch);
@@ -70,10 +70,10 @@ extern u32 pcicore_pcieserdesreg(void *pch, u32 mdioslave, u32 offset,
extern u32 pcicore_pciereg(void *pch, u32 offset, u32 mask,
u32 val, uint type);
-extern bool pcicore_pmecap_fast(osl_t *osh);
+extern bool pcicore_pmecap_fast(struct osl_info *osh);
extern void pcicore_pmeen(void *pch);
extern void pcicore_pmeclr(void *pch);
extern bool pcicore_pmestat(void *pch);
-#endif /* defined(BCMSDIO) || (defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)) */
+#endif /* defined(BCMSDIO)||(defined(BCMBUSTYPE) && (BCMBUSTYPE==SI_BUS)) */
#endif /* _NICPCI_H */
diff --git a/drivers/staging/brcm80211/include/osl.h b/drivers/staging/brcm80211/include/osl.h
index c0ebb3d97220..b28235618d8b 100644
--- a/drivers/staging/brcm80211/include/osl.h
+++ b/drivers/staging/brcm80211/include/osl.h
@@ -18,42 +18,197 @@
#define _osl_h_
/* osl handle type forward declaration */
-typedef struct osl_info osl_t;
+struct osl_info {
+ uint pktalloced; /* Number of allocated packet buffers */
+ bool mmbus; /* Bus supports memory-mapped registers */
+ uint magic;
+ void *pdev;
+ uint bustype;
+};
+
typedef struct osl_dmainfo osldma_t;
-#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
-/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
-typedef void (*pktfree_cb_fn_t) (void *ctx, void *pkt, unsigned int status);
+extern struct osl_info *osl_attach(void *pdev, uint bustype);
+extern void osl_detach(struct osl_info *osh);
-#ifdef BCMSDIO
-/* Drivers use REGOPSSET() to register register read/write functions */
-typedef unsigned int (*osl_rreg_fn_t) (void *ctx, void *reg, unsigned int size);
-typedef void (*osl_wreg_fn_t) (void *ctx, void *reg, unsigned int val,
- unsigned int size);
-#endif
+extern u32 g_assert_type;
-#include <linux_osl.h>
+#if defined(BCMDBG_ASSERT)
+#define ASSERT(exp) \
+ do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(char *exp, char *file, int line);
+#else
+#define ASSERT(exp) do {} while (0)
+#endif /* defined(BCMDBG_ASSERT) */
-/* --------------------------------------------------------------------------
-** Register manipulation macros.
-*/
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+extern uint osl_pci_bus(struct osl_info *osh);
+extern uint osl_pci_slot(struct osl_info *osh);
-#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+#define BUS_SWAP32(v) (v)
-#ifndef AND_REG
-#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
-#endif /* !AND_REG */
+extern void *osl_dma_alloc_consistent(struct osl_info *osh, uint size,
+ u16 align, uint *tot, unsigned long *pap);
+
+#ifdef BRCM_FULLMAC
+#define DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
+ osl_dma_alloc_consistent((osh), (size), (0), (tot), (pap))
+#else
+#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#endif /* BRCM_FULLMAC */
-#ifndef OR_REG
-#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
-#endif /* !OR_REG */
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void *)(va), (size), (pa))
+extern void osl_dma_free_consistent(struct osl_info *osh, void *va,
+ uint size, unsigned long pa);
+
+/* map/unmap direction */
+#define DMA_TX 1 /* TX direction for DMA */
+#define DMA_RX 2 /* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction))
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(struct osl_info *osh, void *va, uint size,
+ int direction);
+extern void osl_dma_unmap(struct osl_info *osh, uint pa, uint size,
+ int direction);
+
+/* register access macros */
+#if defined(BCMSDIO)
+#ifdef BRCM_FULLMAC
+#include <bcmsdh.h>
+#endif
+#define OSL_WRITE_REG(osh, r, v) \
+ (bcmsdh_reg_write(NULL, (unsigned long)(r), sizeof(*(r)), (v)))
+#define OSL_READ_REG(osh, r) \
+ (bcmsdh_reg_read(NULL, (unsigned long)(r), sizeof(*(r))))
+#endif
-#if !defined(OSL_SYSUPTIME)
-#define OSL_SYSUPTIME() (0)
-#define OSL_SYSUPTIME_SUPPORT false
+#if defined(BCMSDIO)
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) \
+ if ((osh)->mmbus) \
+ mmap_op else bus_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) \
+ ((osh)->mmbus) ? mmap_op : bus_op
#else
-#define OSL_SYSUPTIME_SUPPORT true
-#endif /* OSL_SYSUPTIME */
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op
+#endif
+
+/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
+#define PKTBUFSZ 2048
+
+#define OSL_SYSUPTIME() ((u32)jiffies * (1000 / HZ))
+#define printf(fmt, args...) printk(fmt , ## args)
+#ifdef BRCM_FULLMAC
+#include <linux/kernel.h> /* for vsn/printf's */
+#include <linux/string.h> /* for mem*, str* */
+#endif
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+
+/* register access macros */
+#ifndef IL_BIGENDIAN
+#ifndef __mips__
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(u8) ? \
+ readb((volatile u8*)(r)) : \
+ sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
+ readl((volatile u32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#else /* __mips__ */
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ __osl_v = readb((volatile u8*)(r)); \
+ break; \
+ case sizeof(u16): \
+ __osl_v = readw((volatile u16*)(r)); \
+ break; \
+ case sizeof(u32): \
+ __osl_v = \
+ readl((volatile u32*)(r)); \
+ break; \
+ } \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ }), \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ __osl_v = OSL_READ_REG(osh, r); \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ })) \
+)
+#endif /* __mips__ */
+
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ writeb((u8)(v), (volatile u8*)(r)); break; \
+ case sizeof(u16): \
+ writew((u16)(v), (volatile u16*)(r)); break; \
+ case sizeof(u32): \
+ writel((u32)(v), (volatile u32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#else /* IL_BIGENDIAN */
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ __osl_v = \
+ readb((volatile u8*)((r)^3)); \
+ break; \
+ case sizeof(u16): \
+ __osl_v = \
+ readw((volatile u16*)((r)^2)); \
+ break; \
+ case sizeof(u32): \
+ __osl_v = readl((volatile u32*)(r)); \
+ break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ writeb((u8)(v), \
+ (volatile u8*)((r)^3)); break; \
+ case sizeof(u16): \
+ writew((u16)(v), \
+ (volatile u16*)((r)^2)); break; \
+ case sizeof(u32): \
+ writel((u32)(v), \
+ (volatile u32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif /* IL_BIGENDIAN */
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+
+/* packet primitives */
+extern struct sk_buff *pkt_buf_get_skb(struct osl_info *osh, uint len);
+extern void pkt_buf_free_skb(struct osl_info *osh, struct sk_buff *skb, bool send);
-#endif /* _osl_h_ */
+#endif /* _osl_h_ */
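A minimal sketch of the new packet primitives, assuming (as osl_pktget did) that the returned sk_buff has at least len bytes of room; brcm_fill_and_send is an invented name and a real caller would hand the skb to a tx path rather than freeing it immediately:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

static int brcm_fill_and_send(struct osl_info *osh, const void *data, uint len)
{
	struct sk_buff *skb = pkt_buf_get_skb(osh, len);	/* was PKTGET() */

	if (skb == NULL)
		return -ENOMEM;

	memcpy(skb->data, data, len);		/* was PKTDATA() */
	__skb_trim(skb, len);			/* was PKTSETLEN() */

	/* ... queue skb to the bus/tx path here ... */

	pkt_buf_free_skb(osh, skb, true);	/* was PKTFREE(osh, skb, true) */
	return 0;
}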
diff --git a/drivers/staging/brcm80211/include/proto/ethernet.h b/drivers/staging/brcm80211/include/proto/ethernet.h
index cc17b428dd3f..567407de020e 100644
--- a/drivers/staging/brcm80211/include/proto/ethernet.h
+++ b/drivers/staging/brcm80211/include/proto/ethernet.h
@@ -17,28 +17,22 @@
#ifndef _NET_ETHERNET_H_
#define _NET_ETHERNET_H_
+#include <linux/if_ether.h>
+
#include <packed_section_start.h>
-#define ETHER_ADDR_LEN 6
#define ETHER_TYPE_LEN 2
#define ETHER_CRC_LEN 4
-#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
#define ETHER_MIN_LEN 64
#define ETHER_MIN_DATA 46
#define ETHER_MAX_LEN 1518
#define ETHER_MAX_DATA 1500
-#define ETHER_TYPE_MIN 0x0600
-#define ETHER_TYPE_IP 0x0800
-#define ETHER_TYPE_ARP 0x0806
-#define ETHER_TYPE_8021Q 0x8100
#define ETHER_TYPE_BRCM 0x886c
-#define ETHER_TYPE_802_1X 0x888e
-#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
-#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
-#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
-#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+#define ETHER_DEST_OFFSET (0 * ETH_ALEN)
+#define ETHER_SRC_OFFSET (1 * ETH_ALEN)
+#define ETHER_TYPE_OFFSET (2 * ETH_ALEN)
#define ETHER_IS_VALID_LEN(foo) \
((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
@@ -53,50 +47,18 @@
}
BWL_PRE_PACKED_STRUCT struct ether_header {
- u8 ether_dhost[ETHER_ADDR_LEN];
- u8 ether_shost[ETHER_ADDR_LEN];
+ u8 ether_dhost[ETH_ALEN];
+ u8 ether_shost[ETH_ALEN];
u16 ether_type;
} BWL_POST_PACKED_STRUCT;
BWL_PRE_PACKED_STRUCT struct ether_addr {
- u8 octet[ETHER_ADDR_LEN];
+ u8 octet[ETH_ALEN];
} BWL_POST_PACKED_STRUCT;
-#define ETHER_SET_LOCALADDR(ea) (((u8 *)(ea))[0] = (((u8 *)(ea))[0] | 2))
-#define ETHER_IS_LOCALADDR(ea) (((u8 *)(ea))[0] & 2)
-#define ETHER_CLR_LOCALADDR(ea) (((u8 *)(ea))[0] = \
- (((u8 *)(ea))[0] & 0xd))
-#define ETHER_TOGGLE_LOCALADDR(ea) (((u8 *)(ea))[0] = \
- (((u8 *)(ea))[0] ^ 2))
-
#define ETHER_SET_UNICAST(ea) (((u8 *)(ea))[0] = (((u8 *)(ea))[0] & ~1))
-#define ETHER_ISMULTI(ea) (((const u8 *)(ea))[0] & 1)
-
-#define ether_cmp(a, b) (!(((short *)a)[0] == ((short *)b)[0]) | \
- !(((short *)a)[1] == ((short *)b)[1]) | \
- !(((short *)a)[2] == ((short *)b)[2]))
-
-#define ether_copy(s, d) { \
- ((short *)d)[0] = ((short *)s)[0]; \
- ((short *)d)[1] = ((short *)s)[1]; \
- ((short *)d)[2] = ((short *)s)[2]; }
-
static const struct ether_addr ether_bcast = { {255, 255, 255, 255, 255, 255} };
-static const struct ether_addr ether_null = { {0, 0, 0, 0, 0, 0} };
-
-#define ETHER_ISBCAST(ea) ((((u8 *)(ea))[0] & \
- ((u8 *)(ea))[1] & \
- ((u8 *)(ea))[2] & \
- ((u8 *)(ea))[3] & \
- ((u8 *)(ea))[4] & \
- ((u8 *)(ea))[5]) == 0xff)
-#define ETHER_ISNULLADDR(ea) ((((u8 *)(ea))[0] | \
- ((u8 *)(ea))[1] | \
- ((u8 *)(ea))[2] | \
- ((u8 *)(ea))[3] | \
- ((u8 *)(ea))[4] | \
- ((u8 *)(ea))[5]) == 0)
#define ETHER_MOVE_HDR(d, s) \
do { \
diff --git a/drivers/staging/brcm80211/include/proto/wpa.h b/drivers/staging/brcm80211/include/proto/wpa.h
index ec84c9f2b5ee..10c2fb62df09 100644
--- a/drivers/staging/brcm80211/include/proto/wpa.h
+++ b/drivers/staging/brcm80211/include/proto/wpa.h
@@ -19,95 +19,7 @@
#include <proto/ethernet.h>
-#include <packed_section_start.h>
-
-#define DOT11_RC_INVALID_WPA_IE 13
-#define DOT11_RC_MIC_FAILURE 14
-#define DOT11_RC_4WH_TIMEOUT 15
-#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
-#define DOT11_RC_WPA_IE_MISMATCH 17
-#define DOT11_RC_INVALID_MC_CIPHER 18
-#define DOT11_RC_INVALID_UC_CIPHER 19
-#define DOT11_RC_INVALID_AKMP 20
-#define DOT11_RC_BAD_WPA_VERSION 21
-#define DOT11_RC_INVALID_WPA_CAP 22
-#define DOT11_RC_8021X_AUTH_FAIL 23
-
#define WPA2_PMKID_LEN 16
-
-typedef BWL_PRE_PACKED_STRUCT struct {
- u8 tag;
- u8 length;
- u8 oui[3];
- u8 oui_type;
- BWL_PRE_PACKED_STRUCT struct {
- u8 low;
- u8 high;
- } BWL_POST_PACKED_STRUCT version;
-} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
-#define WPA_IE_OUITYPE_LEN 4
-#define WPA_IE_FIXED_LEN 8
-#define WPA_IE_TAG_FIXED_LEN 6
-
-typedef BWL_PRE_PACKED_STRUCT struct {
- u8 tag;
- u8 length;
- BWL_PRE_PACKED_STRUCT struct {
- u8 low;
- u8 high;
- } BWL_POST_PACKED_STRUCT version;
-} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
-#define WPA_RSN_IE_FIXED_LEN 4
-#define WPA_RSN_IE_TAG_FIXED_LEN 2
-typedef u8 wpa_pmkid_t[WPA2_PMKID_LEN];
-
-typedef BWL_PRE_PACKED_STRUCT struct {
- u8 oui[3];
- u8 type;
-} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
-#define WPA_SUITE_LEN 4
-
-typedef BWL_PRE_PACKED_STRUCT struct {
- BWL_PRE_PACKED_STRUCT struct {
- u8 low;
- u8 high;
- } BWL_POST_PACKED_STRUCT count;
- wpa_suite_t list[1];
-} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
-#define WPA_IE_SUITE_COUNT_LEN 2
-typedef BWL_PRE_PACKED_STRUCT struct {
- BWL_PRE_PACKED_STRUCT struct {
- u8 low;
- u8 high;
- } BWL_POST_PACKED_STRUCT count;
- wpa_pmkid_t list[1];
-} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
-
-#define WPA_CIPHER_NONE 0
-#define WPA_CIPHER_WEP_40 1
-#define WPA_CIPHER_TKIP 2
-#define WPA_CIPHER_AES_OCB 3
-#define WPA_CIPHER_AES_CCM 4
-#define WPA_CIPHER_WEP_104 5
-
-#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
- (cipher) == WPA_CIPHER_WEP_40 || \
- (cipher) == WPA_CIPHER_WEP_104 || \
- (cipher) == WPA_CIPHER_TKIP || \
- (cipher) == WPA_CIPHER_AES_OCB || \
- (cipher) == WPA_CIPHER_AES_CCM)
-
-#define WPA_TKIP_CM_DETECT 60
-#define WPA_TKIP_CM_BLOCK 60
-
-#define RSN_CAP_LEN 2
-
-#define RSN_CAP_PREAUTH 0x0001
-#define RSN_CAP_NOPAIRWISE 0x0002
-#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
-#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
-#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
-#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
#define RSN_CAP_1_REPLAY_CNTR 0
#define RSN_CAP_2_REPLAY_CNTRS 1
#define RSN_CAP_4_REPLAY_CNTRS 2
@@ -118,10 +30,4 @@ typedef BWL_PRE_PACKED_STRUCT struct {
#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
-#define WPA_CAP_LEN RSN_CAP_LEN
-
-#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
-
-#include <packed_section_end.h>
-
#endif /* _proto_wpa_h_ */
diff --git a/drivers/staging/brcm80211/include/rpc_osl.h b/drivers/staging/brcm80211/include/rpc_osl.h
index 4a2648001bf0..c59d9ed1397a 100644
--- a/drivers/staging/brcm80211/include/rpc_osl.h
+++ b/drivers/staging/brcm80211/include/rpc_osl.h
@@ -18,7 +18,7 @@
#define _rpcosl_h_
typedef struct rpc_osl rpc_osl_t;
-extern rpc_osl_t *rpc_osl_attach(osl_t *osh);
+extern rpc_osl_t *rpc_osl_attach(struct osl_info *osh);
extern void rpc_osl_detach(rpc_osl_t *rpc_osh);
#define RPC_OSL_LOCK(rpc_osh) rpc_osl_lock((rpc_osh))
diff --git a/drivers/staging/brcm80211/include/siutils.h b/drivers/staging/brcm80211/include/siutils.h
index 57c36507a040..a935092d02df 100644
--- a/drivers/staging/brcm80211/include/siutils.h
+++ b/drivers/staging/brcm80211/include/siutils.h
@@ -19,9 +19,6 @@
#include <hndsoc.h>
-#if !defined(WLC_LOW)
-#include "bcm_rpc.h"
-#endif
/*
* Data structure to export all chip specific common variables
* public (read-only) portion of siutils handle returned by si_attach()
@@ -50,19 +47,12 @@ struct si_pub {
uint socirev; /* SOC interconnect rev */
bool pci_pr32414;
-#if !defined(WLC_LOW)
- rpc_info_t *rpc;
-#endif
};
/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver
* for monolithic driver, it is readonly to prevent accident change
*/
-#if !defined(WLC_LOW)
-typedef struct si_pub si_t;
-#else
typedef const struct si_pub si_t;
-#endif
/*
* Many of the routines below take an 'sih' handle as their first arg.
@@ -128,8 +118,8 @@ typedef void (*gpio_handler_t) (u32 stat, void *arg);
#define GPIO_CTRL_EPA_EN_MASK 0x40
/* === exported functions === */
-extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
- void *sdh, char **vars, uint *varsz);
+extern si_t *si_attach(uint pcidev, struct osl_info *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
extern void si_detach(si_t *sih);
extern bool si_pci_war16165(si_t *sih);
@@ -138,7 +128,7 @@ extern uint si_coreid(si_t *sih);
extern uint si_flag(si_t *sih);
extern uint si_coreidx(si_t *sih);
extern uint si_corerev(si_t *sih);
-extern void *si_osh(si_t *sih);
+struct osl_info *si_osh(si_t *sih);
extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask,
uint val);
extern void si_write_wrapperreg(si_t *sih, u32 offset, u32 val);
@@ -227,7 +217,7 @@ typedef struct gpioh_item {
/* misc si info needed by some of the routines */
typedef struct si_info {
struct si_pub pub; /* back plane public state (must be first field) */
- void *osh; /* osl os handle */
+ struct osl_info *osh; /* osl os handle */
void *sdh; /* bcmsdh handle */
uint dev_coreid; /* the core provides driver functions */
void *intr_arg; /* interrupt callback function arg */
@@ -305,9 +295,9 @@ typedef struct si_info {
#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
-#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+#define PCI(si) (((si)->pub.bustype == PCI_BUS) && \
((si)->pub.buscoretype == PCI_CORE_ID))
-#define PCIE(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+#define PCIE(si) (((si)->pub.bustype == PCI_BUS) && \
((si)->pub.buscoretype == PCIE_CORE_ID))
#define PCI_FORCEHT(si) \
(PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
@@ -344,9 +334,9 @@ extern void si_epa_4313war(si_t *sih);
char *si_getnvramflvar(si_t *sih, const char *name);
/* AMBA Interconnect exported externs */
-extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
- void *sdh, char **vars, uint *varsz);
-extern si_t *ai_kattach(osl_t *osh);
+extern si_t *ai_attach(uint pcidev, struct osl_info *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(struct osl_info *osh);
extern void ai_scan(si_t *sih, void *regs, uint devid);
extern uint ai_flag(si_t *sih);
diff --git a/drivers/staging/brcm80211/include/wlioctl.h b/drivers/staging/brcm80211/include/wlioctl.h
index 96866fb8898c..9be793c5f10c 100644
--- a/drivers/staging/brcm80211/include/wlioctl.h
+++ b/drivers/staging/brcm80211/include/wlioctl.h
@@ -33,82 +33,9 @@
#define BWL_DEFAULT_PACKING
#include <packed_section_start.h>
-/* Legacy structure to help keep backward compatible wl tool and tray app */
-
-#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */
-
-typedef struct wl_bss_info_107 {
- u32 version; /* version field */
- u32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- struct ether_addr BSSID;
- u16 beacon_period; /* units are Kusec */
- u16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- uint count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- u8 channel; /* Channel no. */
- u16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- s16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
- u32 ie_length; /* byte length of Information Elements */
- /* variable length Information Elements */
-} wl_bss_info_107_t;
-
-/*
- * Per-BSS information structure.
- */
-
-#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in wl_scan_results_t)
- */
-typedef struct wl_bss_info_108 {
- u32 version; /* version field */
- u32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- struct ether_addr BSSID;
- u16 beacon_period; /* units are Kusec */
- u16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- uint count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- chanspec_t chanspec; /* chanspec for bss */
- u16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- s16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
-
- u8 n_cap; /* BSS is 802.11N Capable */
- u32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
- u8 ctl_ch; /* 802.11N BSS control channel number */
- u32 reserved32[1]; /* Reserved for expansion of BSS properties */
- u8 flags; /* flags */
- u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
-
- u16 ie_offset; /* offset at which IEs start, from beginning */
- u32 ie_length; /* byte length of Information Elements */
- /* Add new fields here */
- /* variable length Information Elements */
-} wl_bss_info_108_t;
-
#ifdef BRCM_FULLMAC
+
#define WL_BSS_INFO_VERSION 108 /* current ver of wl_bss_info struct */
-#else
-#define WL_BSS_INFO_VERSION 109 /* current ver of wl_bss_info struct */
-#endif
/* BSS info structure
* Applications MUST CHECK ie_offset field and length field to access IEs and
@@ -148,12 +75,14 @@ typedef struct wl_bss_info {
/* Add new fields here */
/* variable length Information Elements */
} wl_bss_info_t;
+#endif /* BRCM_FULLMAC */
typedef struct wlc_ssid {
u32 SSID_len;
unsigned char SSID[32];
} wlc_ssid_t;
+#ifdef BRCM_FULLMAC
typedef struct chan_scandata {
u8 txpower;
u8 pad;
@@ -308,6 +237,7 @@ typedef struct wl_probe_params {
struct ether_addr bssid;
struct ether_addr mac;
} wl_probe_params_t;
+#endif /* BRCM_FULLMAC */
#define WL_NUMRATES 16 /* max # of rates in a rateset */
typedef struct wl_rateset {
@@ -315,6 +245,7 @@ typedef struct wl_rateset {
u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
} wl_rateset_t;
+#ifdef BRCM_FULLMAC
typedef struct wl_rateset_args {
u32 count; /* # rates in this set */
u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
@@ -352,6 +283,8 @@ typedef struct wl_join_params {
} wl_join_params_t;
#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t))
+#endif /* BRCM_FULLMAC */
+
/* defines used by the nrate iovar */
#define NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
@@ -391,6 +324,7 @@ typedef struct {
#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+#ifdef BRCM_FULLMAC
#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
@@ -428,8 +362,11 @@ typedef struct {
cca_congest_t secs[1]; /* Data */
} cca_congest_channel_req_t;
+#endif /* BRCM_FULLMAC */
+
#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
+#ifdef BRCM_FULLMAC
typedef struct wl_country {
char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in
* the Country IE
@@ -516,6 +453,7 @@ typedef struct wl_rm_rep {
wl_rm_rep_elt_t rep[1]; /* variable length block of reports */
} wl_rm_rep_t;
#define WL_RM_REP_FIXED_LEN 8
+#endif /* BRCM_FULLMAC */
/* Enumerate crypto algorithms */
#define CRYPTO_ALGO_OFF 0
@@ -621,28 +559,6 @@ typedef struct wl_led_info {
u8 activehi;
} wl_led_info_t;
-/* flags */
-#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
-
-/* srom read/write struct passed through ioctl */
-typedef struct {
- uint byteoff; /* byte offset */
- uint nbytes; /* number of bytes */
- u16 buf[1];
-} srom_rw_t;
-
-/* similar cis (srom or otp) struct [iovar: may not be aligned] */
-typedef struct {
- u32 source; /* cis source */
- u32 byteoff; /* byte offset */
- u32 nbytes; /* number of bytes */
- /* data follows here */
-} cis_rw_t;
-
-#define WLC_CIS_DEFAULT 0 /* built-in default */
-#define WLC_CIS_SROM 1 /* source is sprom */
-#define WLC_CIS_OTP 2 /* source is otp */
-
/* R_REG and W_REG struct passed through ioctl */
typedef struct {
u32 byteoff; /* byte offset of the field in d11regs_t */
@@ -651,102 +567,14 @@ typedef struct {
uint band; /* band (optional) */
} rw_reg_t;
-/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
-/* PCL - Power Control Loop */
-/* current gain setting is replaced by user input */
-#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
-#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
-/* current gain setting is maintained */
-#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
-
-typedef struct {
- u16 auto_ctrl; /* WL_ATTEN_XX */
- u16 bb; /* Baseband attenuation */
- u16 radio; /* Radio attenuation */
- u16 txctl1; /* Radio TX_CTL1 value */
-} atten_t;
-
-/* Per-AC retry parameters */
-struct wme_tx_params_s {
- u8 short_retry;
- u8 short_fallback;
- u8 long_retry;
- u8 long_fallback;
- u16 max_rate; /* In units of 512 Kbps */
-};
-
-typedef struct wme_tx_params_s wme_tx_params_t;
-
-#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
-
-/* defines used by poweridx iovar - it controls power in a-band */
-/* current gain setting is maintained */
-#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
-#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
-#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
-#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
-/* value >= 0 causes
- * - input to be set to that value
- * - PCL to be off
- */
-
-/* Used to get specific link/ac parameters */
-typedef struct {
- int ac;
- u8 val;
- struct ether_addr ea;
-} link_val_t;
-
-#define BCM_MAC_STATUS_INDICATION (0x40010200L)
-
-typedef struct {
- u16 ver; /* version of this struct */
- u16 len; /* length in bytes of this structure */
- u16 cap; /* sta's advertised capabilities */
- u32 flags; /* flags defined below */
- u32 idle; /* time since data pkt rx'd from sta */
- struct ether_addr ea; /* Station address */
- wl_rateset_t rateset; /* rateset in use */
- u32 in; /* seconds elapsed since associated */
- u32 listen_interval_inms; /* Min Listen interval in ms for this STA */
- u32 tx_pkts; /* # of packets transmitted */
- u32 tx_failures; /* # of packets failed */
- u32 rx_ucast_pkts; /* # of unicast packets received */
- u32 rx_mcast_pkts; /* # of multicast packets received */
- u32 tx_rate; /* Rate of last successful tx frame */
- u32 rx_rate; /* Rate of last successful rx frame */
- u32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
- u32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */
-} sta_info_t;
-
-#define WL_OLD_STAINFO_SIZE offsetof(sta_info_t, tx_pkts)
-
-#define WL_STA_VER 3
-
-/* Flags for sta_info_t indicating properties of STA */
-#define WL_STA_BRCM 0x1 /* Running a Broadcom driver */
-#define WL_STA_WME 0x2 /* WMM association */
-#define WL_STA_ABCAP 0x4
-#define WL_STA_AUTHE 0x8 /* Authenticated */
-#define WL_STA_ASSOC 0x10 /* Associated */
-#define WL_STA_AUTHO 0x20 /* Authorized */
-#define WL_STA_WDS 0x40 /* Wireless Distribution System */
-#define WL_STA_WDS_LINKUP 0x80 /* WDS traffic/probes flowing properly */
-#define WL_STA_PS 0x100 /* STA is in power save mode from AP's viewpoint */
-#define WL_STA_APSD_BE 0x200 /* APSD delv/trigger for AC_BE is default enabled */
-#define WL_STA_APSD_BK 0x400 /* APSD delv/trigger for AC_BK is default enabled */
-#define WL_STA_APSD_VI 0x800 /* APSD delv/trigger for AC_VI is default enabled */
-#define WL_STA_APSD_VO 0x1000 /* APSD delv/trigger for AC_VO is default enabled */
-#define WL_STA_N_CAP 0x2000 /* STA 802.11n capable */
-#define WL_STA_SCBSTATS 0x4000 /* Per STA debug stats */
-
-#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
+#ifdef BRCM_FULLMAC
/* Used to get specific STA parameters */
typedef struct {
u32 val;
struct ether_addr ea;
} scb_val_t;
+#endif /* BRCM_FULLMAC */
/* channel encoding */
typedef struct channel_info {
@@ -770,6 +598,7 @@ typedef struct get_pktcnt {
uint rx_ocast_good_pkt; /* unicast packets destined for others */
} get_pktcnt_t;
+#ifdef BRCM_FULLMAC
/* Linux network driver ioctl encoding */
typedef struct wl_ioctl {
uint cmd; /* common ioctl definition */
@@ -779,11 +608,8 @@ typedef struct wl_ioctl {
uint used; /* bytes read or written (optional) */
uint needed; /* bytes needed (optional) */
} wl_ioctl_t;
+#endif /* BRCM_FULLMAC */
-/* reference to wl_ioctl_t struct used by usermode driver */
-#define ioctl_subtype set /* subtype param */
-#define ioctl_pid used /* pid param */
-#define ioctl_status needed /* status param */
/*
* Structure for passing hardware and software
@@ -810,45 +636,11 @@ typedef struct wlc_rev_info {
#define WL_REV_INFO_LEGACY_LENGTH 48
-#define WL_BRAND_MAX 10
-typedef struct wl_instance_info {
- uint instance;
- char brand[WL_BRAND_MAX];
-} wl_instance_info_t;
-
-/* structure to change size of tx fifo */
-typedef struct wl_txfifo_sz {
- u16 magic;
- u16 fifo;
- u16 size;
-} wl_txfifo_sz_t;
-/* magic pattern used for mismatch driver and wl */
-#define WL_TXFIFO_SZ_MAGIC 0xa5a5
-
-/* Transfer info about an IOVar from the driver */
-/* Max supported IOV name size in bytes, + 1 for nul termination */
-#define WLC_IOV_NAME_LEN 30
-typedef struct wlc_iov_trx_s {
- u8 module;
- u8 type;
- char name[WLC_IOV_NAME_LEN];
-} wlc_iov_trx_t;
-
-/* check this magic number */
-#define WLC_IOCTL_MAGIC 0x14e46c77
-
-#define PROC_ENTRY_NAME "brcm_debug"
-/* bump this number if you change the ioctl interface */
-#define WLC_IOCTL_VERSION 1
-
#ifdef BRCM_FULLMAC
-#define WLC_IOCTL_MAXLEN 8192
-#else
-#define WLC_IOCTL_MAXLEN 3072 /* max length ioctl buffer required */
-#endif
#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
-#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
+#define WLC_IOCTL_MAXLEN 8192
+#endif
/* common ioctl definitions */
#define WLC_GET_MAGIC 0
@@ -1399,23 +1191,6 @@ typedef struct {
#define WL_TX_POWER_MCS40_FIRST 28
#define WL_TX_POWER_MCS40_NUM 17
-typedef struct {
- u32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- u8 local_max; /* local max according to the AP */
- u8 local_constraint; /* local constraint according to the AP */
- s8 antgain[2]; /* Ant gain for each band - from SROM */
- u8 rf_cores; /* count of RF Cores being reported */
- u8 est_Pout[4]; /* Latest tx power out estimate per RF
- * chain without adjustment
- */
- u8 est_Pout_cck; /* Latest CCK tx power out estimate */
- u8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */
- u8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */
- u8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
- u8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */
-} tx_power_legacy2_t;
#define WL_TX_POWER_RATES 101
#define WL_TX_POWER_CCK_FIRST 0
@@ -1848,63 +1623,6 @@ struct ampdu_retry_tid {
u8 retry; /* retry value */
};
-/* structure for addts arguments */
-/* For ioctls that take a list of TSPEC */
-struct tslist {
- int count; /* number of tspecs */
- struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */
-};
-
-/* structure for addts/delts arguments */
-typedef struct tspec_arg {
- u16 version; /* see definition of TSPEC_ARG_VERSION */
- u16 length; /* length of entire structure */
- uint flag; /* bit field */
- /* TSPEC Arguments */
- struct tsinfo_arg tsinfo; /* TS Info bit field */
- u16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
- u16 max_msdu_size; /* Maximum MSDU Size (bytes) */
- uint min_srv_interval; /* Minimum Service Interval (us) */
- uint max_srv_interval; /* Maximum Service Interval (us) */
- uint inactivity_interval; /* Inactivity Interval (us) */
- uint suspension_interval; /* Suspension Interval (us) */
- uint srv_start_time; /* Service Start Time (us) */
- uint min_data_rate; /* Minimum Data Rate (bps) */
- uint mean_data_rate; /* Mean Data Rate (bps) */
- uint peak_data_rate; /* Peak Data Rate (bps) */
- uint max_burst_size; /* Maximum Burst Size (bytes) */
- uint delay_bound; /* Delay Bound (us) */
- uint min_phy_rate; /* Minimum PHY Rate (bps) */
- u16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
- u16 medium_time; /* Medium Time (32 us/s periods) */
- u8 dialog_token; /* dialog token */
-} tspec_arg_t;
-
-/* tspec arg for desired station */
-typedef struct tspec_per_sta_arg {
- struct ether_addr ea;
- struct tspec_arg ts;
-} tspec_per_sta_arg_t;
-
-/* structure for max bandwidth for each access category */
-typedef struct wme_max_bandwidth {
- u32 ac[AC_COUNT]; /* max bandwidth for each access category */
-} wme_max_bandwidth_t;
-
-#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
-
-/* current version of wl_tspec_arg_t struct */
-#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */
-#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */
-#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */
-#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */
-
-/* define for flag */
-#define TSPEC_PENDING 0 /* TSPEC pending */
-#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
-#define TSPEC_REJECTED 2 /* TSPEC rejected */
-#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
-#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
/* Software feature flag defines used by wlfeatureflag */
#define WL_SWFL_NOHWRADIO 0x0004
@@ -1913,16 +1631,6 @@ typedef struct wme_max_bandwidth {
#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
-/*
- * Dongle pattern matching filter.
- */
-
-/* Packet filter types. Currently, only pattern matching is supported. */
-typedef enum wl_pkt_filter_type {
- WL_PKT_FILTER_TYPE_PATTERN_MATCH /* Pattern matching filter */
-} wl_pkt_filter_type_t;
-
-#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
@@ -1957,20 +1665,6 @@ typedef struct wl_pkt_filter_enable {
u32 enable; /* Enable/disable bool */
} wl_pkt_filter_enable_t;
-/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
-typedef struct wl_pkt_filter_list {
- u32 num; /* Number of installed packet filters */
- wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */
-} wl_pkt_filter_list_t;
-
-#define WL_PKT_FILTER_LIST_FIXED_LEN offsetof(wl_pkt_filter_list_t, filter)
-
-/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
-typedef struct wl_pkt_filter_stats {
- u32 num_pkts_matched; /* # filter matches for specified filter id */
- u32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */
- u32 num_pkts_discarded; /* # packets discarded by dongle for all filters */
-} wl_pkt_filter_stats_t;
#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
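
The net effect of the wlioctl.h hunks above: scb_val_t, wl_ioctl_t and the large
WLC_IOCTL_MAXLEN buffer length are now guarded by BRCM_FULLMAC, the small and
medium buffer lengths stay visible to both builds, and the long tail of unused
ioctl structures is dropped. A minimal standalone sketch of the guard's effect
(plain userspace C; the length constants are copied from the hunk above, the
program itself is hypothetical, not driver code):

    #include <stdio.h>

    #define BRCM_FULLMAC            /* comment out to mimic a softmac (mac80211) build */

    #define WLC_IOCTL_SMLEN   256   /* "small" ioctl buffer, both builds  */
    #define WLC_IOCTL_MEDLEN 1536   /* "medium" ioctl buffer, both builds */
    #ifdef BRCM_FULLMAC
    #define WLC_IOCTL_MAXLEN 8192   /* large ioctl buffer, fullmac builds only */
    #endif

    int main(void)
    {
    #ifdef BRCM_FULLMAC
            char buf[WLC_IOCTL_MAXLEN];     /* fullmac can size for the big ioctls   */
    #else
            char buf[WLC_IOCTL_MEDLEN];     /* softmac build never sees WLC_IOCTL_MAXLEN */
    #endif
            printf("ioctl scratch buffer: %zu bytes\n", sizeof(buf));
            return 0;
    }
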
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_cmn.c b/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
index 8287261120f4..3bed37cb59b8 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
@@ -20,10 +20,15 @@
#include <linux/string.h>
#include <bcmdefs.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmendian.h>
#include <bcmnvram.h>
#include <sbchipc.h>
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <wlc_phy_int.h>
#include <wlc_phyreg_n.h>
@@ -163,7 +168,7 @@ char *phy_getvar(phy_info_t *pi, const char *name)
return NULL;
for (s = vars; s && *s;) {
- if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+ if ((memcmp(s, name, len) == 0) && (s[len] == '='))
return &s[len + 1];
while (*s++)
@@ -272,7 +277,7 @@ u16 read_radio_reg(phy_info_t *pi, u16 addr)
void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
{
- osl_t *osh;
+ struct osl_info *osh;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -296,7 +301,7 @@ void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
W_REG(osh, &pi->regs->phy4wdatalo, val);
}
- if (BUSTYPE(pi->sh->bustype) == PCI_BUS) {
+ if (pi->sh->bustype == PCI_BUS) {
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
(void)R_REG(osh, &pi->regs->maccontrol);
pi->phy_wreg = 0;
@@ -405,7 +410,7 @@ static bool wlc_phy_war41476(phy_info_t *pi)
u16 read_phy_reg(phy_info_t *pi, u16 addr)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = pi->sh->osh;
@@ -426,7 +431,7 @@ u16 read_phy_reg(phy_info_t *pi, u16 addr)
void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = pi->sh->osh;
@@ -441,7 +446,7 @@ void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
#else
W_REG(osh, (volatile u32 *)(&regs->phyregaddr),
addr | (val << 16));
- if (BUSTYPE(pi->sh->bustype) == PCI_BUS) {
+ if (pi->sh->bustype == PCI_BUS) {
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
pi->phy_wreg = 0;
(void)R_REG(osh, &regs->phyversion);
@@ -452,7 +457,7 @@ void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
void and_phy_reg(phy_info_t *pi, u16 addr, u16 val)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = pi->sh->osh;
@@ -473,7 +478,7 @@ void and_phy_reg(phy_info_t *pi, u16 addr, u16 val)
void or_phy_reg(phy_info_t *pi, u16 addr, u16 val)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = pi->sh->osh;
@@ -494,7 +499,7 @@ void or_phy_reg(phy_info_t *pi, u16 addr, u16 val)
void mod_phy_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = pi->sh->osh;
@@ -591,7 +596,7 @@ shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp)
void wlc_phy_shared_detach(shared_phy_t *phy_sh)
{
- osl_t *osh;
+ struct osl_info *osh;
if (phy_sh) {
osh = phy_sh->osh;
@@ -609,7 +614,7 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype, char *vars
u32 sflags = 0;
uint phyversion;
int i;
- osl_t *osh;
+ struct osl_info *osh;
osh = sh->osh;
@@ -1080,8 +1085,8 @@ wlc_phy_table_addr(phy_info_t *pi, uint tbl_id, uint tbl_offset,
pi->tbl_data_hi = tblDataHi;
pi->tbl_data_lo = tblDataLo;
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
- CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID ||
+ pi->sh->chip == BCM43421_CHIP_ID) &&
(pi->sh->chiprev == 1)) {
pi->tbl_addr = tblAddr;
pi->tbl_save_id = tbl_id;
@@ -1093,8 +1098,8 @@ void wlc_phy_table_data_write(phy_info_t *pi, uint width, u32 val)
{
ASSERT((width == 8) || (width == 16) || (width == 32));
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
- CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID ||
+ pi->sh->chip == BCM43421_CHIP_ID) &&
(pi->sh->chiprev == 1) &&
(pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, pi->tbl_data_lo);
@@ -1132,8 +1137,8 @@ wlc_phy_write_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
- CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID ||
+ pi->sh->chip == BCM43421_CHIP_ID) &&
(pi->sh->chiprev == 1) &&
(tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, tblDataLo);
@@ -1175,8 +1180,8 @@ wlc_phy_read_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
- CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID ||
+ pi->sh->chip == BCM43421_CHIP_ID) &&
(pi->sh->chiprev == 1)) {
(void)read_phy_reg(pi, tblDataLo);
@@ -1534,7 +1539,7 @@ wlc_phy_chanspec_band_validch(wlc_phy_t *ppi, uint band, chanvec_t *channels)
ASSERT((band == WLC_BAND_2G) || (band == WLC_BAND_5G));
- bzero(channels, sizeof(chanvec_t));
+ memset(channels, 0, sizeof(chanvec_t));
for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
channel = chan_info_all[i].chan;
@@ -1896,7 +1901,7 @@ void wlc_phy_txpower_recalc_target(phy_info_t *pi)
tx_pwr_min = min(tx_pwr_min, tx_pwr_target[rate]);
}
- bzero(pi->tx_power_offset, sizeof(pi->tx_power_offset));
+ memset(pi->tx_power_offset, 0, sizeof(pi->tx_power_offset));
pi->tx_power_max = tx_pwr_max;
pi->tx_power_min = tx_pwr_min;
pi->tx_power_max_rate_ind = tx_pwr_max_rate_ind;
@@ -2507,7 +2512,7 @@ wlc_phy_noise_calc_phy(phy_info_t *pi, u32 *cmplx_pwr, s8 *pwr_ant)
s8 cmplx_pwr_dbm[PHY_CORE_MAX];
u8 i;
- bzero((u8 *) cmplx_pwr_dbm, sizeof(cmplx_pwr_dbm));
+ memset((u8 *) cmplx_pwr_dbm, 0, sizeof(cmplx_pwr_dbm));
ASSERT(pi->pubpi.phy_corenum <= PHY_CORE_MAX);
wlc_phy_compute_dB(cmplx_pwr, cmplx_pwr_dbm, pi->pubpi.phy_corenum);
@@ -2621,9 +2626,9 @@ wlc_phy_noise_sample_request(wlc_phy_t *pih, u8 reason, u8 ch)
u8 wait_crs = 0;
u8 i;
- bzero((u8 *) est, sizeof(est));
- bzero((u8 *) cmplx_pwr, sizeof(cmplx_pwr));
- bzero((u8 *) noise_dbm_ant, sizeof(noise_dbm_ant));
+ memset((u8 *) est, 0, sizeof(est));
+ memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
+ memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
log_num_samps = PHY_NOISE_SAMPLE_LOG_NUM_NPHY;
num_samps = 1 << log_num_samps;
@@ -2704,8 +2709,8 @@ static s8 wlc_phy_noise_read_shmem(phy_info_t *pi)
u8 idx, core;
ASSERT(pi->pubpi.phy_corenum <= PHY_CORE_MAX);
- bzero((u8 *) cmplx_pwr, sizeof(cmplx_pwr));
- bzero((u8 *) noise_dbm_ant, sizeof(noise_dbm_ant));
+ memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
+ memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
for (idx = 0, core = 0; core < pi->pubpi.phy_corenum; idx += 2, core++) {
lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx));
@@ -3325,7 +3330,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
void wlc_lcnphy_epa_switch(phy_info_t *pi, bool mode)
{
- if ((CHIPID(pi->sh->chip) == BCM4313_CHIP_ID) &&
+ if ((pi->sh->chip == BCM4313_CHIP_ID) &&
(pi->sh->boardflags & BFL_FEM)) {
if (mode) {
u16 txant = 0;
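
The wlc_phy_cmn.c changes above follow one mechanical pattern: the BSD-style
helpers bzero()/bcmp() become memset()/memcmp(), osl_t becomes struct osl_info,
and the CHIPID()/BUSTYPE() wrapper macros are dropped in favour of reading
pi->sh->chip and pi->sh->bustype directly. A standalone before/after sketch of
those substitutions (illustrative only; the struct, constants and function
names below are made up and are not driver code):

    #include <string.h>
    #include <stddef.h>

    struct fake_sh {                /* stand-in for the driver's shared state */
            unsigned int chip;
            unsigned int bustype;
    };

    #define FAKE_CHIP_ID    0xa8d8  /* arbitrary value for illustration */
    #define FAKE_PCI_BUS    0

    /* old: return (bcmp(s, name, len) == 0) && (s[len] == '='); */
    int var_matches(const char *s, const char *name, size_t len)
    {
            return (memcmp(s, name, len) == 0) && (s[len] == '=');
    }

    void reset_state(unsigned char *buf, size_t n, const struct fake_sh *sh)
    {
            /* old: bzero(buf, n); */
            memset(buf, 0, n);

            /* old: if ((CHIPID(sh->chip) == FAKE_CHIP_ID) &&
             *         (BUSTYPE(sh->bustype) == FAKE_PCI_BUS))
             */
            if (sh->chip == FAKE_CHIP_ID && sh->bustype == FAKE_PCI_BUS)
                    buf[0] = 1;     /* arbitrary action, just to touch the fields */
    }

    int main(void)
    {
            struct fake_sh sh = { FAKE_CHIP_ID, FAKE_PCI_BUS };
            unsigned char buf[8];

            reset_state(buf, sizeof(buf), &sh);
            return var_matches("chanspec=0x1001", "chanspec", 8) ? 0 : 1;
    }
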
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_hal.h b/drivers/staging/brcm80211/phy/wlc_phy_hal.h
index 52260b2d0eba..514e15e00283 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_hal.h
+++ b/drivers/staging/brcm80211/phy/wlc_phy_hal.h
@@ -122,11 +122,7 @@ typedef struct shared_phy shared_phy_t;
struct phy_pub;
-#ifdef WLC_HIGH_ONLY
-typedef struct wlc_rpc_phy wlc_phy_t;
-#else
typedef struct phy_pub wlc_phy_t;
-#endif
typedef struct shared_phy_params {
void *osh;
@@ -150,7 +146,6 @@ typedef struct shared_phy_params {
u32 boardflags2;
} shared_phy_params_t;
-#ifdef WLC_LOW
extern shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp);
extern void wlc_phy_shared_detach(shared_phy_t *phy_sh);
@@ -189,7 +184,6 @@ extern void wlc_phy_set_deaf(wlc_phy_t *ppi, bool user_flag);
extern void wlc_phy_switch_radio(wlc_phy_t *ppi, bool on);
extern void wlc_phy_anacore(wlc_phy_t *ppi, bool on);
-#endif /* WLC_LOW */
extern void wlc_phy_BSSinit(wlc_phy_t *ppi, bool bonlyap, int rssi);
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_int.h b/drivers/staging/brcm80211/phy/wlc_phy_int.h
index 9513b87fa163..72eee9120c2f 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_int.h
+++ b/drivers/staging/brcm80211/phy/wlc_phy_int.h
@@ -527,7 +527,7 @@ typedef struct {
struct shared_phy {
struct phy_info *phy_head;
uint unit;
- osl_t *osh;
+ struct osl_info *osh;
si_t *sih;
void *physhim;
uint corerev;
@@ -1158,7 +1158,7 @@ extern void wlc_phy_table_write_nphy(phy_info_t *pi, u32, u32, u32,
(pi->ipa5g_on && CHSPEC_IS5G(pi->radio_chanspec)))
#define WLC_PHY_WAR_PR51571(pi) \
- if ((BUSTYPE((pi)->sh->bustype) == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
+ if (((pi)->sh->bustype == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)R_REG((pi)->sh->osh, &(pi)->regs->maccontrol)
extern void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype);
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_lcn.c b/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
index 3d3112ed4e20..3ac2b49d9a9d 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
@@ -17,13 +17,18 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <wlc_cfg.h>
#include <qmath.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/pci.h>
#include <siutils.h>
#include <hndpmu.h>
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+
#include <wlc_phy_radio.h>
#include <wlc_phy_int.h>
#include <wlc_phy_lcn.h>
@@ -1327,7 +1332,7 @@ static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi)
u32 data_buf[64];
phytbl_info_t tab;
- bzero(data_buf, sizeof(data_buf));
+ memset(data_buf, 0, sizeof(data_buf));
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
@@ -1951,7 +1956,7 @@ wlc_lcnphy_tx_iqlo_cal(phy_info_t *pi,
band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
cal_gains = *target_gains;
- bzero(ncorr_override, sizeof(ncorr_override));
+ memset(ncorr_override, 0, sizeof(ncorr_override));
for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
cal_gains.gm_gain =
@@ -2529,7 +2534,7 @@ static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi)
tab.tbl_width = 32;
tab.tbl_offset = 0;
- bzero(temp_offset, sizeof(temp_offset));
+ memset(temp_offset, 0, sizeof(temp_offset));
for (j = 1; j < 128; j += 2)
temp_offset[j] = 0x80000;
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_n.c b/drivers/staging/brcm80211/phy/wlc_phy_n.c
index 950008f122b1..c6cce8de1aee 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_n.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_n.c
@@ -18,13 +18,18 @@
#include <linux/string.h>
#include <bcmdefs.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
#include <osl.h>
#include <siutils.h>
#include <sbchipc.h>
#include <hndpmu.h>
#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+
#include <wlc_phy_radio.h>
#include <wlc_phy_int.h>
#include <wlc_phyreg_n.h>
@@ -14554,7 +14559,7 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
}
}
- if ((!PHY_IPA(pi)) && (CHIPID(pi->sh->chip) == BCM5357_CHIP_ID)) {
+ if ((!PHY_IPA(pi)) && (pi->sh->chip == BCM5357_CHIP_ID)) {
si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
CCTRL5357_EXTPA);
}
@@ -17599,7 +17604,7 @@ static void wlc_phy_radio_postinit_2057(phy_info_t *pi)
mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x1, 0x1);
- if (CHIPID(pi->sh->chip) == !BCM6362_CHIP_ID) {
+ if (pi->sh->chip == !BCM6362_CHIP_ID) {
mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x2, 0x2);
}
@@ -18007,8 +18012,8 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
RADIO_2056_SYN, 0x1f);
- if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID)) {
+ if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+ (pi->sh->chip == BCM47162_CHIP_ID)) {
write_radio_reg(pi,
RADIO_2056_SYN_PLL_LOOPFILTER4 |
@@ -18070,8 +18075,8 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
PADG_IDAC, 0xcc);
- if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
- (CHIPID(pi->sh->chip) ==
+ if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+ (pi->sh->chip ==
BCM47162_CHIP_ID)) {
bias = 0x40;
cascbias = 0x45;
@@ -18083,11 +18088,11 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
bias = 0x25;
cascbias = 0x20;
- if ((CHIPID(pi->sh->chip) ==
+ if ((pi->sh->chip ==
BCM43224_CHIP_ID)
- || (CHIPID(pi->sh->chip) ==
+ || (pi->sh->chip ==
BCM43225_CHIP_ID)
- || (CHIPID(pi->sh->chip) ==
+ || (pi->sh->chip ==
BCM43421_CHIP_ID)) {
if (pi->sh->chippkg ==
BCM43224_FAB_SMIC) {
@@ -18198,9 +18203,9 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
cascbias = 0x30;
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM43225_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM43421_CHIP_ID)) {
+ if ((pi->sh->chip == BCM43224_CHIP_ID) ||
+ (pi->sh->chip == BCM43225_CHIP_ID) ||
+ (pi->sh->chip == BCM43421_CHIP_ID)) {
if (pi->sh->chippkg == BCM43224_FAB_SMIC) {
cascbias = 0x35;
}
@@ -18927,7 +18932,7 @@ static void wlc_phy_spurwar_nphy(phy_info_t *pi)
case 38:
case 102:
case 118:
- if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) &&
+ if ((pi->sh->chip == BCM4716_CHIP_ID) &&
(pi->sh->chippkg == BCM4717_PKG_ID)) {
nphy_adj_tone_id_buf[0] = 32;
nphy_adj_noise_var_buf[0] = 0x21f;
@@ -19062,7 +19067,7 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
if (pi->nphy_aband_spurwar_en &&
((val == 38) || (val == 102)
|| (val == 118))) {
- if ((CHIPID(pi->sh->chip) ==
+ if ((pi->sh->chip ==
BCM4716_CHIP_ID)
&& (pi->sh->chippkg ==
BCM4717_PKG_ID)) {
@@ -19077,8 +19082,8 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
spuravoid = 1;
- if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID)) {
+ if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+ (pi->sh->chip == BCM47162_CHIP_ID)) {
si_pmu_spuravoid(pi->sh->sih, pi->sh->osh, spuravoid);
} else {
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
@@ -19086,9 +19091,9 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
}
- if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM43225_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM43421_CHIP_ID)) {
+ if ((pi->sh->chip == BCM43224_CHIP_ID) ||
+ (pi->sh->chip == BCM43225_CHIP_ID) ||
+ (pi->sh->chip == BCM43421_CHIP_ID)) {
if (spuravoid == 1) {
@@ -19105,8 +19110,8 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
}
}
- if (!((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
- (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID))) {
+ if (!((pi->sh->chip == BCM4716_CHIP_ID) ||
+ (pi->sh->chip == BCM47162_CHIP_ID))) {
wlapi_bmac_core_phypll_reset(pi->sh->physhim);
}
@@ -21062,11 +21067,11 @@ s16 wlc_phy_tempsense_nphy(phy_info_t *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
&auxADC_rssi_ctrlH_save);
- if (CHIPID(pi->sh->chip) == BCM5357_CHIP_ID) {
+ if (pi->sh->chip == BCM5357_CHIP_ID) {
radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
+ 88 * (auxADC_Vl) - 27111 +
128) / 256;
- } else if (CHIPID(pi->sh->chip) == BCM43236_CHIP_ID) {
+ } else if (pi->sh->chip == BCM43236_CHIP_ID) {
radio_temp[0] = (198 * (radio_temp[1] + radio_temp2[1])
+ 91 * (auxADC_Vl) - 27243 +
128) / 256;
@@ -26277,7 +26282,7 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(phy_info_t *pi)
} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
- if (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID) {
+ if (pi->sh->chip == BCM47162_CHIP_ID) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
}
@@ -26833,7 +26838,7 @@ wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
phy_a2 = 63;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (CHIPID(pi->sh->chip) == BCM6362_CHIP_ID) {
+ if (pi->sh->chip == BCM6362_CHIP_ID) {
phy_a1 = 35;
phy_a3 = 35;
} else if ((pi->pubpi.radiorev == 4)
@@ -26946,7 +26951,7 @@ wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
if (NREV_GE(pi->pubpi.phy_rev, 6)) {
phy_a5 = 0x00f7 | (phy_a4 << 8);
- if (CHIPID(pi->sh->chip) ==
+ if (pi->sh->chip ==
BCM47162_CHIP_ID) {
phy_a5 =
0x10f7 | (phy_a4 <<
diff --git a/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c b/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
index 6ce9e5d96830..330b88152b65 100644
--- a/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
@@ -15,6 +15,9 @@
*/
#include <linux/types.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <osl.h>
#include <wlc_phy_int.h>
#include <wlc_phytbl_lcn.h>
diff --git a/drivers/staging/brcm80211/phy/wlc_phytbl_n.c b/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
index 7cc2c563c727..a9fc193721ef 100644
--- a/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
+++ b/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
@@ -16,6 +16,9 @@
#include <linux/kernel.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <osl.h>
#include <wlc_phy_int.h>
#include <wlc_phytbl_n.h>
diff --git a/drivers/staging/brcm80211/sys/wl_dbg.h b/drivers/staging/brcm80211/sys/wl_dbg.h
index e63b27ebad5e..54af257598c2 100644
--- a/drivers/staging/brcm80211/sys/wl_dbg.h
+++ b/drivers/staging/brcm80211/sys/wl_dbg.h
@@ -20,15 +20,20 @@
/* wl_msg_level is a bit vector with defs in wlioctl.h */
extern u32 wl_msg_level;
-#define WL_PRINT(args) printf args
-#define WL_NONE(args)
+#define WL_NONE(fmt, args...) no_printk(fmt, ##args)
+
+#define WL_PRINT(level, fmt, args...) \
+do { \
+ if (wl_msg_level & level) \
+ printk(fmt, ##args); \
+} while (0)
#ifdef BCMDBG
-#define WL_ERROR(args) do {if ((wl_msg_level & WL_ERROR_VAL)) WL_PRINT(args); } while (0)
-#define WL_TRACE(args) do {if (wl_msg_level & WL_TRACE_VAL) WL_PRINT(args); } while (0)
-#define WL_AMPDU(args) do {if (wl_msg_level & WL_AMPDU_VAL) WL_PRINT(args); } while (0)
-#define WL_FFPLD(args) do {if (wl_msg_level & WL_FFPLD_VAL) WL_PRINT(args); } while (0)
+#define WL_ERROR(fmt, args...) WL_PRINT(WL_ERROR_VAL, fmt, ##args)
+#define WL_TRACE(fmt, args...) WL_PRINT(WL_TRACE_VAL, fmt, ##args)
+#define WL_AMPDU(fmt, args...) WL_PRINT(WL_AMPDU_VAL, fmt, ##args)
+#define WL_FFPLD(fmt, args...) WL_PRINT(WL_FFPLD_VAL, fmt, ##args)
#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
@@ -44,35 +49,50 @@ extern u32 wl_msg_level;
extern u32 wl_ampdu_dbg;
-#define WL_AMPDU_UPDN(args) do {if (wl_ampdu_dbg & WL_AMPDU_UPDN_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_RX(args) do {if (wl_ampdu_dbg & WL_AMPDU_RX_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_ERR(args) do {if (wl_ampdu_dbg & WL_AMPDU_ERR_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_TX(args) do {if (wl_ampdu_dbg & WL_AMPDU_TX_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_CTL(args) do {if (wl_ampdu_dbg & WL_AMPDU_CTL_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HW(args) do {if (wl_ampdu_dbg & WL_AMPDU_HW_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HWTXS(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HWDBG(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWDBG_VAL) {WL_AMPDU(args); } } while (0)
+#define WL_AMPDU_PRINT(level, fmt, args...) \
+do { \
+ if (wl_ampdu_dbg & level) { \
+ WL_AMPDU(fmt, ##args); \
+ } \
+} while (0)
+
+#define WL_AMPDU_UPDN(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_UPDN_VAL, fmt, ##args)
+#define WL_AMPDU_RX(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_RX_VAL, fmt, ##args)
+#define WL_AMPDU_ERR(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_ERR_VAL, fmt, ##args)
+#define WL_AMPDU_TX(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_TX_VAL, fmt, ##args)
+#define WL_AMPDU_CTL(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_CTL_VAL, fmt, ##args)
+#define WL_AMPDU_HW(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_HW_VAL, fmt, ##args)
+#define WL_AMPDU_HWTXS(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_HWTXS_VAL, fmt, ##args)
+#define WL_AMPDU_HWDBG(fmt, args...) \
+ WL_AMPDU_PRINT(WL_AMPDU_HWDBG_VAL, fmt, ##args)
#define WL_AMPDU_ERR_ON() (wl_ampdu_dbg & WL_AMPDU_ERR_VAL)
#define WL_AMPDU_HW_ON() (wl_ampdu_dbg & WL_AMPDU_HW_VAL)
#define WL_AMPDU_HWTXS_ON() (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL)
#else /* BCMDBG */
-#define WL_ERROR(args)
-#define WL_TRACE(args)
-#define WL_AMPDU(args)
-#define WL_FFPLD(args)
+#define WL_ERROR(fmt, args...) no_printk(fmt, ##args)
+#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU(fmt, args...) no_printk(fmt, ##args)
+#define WL_FFPLD(fmt, args...) no_printk(fmt, ##args)
#define WL_ERROR_ON() 0
-#define WL_AMPDU_UPDN(args)
-#define WL_AMPDU_RX(args)
-#define WL_AMPDU_ERR(args)
-#define WL_AMPDU_TX(args)
-#define WL_AMPDU_CTL(args)
-#define WL_AMPDU_HW(args)
-#define WL_AMPDU_HWTXS(args)
-#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_UPDN(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_RX(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_ERR(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_TX(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_CTL(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_HW(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_HWTXS(fmt, args...) no_printk(fmt, ##args)
+#define WL_AMPDU_HWDBG(fmt, args...) no_printk(fmt, ##args)
#define WL_AMPDU_ERR_ON() 0
#define WL_AMPDU_HW_ON() 0
#define WL_AMPDU_HWTXS_ON() 0
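
The wl_dbg.h rewrite above changes the calling convention of every debug macro:
the old statement-style form takes a parenthesised argument list,
WL_ERROR(("fmt", args)), while the new macros are variadic and printf-like,
WL_ERROR("fmt", args) — which is what drives the many one-line call-site edits
in wl_mac80211.c below. In the !BCMDBG case the kernel's no_printk() discards
the arguments while still letting the compiler type-check the format string.
A userspace sketch of the same shape (stand-in names only, not the driver's
headers):

    #include <stdio.h>

    #define BCMDBG                  /* remove to exercise the no-op stub path */

    unsigned int wl_msg_level = 0x1;
    #define WL_ERROR_VAL 0x1

    /* stand-in for the kernel's no_printk(): no output, format checking kept */
    __attribute__((format(printf, 1, 2)))
    int no_printk_stub(const char *fmt, ...)
    {
            (void)fmt;
            return 0;
    }

    #ifdef BCMDBG
    #define WL_ERROR(fmt, args...)                  \
    do {                                            \
            if (wl_msg_level & WL_ERROR_VAL)        \
                    printf(fmt, ##args);            \
    } while (0)
    #else
    #define WL_ERROR(fmt, args...) no_printk_stub(fmt, ##args)
    #endif

    int main(void)
    {
            int err = -5;

            /* old call style:  WL_ERROR(("request_irq() failed: %d\n", err)); */
            WL_ERROR("request_irq() failed: %d\n", err);    /* new call style */
            return 0;
    }
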
diff --git a/drivers/staging/brcm80211/sys/wl_export.h b/drivers/staging/brcm80211/sys/wl_export.h
index 08442f8e84e0..aa8b5a3ed633 100644
--- a/drivers/staging/brcm80211/sys/wl_export.h
+++ b/drivers/staging/brcm80211/sys/wl_export.h
@@ -45,10 +45,10 @@ extern void wl_add_timer(struct wl_info *wl, struct wl_timer *timer, uint ms,
int periodic);
extern bool wl_del_timer(struct wl_info *wl, struct wl_timer *timer);
-extern uint wl_buf_to_pktcopy(osl_t *osh, void *p, unsigned char *buf, int len,
- uint offset);
-extern void *wl_get_pktbuffer(osl_t *osh, int len);
-extern int wl_set_pktlen(osl_t *osh, void *p, int len);
+extern uint wl_buf_to_pktcopy(struct osl_info *osh, void *p, unsigned char *buf,
+ int len, uint offset);
+extern void *wl_get_pktbuffer(struct osl_info *osh, int len);
+extern int wl_set_pktlen(struct osl_info *osh, void *p, int len);
#define wl_sort_bsslist(a, b) false
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index d060377629ac..cd8392badff0 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -21,72 +21,36 @@
#include <linux/string.h>
#include <linux/pci_ids.h>
#include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
#include <osl.h>
#define WLC_MAXBSSCFG 1 /* single BSS configs */
#include <wlc_cfg.h>
#include <net/mac80211.h>
-#include <epivers.h>
-#ifndef WLC_HIGH_ONLY
#include <phy_version.h>
-#endif
#include <bcmutils.h>
#include <pcicfg.h>
#include <wlioctl.h>
#include <wlc_key.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <wlc_channel.h>
#include <wlc_pub.h>
#include <wlc_scb.h>
#include <wl_dbg.h>
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#endif
#include <wl_export.h>
-#ifdef WLC_HIGH_ONLY
-#include "dbus.h"
-#include "bcm_rpc_tp.h"
-#include "bcm_rpc.h"
-#include "bcm_xdr.h"
-#include "wlc_rpc.h"
-#endif
#include <wl_mac80211.h>
#include <linux/firmware.h>
-#ifndef WLC_HIGH_ONLY
#include <wl_ucode.h>
#include <d11ucode_ext.h>
-#endif
-#ifdef BCMSDIO
-extern struct device *sdiommc_dev;
-#endif
-
-extern void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg,
- bool suspend);
-bool wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw);
-void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc);
-void wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
- const struct ether_addr *addr);
static void wl_timer(unsigned long data);
static void _wl_timer(wl_timer_t *t);
-#ifdef WLC_HIGH_ONLY
-#define RPCQ_LOCK(_wl, _flags) spin_lock_irqsave(&(_wl)->rpcq_lock, (_flags))
-#define RPCQ_UNLOCK(_wl, _flags) spin_unlock_irqrestore(&(_wl)->rpcq_lock, (_flags))
-#define TXQ_LOCK(_wl, _flags) spin_lock_irqsave(&(_wl)->txq_lock, (_flags))
-#define TXQ_UNLOCK(_wl, _flags) spin_unlock_irqrestore(&(_wl)->txq_lock, (_flags))
-static void wl_rpc_down(void *wlh);
-static void wl_rpcq_free(wl_info_t *wl);
-static void wl_rpcq_dispatch(struct wl_task *task);
-static void wl_rpc_dispatch_schedule(void *ctx, struct rpc_buf *buf);
-static void wl_start_txqwork(struct wl_task *task);
-static void wl_txq_free(wl_info_t *wl);
-static void wl_timer_task(wl_task_t *task);
-static int wl_schedule_task(wl_info_t *wl, void (*fn) (struct wl_task *),
- void *context);
-#endif /* WLC_HIGH_ONLY */
static int ieee_hw_init(struct ieee80211_hw *hw);
static int ieee_hw_rate_init(struct ieee80211_hw *hw);
@@ -134,16 +98,14 @@ struct ieee80211_tkip_data {
u8 rx_hdr[16], tx_hdr[16];
};
-#ifndef WLC_HIGH_ONLY
-#define WL_DEV_IF(dev) ((wl_if_t *)netdev_priv(dev))
-#define WL_INFO(dev) ((wl_info_t *)(WL_DEV_IF(dev)->wl)) /* points to wl */
-static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev);
-static void wl_release_fw(wl_info_t *wl);
-#endif
+#define WL_DEV_IF(dev) ((struct wl_if *)netdev_priv(dev))
+#define WL_INFO(dev) ((struct wl_info *)(WL_DEV_IF(dev)->wl))
+static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
+static void wl_release_fw(struct wl_info *wl);
/* local prototypes */
-static int wl_start(struct sk_buff *skb, wl_info_t *wl);
-static int wl_start_int(wl_info_t *wl, struct ieee80211_hw *hw,
+static int wl_start(struct sk_buff *skb, struct wl_info *wl);
+static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
struct sk_buff *skb);
static void wl_dpc(unsigned long data);
@@ -152,7 +114,6 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-#ifndef BCMSDIO
/* recognized PCI IDs */
static struct pci_device_id wl_id_table[] = {
{PCI_VENDOR_ID_BROADCOM, 0x4357, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 43225 2G */
@@ -163,55 +124,18 @@ static struct pci_device_id wl_id_table[] = {
MODULE_DEVICE_TABLE(pci, wl_id_table);
static void wl_remove(struct pci_dev *pdev);
-#endif /* !BCMSDIO */
-#ifdef BCMSDIO
-static uint sd_drivestrength = 6;
-module_param(sd_drivestrength, uint, 0);
-#endif
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
module_param(msglevel, int, 0);
-#ifndef WLC_HIGH_ONLY
static int phymsglevel = 0xdeadbeef;
module_param(phymsglevel, int, 0);
-#endif /* WLC_HIGH_ONLY */
#endif /* BCMDBG */
-static int oneonly;
-module_param(oneonly, int, 0);
-
-static int piomode;
-module_param(piomode, int, 0);
-
-static int instance_base; /* Starting instance number */
-module_param(instance_base, int, 0);
-
-#if defined(BCMDBG)
-static char *macaddr;
-module_param(macaddr, charp, S_IRUGO);
-#endif
-
-static int nompc = 1;
-module_param(nompc, int, 0);
-
-static char name[IFNAMSIZ] = "eth%d";
-module_param_string(name, name, IFNAMSIZ, 0);
-
-#ifndef SRCBASE
-#define SRCBASE "."
-#endif
-
-#define WL_MAGIC 0xdeadbeef
-
#define HW_TO_WL(hw) (hw->priv)
#define WL_TO_HW(wl) (wl->pub->ieee_hw)
-#ifdef WLC_HIGH_ONLY
-static int wl_ops_tx_nl(struct ieee80211_hw *hw, struct sk_buff *skb);
-#else
static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-#endif
static int wl_ops_start(struct ieee80211_hw *hw);
static void wl_ops_stop(struct ieee80211_hw *hw);
static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -249,28 +173,13 @@ static int wl_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-#ifdef WLC_HIGH_ONLY
-static int wl_ops_tx_nl(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- int status;
- wl_info_t *wl = hw->priv;
- if (!wl->pub->up) {
- WL_ERROR(("ops->tx called while down\n"));
- status = -ENETDOWN;
- goto done;
- }
- status = wl_start(skb, wl);
- done:
- return status;
-}
-#else
static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
int status;
- wl_info_t *wl = hw->priv;
+ struct wl_info *wl = hw->priv;
WL_LOCK(wl);
if (!wl->pub->up) {
- WL_ERROR(("ops->tx called while down\n"));
+ WL_ERROR("ops->tx called while down\n");
status = -ENETDOWN;
goto done;
}
@@ -279,13 +188,14 @@ static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
WL_UNLOCK(wl);
return status;
}
-#endif /* WLC_HIGH_ONLY */
static int wl_ops_start(struct ieee80211_hw *hw)
{
- wl_info_t *wl = hw->priv;
- /* struct ieee80211_channel *curchan = hw->conf.channel; */
- WL_NONE(("%s : Initial channel: %d\n", __func__, curchan->hw_value));
+ struct wl_info *wl = hw->priv;
+ /*
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ WL_NONE("%s : Initial channel: %d\n", __func__, curchan->hw_value);
+ */
WL_LOCK(wl);
ieee80211_wake_queues(hw);
@@ -296,20 +206,17 @@ static int wl_ops_start(struct ieee80211_hw *hw)
static void wl_ops_stop(struct ieee80211_hw *hw)
{
- wl_info_t *wl = hw->priv;
+ struct wl_info *wl = hw->priv;
ASSERT(wl);
WL_LOCK(wl);
- wl_down(wl);
ieee80211_stop_queues(hw);
WL_UNLOCK(wl);
-
- return;
}
static int
wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- wl_info_t *wl;
+ struct wl_info *wl;
int err;
/* Just STA for now */
@@ -318,8 +225,8 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC) {
- WL_ERROR(("%s: Attempt to add type %d, only STA for now\n",
- __func__, vif->type));
+ WL_ERROR("%s: Attempt to add type %d, only STA for now\n",
+ __func__, vif->type);
return -EOPNOTSUPP;
}
@@ -329,34 +236,38 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
WL_UNLOCK(wl);
if (err != 0)
- WL_ERROR(("%s: wl_up() returned %d\n", __func__, err));
+ WL_ERROR("%s: wl_up() returned %d\n", __func__, err);
return err;
}
static void
wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- return;
+ struct wl_info *wl;
+
+ wl = HW_TO_WL(hw);
+
+ /* put driver in down state */
+ WL_LOCK(wl);
+ wl_down(wl);
+ WL_UNLOCK(wl);
}
static int
ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
enum nl80211_channel_type type)
{
- wl_info_t *wl = HW_TO_WL(hw);
+ struct wl_info *wl = HW_TO_WL(hw);
int err = 0;
switch (type) {
case NL80211_CHAN_HT20:
case NL80211_CHAN_NO_HT:
- WL_LOCK(wl);
err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
- WL_UNLOCK(wl);
break;
case NL80211_CHAN_HT40MINUS:
case NL80211_CHAN_HT40PLUS:
- WL_ERROR(("%s: Need to implement 40 Mhz Channels!\n",
- __func__));
+ WL_ERROR("%s: Need to implement 40 Mhz Channels!\n", __func__);
break;
}
@@ -368,17 +279,18 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
- wl_info_t *wl = HW_TO_WL(hw);
+ struct wl_info *wl = HW_TO_WL(hw);
int err = 0;
int new_int;
+ WL_LOCK(wl);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
- WL_NONE(("%s: Setting listen interval to %d\n",
- __func__, conf->listen_interval));
+ WL_NONE("%s: Setting listen interval to %d\n",
+ __func__, conf->listen_interval);
if (wlc_iovar_setint
(wl->wlc, "bcn_li_bcn", conf->listen_interval)) {
- WL_ERROR(("%s: Error setting listen_interval\n",
- __func__));
+ WL_ERROR("%s: Error setting listen_interval\n",
+ __func__);
err = -EIO;
goto config_out;
}
@@ -386,47 +298,49 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
ASSERT(new_int == conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- WL_NONE(("Need to set monitor mode\n"));
+ WL_NONE("Need to set monitor mode\n");
if (changed & IEEE80211_CONF_CHANGE_PS)
- WL_NONE(("Need to set Power-save mode\n"));
+ WL_NONE("Need to set Power-save mode\n");
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- WL_NONE(("%s: Setting tx power to %d dbm\n", __func__,
- conf->power_level));
+ WL_NONE("%s: Setting tx power to %d dbm\n",
+ __func__, conf->power_level);
if (wlc_iovar_setint
(wl->wlc, "qtxpower", conf->power_level * 4)) {
- WL_ERROR(("%s: Error setting power_level\n", __func__));
+ WL_ERROR("%s: Error setting power_level\n", __func__);
err = -EIO;
goto config_out;
}
wlc_iovar_getint(wl->wlc, "qtxpower", &new_int);
if (new_int != (conf->power_level * 4))
- WL_ERROR(("%s: Power level req != actual, %d %d\n",
- __func__, conf->power_level * 4, new_int));
+ WL_ERROR("%s: Power level req != actual, %d %d\n",
+ __func__, conf->power_level * 4, new_int);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
err = ieee_set_channel(hw, conf->channel, conf->channel_type);
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- WL_NONE(("%s: srl %d, lrl %d\n", __func__,
- conf->short_frame_max_tx_count,
- conf->long_frame_max_tx_count));
+ WL_NONE("%s: srl %d, lrl %d\n",
+ __func__,
+ conf->short_frame_max_tx_count,
+ conf->long_frame_max_tx_count);
if (wlc_set
(wl->wlc, WLC_SET_SRL,
conf->short_frame_max_tx_count) < 0) {
- WL_ERROR(("%s: Error setting srl\n", __func__));
+ WL_ERROR("%s: Error setting srl\n", __func__);
err = -EIO;
goto config_out;
}
if (wlc_set(wl->wlc, WLC_SET_LRL, conf->long_frame_max_tx_count)
< 0) {
- WL_ERROR(("%s: Error setting lrl\n", __func__));
+ WL_ERROR("%s: Error setting lrl\n", __func__);
err = -EIO;
goto config_out;
}
}
config_out:
+ WL_UNLOCK(wl);
return err;
}
@@ -435,32 +349,29 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- wl_info_t *wl = HW_TO_WL(hw);
+ struct wl_info *wl = HW_TO_WL(hw);
int val;
-#ifdef WLC_HIGH_ONLY
- WL_LOCK(wl);
-#endif
if (changed & BSS_CHANGED_ASSOC) {
- WL_ERROR(("Associated:\t%s\n", info->assoc ? "True" : "False"));
+ WL_ERROR("Associated:\t%s\n", info->assoc ? "True" : "False");
/* association status changed (associated/disassociated)
* also implies a change in the AID.
*/
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- WL_NONE(("Use_cts_prot:\t%s Implement me\n",
- info->use_cts_prot ? "True" : "False"));
+ WL_NONE("Use_cts_prot:\t%s Implement me\n",
+ info->use_cts_prot ? "True" : "False");
/* CTS protection changed */
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- WL_NONE(("Short preamble:\t%s Implement me\n",
- info->use_short_preamble ? "True" : "False"));
+ WL_NONE("Short preamble:\t%s Implement me\n",
+ info->use_short_preamble ? "True" : "False");
/* preamble changed */
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- WL_NONE(("Changing short slot:\t%s\n",
- info->use_short_slot ? "True" : "False"));
+ WL_NONE("Changing short slot:\t%s\n",
+ info->use_short_slot ? "True" : "False");
if (info->use_short_slot)
val = 1;
else
@@ -470,39 +381,36 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- WL_NONE(("%s: HT mode - Implement me\n", __func__));
+ WL_NONE("%s: HT mode - Implement me\n", __func__);
/* 802.11n parameters changed */
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- WL_NONE(("Need to change Basic Rates:\t0x%x! Implement me\n",
- (u32) info->basic_rates));
+ WL_NONE("Need to change Basic Rates:\t0x%x! Implement me\n",
+ (u32) info->basic_rates);
/* Basic rateset changed */
}
if (changed & BSS_CHANGED_BEACON_INT) {
- WL_NONE(("Beacon Interval:\t%d Implement me\n",
- info->beacon_int));
+ WL_NONE("Beacon Interval:\t%d Implement me\n",
+ info->beacon_int);
/* Beacon interval changed */
}
if (changed & BSS_CHANGED_BSSID) {
- WL_NONE(("new BSSID:\taid %d bss:%pM\n", info->aid,
- info->bssid));
+ WL_NONE("new BSSID:\taid %d bss:%pM\n",
+ info->aid, info->bssid);
/* BSSID changed, for whatever reason (IBSS and managed mode) */
/* FIXME: need to store bssid in bsscfg */
wlc_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET,
(struct ether_addr *)info->bssid);
}
if (changed & BSS_CHANGED_BEACON) {
- WL_ERROR(("BSS_CHANGED_BEACON\n"));
+ WL_ERROR("BSS_CHANGED_BEACON\n");
/* Beacon data changed, retrieve new beacon (beaconing modes) */
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- WL_ERROR(("Beacon enabled:\t%s\n",
- info->enable_beacon ? "True" : "False"));
+ WL_ERROR("Beacon enabled:\t%s\n",
+ info->enable_beacon ? "True" : "False");
/* Beaconing should be enabled/disabled (beaconing modes) */
}
-#ifdef WLC_HIGH_ONLY
- WL_UNLOCK(wl);
-#endif
return;
}
@@ -511,27 +419,24 @@ wl_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
-#ifndef WLC_HIGH_ONLY
- wl_info_t *wl = hw->priv;
-#endif
+ struct wl_info *wl = hw->priv;
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
if (changed_flags & FIF_PROMISC_IN_BSS)
- WL_ERROR(("FIF_PROMISC_IN_BSS\n"));
+ WL_ERROR("FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- WL_ERROR(("FIF_ALLMULTI\n"));
+ WL_ERROR("FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- WL_ERROR(("FIF_FCSFAIL\n"));
+ WL_ERROR("FIF_FCSFAIL\n");
if (changed_flags & FIF_PLCPFAIL)
- WL_ERROR(("FIF_PLCPFAIL\n"));
+ WL_ERROR("FIF_PLCPFAIL\n");
if (changed_flags & FIF_CONTROL)
- WL_ERROR(("FIF_CONTROL\n"));
+ WL_ERROR("FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- WL_ERROR(("FIF_OTHER_BSS\n"));
+ WL_ERROR("FIF_OTHER_BSS\n");
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- WL_NONE(("FIF_BCN_PRBRESP_PROMISC\n"));
-#ifndef WLC_HIGH_ONLY
+ WL_NONE("FIF_BCN_PRBRESP_PROMISC\n");
WL_LOCK(wl);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
@@ -541,7 +446,6 @@ wl_ops_configure_filter(struct ieee80211_hw *hw,
wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
}
WL_UNLOCK(wl);
-#endif
}
return;
}
@@ -549,25 +453,33 @@ wl_ops_configure_filter(struct ieee80211_hw *hw,
static int
wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
- WL_ERROR(("%s: Enter\n", __func__));
+ WL_ERROR("%s: Enter\n", __func__);
return 0;
}
static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
{
- WL_NONE(("Scan Start\n"));
+ struct wl_info *wl = hw->priv;
+ WL_NONE("Scan Start\n");
+ WL_LOCK(wl);
+ wlc_scan_start(wl->wlc);
+ WL_UNLOCK(wl);
return;
}
static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
{
- WL_NONE(("Scan Complete\n"));
+ struct wl_info *wl = hw->priv;
+ WL_NONE("Scan Complete\n");
+ WL_LOCK(wl);
+ wlc_scan_stop(wl->wlc);
+ WL_UNLOCK(wl);
return;
}
static void wl_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
- WL_ERROR(("%s: Enter\n", __func__));
+ WL_ERROR("%s: Enter\n", __func__);
return;
}
@@ -575,13 +487,13 @@ static int
wl_ops_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- WL_ERROR(("%s: Enter\n", __func__));
+ WL_ERROR("%s: Enter\n", __func__);
return 0;
}
static int wl_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- WL_ERROR(("%s: Enter\n", __func__));
+ WL_ERROR("%s: Enter\n", __func__);
return 0;
}
@@ -589,10 +501,10 @@ static void
wl_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
- WL_NONE(("%s: Enter\n", __func__));
+ WL_NONE("%s: Enter\n", __func__);
switch (cmd) {
default:
- WL_ERROR(("%s: Uknown cmd = %d\n", __func__, cmd));
+ WL_ERROR("%s: Unknown cmd = %d\n", __func__, cmd);
break;
}
return;
@@ -602,11 +514,11 @@ static int
wl_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- wl_info_t *wl = hw->priv;
+ struct wl_info *wl = hw->priv;
- WL_NONE(("%s: Enter (WME config)\n", __func__));
- WL_NONE(("queue %d, txop %d, cwmin %d, cwmax %d, aifs %d\n", queue,
- params->txop, params->cw_min, params->cw_max, params->aifs));
+ WL_NONE("%s: Enter (WME config)\n", __func__);
+ WL_NONE("queue %d, txop %d, cwmin %d, cwmax %d, aifs %d\n", queue,
+ params->txop, params->cw_min, params->cw_max, params->aifs);
WL_LOCK(wl);
wlc_wme_setparams(wl->wlc, queue, (void *)params, true);
@@ -617,7 +529,7 @@ wl_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
static u64 wl_ops_get_tsf(struct ieee80211_hw *hw)
{
- WL_ERROR(("%s: Enter\n", __func__));
+ WL_ERROR("%s: Enter\n", __func__);
return 0;
}
@@ -628,11 +540,11 @@ wl_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct scb *scb;
int i;
- wl_info_t *wl = hw->priv;
+ struct wl_info *wl = hw->priv;
/* Init the scb */
scb = (struct scb *)sta->drv_priv;
- bzero(scb, sizeof(struct scb));
+ memset(scb, 0, sizeof(struct scb));
for (i = 0; i < NUMPRIO; i++)
scb->seqctl[i] = 0xFFFF;
scb->seqctl_nonqos = 0xFFFF;
@@ -641,20 +553,12 @@ wl_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_scb = scb;
wl->pub->global_ampdu = &(scb->scb_ampdu);
wl->pub->global_ampdu->scb = scb;
-#ifdef WLC_HIGH_ONLY
- wl->pub->global_ampdu->max_pdu = AMPDU_NUM_MPDU;
-#else
wl->pub->global_ampdu->max_pdu = 16;
-#endif
pktq_init(&scb->scb_ampdu.txq, AMPDU_MAX_SCB_TID,
AMPDU_MAX_SCB_TID * PKTQ_LEN_DEFAULT);
sta->ht_cap.ht_supported = true;
-#ifdef WLC_HIGH_ONLY
- sta->ht_cap.ampdu_factor = AMPDU_RX_FACTOR_16K;
-#else
sta->ht_cap.ampdu_factor = AMPDU_RX_FACTOR_64K;
-#endif
sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
@@ -668,7 +572,7 @@ static int
wl_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- WL_NONE(("%s: Enter\n", __func__));
+ WL_NONE("%s: Enter\n", __func__);
return 0;
}
@@ -681,19 +585,19 @@ wl_ampdu_action(struct ieee80211_hw *hw,
#if defined(BCMDBG)
struct scb *scb = (struct scb *)sta->drv_priv;
#endif
- wl_info_t *wl = hw->priv;
+ struct wl_info *wl = hw->priv;
ASSERT(scb->magic == SCB_MAGIC);
switch (action) {
case IEEE80211_AMPDU_RX_START:
- WL_NONE(("%s: action = IEEE80211_AMPDU_RX_START\n", __func__));
+ WL_NONE("%s: action = IEEE80211_AMPDU_RX_START\n", __func__);
break;
case IEEE80211_AMPDU_RX_STOP:
- WL_NONE(("%s: action = IEEE80211_AMPDU_RX_STOP\n", __func__));
+ WL_NONE("%s: action = IEEE80211_AMPDU_RX_STOP\n", __func__);
break;
case IEEE80211_AMPDU_TX_START:
if (!wlc_aggregatable(wl->wlc, tid)) {
- /* WL_ERROR(("START: tid %d is not agg' able, return FAILURE to stack\n", tid)); */
+ /* WL_ERROR("START: tid %d is not agg' able, return FAILURE to stack\n", tid); */
return -1;
}
/* XXX: Use the starting sequence number provided ... */
@@ -707,22 +611,18 @@ wl_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
/* Not sure what to do here */
/* Power save wakeup */
- WL_NONE(("%s: action = IEEE80211_AMPDU_TX_OPERATIONAL\n",
- __func__));
+ WL_NONE("%s: action = IEEE80211_AMPDU_TX_OPERATIONAL\n",
+ __func__);
break;
default:
- WL_ERROR(("%s: Invalid command, ignoring\n", __func__));
+ WL_ERROR("%s: Invalid command, ignoring\n", __func__);
}
return 0;
}
static const struct ieee80211_ops wl_ops = {
-#ifdef WLC_HIGH_ONLY
- .tx = wl_ops_tx_nl,
-#else
.tx = wl_ops_tx,
-#endif
.start = wl_ops_start,
.stop = wl_ops_stop,
.add_interface = wl_ops_add_interface,
@@ -744,10 +644,10 @@ static const struct ieee80211_ops wl_ops = {
.ampdu_action = wl_ampdu_action,
};
-static int wl_set_hint(wl_info_t *wl, char *abbrev)
+static int wl_set_hint(struct wl_info *wl, char *abbrev)
{
- WL_ERROR(("%s: Sending country code %c%c to MAC80211\n", __func__,
- abbrev[0], abbrev[1]));
+ WL_ERROR("%s: Sending country code %c%c to MAC80211\n",
+ __func__, abbrev[0], abbrev[1]);
return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
}
@@ -762,117 +662,61 @@ static int wl_set_hint(wl_info_t *wl, char *abbrev)
* a warning that this function is defined but not used if we declare
* it as static.
*/
-static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
+static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
uint bustype, void *btparam, uint irq)
{
- wl_info_t *wl;
- osl_t *osh;
+ struct wl_info *wl;
+ struct osl_info *osh;
int unit, err;
unsigned long base_addr;
struct ieee80211_hw *hw;
u8 perm[ETH_ALEN];
- unit = wl_found + instance_base;
+ unit = wl_found;
err = 0;
if (unit < 0) {
- WL_ERROR(("wl%d: unit number overflow, exiting\n", unit));
+ WL_ERROR("wl%d: unit number overflow, exiting\n", unit);
return NULL;
}
- if (oneonly && (unit != instance_base)) {
- WL_ERROR(("wl%d: wl_attach: oneonly is set, exiting\n", unit));
- return NULL;
- }
-
- /* Requires pkttag feature */
- osh = osl_attach(btparam, bustype, true);
+ osh = osl_attach(btparam, bustype);
ASSERT(osh);
-#ifdef WLC_HIGH_ONLY
- hw = ieee80211_alloc_hw(sizeof(wl_info_t), &wl_ops);
- if (!hw) {
- WL_ERROR(("%s: ieee80211_alloc_hw failed\n", __func__));
- ASSERT(0);
- }
-
- bzero(hw->priv, sizeof(*wl));
- wl = hw->priv;
-#else
/* allocate private info */
hw = pci_get_drvdata(btparam); /* btparam == pdev */
wl = hw->priv;
-#endif
ASSERT(wl);
- wl->magic = WL_MAGIC;
wl->osh = osh;
atomic_set(&wl->callbacks, 0);
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, wl_dpc, (unsigned long) wl);
-#ifdef WLC_HIGH_ONLY
- wl->rpc_th = bcm_rpc_tp_attach(osh, NULL);
- if (wl->rpc_th == NULL) {
- WL_ERROR(("wl%d: %s: bcm_rpc_tp_attach failed!\n", unit,
- __func__));
- goto fail;
- }
-
- wl->rpc = bcm_rpc_attach(NULL, osh, wl->rpc_th);
- if (wl->rpc == NULL) {
- WL_ERROR(("wl%d: %s: bcm_rpc_attach failed!\n", unit,
- __func__));
- goto fail;
- }
- /* init tx work queue for wl_start/send pkt; no need to destroy workitem */
- INIT_WORK(&wl->txq_task.work, (work_func_t) wl_start_txqwork);
- wl->txq_task.context = wl;
-#endif /* WLC_HIGH_ONLY */
-
-#ifdef BCMSDIO
- SET_IEEE80211_DEV(hw, sdiommc_dev);
-#endif
base_addr = regs;
if (bustype == PCI_BUS) {
- /* piomode can be overwritten by command argument */
- wl->piomode = piomode;
- WL_TRACE(("PCI/%s\n", wl->piomode ? "PIO" : "DMA"));
+ wl->piomode = false;
} else if (bustype == RPC_BUS) {
/* Do nothing */
} else {
bustype = PCI_BUS;
- WL_TRACE(("force to PCI\n"));
+ WL_TRACE("force to PCI\n");
}
wl->bcm_bustype = bustype;
-#ifdef WLC_HIGH_ONLY
- if (wl->bcm_bustype == RPC_BUS) {
- wl->regsva = (void *)0;
- btparam = wl->rpc;
- } else
-#endif
wl->regsva = ioremap_nocache(base_addr, PCI_BAR0_WINSZ);
if (wl->regsva == NULL) {
- WL_ERROR(("wl%d: ioremap() failed\n", unit));
+ WL_ERROR("wl%d: ioremap() failed\n", unit);
goto fail;
}
-#ifdef WLC_HIGH_ONLY
- spin_lock_init(&wl->rpcq_lock);
- spin_lock_init(&wl->txq_lock);
-
- sema_init(&wl->sem, 1);
-#else
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
-#endif
-#ifndef WLC_HIGH_ONLY
/* prepare ucode */
if (wl_request_fw(wl, (struct pci_dev *)btparam)) {
printf("%s: Failed to find firmware usually in %s\n",
@@ -881,17 +725,14 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
wl_remove((struct pci_dev *)btparam);
goto fail1;
}
-#endif
/* common load-time initialization */
wl->wlc = wlc_attach((void *)wl, vendor, device, unit, wl->piomode, osh,
wl->regsva, wl->bcm_bustype, btparam, &err);
-#ifndef WLC_HIGH_ONLY
wl_release_fw(wl);
-#endif
if (!wl->wlc) {
- printf("%s: %s wlc_attach() failed with code %d\n",
- KBUILD_MODNAME, EPI_VERSION_STR, err);
+ printf("%s: wlc_attach() failed with code %d\n",
+ KBUILD_MODNAME, err);
goto fail;
}
wl->pub = wlc_pub(wl->wlc);
@@ -900,52 +741,35 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
ASSERT(wl->pub->ieee_hw);
ASSERT(wl->pub->ieee_hw->priv == wl);
-#ifdef WLC_HIGH_ONLY
- REGOPSSET(osh, (osl_rreg_fn_t) wlc_reg_read,
- (osl_wreg_fn_t) wlc_reg_write, wl->wlc);
- wl->rpc_dispatch_ctx.rpc = wl->rpc;
- wl->rpc_dispatch_ctx.wlc = wl->wlc;
- bcm_rpc_rxcb_init(wl->rpc, wl, wl_rpc_dispatch_schedule, wl,
- wl_rpc_down, NULL, NULL);
-#endif /* WLC_HIGH_ONLY */
-
- if (nompc) {
- if (wlc_iovar_setint(wl->wlc, "mpc", 0)) {
- WL_ERROR(("wl%d: Error setting MPC variable to 0\n",
- unit));
- }
+
+ if (wlc_iovar_setint(wl->wlc, "mpc", 0)) {
+ WL_ERROR("wl%d: Error setting MPC variable to 0\n", unit);
}
-#ifdef BCMSDIO
- /* Set SDIO drive strength */
- wlc_iovar_setint(wl->wlc, "sd_drivestrength", sd_drivestrength);
-#endif
-#ifdef WLC_LOW
/* register our interrupt handler */
if (request_irq(irq, wl_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
- WL_ERROR(("wl%d: request_irq() failed\n", unit));
+ WL_ERROR("wl%d: request_irq() failed\n", unit);
goto fail;
}
wl->irq = irq;
-#endif /* WLC_LOW */
/* register module */
wlc_module_register(wl->pub, NULL, "linux", wl, NULL, wl_linux_watchdog,
NULL);
if (ieee_hw_init(hw)) {
- WL_ERROR(("wl%d: %s: ieee_hw_init failed!\n", unit, __func__));
+ WL_ERROR("wl%d: %s: ieee_hw_init failed!\n", unit, __func__);
goto fail;
}
- bcopy(&wl->pub->cur_etheraddr, perm, ETHER_ADDR_LEN);
+ bcopy(&wl->pub->cur_etheraddr, perm, ETH_ALEN);
ASSERT(is_valid_ether_addr(perm));
SET_IEEE80211_PERM_ADDR(hw, perm);
err = ieee80211_register_hw(hw);
if (err) {
- WL_ERROR(("%s: ieee80211_register_hw failed, status %d\n",
- __func__, err));
+ WL_ERROR("%s: ieee80211_register_hw failed, status %d\n",
+ __func__, err);
}
if (wl->pub->srom_ccode[0])
@@ -953,79 +777,26 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
else
err = wl_set_hint(wl, "US");
if (err) {
- WL_ERROR(("%s: regulatory_hint failed, status %d\n", __func__,
- err));
- }
-#ifndef WLC_HIGH_ONLY
- WL_ERROR(("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver "
- EPI_VERSION_STR " (" PHY_VERSION_STR ")", unit));
-#else
- WL_ERROR(("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver "
- EPI_VERSION_STR, unit));
-#endif
+ WL_ERROR("%s: regulatory_hint failed, status %d\n",
+ __func__, err);
+ }
+ WL_ERROR("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver (" PHY_VERSION_STR ")",
+ unit);
#ifdef BCMDBG
- printf(" (Compiled in " SRCBASE " at " __TIME__ " on " __DATE__ ")");
+ printf(" (Compiled at " __TIME__ " on " __DATE__ ")");
#endif /* BCMDBG */
printf("\n");
wl_found++;
return wl;
- fail:
+fail:
wl_free(wl);
fail1:
return NULL;
}
-#ifdef WLC_HIGH_ONLY
-static void *wl_dbus_probe_cb(void *arg, const char *desc, u32 bustype,
- u32 hdrlen)
-{
- wl_info_t *wl;
- WL_ERROR(("%s:\n", __func__));
-
- wl = wl_attach(BCM_DNGL_VID, BCM_DNGL_BDC_PID, (unsigned long) NULL, RPC_BUS,
- NULL, 0);
- if (!wl) {
- WL_ERROR(("%s: wl_attach failed\n", __func__));
- }
-
- /* This is later passed to wl_dbus_disconnect_cb */
- return wl;
-}
-
-static void wl_dbus_disconnect_cb(void *arg)
-{
- wl_info_t *wl = arg;
-
- WL_ERROR(("%s:\n", __func__));
-
- if (wl) {
-#ifdef WLC_HIGH_ONLY
- if (wl->pub->ieee_hw) {
- ieee80211_unregister_hw(wl->pub->ieee_hw);
- WL_ERROR(("%s: Back from down\n", __func__));
- }
- wlc_device_removed(wl->wlc);
- wlc_bmac_dngl_reboot(wl->rpc);
- bcm_rpc_down(wl->rpc);
-#endif
- WL_LOCK(wl);
- wl_down(wl);
- WL_UNLOCK(wl);
-#ifdef WLC_HIGH_ONLY
- if (wl->pub->ieee_hw) {
- ieee80211_free_hw(wl->pub->ieee_hw);
- WL_ERROR(("%s: Back from ieee80211_free_hw\n",
- __func__));
- wl->pub->ieee_hw = NULL;
- }
-#endif
- wl_free(wl);
- }
-}
-#endif /* WLC_HIGH_ONLY */
#define CHAN2GHZ(channel, freqency, chflags) { \
@@ -1163,29 +934,13 @@ static struct ieee80211_supported_band wl_band_2GHz_nphy = {
.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
-#ifdef WLC_HIGH_ONLY
- .ht_supported = true,
- .ampdu_factor = AMPDU_RX_FACTOR_16K,
-#else
.ht_supported = true,
.ampdu_factor = AMPDU_RX_FACTOR_64K,
-#endif
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
.mcs = {
/* placeholders for now */
-#ifdef WLC_HIGH_ONLY
- /*
- * rx_mask[0] = 0xff by default
- * rx_mask[1] = 0xff if number of rx chain >=2
- * rx_mask[2] = 0xff if number of rx chain >=3
- * rx_mask[4] = 1 if 40Mhz is supported
- */
- .rx_mask = {0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- .rx_highest = 72, /* max rate of single stream */
-#else
.rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
.rx_highest = 500,
-#endif
.tx_params = IEEE80211_HT_MCS_TX_DEFINED}
}
};
@@ -1212,7 +967,7 @@ static struct ieee80211_supported_band wl_band_5GHz_nphy = {
static int ieee_hw_rate_init(struct ieee80211_hw *hw)
{
- wl_info_t *wl = HW_TO_WL(hw);
+ struct wl_info *wl = HW_TO_WL(hw);
int has_5g;
char phy_list[4];
@@ -1222,20 +977,16 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
if (wlc_get(wl->wlc, WLC_GET_PHYLIST, (int *)&phy_list) < 0) {
- WL_ERROR(("Phy list failed\n"));
+ WL_ERROR("Phy list failed\n");
}
- WL_NONE(("%s: phylist = %c\n", __func__, phy_list[0]));
+ WL_NONE("%s: phylist = %c\n", __func__, phy_list[0]);
-#ifndef WLC_HIGH_ONLY
if (phy_list[0] == 'n' || phy_list[0] == 'c') {
if (phy_list[0] == 'c') {
/* Single stream */
wl_band_2GHz_nphy.ht_cap.mcs.rx_mask[1] = 0;
wl_band_2GHz_nphy.ht_cap.mcs.rx_highest = 72;
}
-#else
- if (phy_list[0] == 's') {
-#endif
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl_band_2GHz_nphy;
} else {
BUG();
@@ -1245,11 +996,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
/* Assume all bands use the same phy. True for 11n devices. */
if (NBANDS_PUB(wl->pub) > 1) {
has_5g++;
-#ifndef WLC_HIGH_ONLY
if (phy_list[0] == 'n' || phy_list[0] == 'c') {
-#else
- if (phy_list[0] == 's') {
-#endif
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&wl_band_5GHz_nphy;
} else {
@@ -1257,7 +1004,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
}
}
- WL_NONE(("%s: 2ghz = %d, 5ghz = %d\n", __func__, 1, has_5g));
+ WL_NONE("%s: 2ghz = %d, 5ghz = %d\n", __func__, 1, has_5g);
return 0;
}
@@ -1288,7 +1035,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
return ieee_hw_rate_init(hw);
}
-#ifndef BCMSDIO
/**
* determines if a device is a WL device, and if so, attaches it.
*
@@ -1300,15 +1046,15 @@ int __devinit
wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
- wl_info_t *wl;
+ struct wl_info *wl;
struct ieee80211_hw *hw;
u32 val;
ASSERT(pdev);
- WL_TRACE(("%s: bus %d slot %d func %d irq %d\n", __func__,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq));
+ WL_TRACE("%s: bus %d slot %d func %d irq %d\n",
+ __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn), pdev->irq);
if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
(((pdev->device & 0xff00) != 0x4300) &&
@@ -1318,9 +1064,9 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_enable_device(pdev);
if (rc) {
- WL_ERROR(("%s: Cannot enable device %d-%d_%d\n", __func__,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn)));
+ WL_ERROR("%s: Cannot enable device %d-%d_%d\n",
+ __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
return -ENODEV;
}
pci_set_master(pdev);
@@ -1329,9 +1075,9 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- hw = ieee80211_alloc_hw(sizeof(wl_info_t), &wl_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct wl_info), &wl_ops);
if (!hw) {
- WL_ERROR(("%s: ieee80211_alloc_hw failed\n", __func__));
+ WL_ERROR("%s: ieee80211_alloc_hw failed\n", __func__);
rc = -ENOMEM;
goto err_1;
}
@@ -1340,58 +1086,58 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, hw);
- bzero(hw->priv, sizeof(*wl));
+ memset(hw->priv, 0, sizeof(*wl));
wl = wl_attach(pdev->vendor, pdev->device, pci_resource_start(pdev, 0),
PCI_BUS, pdev, pdev->irq);
if (!wl) {
- WL_ERROR(("%s: %s: wl_attach failed!\n",
- KBUILD_MODNAME, __func__));
+ WL_ERROR("%s: %s: wl_attach failed!\n",
+ KBUILD_MODNAME, __func__);
return -ENODEV;
}
return 0;
err_1:
- WL_ERROR(("%s: err_1: Major hoarkage\n", __func__));
+ WL_ERROR("%s: err_1: Major hoarkage\n", __func__);
return 0;
}
-#ifdef LINUXSTA_PS
static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
{
- wl_info_t *wl;
+ struct wl_info *wl;
struct ieee80211_hw *hw;
- WL_TRACE(("wl: wl_suspend\n"));
+ WL_TRACE("wl: wl_suspend\n");
hw = pci_get_drvdata(pdev);
wl = HW_TO_WL(hw);
if (!wl) {
- WL_ERROR(("wl: wl_suspend: pci_get_drvdata failed\n"));
+ WL_ERROR("wl: wl_suspend: pci_get_drvdata failed\n");
return -ENODEV;
}
+ /* only need to flag hw is down for proper resume */
WL_LOCK(wl);
- wl_down(wl);
wl->pub->hw_up = false;
WL_UNLOCK(wl);
- pci_save_state(pdev, wl->pci_psstate);
+
+ pci_save_state(pdev);
pci_disable_device(pdev);
return pci_set_power_state(pdev, PCI_D3hot);
}
static int wl_resume(struct pci_dev *pdev)
{
- wl_info_t *wl;
+ struct wl_info *wl;
struct ieee80211_hw *hw;
int err = 0;
u32 val;
- WL_TRACE(("wl: wl_resume\n"));
+ WL_TRACE("wl: wl_resume\n");
hw = pci_get_drvdata(pdev);
wl = HW_TO_WL(hw);
if (!wl) {
- WL_ERROR(("wl: wl_resume: pci_get_drvdata failed\n"));
+ WL_ERROR("wl: wl_resume: pci_get_drvdata failed\n");
return -ENODEV;
}
@@ -1399,7 +1145,7 @@ static int wl_resume(struct pci_dev *pdev)
if (err)
return err;
- pci_restore_state(pdev, wl->pci_psstate);
+ pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err)
@@ -1411,27 +1157,26 @@ static int wl_resume(struct pci_dev *pdev)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- WL_LOCK(wl);
- err = wl_up(wl);
- WL_UNLOCK(wl);
-
+ /*
+ * done. driver will be put in up state
+ * in wl_ops_add_interface() call.
+ */
return err;
}
-#endif /* LINUXSTA_PS */
static void wl_remove(struct pci_dev *pdev)
{
- wl_info_t *wl;
+ struct wl_info *wl;
struct ieee80211_hw *hw;
hw = pci_get_drvdata(pdev);
wl = HW_TO_WL(hw);
if (!wl) {
- WL_ERROR(("wl: wl_remove: pci_get_drvdata failed\n"));
+ WL_ERROR("wl: wl_remove: pci_get_drvdata failed\n");
return;
}
if (!wlc_chipmatch(pdev->vendor, pdev->device)) {
- WL_ERROR(("wl: wl_remove: wlc_chipmatch failed\n"));
+ WL_ERROR("wl: wl_remove: wlc_chipmatch failed\n");
return;
}
if (wl->wlc) {
@@ -1439,7 +1184,7 @@ static void wl_remove(struct pci_dev *pdev)
WL_LOCK(wl);
wl_down(wl);
WL_UNLOCK(wl);
- WL_NONE(("%s: Down\n", __func__));
+ WL_NONE("%s: Down\n", __func__);
}
pci_disable_device(pdev);
@@ -1450,16 +1195,13 @@ static void wl_remove(struct pci_dev *pdev)
}
static struct pci_driver wl_pci_driver = {
- .name = "brcm80211",
- .probe = wl_pci_probe,
-#ifdef LINUXSTA_PS
- .suspend = wl_suspend,
- .resume = wl_resume,
-#endif /* LINUXSTA_PS */
- .remove = __devexit_p(wl_remove),
- .id_table = wl_id_table,
+ .name = "brcm80211",
+ .probe = wl_pci_probe,
+ .suspend = wl_suspend,
+ .resume = wl_resume,
+ .remove = __devexit_p(wl_remove),
+ .id_table = wl_id_table,
};
-#endif /* !BCMSDIO */
/**
* This is the main entry point for the WL driver.
@@ -1480,7 +1222,6 @@ static int __init wl_module_init(void)
if (var)
wl_msg_level = simple_strtoul(var, NULL, 0);
}
-#ifndef WLC_HIGH_ONLY
{
extern u32 phyhal_msg_level;
@@ -1492,25 +1233,13 @@ static int __init wl_module_init(void)
phyhal_msg_level = simple_strtoul(var, NULL, 0);
}
}
-#endif /* WLC_HIGH_ONLY */
#endif /* BCMDBG */
-#ifndef BCMSDIO
error = pci_register_driver(&wl_pci_driver);
if (!error)
return 0;
-#endif /* !BCMSDIO */
-#ifdef WLC_HIGH_ONLY
- /* BMAC_NOTE: define hardcode number, why NODEVICE is ok ? */
- error =
- dbus_register(BCM_DNGL_VID, 0, wl_dbus_probe_cb,
- wl_dbus_disconnect_cb, NULL, NULL, NULL);
- if (error == DBUS_ERR_NODEVICE) {
- error = DBUS_OK;
- }
-#endif /* WLC_HIGH_ONLY */
return error;
}
@@ -1524,13 +1253,8 @@ static int __init wl_module_init(void)
*/
static void __exit wl_module_exit(void)
{
-#ifndef BCMSDIO
pci_unregister_driver(&wl_pci_driver);
-#endif /* !BCMSDIO */
-#ifdef WLC_HIGH_ONLY
- dbus_deregister();
-#endif /* WLC_HIGH_ONLY */
}
module_init(wl_module_init);
@@ -1543,19 +1267,17 @@ module_exit(wl_module_exit);
* by the wl parameter.
*
*/
-void wl_free(wl_info_t *wl)
+void wl_free(struct wl_info *wl)
{
wl_timer_t *t, *next;
- osl_t *osh;
+ struct osl_info *osh;
ASSERT(wl);
-#ifndef WLC_HIGH_ONLY
/* free ucode data */
if (wl->fw.fw_cnt)
wl_ucode_data_free();
if (wl->irq)
free_irq(wl->irq, wl);
-#endif
/* kill dpc */
tasklet_kill(&wl->tasklet);
@@ -1593,103 +1315,50 @@ void wl_free(wl_info_t *wl)
* unregister_netdev() calls get_stats() which may read chip registers
* so we cannot unmap the chip registers until after calling unregister_netdev() .
*/
- if (wl->regsva && BUSTYPE(wl->bcm_bustype) != SDIO_BUS &&
- BUSTYPE(wl->bcm_bustype) != JTAG_BUS) {
+ if (wl->regsva && wl->bcm_bustype != SDIO_BUS &&
+ wl->bcm_bustype != JTAG_BUS) {
iounmap((void *)wl->regsva);
}
wl->regsva = NULL;
-#ifdef WLC_HIGH_ONLY
- wl_rpcq_free(wl);
-
- wl_txq_free(wl);
-
- if (wl->rpc) {
- bcm_rpc_detach(wl->rpc);
- wl->rpc = NULL;
- }
-
- if (wl->rpc_th) {
- bcm_rpc_tp_detach(wl->rpc_th);
- wl->rpc_th = NULL;
- }
-#endif /* WLC_HIGH_ONLY */
osl_detach(osh);
}
-#ifdef WLC_LOW
/* transmit a packet */
-static int BCMFASTPATH wl_start(struct sk_buff *skb, wl_info_t *wl)
+static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
{
if (!wl)
return -ENETDOWN;
return wl_start_int(wl, WL_TO_HW(wl), skb);
}
-#endif /* WLC_LOW */
static int BCMFASTPATH
-wl_start_int(wl_info_t *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
+wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
{
-#ifdef WLC_HIGH_ONLY
- WL_LOCK(wl);
-#endif
wlc_sendpkt_mac80211(wl->wlc, skb, hw);
-#ifdef WLC_HIGH_ONLY
- WL_UNLOCK(wl);
-#endif
return NETDEV_TX_OK;
}
-void wl_txflowcontrol(wl_info_t *wl, struct wl_if *wlif, bool state, int prio)
+void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
+ int prio)
{
- WL_ERROR(("Shouldn't be here %s\n", __func__));
+ WL_ERROR("Shouldn't be here %s\n", __func__);
}
-#if defined(WLC_HIGH_ONLY)
-/* Schedule a completion handler to run at safe time */
-static int
-wl_schedule_task(wl_info_t *wl, void (*fn) (struct wl_task *task),
- void *context)
+void wl_init(struct wl_info *wl)
{
- wl_task_t *task;
-
- WL_TRACE(("wl%d: wl_schedule_task\n", wl->pub->unit));
-
- task = kmalloc(sizeof(wl_task_t), GFP_ATOMIC);
- if (!task) {
- WL_ERROR(("wl%d: wl_schedule_task: out of memory\n", wl->pub->unit));
- return -ENOMEM;
- }
-
- INIT_WORK(&task->work, (work_func_t) fn);
- task->context = context;
-
- if (!schedule_work(&task->work)) {
- WL_ERROR(("wl%d: schedule_work() failed\n", wl->pub->unit));
- kfree(task);
- return -ENOMEM;
- }
-
- atomic_inc(&wl->callbacks);
-
- return 0;
-}
-#endif /* defined(WLC_HIGH_ONLY) */
-
-void wl_init(wl_info_t *wl)
-{
- WL_TRACE(("wl%d: wl_init\n", wl->pub->unit));
+ WL_TRACE("wl%d: wl_init\n", wl->pub->unit);
wl_reset(wl);
wlc_init(wl->wlc);
}
-uint wl_reset(wl_info_t *wl)
+uint wl_reset(struct wl_info *wl)
{
- WL_TRACE(("wl%d: wl_reset\n", wl->pub->unit));
+ WL_TRACE("wl%d: wl_reset\n", wl->pub->unit);
wlc_reset(wl->wlc);
@@ -1703,25 +1372,22 @@ uint wl_reset(wl_info_t *wl)
* These are interrupt on/off entry points. Disable interrupts
* during interrupt state transition.
*/
-void BCMFASTPATH wl_intrson(wl_info_t *wl)
+void BCMFASTPATH wl_intrson(struct wl_info *wl)
{
-#if defined(WLC_LOW)
unsigned long flags;
INT_LOCK(wl, flags);
wlc_intrson(wl->wlc);
INT_UNLOCK(wl, flags);
-#endif /* WLC_LOW */
}
-bool wl_alloc_dma_resources(wl_info_t *wl, uint addrwidth)
+bool wl_alloc_dma_resources(struct wl_info *wl, uint addrwidth)
{
return true;
}
-u32 BCMFASTPATH wl_intrsoff(wl_info_t *wl)
+u32 BCMFASTPATH wl_intrsoff(struct wl_info *wl)
{
-#if defined(WLC_LOW)
unsigned long flags;
u32 status;
@@ -1729,23 +1395,18 @@ u32 BCMFASTPATH wl_intrsoff(wl_info_t *wl)
status = wlc_intrsoff(wl->wlc);
INT_UNLOCK(wl, flags);
return status;
-#else
- return 0;
-#endif /* WLC_LOW */
}
-void wl_intrsrestore(wl_info_t *wl, u32 macintmask)
+void wl_intrsrestore(struct wl_info *wl, u32 macintmask)
{
-#if defined(WLC_LOW)
unsigned long flags;
INT_LOCK(wl, flags);
wlc_intrsrestore(wl->wlc, macintmask);
INT_UNLOCK(wl, flags);
-#endif /* WLC_LOW */
}
-int wl_up(wl_info_t *wl)
+int wl_up(struct wl_info *wl)
{
int error = 0;
@@ -1757,7 +1418,7 @@ int wl_up(wl_info_t *wl)
return error;
}
-void wl_down(wl_info_t *wl)
+void wl_down(struct wl_info *wl)
{
uint callbacks, ret_val = 0;
@@ -1768,24 +1429,21 @@ void wl_down(wl_info_t *wl)
/* wait for down callbacks to complete */
WL_UNLOCK(wl);
-#ifndef WLC_HIGH_ONLY
/* For HIGH_only driver, it's important to actually schedule other work,
* not just spin wait since everything runs at schedule level
*/
SPINWAIT((atomic_read(&wl->callbacks) > callbacks), 100 * 1000);
-#endif /* WLC_HIGH_ONLY */
WL_LOCK(wl);
}
irqreturn_t BCMFASTPATH wl_isr(int irq, void *dev_id)
{
-#if defined(WLC_LOW)
- wl_info_t *wl;
+ struct wl_info *wl;
bool ours, wantdpc;
unsigned long flags;
- wl = (wl_info_t *) dev_id;
+ wl = (struct wl_info *) dev_id;
WL_ISRLOCK(wl, flags);
@@ -1805,17 +1463,13 @@ irqreturn_t BCMFASTPATH wl_isr(int irq, void *dev_id)
WL_ISRUNLOCK(wl, flags);
return IRQ_RETVAL(ours);
-#else
- return IRQ_RETVAL(0);
-#endif /* WLC_LOW */
}
static void BCMFASTPATH wl_dpc(unsigned long data)
{
-#ifdef WLC_LOW
- wl_info_t *wl;
+ struct wl_info *wl;
- wl = (wl_info_t *) data;
+ wl = (struct wl_info *) data;
WL_LOCK(wl);
@@ -1846,20 +1500,19 @@ static void BCMFASTPATH wl_dpc(unsigned long data)
done:
WL_UNLOCK(wl);
-#endif /* WLC_LOW */
}
-static void wl_link_up(wl_info_t *wl, char *ifname)
+static void wl_link_up(struct wl_info *wl, char *ifname)
{
- WL_ERROR(("wl%d: link up (%s)\n", wl->pub->unit, ifname));
+ WL_ERROR("wl%d: link up (%s)\n", wl->pub->unit, ifname);
}
-static void wl_link_down(wl_info_t *wl, char *ifname)
+static void wl_link_down(struct wl_info *wl, char *ifname)
{
- WL_ERROR(("wl%d: link down (%s)\n", wl->pub->unit, ifname));
+ WL_ERROR("wl%d: link down (%s)\n", wl->pub->unit, ifname);
}
-void wl_event(wl_info_t *wl, char *ifname, wlc_event_t *e)
+void wl_event(struct wl_info *wl, char *ifname, wlc_event_t *e)
{
switch (e->event.event_type) {
@@ -1877,12 +1530,7 @@ void wl_event(wl_info_t *wl, char *ifname, wlc_event_t *e)
static void wl_timer(unsigned long data)
{
-#ifndef WLC_HIGH_ONLY
_wl_timer((wl_timer_t *) data);
-#else
- wl_timer_t *t = (wl_timer_t *) data;
- wl_schedule_task(t->wl, wl_timer_task, t);
-#endif /* WLC_HIGH_ONLY */
}
static void _wl_timer(wl_timer_t *t)
@@ -1906,18 +1554,18 @@ static void _wl_timer(wl_timer_t *t)
WL_UNLOCK(t->wl);
}
-wl_timer_t *wl_init_timer(wl_info_t *wl, void (*fn) (void *arg), void *arg,
+wl_timer_t *wl_init_timer(struct wl_info *wl, void (*fn) (void *arg), void *arg,
const char *name)
{
wl_timer_t *t;
t = kmalloc(sizeof(wl_timer_t), GFP_ATOMIC);
if (!t) {
- WL_ERROR(("wl%d: wl_init_timer: out of memory\n", wl->pub->unit));
+ WL_ERROR("wl%d: wl_init_timer: out of memory\n", wl->pub->unit);
return 0;
}
- bzero(t, sizeof(wl_timer_t));
+ memset(t, 0, sizeof(wl_timer_t));
init_timer(&t->timer);
t->timer.data = (unsigned long) t;
@@ -1940,12 +1588,12 @@ wl_timer_t *wl_init_timer(wl_info_t *wl, void (*fn) (void *arg), void *arg,
/* BMAC_NOTE: Add timer adds only the kernel timer since it's going to be more accurate
* as well as it's easier to make it periodic
*/
-void wl_add_timer(wl_info_t *wl, wl_timer_t *t, uint ms, int periodic)
+void wl_add_timer(struct wl_info *wl, wl_timer_t *t, uint ms, int periodic)
{
#ifdef BCMDBG
if (t->set) {
- WL_ERROR(("%s: Already set. Name: %s, per %d\n",
- __func__, t->name, periodic));
+ WL_ERROR("%s: Already set. Name: %s, per %d\n",
+ __func__, t->name, periodic);
}
#endif
ASSERT(!t->set);
@@ -1960,7 +1608,7 @@ void wl_add_timer(wl_info_t *wl, wl_timer_t *t, uint ms, int periodic)
}
/* return true if timer successfully deleted, false if still pending */
-bool wl_del_timer(wl_info_t *wl, wl_timer_t *t)
+bool wl_del_timer(struct wl_info *wl, wl_timer_t *t)
{
if (t->set) {
t->set = false;
@@ -1973,7 +1621,7 @@ bool wl_del_timer(wl_info_t *wl, wl_timer_t *t)
return true;
}
-void wl_free_timer(wl_info_t *wl, wl_timer_t *t)
+void wl_free_timer(struct wl_info *wl, wl_timer_t *t)
{
wl_timer_t *tmp;
@@ -2009,7 +1657,7 @@ void wl_free_timer(wl_info_t *wl, wl_timer_t *t)
static int wl_linux_watchdog(void *ctx)
{
- wl_info_t *wl = (wl_info_t *) ctx;
+ struct wl_info *wl = (struct wl_info *) ctx;
struct net_device_stats *stats = NULL;
uint id;
/* refresh stats */
@@ -2049,233 +1697,12 @@ struct wl_fw_hdr {
u32 idx;
};
-#ifdef WLC_HIGH_ONLY
-static void wl_rpc_down(void *wlh)
-{
- wl_info_t *wl = (wl_info_t *) (wlh);
-
- wlc_device_removed(wl->wlc);
-
- wl_rpcq_free(wl);
-}
-
-static int BCMFASTPATH wl_start(struct sk_buff *skb, wl_info_t *wl)
-{
-
- unsigned long flags;
-
- skb->prev = NULL;
-
- /* Lock the queue as tasklet could be running at this time */
- TXQ_LOCK(wl, flags);
- if (wl->txq_head == NULL)
- wl->txq_head = skb;
- else {
- wl->txq_tail->prev = skb;
- }
- wl->txq_tail = skb;
-
- if (wl->txq_dispatched == false) {
- wl->txq_dispatched = true;
-
- if (schedule_work(&wl->txq_task.work)) {
- atomic_inc(&wl->callbacks);
- } else {
- WL_ERROR(("wl%d: wl_start/schedule_work failed\n",
- wl->pub->unit));
- }
- }
-
- TXQ_UNLOCK(wl, flags);
-
- return 0;
-
-}
-
-static void wl_start_txqwork(struct wl_task *task)
-{
- wl_info_t *wl = (wl_info_t *) task->context;
- struct sk_buff *skb;
- unsigned long flags;
- uint count = 0;
-
- WL_TRACE(("wl%d: wl_start_txqwork\n", wl->pub->unit));
-
- /* First remove an entry then go for execution */
- TXQ_LOCK(wl, flags);
- while (wl->txq_head) {
- skb = wl->txq_head;
- wl->txq_head = skb->prev;
- skb->prev = NULL;
- if (wl->txq_head == NULL)
- wl->txq_tail = NULL;
- TXQ_UNLOCK(wl, flags);
-
- /* it has WL_LOCK/WL_UNLOCK inside */
- wl_start_int(wl, WL_TO_HW(wl), skb);
-
- /* bounded our execution, reshedule ourself next */
- if (++count >= 10)
- break;
-
- TXQ_LOCK(wl, flags);
- }
-
- if (count >= 10) {
- if (!schedule_work(&wl->txq_task.work)) {
- WL_ERROR(("wl%d: wl_start/schedule_work failed\n",
- wl->pub->unit));
- atomic_dec(&wl->callbacks);
- }
- } else {
- wl->txq_dispatched = false;
- TXQ_UNLOCK(wl, flags);
- atomic_dec(&wl->callbacks);
- }
-
- return;
-}
-
-static void wl_txq_free(wl_info_t *wl)
-{
- struct sk_buff *skb;
-
- if (wl->txq_head == NULL) {
- ASSERT(wl->txq_tail == NULL);
- return;
- }
-
- while (wl->txq_head) {
- skb = wl->txq_head;
- wl->txq_head = skb->prev;
- PKTFREE(wl->osh, skb, true);
- }
-
- wl->txq_tail = NULL;
-}
-
-static void wl_rpcq_free(wl_info_t *wl)
-{
- rpc_buf_t *buf;
-
- if (wl->rpcq_head == NULL) {
- ASSERT(wl->rpcq_tail == NULL);
- return;
- }
-
- while (wl->rpcq_head) {
- buf = wl->rpcq_head;
- wl->rpcq_head = bcm_rpc_buf_next_get(wl->rpc_th, buf);
- bcm_rpc_buf_free(wl->rpc_dispatch_ctx.rpc, buf);
- }
-
- wl->rpcq_tail = NULL;
-}
-
-static void wl_rpcq_dispatch(struct wl_task *task)
-{
- wl_info_t *wl = (wl_info_t *) task->context;
- rpc_buf_t *buf;
- unsigned long flags;
-
- /* First remove an entry then go for execution */
- RPCQ_LOCK(wl, flags);
- while (wl->rpcq_head) {
- buf = wl->rpcq_head;
- wl->rpcq_head = bcm_rpc_buf_next_get(wl->rpc_th, buf);
-
- if (wl->rpcq_head == NULL)
- wl->rpcq_tail = NULL;
- RPCQ_UNLOCK(wl, flags);
-
- WL_LOCK(wl);
- wlc_rpc_high_dispatch(&wl->rpc_dispatch_ctx, buf);
- WL_UNLOCK(wl);
-
- RPCQ_LOCK(wl, flags);
- }
-
- wl->rpcq_dispatched = false;
-
- RPCQ_UNLOCK(wl, flags);
-
- kfree(task);
- atomic_dec(&wl->callbacks);
-}
-
-static void wl_rpcq_add(wl_info_t *wl, rpc_buf_t *buf)
-{
- unsigned long flags;
-
- bcm_rpc_buf_next_set(wl->rpc_th, buf, NULL);
-
- /* Lock the queue as tasklet could be running at this time */
- RPCQ_LOCK(wl, flags);
- if (wl->rpcq_head == NULL)
- wl->rpcq_head = buf;
- else
- bcm_rpc_buf_next_set(wl->rpc_th, wl->rpcq_tail, buf);
-
- wl->rpcq_tail = buf;
-
- if (wl->rpcq_dispatched == false) {
- wl->rpcq_dispatched = true;
- wl_schedule_task(wl, wl_rpcq_dispatch, wl);
- }
-
- RPCQ_UNLOCK(wl, flags);
-}
-
-#if defined(BCMDBG)
-static const struct name_entry rpc_name_tbl[] = RPC_ID_TABLE;
-#endif /* BCMDBG */
-
-/* dongle-side rpc dispatch routine */
-static void wl_rpc_dispatch_schedule(void *ctx, struct rpc_buf *buf)
-{
- bcm_xdr_buf_t b;
- wl_info_t *wl = (wl_info_t *) ctx;
- wlc_rpc_id_t rpc_id;
- int err;
-
- bcm_xdr_buf_init(&b, bcm_rpc_buf_data(wl->rpc_th, buf),
- bcm_rpc_buf_len_get(wl->rpc_th, buf));
-
- err = bcm_xdr_unpack_u32(&b, &rpc_id);
- ASSERT(!err);
- WL_TRACE(("%s: Dispatch id %s\n", __func__,
- WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-
- /* Handle few emergency ones */
- switch (rpc_id) {
- default:
- wl_rpcq_add(wl, buf);
- break;
- }
-}
-
-static void wl_timer_task(wl_task_t *task)
-{
- wl_timer_t *t = (wl_timer_t *) task->context;
-
- _wl_timer(t);
- kfree(task);
-
- /* This dec is for the task_schedule. The timer related
- * callback is decremented in _wl_timer
- */
- atomic_dec(&t->wl->callbacks);
-}
-#endif /* WLC_HIGH_ONLY */
-
-#ifndef WLC_HIGH_ONLY
char *wl_firmwares[WL_MAX_FW] = {
"brcm/bcm43xx",
NULL
};
-#ifdef WLC_LOW
-int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, u32 idx)
+int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, u32 idx)
{
int i, entry;
const u8 *pdata;
@@ -2301,7 +1728,7 @@ int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, u32 idx)
return -1;
}
-int wl_ucode_init_uint(wl_info_t *wl, u32 *data, u32 idx)
+int wl_ucode_init_uint(struct wl_info *wl, u32 *data, u32 idx)
{
int i, entry;
const u8 *pdata;
@@ -2321,22 +1748,21 @@ int wl_ucode_init_uint(wl_info_t *wl, u32 *data, u32 idx)
printf("ERROR: ucode tag:%d can not be found!\n", idx);
return -1;
}
-#endif /* WLC_LOW */
-static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev)
+static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev)
{
int status;
struct device *device = &pdev->dev;
char fw_name[100];
int i;
- bzero((void *)&wl->fw, sizeof(struct wl_firmware));
+ memset((void *)&wl->fw, 0, sizeof(struct wl_firmware));
for (i = 0; i < WL_MAX_FW; i++) {
if (wl_firmwares[i] == NULL)
break;
sprintf(fw_name, "%s-%d.fw", wl_firmwares[i],
UCODE_LOADER_API_VER);
- WL_NONE(("request fw %s\n", fw_name));
+ WL_NONE("request fw %s\n", fw_name);
status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
if (status) {
printf("%s: fail to load firmware %s\n",
@@ -2344,7 +1770,7 @@ static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev)
wl_release_fw(wl);
return status;
}
- WL_NONE(("request fw %s\n", fw_name));
+ WL_NONE("request fw %s\n", fw_name);
sprintf(fw_name, "%s_hdr-%d.fw", wl_firmwares[i],
UCODE_LOADER_API_VER);
status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
@@ -2356,22 +1782,19 @@ static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev)
}
wl->fw.hdr_num_entries[i] =
wl->fw.fw_hdr[i]->size / (sizeof(struct wl_fw_hdr));
- WL_NONE(("request fw %s find: %d entries\n", fw_name,
- wl->fw.hdr_num_entries[i]));
+ WL_NONE("request fw %s find: %d entries\n",
+ fw_name, wl->fw.hdr_num_entries[i]);
}
wl->fw.fw_cnt = i;
- wl_ucode_data_init(wl);
- return 0;
+ return wl_ucode_data_init(wl);
}
-#ifdef WLC_LOW
void wl_ucode_free_buf(void *p)
{
kfree(p);
}
-#endif /* WLC_LOW */
-static void wl_release_fw(wl_info_t *wl)
+static void wl_release_fw(struct wl_info *wl)
{
int i;
for (i = 0; i < WL_MAX_FW; i++) {
@@ -2379,4 +1802,54 @@ static void wl_release_fw(wl_info_t *wl)
release_firmware(wl->fw.fw_hdr[i]);
}
}
-#endif /* WLC_HIGH_ONLY */
+
+
+/*
+ * checks validity of all firmware images loaded from user space
+ */
+int wl_check_firmwares(struct wl_info *wl)
+{
+ int i;
+ int entry;
+ int rc = 0;
+ const struct firmware *fw;
+ const struct firmware *fw_hdr;
+ struct wl_fw_hdr *ucode_hdr;
+ for (i = 0; i < WL_MAX_FW && rc == 0; i++) {
+ fw = wl->fw.fw_bin[i];
+ fw_hdr = wl->fw.fw_hdr[i];
+ if (fw == NULL && fw_hdr == NULL) {
+ break;
+ } else if (fw == NULL || fw_hdr == NULL) {
+ WL_ERROR("%s: invalid bin/hdr fw\n", __func__);
+ rc = -EBADF;
+ } else if (fw_hdr->size % sizeof(struct wl_fw_hdr)) {
+ WL_ERROR("%s: non integral fw hdr file size %d/%zu\n",
+ __func__, fw_hdr->size,
+ sizeof(struct wl_fw_hdr));
+ rc = -EBADF;
+ } else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
+ WL_ERROR("%s: out of bounds fw file size %d\n",
+ __func__, fw->size);
+ rc = -EBADF;
+ } else {
+ /* check if ucode section overruns firmware image */
+ ucode_hdr = (struct wl_fw_hdr *)fw_hdr->data;
+ for (entry = 0; entry < wl->fw.hdr_num_entries[i] && !rc;
+ entry++, ucode_hdr++) {
+ if (ucode_hdr->offset + ucode_hdr->len >
+ fw->size) {
+ WL_ERROR("%s: conflicting bin/hdr\n",
+ __func__);
+ rc = -EBADF;
+ }
+ }
+ }
+ }
+ if (rc == 0 && wl->fw.fw_cnt != i) {
+ WL_ERROR("%s: invalid fw_cnt=%d\n", __func__, wl->fw.fw_cnt);
+ rc = -EBADF;
+ }
+ return rc;
+}
+
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.h b/drivers/staging/brcm80211/sys/wl_mac80211.h
index 78cee4454b0b..bb39b7705947 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.h
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.h
@@ -60,58 +60,32 @@ struct wl_firmware {
};
struct wl_info {
- wlc_pub_t *pub; /* pointer to public wlc state */
+ struct wlc_pub *pub; /* pointer to public wlc state */
void *wlc; /* pointer to private common os-independent data */
- osl_t *osh; /* pointer to os handler */
+ struct osl_info *osh; /* pointer to os handler */
u32 magic;
int irq;
-#ifdef WLC_HIGH_ONLY
- struct semaphore sem; /* use semaphore to allow sleep */
-#else
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
-#endif
uint bcm_bustype; /* bus type */
bool piomode; /* set from insmod argument */
void *regsva; /* opaque chip registers virtual address */
atomic_t callbacks; /* # outstanding callback functions */
struct wl_timer *timers; /* timer cleanup queue */
struct tasklet_struct tasklet; /* dpc tasklet */
-#ifdef BCMSDIO
- bcmsdh_info_t *sdh; /* pointer to sdio bus handler */
- unsigned long flags; /* current irq flags */
-#endif /* BCMSDIO */
bool resched; /* dpc needs to be and is rescheduled */
#ifdef LINUXSTA_PS
u32 pci_psstate[16]; /* pci ps-state save/restore */
#endif
/* RPC, handle, lock, txq, workitem */
-#ifdef WLC_HIGH_ONLY
- rpc_info_t *rpc; /* RPC handle */
- rpc_tp_info_t *rpc_th; /* RPC transport handle */
- wlc_rpc_ctx_t rpc_dispatch_ctx;
-
- bool rpcq_dispatched; /* Avoid scheduling multiple tasks */
- spinlock_t rpcq_lock; /* Lock for the queue */
- rpc_buf_t *rpcq_head; /* RPC Q */
- rpc_buf_t *rpcq_tail; /* Points to the last buf */
-
- bool txq_dispatched; /* Avoid scheduling multiple tasks */
- spinlock_t txq_lock; /* Lock for the queue */
- struct sk_buff *txq_head; /* TX Q */
- struct sk_buff *txq_tail; /* Points to the last buf */
-
- wl_task_t txq_task; /* work queue for wl_start() */
-#endif /* WLC_HIGH_ONLY */
uint stats_id; /* the current set of stats */
/* ping-pong stats counters updated by Linux watchdog */
struct net_device_stats stats_watchdog[2];
struct wl_firmware fw;
};
-#ifndef WLC_HIGH_ONLY
#define WL_LOCK(wl) spin_lock_bh(&(wl)->lock)
#define WL_UNLOCK(wl) spin_unlock_bh(&(wl)->lock)
@@ -122,17 +96,6 @@ struct wl_info {
/* locking under WL_LOCK() to synchronize with wl_isr */
#define INT_LOCK(wl, flags) spin_lock_irqsave(&(wl)->isr_lock, flags)
#define INT_UNLOCK(wl, flags) spin_unlock_irqrestore(&(wl)->isr_lock, flags)
-#else /* BCMSDIO */
-
-#define WL_LOCK(wl) down(&(wl)->sem)
-#define WL_UNLOCK(wl) up(&(wl)->sem)
-
-#define WL_ISRLOCK(wl)
-#define WL_ISRUNLOCK(wl)
-#endif /* WLC_HIGH_ONLY */
-
-/* handle forward declaration */
-typedef struct wl_info wl_info_t;
#ifndef PCI_D0
#define PCI_D0 0
@@ -148,14 +111,7 @@ extern irqreturn_t wl_isr(int irq, void *dev_id);
extern int __devinit wl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
-extern void wl_free(wl_info_t *wl);
+extern void wl_free(struct wl_info *wl);
extern int wl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-extern int wl_ucode_data_init(wl_info_t *wl);
-extern void wl_ucode_data_free(void);
-#ifdef WLC_LOW
-extern void wl_ucode_free_buf(void *);
-extern int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, u32 idx);
-extern int wl_ucode_init_uint(wl_info_t *wl, u32 *data, u32 idx);
-#endif /* WLC_LOW */
#endif /* _wl_mac80211_h_ */
diff --git a/drivers/staging/brcm80211/sys/wl_ucode.h b/drivers/staging/brcm80211/sys/wl_ucode.h
index a1ba37209f96..2a0f4028f6f3 100644
--- a/drivers/staging/brcm80211/sys/wl_ucode.h
+++ b/drivers/staging/brcm80211/sys/wl_ucode.h
@@ -14,6 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#define MIN_FW_SIZE 40000 /* minimum firmware file size in bytes */
+#define MAX_FW_SIZE 150000
+
typedef struct d11init {
u16 addr;
u16 size;
@@ -35,3 +38,12 @@ extern u32 *bcm43xx_24_lcn;
extern u32 bcm43xx_24_lcnsz;
extern u32 *bcm43xx_bommajor;
extern u32 *bcm43xx_bomminor;
+
+extern int wl_ucode_data_init(struct wl_info *wl);
+extern void wl_ucode_data_free(void);
+
+extern int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, unsigned int idx);
+extern int wl_ucode_init_uint(struct wl_info *wl, unsigned *data,
+ unsigned int idx);
+extern void wl_ucode_free_buf(void *);
+extern int wl_check_firmwares(struct wl_info *wl);
diff --git a/drivers/staging/brcm80211/sys/wl_ucode_loader.c b/drivers/staging/brcm80211/sys/wl_ucode_loader.c
index 0b41a9cb1ec9..23e10f3dec0d 100644
--- a/drivers/staging/brcm80211/sys/wl_ucode_loader.c
+++ b/drivers/staging/brcm80211/sys/wl_ucode_loader.c
@@ -14,17 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-typedef struct wl_info wl_info_t;
#include <linux/types.h>
#include <bcmdefs.h>
#include <d11ucode_ext.h>
#include <wl_ucode.h>
-extern int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, unsigned int idx);
-extern int wl_ucode_init_uint(wl_info_t *wl, unsigned *data, unsigned int idx);
-extern int wl_ucode_data_init(wl_info_t *wl);
-extern void wl_ucode_data_free(void);
-extern void wl_ucode_free_buf(void *);
+
d11init_t *d11lcn0bsinitvals24;
d11init_t *d11lcn0initvals24;
@@ -42,8 +37,12 @@ u32 bcm43xx_24_lcnsz;
u32 *bcm43xx_bommajor;
u32 *bcm43xx_bomminor;
-int wl_ucode_data_init(wl_info_t *wl)
+int wl_ucode_data_init(struct wl_info *wl)
{
+ int rc;
+ rc = wl_check_firmwares(wl);
+ if (rc < 0)
+ return rc;
wl_ucode_init_buf(wl, (void **)&d11lcn0bsinitvals24,
D11LCN0BSINITVALS24);
wl_ucode_init_buf(wl, (void **)&d11lcn0initvals24, D11LCN0INITVALS24);
diff --git a/drivers/staging/brcm80211/sys/wlc_alloc.c b/drivers/staging/brcm80211/sys/wlc_alloc.c
index 2dc89f9c2635..746439e8fd57 100644
--- a/drivers/staging/brcm80211/sys/wlc_alloc.c
+++ b/drivers/staging/brcm80211/sys/wlc_alloc.c
@@ -17,28 +17,33 @@
#include <linux/string.h>
#include <bcmdefs.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <wlioctl.h>
#include <wlc_pub.h>
#include <wlc_key.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_alloc.h>
+#include <wl_dbg.h>
-static wlc_pub_t *wlc_pub_malloc(osl_t *osh, uint unit, uint *err,
- uint devid);
-static void wlc_pub_mfree(osl_t *osh, wlc_pub_t *pub);
+static struct wlc_pub *wlc_pub_malloc(struct osl_info *osh, uint unit,
+ uint *err, uint devid);
+static void wlc_pub_mfree(struct osl_info *osh, struct wlc_pub *pub);
static void wlc_tunables_init(wlc_tunables_t *tunables, uint devid);
-void *wlc_calloc(osl_t *osh, uint unit, uint size)
+void *wlc_calloc(struct osl_info *osh, uint unit, uint size)
{
void *item;
item = kzalloc(size, GFP_ATOMIC);
if (item == NULL)
- WL_ERROR(("wl%d: %s: out of memory\n", unit, __func__));
+ WL_ERROR("wl%d: %s: out of memory\n", unit, __func__);
return item;
}
@@ -58,18 +63,14 @@ void wlc_tunables_init(wlc_tunables_t *tunables, uint devid)
tunables->ampdudatahiwat = WLC_AMPDUDATAHIWAT;
tunables->rxbnd = RXBND;
tunables->txsbnd = TXSBND;
-#if defined(WLC_HIGH_ONLY) && defined(NTXD_USB_4319)
- if (devid == BCM4319_CHIP_ID) {
- tunables->ntxd = NTXD_USB_4319;
- }
-#endif /* WLC_HIGH_ONLY */
}
-static wlc_pub_t *wlc_pub_malloc(osl_t *osh, uint unit, uint *err, uint devid)
+static struct wlc_pub *wlc_pub_malloc(struct osl_info *osh, uint unit,
+ uint *err, uint devid)
{
- wlc_pub_t *pub;
+ struct wlc_pub *pub;
- pub = (wlc_pub_t *) wlc_calloc(osh, unit, sizeof(wlc_pub_t));
+ pub = (struct wlc_pub *) wlc_calloc(osh, unit, sizeof(struct wlc_pub));
if (pub == NULL) {
*err = 1001;
goto fail;
@@ -99,7 +100,7 @@ static wlc_pub_t *wlc_pub_malloc(osl_t *osh, uint unit, uint *err, uint devid)
return NULL;
}
-static void wlc_pub_mfree(osl_t *osh, wlc_pub_t *pub)
+static void wlc_pub_mfree(struct osl_info *osh, struct wlc_pub *pub)
{
if (pub == NULL)
return;
@@ -114,7 +115,7 @@ static void wlc_pub_mfree(osl_t *osh, wlc_pub_t *pub)
kfree(pub);
}
-wlc_bsscfg_t *wlc_bsscfg_malloc(osl_t *osh, uint unit)
+wlc_bsscfg_t *wlc_bsscfg_malloc(struct osl_info *osh, uint unit)
{
wlc_bsscfg_t *cfg;
@@ -134,7 +135,7 @@ wlc_bsscfg_t *wlc_bsscfg_malloc(osl_t *osh, uint unit)
return NULL;
}
-void wlc_bsscfg_mfree(osl_t *osh, wlc_bsscfg_t *cfg)
+void wlc_bsscfg_mfree(struct osl_info *osh, wlc_bsscfg_t *cfg)
{
if (cfg == NULL)
return;
@@ -155,7 +156,7 @@ void wlc_bsscfg_mfree(osl_t *osh, wlc_bsscfg_t *cfg)
kfree(cfg);
}
-void wlc_bsscfg_ID_assign(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg)
+void wlc_bsscfg_ID_assign(struct wlc_info *wlc, wlc_bsscfg_t *bsscfg)
{
bsscfg->ID = wlc->next_bsscfg_ID;
wlc->next_bsscfg_ID++;
@@ -164,11 +165,13 @@ void wlc_bsscfg_ID_assign(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg)
/*
* The common driver entry routine. Error codes should be unique
*/
-wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
+struct wlc_info *wlc_attach_malloc(struct osl_info *osh, uint unit, uint *err,
+ uint devid)
{
- wlc_info_t *wlc;
+ struct wlc_info *wlc;
- wlc = (wlc_info_t *) wlc_calloc(osh, unit, sizeof(wlc_info_t));
+ wlc = (struct wlc_info *) wlc_calloc(osh, unit,
+ sizeof(struct wlc_info));
if (wlc == NULL) {
*err = 1002;
goto fail;
@@ -176,7 +179,7 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
wlc->hwrxoff = WL_HWRXOFF;
- /* allocate wlc_pub_t state structure */
+ /* allocate struct wlc_pub state structure */
wlc->pub = wlc_pub_malloc(osh, unit, err, devid);
if (wlc->pub == NULL) {
*err = 1003;
@@ -184,17 +187,16 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
}
wlc->pub->wlc = wlc;
- /* allocate wlc_hw_info_t state structure */
+ /* allocate struct wlc_hw_info state structure */
- wlc->hw = (wlc_hw_info_t *)wlc_calloc(osh, unit,
- sizeof(wlc_hw_info_t));
+ wlc->hw = (struct wlc_hw_info *)wlc_calloc(osh, unit,
+ sizeof(struct wlc_hw_info));
if (wlc->hw == NULL) {
*err = 1005;
goto fail;
}
wlc->hw->wlc = wlc;
-#ifdef WLC_LOW
wlc->hw->bandstate[0] = (wlc_hwband_t *)wlc_calloc(osh, unit,
(sizeof(wlc_hwband_t) * MAXBANDS));
if (wlc->hw->bandstate[0] == NULL) {
@@ -209,7 +211,6 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
(sizeof(wlc_hwband_t) * i));
}
}
-#endif /* WLC_LOW */
wlc->modulecb = (modulecb_t *)wlc_calloc(osh, unit,
sizeof(modulecb_t) * WLC_MAXMODULES);
@@ -266,8 +267,8 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
goto fail;
}
- wlc->bandstate[0] = (wlcband_t *)wlc_calloc(osh, unit,
- (sizeof(wlcband_t) * MAXBANDS));
+ wlc->bandstate[0] = (struct wlcband *)wlc_calloc(osh, unit,
+ (sizeof(struct wlcband)*MAXBANDS));
if (wlc->bandstate[0] == NULL) {
*err = 1025;
goto fail;
@@ -276,12 +277,13 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
for (i = 1; i < MAXBANDS; i++) {
wlc->bandstate[i] =
- (wlcband_t *) ((unsigned long)wlc->bandstate[0] +
- (sizeof(wlcband_t) * i));
+ (struct wlcband *) ((unsigned long)wlc->bandstate[0]
+ + (sizeof(struct wlcband)*i));
}
}
- wlc->corestate = (wlccore_t *)wlc_calloc(osh, unit, sizeof(wlccore_t));
+ wlc->corestate = (struct wlccore *)wlc_calloc(osh, unit,
+ sizeof(struct wlccore));
if (wlc->corestate == NULL) {
*err = 1026;
goto fail;
@@ -301,7 +303,7 @@ wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
return NULL;
}
-void wlc_detach_mfree(wlc_info_t *wlc, osl_t *osh)
+void wlc_detach_mfree(struct wlc_info *wlc, struct osl_info *osh)
{
if (wlc == NULL)
return;
@@ -355,12 +357,10 @@ void wlc_detach_mfree(wlc_info_t *wlc, osl_t *osh)
}
if (wlc->hw) {
-#ifdef WLC_LOW
if (wlc->hw->bandstate[0]) {
kfree(wlc->hw->bandstate[0]);
wlc->hw->bandstate[0] = NULL;
}
-#endif
/* free hw struct */
kfree(wlc->hw);
diff --git a/drivers/staging/brcm80211/sys/wlc_alloc.h b/drivers/staging/brcm80211/sys/wlc_alloc.h
index 678a2b9784f8..ac34f782b400 100644
--- a/drivers/staging/brcm80211/sys/wlc_alloc.h
+++ b/drivers/staging/brcm80211/sys/wlc_alloc.h
@@ -14,12 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-extern void *wlc_calloc(osl_t *osh, uint unit, uint size);
+extern void *wlc_calloc(struct osl_info *osh, uint unit, uint size);
-extern wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err,
- uint devid);
-extern void wlc_detach_mfree(wlc_info_t *wlc, osl_t *osh);
+extern struct wlc_info *wlc_attach_malloc(struct osl_info *osh, uint unit,
+ uint *err, uint devid);
+extern void wlc_detach_mfree(struct wlc_info *wlc, struct osl_info *osh);
struct wlc_bsscfg;
-extern struct wlc_bsscfg *wlc_bsscfg_malloc(osl_t *osh, uint unit);
-extern void wlc_bsscfg_mfree(osl_t *osh, struct wlc_bsscfg *cfg);
+extern struct wlc_bsscfg *wlc_bsscfg_malloc(struct osl_info *osh, uint unit);
+extern void wlc_bsscfg_mfree(struct osl_info *osh, struct wlc_bsscfg *cfg);
diff --git a/drivers/staging/brcm80211/sys/wlc_ampdu.c b/drivers/staging/brcm80211/sys/wlc_ampdu.c
index a4e49f3c1363..d749917f5912 100644
--- a/drivers/staging/brcm80211/sys/wlc_ampdu.c
+++ b/drivers/staging/brcm80211/sys/wlc_ampdu.c
@@ -16,19 +16,19 @@
#include <linux/kernel.h>
#include <wlc_cfg.h>
#include <bcmdefs.h>
-#include <linuxver.h>
-#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmendian.h>
#include <wlioctl.h>
+#include <sbhndpio.h>
#include <sbhnddma.h>
#include <hnddma.h>
#include <d11.h>
#include <wlc_rate.h>
#include <wlc_pub.h>
#include <wlc_key.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_phy_hal.h>
#include <wlc_antsel.h>
@@ -36,11 +36,8 @@
#include <net/mac80211.h>
#include <wlc_ampdu.h>
#include <wl_export.h>
+#include <wl_dbg.h>
-#ifdef WLC_HIGH_ONLY
-#include <bcm_rpc_tp.h>
-#include <wlc_rpctx.h>
-#endif
#define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
#define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy */
@@ -101,7 +98,7 @@ typedef struct wlc_fifo_info {
/* AMPDU module specific state */
struct ampdu_info {
- wlc_info_t *wlc; /* pointer to main wlc structure */
+ struct wlc_info *wlc; /* pointer to main wlc structure */
int scb_handle; /* scb cubby handle to retrieve data from scb */
u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
@@ -125,11 +122,6 @@ struct ampdu_info {
*/
wlc_fifo_info_t fifo_tb[NUM_FFPLD_FIFO]; /* table of fifo infos */
-#ifdef WLC_HIGH_ONLY
- void *p;
- tx_status_t txs;
- bool waiting_status; /* To help sanity checks */
-#endif
};
#define AMPDU_CLEANUPFLAG_RX (0x1)
@@ -138,38 +130,39 @@ struct ampdu_info {
#define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
#define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
-static void wlc_ffpld_init(ampdu_info_t *ampdu);
-static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int f);
-static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f);
+static void wlc_ffpld_init(struct ampdu_info *ampdu);
+static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int f);
+static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
+static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
scb_ampdu_t *scb_ampdu,
u8 tid, bool override);
-static void ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu,
+static void ampdu_cleanup_tid_ini(struct ampdu_info *ampdu,
+ scb_ampdu_t *scb_ampdu,
u8 tid, bool force);
-static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur);
-static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb);
-static void scb_ampdu_update_config_all(ampdu_info_t *ampdu);
+static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur);
+static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb);
+static void scb_ampdu_update_config_all(struct ampdu_info *ampdu);
#define wlc_ampdu_txflowcontrol(a, b, c) do {} while (0)
-static void wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb,
- void *p, tx_status_t *txs,
- u32 frmtxstatus,
- u32 frmtxstatus2);
+static void wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
+ struct scb *scb,
+ struct sk_buff *p, tx_status_t *txs,
+ u32 frmtxstatus, u32 frmtxstatus2);
-static inline u16 pkt_txh_seqnum(wlc_info_t *wlc, void *p)
+static inline u16 pkt_txh_seqnum(struct wlc_info *wlc, struct sk_buff *p)
{
d11txh_t *txh;
struct dot11_header *h;
- txh = (d11txh_t *) PKTDATA(p);
+ txh = (d11txh_t *) p->data;
h = (struct dot11_header *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
return ltoh16(h->seq) >> SEQNUM_SHIFT;
}
-ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
+struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
{
- ampdu_info_t *ampdu;
+ struct ampdu_info *ampdu;
int i;
/* some code depends on packed structures */
@@ -179,9 +172,10 @@ ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
ASSERT(wlc->pub->tunables->ampdunummpdu <= AMPDU_MAX_MPDU);
ASSERT(wlc->pub->tunables->ampdunummpdu > 0);
- ampdu = kzalloc(sizeof(ampdu_info_t), GFP_ATOMIC);
+ ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
if (!ampdu) {
- WL_ERROR(("wl%d: wlc_ampdu_attach: out of mem\n", wlc->pub->unit));
+ WL_ERROR("wl%d: wlc_ampdu_attach: out of mem\n",
+ wlc->pub->unit);
return NULL;
}
ampdu->wlc = wlc;
@@ -209,10 +203,6 @@ ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
ampdu->rx_factor = AMPDU_RX_FACTOR_32K;
else
ampdu->rx_factor = AMPDU_RX_FACTOR_64K;
-#ifdef WLC_HIGH_ONLY
- /* Restrict to smaller rcv size for BMAC dongle */
- ampdu->rx_factor = AMPDU_RX_FACTOR_32K;
-#endif
ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
@@ -232,7 +222,7 @@ ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
return ampdu;
}
-void wlc_ampdu_detach(ampdu_info_t *ampdu)
+void wlc_ampdu_detach(struct ampdu_info *ampdu)
{
int i;
@@ -250,12 +240,12 @@ void wlc_ampdu_detach(ampdu_info_t *ampdu)
kfree(ampdu);
}
-void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb)
+void scb_ampdu_cleanup(struct ampdu_info *ampdu, struct scb *scb)
{
scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
u8 tid;
- WL_AMPDU_UPDN(("scb_ampdu_cleanup: enter\n"));
+ WL_AMPDU_UPDN("scb_ampdu_cleanup: enter\n");
ASSERT(scb_ampdu);
for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++) {
@@ -266,12 +256,12 @@ void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb)
/* reset the ampdu state machine so that it can gracefully handle packets that were
* freed from the dma and tx queues during reinit
*/
-void wlc_ampdu_reset(ampdu_info_t *ampdu)
+void wlc_ampdu_reset(struct ampdu_info *ampdu)
{
- WL_NONE(("%s: Entering\n", __func__));
+ WL_NONE("%s: Entering\n", __func__);
}
-static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb)
+static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb)
{
scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
int i;
@@ -301,12 +291,12 @@ static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb)
ASSERT(scb_ampdu->release);
}
-void scb_ampdu_update_config_all(ampdu_info_t *ampdu)
+void scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
}
-static void wlc_ffpld_init(ampdu_info_t *ampdu)
+static void wlc_ffpld_init(struct ampdu_info *ampdu)
{
int i, j;
wlc_fifo_info_t *fifo;
@@ -330,9 +320,9 @@ static void wlc_ffpld_init(ampdu_info_t *ampdu)
* Return 1 if pre-loading not active, -1 if not an underflow event,
* 0 if pre-loading module took care of the event.
*/
-static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
+static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
{
- ampdu_info_t *ampdu = wlc->ampdu;
+ struct ampdu_info *ampdu = wlc->ampdu;
u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
u32 txunfl_ratio;
u8 max_mpdu;
@@ -349,7 +339,7 @@ static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
M_UCODE_MACSTAT + offsetof(macstat_t, txfunfl[fid]));
new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
if (new_txunfl == 0) {
- WL_FFPLD(("check_txunfl : TX status FRAG set but no tx underflows\n"));
+ WL_FFPLD("check_txunfl : TX status FRAG set but no tx underflows\n");
return -1;
}
fifo->prev_txfunfl = cur_txunfl;
@@ -359,7 +349,7 @@ static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
/* check if fifo is big enough */
if (wlc_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz)) {
- WL_FFPLD(("check_txunfl : get xmtfifo_sz failed.\n"));
+ WL_FFPLD("check_txunfl : get xmtfifo_sz failed\n");
return -1;
}
@@ -373,8 +363,8 @@ static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
if (fifo->accum_txfunfl < 10)
return 0;
- WL_FFPLD(("ampdu_count %d tx_underflows %d\n",
- current_ampdu_cnt, fifo->accum_txfunfl));
+ WL_FFPLD("ampdu_count %d tx_underflows %d\n",
+ current_ampdu_cnt, fifo->accum_txfunfl);
/*
compute the current ratio of tx unfl per ampdu.
@@ -427,8 +417,8 @@ static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
(max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
/ (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
- WL_FFPLD(("DMA estimated transfer rate %d; pre-load size %d\n",
- fifo->dmaxferrate, fifo->ampdu_pld_size));
+ WL_FFPLD("DMA estimated transfer rate %d; pre-load size %d\n",
+ fifo->dmaxferrate, fifo->ampdu_pld_size);
} else {
/* decrease ampdu size */
@@ -450,7 +440,7 @@ static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
return 0;
}
-static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f)
+static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
int i;
u32 phy_rate, dma_rate, tmp;
@@ -483,11 +473,12 @@ static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f)
}
static void BCMFASTPATH
-wlc_ampdu_agg(ampdu_info_t *ampdu, struct scb *scb, void *p, uint prec)
+wlc_ampdu_agg(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p,
+ uint prec)
{
scb_ampdu_t *scb_ampdu;
scb_ampdu_tid_ini_t *ini;
- u8 tid = (u8) PKTPRIO(p);
+ u8 tid = (u8) (p->priority);
scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
@@ -500,11 +491,12 @@ wlc_ampdu_agg(ampdu_info_t *ampdu, struct scb *scb, void *p, uint prec)
}
int BCMFASTPATH
-wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
+wlc_sendampdu(struct ampdu_info *ampdu, wlc_txq_info_t *qi,
+ struct sk_buff **pdu, int prec)
{
- wlc_info_t *wlc;
- osl_t *osh;
- void *p, *pkt[AMPDU_MAX_MPDU];
+ struct wlc_info *wlc;
+ struct osl_info *osh;
+ struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
u8 tid, ndelim;
int err = 0;
u8 preamble_type = WLC_GF_PREAMBLE;
@@ -540,7 +532,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
ASSERT(p);
- tid = (u8) PKTPRIO(p);
+ tid = (u8) (p->priority);
ASSERT(tid < AMPDU_MAX_SCB_TID);
f = ampdu->fifo_tb + prio2fifo[tid];
@@ -561,7 +553,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
wlc_ampdu_agg(ampdu, scb, p, tid);
if (wlc->block_datafifo) {
- WL_ERROR(("%s: Fifo blocked\n", __func__));
+ WL_ERROR("%s: Fifo blocked\n", __func__);
return BCME_BUSY;
}
rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
@@ -576,7 +568,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
err = wlc_prep_pdu(wlc, p, &fifo);
} else {
- WL_ERROR(("%s: AMPDU flag is off!\n", __func__));
+ WL_ERROR("%s: AMPDU flag is off!\n", __func__);
*pdu = NULL;
err = 0;
break;
@@ -584,14 +576,16 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
if (err) {
if (err == BCME_BUSY) {
- WL_ERROR(("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n", wlc->pub->unit, seq));
+ WL_ERROR("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n",
+ wlc->pub->unit, seq);
WLCNTINCR(ampdu->cnt->sduretry);
*pdu = p;
break;
}
/* error in the packet; reject it */
- WL_AMPDU_ERR(("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n", wlc->pub->unit, seq));
+ WL_AMPDU_ERR("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n",
+ wlc->pub->unit, seq);
WLCNTINCR(ampdu->cnt->sdurejected);
*pdu = NULL;
@@ -600,7 +594,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
/* pkt is good to be aggregated */
ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
- txh = (d11txh_t *) PKTDATA(p);
+ txh = (d11txh_t *) p->data;
plcp = (u8 *) (txh + 1);
h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
seq = ltoh16(h->seq) >> SEQNUM_SHIFT;
@@ -633,8 +627,8 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
seg_cnt += 1;
- WL_AMPDU_TX(("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
- wlc->pub->unit, count, len));
+ WL_AMPDU_TX("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
+ wlc->pub->unit, count, len);
/*
* aggregateable mpdu. For ucode/hw agg,
@@ -665,7 +659,8 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
dma_len += (u16) pkttotlen(osh, p);
- WL_AMPDU_TX(("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n", wlc->pub->unit, ampdu_len, seg_cnt, ndelim));
+ WL_AMPDU_TX("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n",
+ wlc->pub->unit, ampdu_len, seg_cnt, ndelim);
txh->MacTxControlLow = htol16(mcl);
@@ -695,8 +690,8 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
min(scb_ampdu->max_rxlen,
ampdu->max_txlen[mcs][is40][sgi]);
- WL_NONE(("sendampdu: sgi %d, is40 %d, mcs %d\n", sgi,
- is40, mcs));
+ WL_NONE("sendampdu: sgi %d, is40 %d, mcs %d\n",
+ sgi, is40, mcs);
maxlen = 64 * 1024; /* XXX Fix me to honor real max_rxlen */
@@ -739,13 +734,14 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
/* test whether to add more */
if ((MCS_RATE(mcs, true, false) >= f->dmaxferrate) &&
(count == f->mcs2ampdu_table[mcs])) {
- WL_AMPDU_ERR(("wl%d: PR 37644: stopping ampdu at %d for mcs %d", wlc->pub->unit, count, mcs));
+ WL_AMPDU_ERR("wl%d: PR 37644: stopping ampdu at %d for mcs %d\n",
+ wlc->pub->unit, count, mcs);
break;
}
if (count == scb_ampdu->max_pdu) {
- WL_NONE(("Stop taking from q, reached %d deep\n",
- scb_ampdu->max_pdu));
+ WL_NONE("Stop taking from q, reached %d deep\n",
+ scb_ampdu->max_pdu);
break;
}
@@ -755,7 +751,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- ((u8) PKTPRIO(p) == tid)) {
+ ((u8) (p->priority) == tid)) {
plen =
pkttotlen(osh, p) + AMPDU_MAX_MPDU_OVERHEAD;
@@ -763,15 +759,16 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
if ((plen + ampdu_len) > maxlen) {
p = NULL;
- WL_ERROR(("%s: Bogus plen #1\n",
- __func__));
+ WL_ERROR("%s: Bogus plen #1\n",
+ __func__);
ASSERT(3 == 4);
continue;
}
/* check if there are enough descriptors available */
if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
- WL_ERROR(("%s: No fifo space !!!!!!\n", __func__));
+ WL_ERROR("%s: No fifo space !!!!!!\n",
+ __func__);
p = NULL;
continue;
}
@@ -789,7 +786,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
WLCNTADD(ampdu->cnt->txmpdu, count);
/* patch up the last txh */
- txh = (d11txh_t *) PKTDATA(pkt[count - 1]);
+ txh = (d11txh_t *) pkt[count - 1]->data;
mcl = ltoh16(txh->MacTxControlLow);
mcl &= ~TXC_AMPDU_MASK;
mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
@@ -807,7 +804,7 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
ampdu_len -= roundup(len, 4) - len;
/* patch up the first txh & plcp */
- txh = (d11txh_t *) PKTDATA(pkt[0]);
+ txh = (d11txh_t *) pkt[0]->data;
plcp = (u8 *) (txh + 1);
WLC_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
@@ -878,27 +875,18 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
WLC_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
}
- WL_AMPDU_TX(("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
- wlc->pub->unit, count, ampdu_len));
+ WL_AMPDU_TX("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
+ wlc->pub->unit, count, ampdu_len);
/* inform rate_sel if it this is a rate probe pkt */
frameid = ltoh16(txh->TxFrameID);
if (frameid & TXFID_RATE_PROBE_MASK) {
- WL_ERROR(("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n", __func__));
+ WL_ERROR("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n",
+ __func__);
}
-#ifdef WLC_HIGH_ONLY
- if (wlc->rpc_agg & BCM_RPC_TP_HOST_AGG_AMPDU)
- bcm_rpc_tp_agg_set(bcm_rpc_tp_get(wlc->rpc),
- BCM_RPC_TP_HOST_AGG_AMPDU, true);
-#endif
for (i = 0; i < count; i++)
wlc_txfifo(wlc, fifo, pkt[i], i == (count - 1),
ampdu->txpkt_weight);
-#ifdef WLC_HIGH_ONLY
- if (wlc->rpc_agg & BCM_RPC_TP_HOST_AGG_AMPDU)
- bcm_rpc_tp_agg_set(bcm_rpc_tp_get(wlc->rpc),
- BCM_RPC_TP_HOST_AGG_AMPDU, false);
-#endif
}
/* endif (count) */
@@ -906,11 +894,11 @@ wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
}
void BCMFASTPATH
-wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
- tx_status_t *txs)
+wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, tx_status_t *txs)
{
scb_ampdu_t *scb_ampdu;
- wlc_info_t *wlc = ampdu->wlc;
+ struct wlc_info *wlc = ampdu->wlc;
scb_ampdu_tid_ini_t *ini;
u32 s1 = 0, s2 = 0;
struct ieee80211_tx_info *tx_info;
@@ -922,7 +910,7 @@ wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
ASSERT(txs->status & TX_STATUS_AMPDU);
scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
ASSERT(scb_ampdu);
- ini = SCB_AMPDU_INI(scb_ampdu, PKTPRIO(p));
+ ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
ASSERT(ini->scb == scb);
/* BMAC_NOTE: For the split driver, second level txstatus comes later
@@ -930,7 +918,6 @@ wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
* call the first one
*/
if (txs->status & TX_STATUS_ACK_RCV) {
-#ifdef WLC_LOW
u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */
@@ -948,54 +935,14 @@ wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
ASSERT(s1 & TX_STATUS_AMPDU);
s2 = R_REG(wlc->osh, &wlc->regs->frmtxstatus2);
-#else /* WLC_LOW */
-
- /* Store the relevant information in ampdu structure */
- WL_AMPDU_TX(("wl%d: wlc_ampdu_dotxstatus: High Recvd\n",
- wlc->pub->unit));
-
- ASSERT(!ampdu->p);
- ampdu->p = p;
- bcopy(txs, &ampdu->txs, sizeof(tx_status_t));
- ampdu->waiting_status = true;
- return;
-#endif /* WLC_LOW */
}
wlc_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
wlc_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
}
-#ifdef WLC_HIGH_ONLY
-void wlc_ampdu_txstatus_complete(ampdu_info_t *ampdu, u32 s1, u32 s2)
-{
- WL_AMPDU_TX(("wl%d: wlc_ampdu_txstatus_complete: High Recvd 0x%x 0x%x p:%p\n", ampdu->wlc->pub->unit, s1, s2, ampdu->p));
-
- ASSERT(ampdu->waiting_status);
-
- /* The packet may have been freed if the SCB went away, if so, then still free the
- * DMA chain
- */
- if (ampdu->p) {
- struct ieee80211_tx_info *tx_info;
- struct scb *scb;
-
- tx_info = IEEE80211_SKB_CB(ampdu->p);
- scb = (struct scb *)tx_info->control.sta->drv_priv;
-
- wlc_ampdu_dotxstatus_complete(ampdu, scb, ampdu->p, &ampdu->txs,
- s1, s2);
- ampdu->p = NULL;
- }
-
- ampdu->waiting_status = false;
-}
-#endif /* WLC_HIGH_ONLY */
-void rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
- tx_status_t *txs, u8 mcs);
-
void
-rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
+rate_status(struct wlc_info *wlc, struct ieee80211_tx_info *tx_info,
tx_status_t *txs, u8 mcs)
{
struct ieee80211_tx_rate *txrate = tx_info->status.rates;
@@ -1008,17 +955,15 @@ rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
}
}
-extern void wlc_txq_enq(wlc_info_t *wlc, struct scb *scb, void *sdu,
- uint prec);
-
#define SHORTNAME "AMPDU status"
static void BCMFASTPATH
-wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
- tx_status_t *txs, u32 s1, u32 s2)
+wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, tx_status_t *txs,
+ u32 s1, u32 s2)
{
scb_ampdu_t *scb_ampdu;
- wlc_info_t *wlc = ampdu->wlc;
+ struct wlc_info *wlc = ampdu->wlc;
scb_ampdu_tid_ini_t *ini;
u8 bitmap[8], queue, tid;
d11txh_t *txh;
@@ -1037,7 +982,7 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
#ifdef BCMDBG
u8 hole[AMPDU_MAX_MPDU];
- bzero(hole, sizeof(hole));
+ memset(hole, 0, sizeof(hole));
#endif
ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
@@ -1046,7 +991,7 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
ASSERT(scb_ampdu);
- tid = (u8) PKTPRIO(p);
+ tid = (u8) (p->priority);
ini = SCB_AMPDU_INI(scb_ampdu, tid);
retry_limit = ampdu->retry_limit_tid[tid];
@@ -1054,7 +999,7 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
ASSERT(ini->scb == scb);
- bzero(bitmap, sizeof(bitmap));
+ memset(bitmap, 0, sizeof(bitmap));
queue = txs->frameid & TXFID_QUEUE_MASK;
ASSERT(queue < AC_COUNT);
@@ -1091,13 +1036,16 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
if (supr_status) {
update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
- WL_ERROR(("%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)));
+ WL_ERROR("%s: Pkt tx suppressed, illegal channel possibly %d\n",
+ __func__,
+ CHSPEC_CHANNEL(wlc->default_bss->chanspec));
} else {
if (supr_status == TX_STATUS_SUPR_FRAG)
- WL_NONE(("%s: AMPDU frag err\n",
- __func__));
+ WL_NONE("%s: AMPDU frag err\n",
+ __func__);
else
- WL_ERROR(("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n", __func__, supr_status));
+ WL_ERROR("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n",
+ __func__, supr_status);
}
/* no need to retry for badch; will fail again */
if (supr_status == TX_STATUS_SUPR_BADCH ||
@@ -1116,22 +1064,18 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
if (wlc_ffpld_check_txfunfl(wlc, prio2fifo[tid])
> 0) {
tx_error = true;
-#ifdef WLC_HIGH_ONLY
- /* With BMAC, TX Underflows should not happen */
- WL_ERROR(("wl%d: BMAC TX Underflow?",
- wlc->pub->unit));
-#endif
}
}
} else if (txs->phyerr) {
update_rate = false;
WLCNTINCR(wlc->pub->_cnt->txphyerr);
- WL_ERROR(("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n", wlc->pub->unit, txs->phyerr));
+ WL_ERROR("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n",
+ wlc->pub->unit, txs->phyerr);
#ifdef BCMDBG
if (WL_ERROR_ON()) {
prpkt("txpkt (AMPDU)", wlc->osh, p);
- wlc_print_txdesc((d11txh_t *) PKTDATA(p));
+ wlc_print_txdesc((d11txh_t *) p->data);
wlc_print_txstatus(txs);
}
#endif /* BCMDBG */
@@ -1142,7 +1086,7 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
while (p) {
tx_info = IEEE80211_SKB_CB(p);
ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
- txh = (d11txh_t *) PKTDATA(p);
+ txh = (d11txh_t *) p->data;
mcl = ltoh16(txh->MacTxControlLow);
plcp = (u8 *) (txh + 1);
h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
@@ -1158,10 +1102,9 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
if (ba_recd) {
bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
- WL_AMPDU_TX(("%s: tid %d seq is %d, start_seq is %d, "
- "bindex is %d set %d, index %d\n",
- __func__, tid, seq, start_seq, bindex,
- isset(bitmap, bindex), index));
+ WL_AMPDU_TX("%s: tid %d seq is %d, start_seq is %d, bindex is %d set %d, index %d\n",
+ __func__, tid, seq, start_seq, bindex,
+ isset(bitmap, bindex), index);
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
@@ -1186,8 +1129,8 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
status & TX_STATUS_FRM_RTX_MASK) >>
TX_STATUS_FRM_RTX_SHIFT;
- PKTPULL(p, D11_PHY_HDR_LEN);
- PKTPULL(p, D11_TXH_LEN);
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
@@ -1212,9 +1155,10 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
ieee80211_tx_info_clear_status(tx_info);
tx_info->flags |=
IEEE80211_TX_STAT_AMPDU_NO_BACK;
- PKTPULL(p, D11_PHY_HDR_LEN);
- PKTPULL(p, D11_TXH_LEN);
- WL_ERROR(("%s: BA Timeout, seq %d, in_transit %d\n", SHORTNAME, seq, ini->tx_in_transit));
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
+ WL_ERROR("%s: BA Timeout, seq %d, in_transit %d\n",
+ SHORTNAME, seq, ini->tx_in_transit);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
}
@@ -1242,7 +1186,7 @@ wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
}
static void
-ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
+ampdu_cleanup_tid_ini(struct ampdu_info *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
bool force)
{
scb_ampdu_tid_ini_t *ini;
@@ -1250,8 +1194,8 @@ ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
if (!ini)
return;
- WL_AMPDU_CTL(("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
- ampdu->wlc->pub->unit, tid));
+ WL_AMPDU_CTL("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
+ ampdu->wlc->pub->unit, tid);
if (ini->tx_in_transit && !force)
return;
@@ -1264,7 +1208,7 @@ ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
}
/* initialize the initiator code for tid */
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
+static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
scb_ampdu_t *scb_ampdu,
u8 tid, bool override)
{
@@ -1277,7 +1221,7 @@ static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
/* check for per-tid control of ampdu */
if (!ampdu->ini_enable[tid]) {
- WL_ERROR(("%s: Rejecting tid %d\n", __func__, tid));
+ WL_ERROR("%s: Rejecting tid %d\n", __func__, tid);
return NULL;
}
@@ -1290,21 +1234,21 @@ static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
return ini;
}
-int wlc_ampdu_set(ampdu_info_t *ampdu, bool on)
+int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
{
- wlc_info_t *wlc = ampdu->wlc;
+ struct wlc_info *wlc = ampdu->wlc;
wlc->pub->_ampdu = false;
if (on) {
if (!N_ENAB(wlc->pub)) {
- WL_AMPDU_ERR(("wl%d: driver not nmode enabled\n",
- wlc->pub->unit));
+ WL_AMPDU_ERR("wl%d: driver not nmode enabled\n",
+ wlc->pub->unit);
return BCME_UNSUPPORTED;
}
if (!wlc_ampdu_cap(ampdu)) {
- WL_AMPDU_ERR(("wl%d: device not ampdu capable\n",
- wlc->pub->unit));
+ WL_AMPDU_ERR("wl%d: device not ampdu capable\n",
+ wlc->pub->unit);
return BCME_UNSUPPORTED;
}
wlc->pub->_ampdu = on;
@@ -1313,7 +1257,7 @@ int wlc_ampdu_set(ampdu_info_t *ampdu, bool on)
return 0;
}
-bool wlc_ampdu_cap(ampdu_info_t *ampdu)
+bool wlc_ampdu_cap(struct ampdu_info *ampdu)
{
if (WLC_PHY_11N_CAP(ampdu->wlc->band))
return true;
@@ -1321,7 +1265,7 @@ bool wlc_ampdu_cap(ampdu_info_t *ampdu)
return false;
}
-static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur)
+static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
u32 rate, mcs;
@@ -1343,7 +1287,7 @@ static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur)
}
u8 BCMFASTPATH
-wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
+wlc_ampdu_null_delim_cnt(struct ampdu_info *ampdu, struct scb *scb,
ratespec_t rspec, int phylen)
{
scb_ampdu_t *scb_ampdu;
@@ -1379,25 +1323,25 @@ wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
return 0;
}
-void wlc_ampdu_macaddr_upd(wlc_info_t *wlc)
+void wlc_ampdu_macaddr_upd(struct wlc_info *wlc)
{
char template[T_RAM_ACCESS_SZ * 2];
/* driver needs to write the ta in the template; ta is at offset 16 */
- bzero(template, sizeof(template));
- bcopy((char *)wlc->pub->cur_etheraddr.octet, template, ETHER_ADDR_LEN);
+ memset(template, 0, sizeof(template));
+ bcopy((char *)wlc->pub->cur_etheraddr.octet, template, ETH_ALEN);
wlc_write_template_ram(wlc, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2),
template);
}
-bool wlc_aggregatable(wlc_info_t *wlc, u8 tid)
+bool wlc_aggregatable(struct wlc_info *wlc, u8 tid)
{
return wlc->ampdu->ini_enable[tid];
}
-void wlc_ampdu_shm_upd(ampdu_info_t *ampdu)
+void wlc_ampdu_shm_upd(struct ampdu_info *ampdu)
{
- wlc_info_t *wlc = ampdu->wlc;
+ struct wlc_info *wlc = ampdu->wlc;
/* Extend ucode internal watchdog timer to match larger received frames */
if ((ampdu->rx_factor & HT_PARAMS_RX_FACTOR_MASK) ==
diff --git a/drivers/staging/brcm80211/sys/wlc_ampdu.h b/drivers/staging/brcm80211/sys/wlc_ampdu.h
index c721b16cc706..03457f63f2ab 100644
--- a/drivers/staging/brcm80211/sys/wlc_ampdu.h
+++ b/drivers/staging/brcm80211/sys/wlc_ampdu.h
@@ -17,24 +17,20 @@
#ifndef _wlc_ampdu_h_
#define _wlc_ampdu_h_
-extern ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc);
-extern void wlc_ampdu_detach(ampdu_info_t *ampdu);
-extern bool wlc_ampdu_cap(ampdu_info_t *ampdu);
-extern int wlc_ampdu_set(ampdu_info_t *ampdu, bool on);
-extern int wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **aggp,
- int prec);
-extern void wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
- tx_status_t *txs);
-extern void wlc_ampdu_reset(ampdu_info_t *ampdu);
-extern void wlc_ampdu_macaddr_upd(wlc_info_t *wlc);
-extern void wlc_ampdu_shm_upd(ampdu_info_t *ampdu);
+extern struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc);
+extern void wlc_ampdu_detach(struct ampdu_info *ampdu);
+extern bool wlc_ampdu_cap(struct ampdu_info *ampdu);
+extern int wlc_ampdu_set(struct ampdu_info *ampdu, bool on);
+extern int wlc_sendampdu(struct ampdu_info *ampdu, wlc_txq_info_t *qi,
+ struct sk_buff **aggp, int prec);
+extern void wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, tx_status_t *txs);
+extern void wlc_ampdu_reset(struct ampdu_info *ampdu);
+extern void wlc_ampdu_macaddr_upd(struct wlc_info *wlc);
+extern void wlc_ampdu_shm_upd(struct ampdu_info *ampdu);
-extern u8 wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
+extern u8 wlc_ampdu_null_delim_cnt(struct ampdu_info *ampdu, struct scb *scb,
ratespec_t rspec, int phylen);
-extern void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb);
-#ifdef WLC_HIGH_ONLY
-extern void wlc_ampdu_txstatus_complete(ampdu_info_t *ampdu, u32 s1,
- u32 s2);
-#endif
+extern void scb_ampdu_cleanup(struct ampdu_info *ampdu, struct scb *scb);
#endif /* _wlc_ampdu_h_ */
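
Illustration (not part of the patch): the wlc_ampdu changes above map the OSL packet macros onto native struct sk_buff accessors: PKTPRIO(p) becomes p->priority, PKTDATA(p) becomes p->data, and PKTPULL(p, n) becomes skb_pull(p, n). A minimal sketch of that mapping follows; the helper name ampdu_pull_hdrs and its length parameters are invented here purely for illustration.

	#include <linux/types.h>
	#include <linux/skbuff.h>

	/* Sketch only: strip the PHY and TX headers from an A-MPDU frame and
	 * report its TID, using the sk_buff fields the patch switches to.
	 */
	static u8 *ampdu_pull_hdrs(struct sk_buff *p, unsigned int phy_hdr_len,
				   unsigned int txh_len, u8 *tid)
	{
		*tid = (u8) p->priority;	/* was (u8) PKTPRIO(p) */
		skb_pull(p, phy_hdr_len);	/* was PKTPULL(p, D11_PHY_HDR_LEN) */
		skb_pull(p, txh_len);		/* was PKTPULL(p, D11_TXH_LEN) */
		return p->data;			/* was PKTDATA(p) */
	}
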
diff --git a/drivers/staging/brcm80211/sys/wlc_antsel.c b/drivers/staging/brcm80211/sys/wlc_antsel.c
index 5ff8831d2fa8..402ddf8f3371 100644
--- a/drivers/staging/brcm80211/sys/wlc_antsel.c
+++ b/drivers/staging/brcm80211/sys/wlc_antsel.c
@@ -19,18 +19,23 @@
#ifdef WLANTSEL
#include <linux/kernel.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <wlioctl.h>
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <d11.h>
#include <wlc_rate.h>
#include <wlc_key.h>
#include <wlc_pub.h>
#include <wl_dbg.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_bmac.h>
#include <wlc_phy_hal.h>
@@ -58,10 +63,11 @@
#define ANT_SELCFG_DEF_2x4 0x02 /* default antenna configuration */
/* static functions */
-static int wlc_antsel_cfgupd(antsel_info_t *asi, wlc_antselcfg_t *antsel);
-static u8 wlc_antsel_id2antcfg(antsel_info_t *asi, u8 id);
-static u16 wlc_antsel_antcfg2antsel(antsel_info_t *asi, u8 ant_cfg);
-static void wlc_antsel_init_cfg(antsel_info_t *asi, wlc_antselcfg_t *antsel,
+static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel);
+static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id);
+static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg);
+static void wlc_antsel_init_cfg(struct antsel_info *asi,
+ wlc_antselcfg_t *antsel,
bool auto_sel);
const u16 mimo_2x4_div_antselpat_tbl[] = {
@@ -88,14 +94,15 @@ const u8 mimo_2x3_div_antselid_tbl[16] = {
0, 0, 0, 0, 0, 0, 0, 0 /* pat to antselid */
};
-antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
- wlc_pub_t *pub,
- wlc_hw_info_t *wlc_hw) {
- antsel_info_t *asi;
+struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc,
+ struct osl_info *osh,
+ struct wlc_pub *pub,
+ struct wlc_hw_info *wlc_hw) {
+ struct antsel_info *asi;
- asi = kzalloc(sizeof(antsel_info_t), GFP_ATOMIC);
+ asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
if (!asi) {
- WL_ERROR(("wl%d: wlc_antsel_attach: out of mem\n", pub->unit));
+ WL_ERROR("wl%d: wlc_antsel_attach: out of mem\n", pub->unit);
return NULL;
}
@@ -124,7 +131,7 @@ antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
- WL_ERROR(("wlc_antsel_attach: 2o3 board cfg invalid\n"));
+ WL_ERROR("wlc_antsel_attach: 2o3 board cfg invalid\n");
ASSERT(0);
}
break;
@@ -152,7 +159,7 @@ antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
return asi;
}
-void wlc_antsel_detach(antsel_info_t *asi)
+void wlc_antsel_detach(struct antsel_info *asi)
{
if (!asi)
return;
@@ -160,7 +167,7 @@ void wlc_antsel_detach(antsel_info_t *asi)
kfree(asi);
}
-void wlc_antsel_init(antsel_info_t *asi)
+void wlc_antsel_init(struct antsel_info *asi)
{
if ((asi->antsel_type == ANTSEL_2x3) ||
(asi->antsel_type == ANTSEL_2x4))
@@ -169,7 +176,7 @@ void wlc_antsel_init(antsel_info_t *asi)
/* boardlevel antenna selection: init antenna selection structure */
static void
-wlc_antsel_init_cfg(antsel_info_t *asi, wlc_antselcfg_t *antsel,
+wlc_antsel_init_cfg(struct antsel_info *asi, wlc_antselcfg_t *antsel,
bool auto_sel)
{
if (asi->antsel_type == ANTSEL_2x3) {
@@ -200,7 +207,7 @@ wlc_antsel_init_cfg(antsel_info_t *asi, wlc_antselcfg_t *antsel,
}
void BCMFASTPATH
-wlc_antsel_antcfg_get(antsel_info_t *asi, bool usedef, bool sel,
+wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
u8 antselid, u8 fbantselid, u8 *antcfg,
u8 *fbantcfg)
{
@@ -232,7 +239,7 @@ wlc_antsel_antcfg_get(antsel_info_t *asi, bool usedef, bool sel,
}
/* boardlevel antenna selection: convert mimo_antsel (ucode interface) to id */
-u8 wlc_antsel_antsel2id(antsel_info_t *asi, u16 antsel)
+u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
{
u8 antselid = 0;
@@ -251,7 +258,7 @@ u8 wlc_antsel_antsel2id(antsel_info_t *asi, u16 antsel)
}
/* boardlevel antenna selection: convert id to ant_cfg */
-static u8 wlc_antsel_id2antcfg(antsel_info_t *asi, u8 id)
+static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id)
{
u8 antcfg = ANT_SELCFG_DEF_2x2;
@@ -270,7 +277,7 @@ static u8 wlc_antsel_id2antcfg(antsel_info_t *asi, u8 id)
}
/* boardlevel antenna selection: convert ant_cfg to mimo_antsel (ucode interface) */
-static u16 wlc_antsel_antcfg2antsel(antsel_info_t *asi, u8 ant_cfg)
+static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
{
u8 idx = WLC_ANTIDX_11N(WLC_ANTSEL_11N(ant_cfg));
u16 mimo_antsel = 0;
@@ -290,9 +297,9 @@ static u16 wlc_antsel_antcfg2antsel(antsel_info_t *asi, u8 ant_cfg)
}
/* boardlevel antenna selection: ucode interface control */
-static int wlc_antsel_cfgupd(antsel_info_t *asi, wlc_antselcfg_t *antsel)
+static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel)
{
- wlc_info_t *wlc = asi->wlc;
+ struct wlc_info *wlc = asi->wlc;
u8 ant_cfg;
u16 mimo_antsel;
diff --git a/drivers/staging/brcm80211/sys/wlc_antsel.h b/drivers/staging/brcm80211/sys/wlc_antsel.h
index 1d048bbea946..8875b5848665 100644
--- a/drivers/staging/brcm80211/sys/wlc_antsel.h
+++ b/drivers/staging/brcm80211/sys/wlc_antsel.h
@@ -16,13 +16,15 @@
#ifndef _wlc_antsel_h_
#define _wlc_antsel_h_
-extern antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
- wlc_pub_t *pub,
- wlc_hw_info_t *wlc_hw);
-extern void wlc_antsel_detach(antsel_info_t *asi);
-extern void wlc_antsel_init(antsel_info_t *asi);
-extern void wlc_antsel_antcfg_get(antsel_info_t *asi, bool usedef, bool sel,
+extern struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc,
+ struct osl_info *osh,
+ struct wlc_pub *pub,
+ struct wlc_hw_info *wlc_hw);
+extern void wlc_antsel_detach(struct antsel_info *asi);
+extern void wlc_antsel_init(struct antsel_info *asi);
+extern void wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
+ bool sel,
u8 id, u8 fbid, u8 *antcfg,
u8 *fbantcfg);
-extern u8 wlc_antsel_antsel2id(antsel_info_t *asi, u16 antsel);
+extern u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
#endif /* _wlc_antsel_h_ */
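
Illustration (not part of the patch): throughout these files the double-parenthesis debug calls, e.g. WL_ERROR(("fmt", args)), are reduced to ordinary printf-style calls. The driver's wl_dbg.h is not shown in this diff; a variadic macro along the following lines is one plausible way the single-parenthesis form can be implemented. The name WL_ERROR_SKETCH and the message prefix are assumptions for this sketch only.

	#include <linux/kernel.h>

	/* Sketch only: a printf-style error macro that makes the extra set of
	 * parentheses unnecessary at the call site.
	 */
	#define WL_ERROR_SKETCH(fmt, ...) \
		printk(KERN_ERR "brcm80211: " fmt, ##__VA_ARGS__)

	/* Example call, matching the converted sites above:
	 *	WL_ERROR_SKETCH("wl%d: device not ampdu capable\n", unit);
	 */
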
diff --git a/drivers/staging/brcm80211/sys/wlc_bmac.c b/drivers/staging/brcm80211/sys/wlc_bmac.c
index b70f9d099233..69f600affa46 100644
--- a/drivers/staging/brcm80211/sys/wlc_bmac.c
+++ b/drivers/staging/brcm80211/sys/wlc_bmac.c
@@ -14,13 +14,13 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef WLC_LOW
-#error "This file needs WLC_LOW"
-#endif
#include <linux/kernel.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <bcmdefs.h>
#include <osl.h>
#include <proto/802.11.h>
@@ -42,12 +42,14 @@
#include <wlc_channel.h>
#include <bcmsrom.h>
#include <wlc_key.h>
+#include <bcmdevs.h>
/* BMAC_NOTE: a WLC_HIGH compile include of wlc.h adds in more structures and type
* dependencies. Need to include these to files to allow a clean include of wlc.h
* with WLC_HIGH defined.
* At some point we may be able to skip the include of wlc.h and instead just
* define a stub wlc_info and band struct to allow rpc calls to get the rpc handle.
*/
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_bmac.h>
#include <wlc_phy_shim.h>
@@ -55,9 +57,6 @@
#include <wl_export.h>
#include "wl_ucode.h"
#include "d11ucode_ext.h"
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#endif
#include <bcmotp.h>
/* BMAC_NOTE: With WLC_HIGH defined, some fns in this file make calls to high level
@@ -69,6 +68,7 @@
#include <pcie_core.h>
#include <wlc_alloc.h>
+#include <wl_dbg.h>
#define TIMER_INTERVAL_WATCHDOG_BMAC 1000 /* watchdog timer, in unit of ms */
@@ -113,64 +113,65 @@ static u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5}, /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
};
-static void wlc_clkctl_clk(wlc_hw_info_t *wlc, uint mode);
-static void wlc_coreinit(wlc_info_t *wlc);
+static void wlc_clkctl_clk(struct wlc_hw_info *wlc, uint mode);
+static void wlc_coreinit(struct wlc_info *wlc);
/* used by wlc_wakeucode_init() */
-static void wlc_write_inits(wlc_hw_info_t *wlc_hw, const d11init_t *inits);
-static void wlc_ucode_write(wlc_hw_info_t *wlc_hw, const u32 ucode[],
+static void wlc_write_inits(struct wlc_hw_info *wlc_hw, const d11init_t *inits);
+static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
const uint nbytes);
-static void wlc_ucode_download(wlc_hw_info_t *wlc);
-static void wlc_ucode_txant_set(wlc_hw_info_t *wlc_hw);
+static void wlc_ucode_download(struct wlc_hw_info *wlc);
+static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw);
/* used by wlc_dpc() */
-static bool wlc_bmac_dotxstatus(wlc_hw_info_t *wlc, tx_status_t *txs,
+static bool wlc_bmac_dotxstatus(struct wlc_hw_info *wlc, tx_status_t *txs,
u32 s2);
-static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc);
-static bool wlc_bmac_txstatus(wlc_hw_info_t *wlc, bool bound, bool *fatal);
-static bool wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound);
+static bool wlc_bmac_txstatus_corerev4(struct wlc_hw_info *wlc);
+static bool wlc_bmac_txstatus(struct wlc_hw_info *wlc, bool bound, bool *fatal);
+static bool wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound);
/* used by wlc_down() */
-static void wlc_flushqueues(wlc_info_t *wlc);
+static void wlc_flushqueues(struct wlc_info *wlc);
-static void wlc_write_mhf(wlc_hw_info_t *wlc_hw, u16 *mhfs);
-static void wlc_mctrl_reset(wlc_hw_info_t *wlc_hw);
-static void wlc_corerev_fifofixup(wlc_hw_info_t *wlc_hw);
+static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs);
+static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw);
+static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw);
/* Low Level Prototypes */
-static u16 wlc_bmac_read_objmem(wlc_hw_info_t *wlc_hw, uint offset,
+static u16 wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset,
u32 sel);
-static void wlc_bmac_write_objmem(wlc_hw_info_t *wlc_hw, uint offset, u16 v,
- u32 sel);
-static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme);
-static void wlc_bmac_detach_dmapio(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_bsinit(wlc_hw_info_t *wlc_hw);
-static bool wlc_validboardtype(wlc_hw_info_t *wlc);
-static bool wlc_isgoodchip(wlc_hw_info_t *wlc_hw);
-static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw);
-static void wlc_mhfdef(wlc_info_t *wlc, u16 *mhfs, u16 mhf2_init);
-static void wlc_mctrl_write(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_mute_override_set(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_mute_override_clear(wlc_hw_info_t *wlc_hw);
-static u32 wlc_wlintrsoff(wlc_info_t *wlc);
-static void wlc_wlintrsrestore(wlc_info_t *wlc, u32 macintmask);
-static void wlc_gpio_init(wlc_info_t *wlc);
-static void wlc_write_hw_bcntemplate0(wlc_hw_info_t *wlc_hw, void *bcn,
+static void wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset,
+ u16 v, u32 sel);
+static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme);
+static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_bsinit(struct wlc_hw_info *wlc_hw);
+static bool wlc_validboardtype(struct wlc_hw_info *wlc);
+static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw);
+static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw);
+static void wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init);
+static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw);
+static u32 wlc_wlintrsoff(struct wlc_info *wlc);
+static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask);
+static void wlc_gpio_init(struct wlc_info *wlc);
+static void wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn,
int len);
-static void wlc_write_hw_bcntemplate1(wlc_hw_info_t *wlc_hw, void *bcn,
+static void wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn,
int len);
-static void wlc_bmac_bsinit(wlc_info_t *wlc, chanspec_t chanspec);
-static u32 wlc_setband_inact(wlc_info_t *wlc, uint bandunit);
-static void wlc_bmac_setband(wlc_hw_info_t *wlc_hw, uint bandunit,
+static void wlc_bmac_bsinit(struct wlc_info *wlc, chanspec_t chanspec);
+static u32 wlc_setband_inact(struct wlc_info *wlc, uint bandunit);
+static void wlc_bmac_setband(struct wlc_hw_info *wlc_hw, uint bandunit,
chanspec_t chanspec);
-static void wlc_bmac_update_slot_timing(wlc_hw_info_t *wlc_hw, bool shortslot);
-static void wlc_upd_ofdm_pctl1_table(wlc_hw_info_t *wlc_hw);
-static u16 wlc_bmac_ofdm_ratetable_offset(wlc_hw_info_t *wlc_hw,
+static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+ bool shortslot);
+static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw);
+static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw,
u8 rate);
/* === Low Level functions === */
-void wlc_bmac_set_shortslot(wlc_hw_info_t *wlc_hw, bool shortslot)
+void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot)
{
wlc_hw->shortslot = shortslot;
@@ -186,9 +187,10 @@ void wlc_bmac_set_shortslot(wlc_hw_info_t *wlc_hw, bool shortslot)
* or shortslot 11g (9us slots)
* The PSM needs to be suspended for this call.
*/
-static void wlc_bmac_update_slot_timing(wlc_hw_info_t *wlc_hw, bool shortslot)
+static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+ bool shortslot)
{
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs;
osh = wlc_hw->osh;
@@ -205,7 +207,7 @@ static void wlc_bmac_update_slot_timing(wlc_hw_info_t *wlc_hw, bool shortslot)
}
}
-static void WLBANDINITFN(wlc_ucode_bsinit) (wlc_hw_info_t *wlc_hw)
+static void WLBANDINITFN(wlc_ucode_bsinit) (struct wlc_hw_info *wlc_hw)
{
/* init microcode host flags */
wlc_write_mhf(wlc_hw, wlc_hw->band->mhfs);
@@ -215,30 +217,32 @@ static void WLBANDINITFN(wlc_ucode_bsinit) (wlc_hw_info_t *wlc_hw)
if (WLCISNPHY(wlc_hw->band)) {
wlc_write_inits(wlc_hw, d11n0bsinitvals16);
} else {
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
} else {
if (D11REV_IS(wlc_hw->corerev, 24)) {
if (WLCISLCNPHY(wlc_hw->band)) {
wlc_write_inits(wlc_hw, d11lcn0bsinitvals24);
} else
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n", __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit,
+ wlc_hw->corerev);
} else {
- WL_ERROR(("%s: wl%d: unsupported corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
}
}
/* switch to new band but leave it inactive */
-static u32 WLBANDINITFN(wlc_setband_inact) (wlc_info_t *wlc, uint bandunit)
+static u32 WLBANDINITFN(wlc_setband_inact) (struct wlc_info *wlc, uint bandunit)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
u32 macintmask;
u32 tmp;
- WL_TRACE(("wl%d: wlc_setband_inact\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_setband_inact\n", wlc_hw->unit);
ASSERT(bandunit != wlc_hw->band->bandunit);
ASSERT(si_iscoreup(wlc_hw->sih));
@@ -269,24 +273,24 @@ static u32 WLBANDINITFN(wlc_setband_inact) (wlc_info_t *wlc, uint bandunit)
* Param 'bound' indicates max. # frames to process before break out.
*/
static bool BCMFASTPATH
-wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound)
+wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound)
{
- void *p;
- void *head = NULL;
- void *tail = NULL;
+ struct sk_buff *p;
+ struct sk_buff *head = NULL;
+ struct sk_buff *tail = NULL;
uint n = 0;
uint bound_limit = bound ? wlc_hw->wlc->pub->tunables->rxbnd : -1;
u32 tsf_h, tsf_l;
wlc_d11rxhdr_t *wlc_rxhdr = NULL;
- WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
/* gather received frames */
while ((p = dma_rx(wlc_hw->di[fifo]))) {
if (!tail)
head = tail = p;
else {
- PKTSETLINK(tail, p);
+ tail->prev = p;
tail = p;
}
@@ -303,11 +307,11 @@ wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound)
/* process each frame */
while ((p = head) != NULL) {
- head = PKTLINK(head);
- PKTSETLINK(p, NULL);
+ head = head->prev;
+ p->prev = NULL;
/* record the tsf_l in wlc_rxd11hdr */
- wlc_rxhdr = (wlc_d11rxhdr_t *) PKTDATA(p);
+ wlc_rxhdr = (wlc_d11rxhdr_t *) p->data;
wlc_rxhdr->tsf_l = htol32(tsf_l);
/* compute the RSSI from d11rxhdr and record it in wlc_rxd11hr */
@@ -323,15 +327,15 @@ wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound)
* Return true if another dpc needs to be re-scheduled. false otherwise.
* Param 'bounded' indicates if applicable loops should be bounded.
*/
-bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
+bool BCMFASTPATH wlc_dpc(struct wlc_info *wlc, bool bounded)
{
u32 macintstatus;
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
bool fatal = false;
if (DEVICEREMOVED(wlc)) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
wl_down(wlc->wl);
return false;
}
@@ -340,8 +344,8 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
macintstatus = wlc->macintstatus;
wlc->macintstatus = 0;
- WL_TRACE(("wl%d: wlc_dpc: macintstatus 0x%x\n", wlc_hw->unit,
- macintstatus));
+ WL_TRACE("wl%d: wlc_dpc: macintstatus 0x%x\n",
+ wlc_hw->unit, macintstatus);
if (macintstatus & MI_PRQ) {
/* Process probe request FIFO */
@@ -364,7 +368,7 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
if (wlc_bmac_txstatus(wlc->hw, bounded, &fatal))
wlc->macintstatus |= MI_TFS;
if (fatal) {
- WL_ERROR(("MI_TFS: fatal\n"));
+ WL_ERROR("MI_TFS: fatal\n");
goto fatal;
}
}
@@ -374,7 +378,7 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
- WL_TRACE(("wlc_isr: end of ATIM window\n"));
+ WL_TRACE("wlc_isr: end of ATIM window\n");
OR_REG(wlc_hw->osh, &regs->maccommand, wlc->qvalid);
wlc->qvalid = 0;
@@ -395,7 +399,7 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
/* TX FIFO suspend/flush completion */
if (macintstatus & MI_TXSTOP) {
if (wlc_bmac_tx_fifo_suspended(wlc_hw, TX_DATA_FIFO)) {
- /* WL_ERROR(("dpc: fifo_suspend_comlete\n")); */
+ /* WL_ERROR("dpc: fifo_suspend_comlete\n"); */
}
}
@@ -405,11 +409,12 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
}
if (macintstatus & MI_GP0) {
- WL_ERROR(("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now));
+ WL_ERROR("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n",
+ wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, CHIPID(wlc_hw->sih->chip),
- CHIPREV(wlc_hw->sih->chiprev));
+ __func__, wlc_hw->sih->chip,
+ wlc_hw->sih->chiprev);
WLCNTINCR(wlc->pub->_cnt->psmwds);
@@ -427,7 +432,8 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
u32 rfd = R_REG(wlc_hw->osh, &regs->phydebug) & PDBG_RFD;
#endif
- WL_ERROR(("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n", wlc_hw->unit, rfd));
+ WL_ERROR("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n",
+ wlc_hw->unit, rfd);
WLCNTINCR(wlc->pub->_cnt->rfdisable);
}
@@ -452,10 +458,10 @@ bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
/* common low-level watchdog code */
void wlc_bmac_watchdog(void *arg)
{
- wlc_info_t *wlc = (wlc_info_t *) arg;
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_info *wlc = (struct wlc_info *) arg;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
- WL_TRACE(("wl%d: wlc_bmac_watchdog\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_watchdog\n", wlc_hw->unit);
if (!wlc_hw->up)
return;
@@ -476,13 +482,13 @@ void wlc_bmac_watchdog(void *arg)
}
void
-wlc_bmac_set_chanspec(wlc_hw_info_t *wlc_hw, chanspec_t chanspec, bool mute,
- struct txpwr_limits *txpwr)
+wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
+ bool mute, struct txpwr_limits *txpwr)
{
uint bandunit;
- WL_TRACE(("wl%d: wlc_bmac_set_chanspec 0x%x\n", wlc_hw->unit,
- chanspec));
+ WL_TRACE("wl%d: wlc_bmac_set_chanspec 0x%x\n",
+ wlc_hw->unit, chanspec);
wlc_hw->chanspec = chanspec;
@@ -520,7 +526,8 @@ wlc_bmac_set_chanspec(wlc_hw_info_t *wlc_hw, chanspec_t chanspec, bool mute,
}
}
-int wlc_bmac_revinfo_get(wlc_hw_info_t *wlc_hw, wlc_bmac_revinfo_t *revinfo)
+int wlc_bmac_revinfo_get(struct wlc_hw_info *wlc_hw,
+ wlc_bmac_revinfo_t *revinfo)
{
si_t *sih = wlc_hw->sih;
uint idx;
@@ -558,20 +565,20 @@ int wlc_bmac_revinfo_get(wlc_hw_info_t *wlc_hw, wlc_bmac_revinfo_t *revinfo)
return 0;
}
-int wlc_bmac_state_get(wlc_hw_info_t *wlc_hw, wlc_bmac_state_t *state)
+int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw, wlc_bmac_state_t *state)
{
state->machwcap = wlc_hw->machwcap;
return 0;
}
-static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
+static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
{
uint i;
char name[8];
/* ucode host flag 2 needed for pio mode, independent of band and fifo */
u16 pio_mhf2 = 0;
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
uint unit = wlc_hw->unit;
wlc_tunables_t *tune = wlc->pub->tunables;
@@ -581,7 +588,7 @@ static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
if (wlc_hw->di[0] == 0) { /* Init FIFOs */
uint addrwidth;
int dma_attach_err = 0;
- osl_t *osh = wlc_hw->osh;
+ struct osl_info *osh = wlc_hw->osh;
/* Find out the DMA addressing capability and let OS know
* All the channels within one DMA core have 'common-minimum' same
@@ -589,10 +596,10 @@ static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
*/
addrwidth =
dma_addrwidth(wlc_hw->sih, DMAREG(wlc_hw, DMA_TX, 0));
- OSL_DMADDRWIDTH(osh, addrwidth);
if (!wl_alloc_dma_resources(wlc_hw->wlc->wl, addrwidth)) {
- WL_ERROR(("wl%d: wlc_attach: alloc_dma_resources failed\n", unit));
+ WL_ERROR("wl%d: wlc_attach: alloc_dma_resources failed\n",
+ unit);
return false;
}
@@ -665,8 +672,7 @@ static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
/* Cleaner to leave this as if with AP defined */
if (dma_attach_err) {
- WL_ERROR(("wl%d: wlc_attach: dma_attach failed\n",
- unit));
+ WL_ERROR("wl%d: wlc_attach: dma_attach failed\n", unit);
return false;
}
@@ -684,7 +690,7 @@ static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
return true;
}
-static void wlc_bmac_detach_dmapio(wlc_hw_info_t *wlc_hw)
+static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw)
{
uint j;
@@ -702,11 +708,11 @@ static void wlc_bmac_detach_dmapio(wlc_hw_info_t *wlc_hw)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
- bool piomode, osl_t *osh, void *regsva, uint bustype,
- void *btparam)
+int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
+ bool piomode, struct osl_info *osh, void *regsva,
+ uint bustype, void *btparam)
{
- wlc_hw_info_t *wlc_hw;
+ struct wlc_hw_info *wlc_hw;
d11regs_t *regs;
char *macaddr = NULL;
char *vars;
@@ -715,8 +721,8 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
bool wme = false;
shared_phy_params_t sha_params;
- WL_TRACE(("wl%d: wlc_bmac_attach: vendor 0x%x device 0x%x\n", unit,
- vendor, device));
+ WL_TRACE("wl%d: wlc_bmac_attach: vendor 0x%x device 0x%x\n",
+ unit, vendor, device);
ASSERT(sizeof(wlc_d11rxhdr_t) <= WL_HWRXOFF);
@@ -729,7 +735,7 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->band = wlc_hw->bandstate[0];
wlc_hw->_piomode = piomode;
- /* populate wlc_hw_info_t with default values */
+ /* populate struct wlc_hw_info with default values */
wlc_bmac_info_init(wlc_hw);
/*
@@ -740,7 +746,7 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->sih = si_attach((uint) device, osh, regsva, bustype, btparam,
&wlc_hw->vars, &wlc_hw->vars_size);
if (wlc_hw->sih == NULL) {
- WL_ERROR(("wl%d: wlc_bmac_attach: si_attach failed\n", unit));
+ WL_ERROR("wl%d: wlc_bmac_attach: si_attach failed\n", unit);
err = 11;
goto fail;
}
@@ -760,21 +766,22 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
var = getvar(vars, "vendid");
if (var) {
vendor = (u16) simple_strtoul(var, NULL, 0);
- WL_ERROR(("Overriding vendor id = 0x%x\n", vendor));
+ WL_ERROR("Overriding vendor id = 0x%x\n", vendor);
}
var = getvar(vars, "devid");
if (var) {
u16 devid = (u16) simple_strtoul(var, NULL, 0);
if (devid != 0xffff) {
device = devid;
- WL_ERROR(("Overriding device id = 0x%x\n",
- device));
+ WL_ERROR("Overriding device id = 0x%x\n",
+ device);
}
}
/* verify again the device is supported */
if (!wlc_chipmatch(vendor, device)) {
- WL_ERROR(("wl%d: wlc_bmac_attach: Unsupported vendor/device (0x%x/0x%x)\n", unit, vendor, device));
+ WL_ERROR("wl%d: wlc_bmac_attach: Unsupported vendor/device (0x%x/0x%x)\n",
+ unit, vendor, device);
err = 12;
goto fail;
}
@@ -809,7 +816,8 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_bmac_corereset(wlc_hw, WLC_USE_COREFLAGS);
if (!wlc_bmac_validate_chip_access(wlc_hw)) {
- WL_ERROR(("wl%d: wlc_bmac_attach: validate_chip_access failed\n", unit));
+ WL_ERROR("wl%d: wlc_bmac_attach: validate_chip_access failed\n",
+ unit);
err = 14;
goto fail;
}
@@ -821,7 +829,8 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
j = BOARDREV_PROMOTED;
wlc_hw->boardrev = (u16) j;
if (!wlc_validboardtype(wlc_hw)) {
- WL_ERROR(("wl%d: wlc_bmac_attach: Unsupported Broadcom board type (0x%x)" " or revision level (0x%x)\n", unit, wlc_hw->sih->boardtype, wlc_hw->boardrev));
+ WL_ERROR("wl%d: wlc_bmac_attach: Unsupported Broadcom board type (0x%x)" " or revision level (0x%x)\n",
+ unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
err = 15;
goto fail;
}
@@ -833,7 +842,7 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
|| (wlc_hw->boardflags & BFL_NOPLLDOWN))
wlc_bmac_pllreq(wlc_hw, true, WLC_PLLREQ_SHARED);
- if ((BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if ((wlc_hw->sih->bustype == PCI_BUS)
&& (si_pci_war16165(wlc_hw->sih)))
wlc->war16165 = true;
@@ -844,7 +853,7 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
} else
wlc_hw->_nbands = 1;
- if ((CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID))
+ if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when wlc_attach() unconditionally does the
@@ -863,8 +872,8 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
if (wlc_hw->physhim == NULL) {
- WL_ERROR(("wl%d: wlc_bmac_attach: wlc_phy_shim_attach failed\n",
- unit));
+ WL_ERROR("wl%d: wlc_bmac_attach: wlc_phy_shim_attach failed\n",
+ unit);
err = 25;
goto fail;
}
@@ -931,7 +940,8 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->band->pi = wlc_phy_attach(wlc_hw->phy_sh,
(void *)regs, wlc_hw->band->bandtype, vars);
if (wlc_hw->band->pi == NULL) {
- WL_ERROR(("wl%d: wlc_bmac_attach: wlc_phy_attach failed\n", unit));
+ WL_ERROR("wl%d: wlc_bmac_attach: wlc_phy_attach failed\n",
+ unit);
err = 17;
goto fail;
}
@@ -961,7 +971,9 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
goto bad_phy;
} else {
bad_phy:
- WL_ERROR(("wl%d: wlc_bmac_attach: unsupported phy type/rev (%d/%d)\n", unit, wlc_hw->band->phytype, wlc_hw->band->phyrev));
+ WL_ERROR("wl%d: wlc_bmac_attach: unsupported phy type/rev (%d/%d)\n",
+ unit,
+ wlc_hw->band->phytype, wlc_hw->band->phyrev);
err = 18;
goto fail;
}
@@ -993,7 +1005,7 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
wlc_coredisable(wlc_hw);
/* Match driver "down" state */
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_down(wlc_hw->sih);
/* register sb interrupt callback functions */
@@ -1016,27 +1028,27 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
/* init etheraddr state variables */
macaddr = wlc_get_macaddr(wlc_hw);
if (macaddr == NULL) {
- WL_ERROR(("wl%d: wlc_bmac_attach: macaddr not found\n", unit));
+ WL_ERROR("wl%d: wlc_bmac_attach: macaddr not found\n", unit);
err = 21;
goto fail;
}
bcm_ether_atoe(macaddr, &wlc_hw->etheraddr);
- if (ETHER_ISBCAST((char *)&wlc_hw->etheraddr) ||
- ETHER_ISNULLADDR((char *)&wlc_hw->etheraddr)) {
- WL_ERROR(("wl%d: wlc_bmac_attach: bad macaddr %s\n", unit,
- macaddr));
+ if (is_broadcast_ether_addr(wlc_hw->etheraddr.octet) ||
+ is_zero_ether_addr(wlc_hw->etheraddr.octet)) {
+ WL_ERROR("wl%d: wlc_bmac_attach: bad macaddr %s\n",
+ unit, macaddr);
err = 22;
goto fail;
}
- WL_ERROR(("%s:: deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- __func__, wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr));
+ WL_ERROR("%s:: deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ __func__, wlc_hw->deviceid, wlc_hw->_nbands,
+ wlc_hw->sih->boardtype, macaddr);
return err;
fail:
- WL_ERROR(("wl%d: wlc_bmac_attach: failed with err %d\n", unit, err));
+ WL_ERROR("wl%d: wlc_bmac_attach: failed with err %d\n", unit, err);
return err;
}
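
Illustration (not part of the patch): the hunk above replaces the Broadcom ETHER_ISBCAST/ETHER_ISNULLADDR macros with the kernel's <linux/etherdevice.h> helpers, which the patch also adds to the include list. A minimal sketch of the equivalent check follows; mac_is_usable is a hypothetical name used only here. The kernel also provides is_valid_ether_addr(), which additionally rejects multicast addresses.

	#include <linux/types.h>
	#include <linux/etherdevice.h>

	/* Sketch only: reject broadcast and all-zero MAC addresses the same way
	 * the converted code above does.
	 */
	static bool mac_is_usable(const u8 *addr)
	{
		return !is_broadcast_ether_addr(addr) && !is_zero_ether_addr(addr);
	}
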
@@ -1045,9 +1057,9 @@ int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
* may get overrides later in this function
* BMAC_NOTES, move low out and resolve the dangling ones
*/
-void wlc_bmac_info_init(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw)
{
- wlc_info_t *wlc = wlc_hw->wlc;
+ struct wlc_info *wlc = wlc_hw->wlc;
/* set default sw macintmask value */
wlc->defmacintmask = DEF_MACINTMASK;
@@ -1067,11 +1079,11 @@ void wlc_bmac_info_init(wlc_hw_info_t *wlc_hw)
/*
* low level detach
*/
-int wlc_bmac_detach(wlc_info_t *wlc)
+int wlc_bmac_detach(struct wlc_info *wlc)
{
uint i;
wlc_hwband_t *band;
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
int callbacks;
callbacks = 0;
@@ -1082,7 +1094,7 @@ int wlc_bmac_detach(wlc_info_t *wlc)
*/
si_deregister_intr_callback(wlc_hw->sih);
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_sleep(wlc_hw->sih);
}
@@ -1118,9 +1130,9 @@ int wlc_bmac_detach(wlc_info_t *wlc)
}
-void wlc_bmac_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_reset(struct wlc_hw_info *wlc_hw)
{
- WL_TRACE(("wl%d: wlc_bmac_reset\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_reset\n", wlc_hw->unit);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->reset);
@@ -1135,13 +1147,13 @@ void wlc_bmac_reset(wlc_hw_info_t *wlc_hw)
}
void
-wlc_bmac_init(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
bool mute) {
u32 macintmask;
bool fastclk;
- wlc_info_t *wlc = wlc_hw->wlc;
+ struct wlc_info *wlc = wlc_hw->wlc;
- WL_TRACE(("wl%d: wlc_bmac_init\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_init\n", wlc_hw->unit);
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
@@ -1186,11 +1198,11 @@ wlc_bmac_init(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}
-int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
{
uint coremask;
- WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
ASSERT(wlc_hw->wlc->pub->hw_up && wlc_hw->wlc->macintmask == 0);
@@ -1208,7 +1220,7 @@ int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw)
*/
coremask = (1 << wlc_hw->wlc->core->coreidx);
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_setup(wlc_hw->sih, coremask);
ASSERT(si_coreid(wlc_hw->sih) == D11_CORE_ID);
@@ -1219,13 +1231,13 @@ int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw)
*/
if (wlc_bmac_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_down(wlc_hw->sih);
wlc_bmac_xtal(wlc_hw, OFF);
return BCME_RADIOOFF;
}
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_up(wlc_hw->sih);
/* reset the d11 core */
@@ -1234,9 +1246,9 @@ int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw)
return 0;
}
-int wlc_bmac_up_finish(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw)
{
- WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
wlc_hw->up = true;
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
@@ -1248,12 +1260,12 @@ int wlc_bmac_up_finish(wlc_hw_info_t *wlc_hw)
return 0;
}
-int wlc_bmac_down_prep(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw)
{
bool dev_gone;
uint callbacks = 0;
- WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
if (!wlc_hw->up)
return callbacks;
@@ -1276,12 +1288,12 @@ int wlc_bmac_down_prep(wlc_hw_info_t *wlc_hw)
return callbacks;
}
-int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw)
{
uint callbacks = 0;
bool dev_gone;
- WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
if (!wlc_hw->up)
return callbacks;
@@ -1311,7 +1323,7 @@ int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw)
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+ if (wlc_hw->sih->bustype == PCI_BUS)
si_pci_down(wlc_hw->sih);
wlc_bmac_xtal(wlc_hw, OFF);
}
@@ -1320,7 +1332,7 @@ int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw)
return callbacks;
}
-void wlc_bmac_wait_for_wake(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw)
{
if (D11REV_IS(wlc_hw->corerev, 4)) /* no slowclock */
udelay(5);
@@ -1336,29 +1348,30 @@ void wlc_bmac_wait_for_wake(wlc_hw_info_t *wlc_hw)
ASSERT(wlc_bmac_read_shm(wlc_hw, M_UCODE_DBGST) != DBGST_ASLEEP);
}
-void wlc_bmac_hw_etheraddr(wlc_hw_info_t *wlc_hw, struct ether_addr *ea)
+void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw, struct ether_addr *ea)
{
- bcopy(&wlc_hw->etheraddr, ea, ETHER_ADDR_LEN);
+ bcopy(&wlc_hw->etheraddr, ea, ETH_ALEN);
}
-void wlc_bmac_set_hw_etheraddr(wlc_hw_info_t *wlc_hw, struct ether_addr *ea)
+void wlc_bmac_set_hw_etheraddr(struct wlc_hw_info *wlc_hw,
+ struct ether_addr *ea)
{
- bcopy(ea, &wlc_hw->etheraddr, ETHER_ADDR_LEN);
+ bcopy(ea, &wlc_hw->etheraddr, ETH_ALEN);
}
-int wlc_bmac_bandtype(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw)
{
return wlc_hw->band->bandtype;
}
-void *wlc_cur_phy(wlc_info_t *wlc)
+void *wlc_cur_phy(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
return (void *)wlc_hw->band->pi;
}
/* control chip clock to save power, enable dynamic clock or force fast clock */
-static void wlc_clkctl_clk(wlc_hw_info_t *wlc_hw, uint mode)
+static void wlc_clkctl_clk(struct wlc_hw_info *wlc_hw, uint mode)
{
if (PMUCTL_ENAB(wlc_hw->sih)) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock on backplane,
@@ -1455,11 +1468,11 @@ static void wlc_clkctl_clk(wlc_hw_info_t *wlc_hw, uint mode)
/* set initial host flags value */
static void
-wlc_mhfdef(wlc_info_t *wlc, u16 *mhfs, u16 mhf2_init)
+wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
- bzero(mhfs, sizeof(u16) * MHFMAX);
+ memset(mhfs, 0, MHFMAX * sizeof(u16));
mhfs[MHF2] |= mhf2_init;
@@ -1485,7 +1498,7 @@ wlc_mhfdef(wlc_info_t *wlc, u16 *mhfs, u16 mhf2_init)
* WLC_BAND_ALL <--- All bands
*/
void
-wlc_bmac_mhf(wlc_hw_info_t *wlc_hw, u8 idx, u16 mask, u16 val,
+wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
int bands)
{
u16 save;
@@ -1539,7 +1552,7 @@ wlc_bmac_mhf(wlc_hw_info_t *wlc_hw, u8 idx, u16 mask, u16 val,
}
}
-u16 wlc_bmac_mhf_get(wlc_hw_info_t *wlc_hw, u8 idx, int bands)
+u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands)
{
wlc_hwband_t *band;
ASSERT(idx < MHFMAX);
@@ -1565,7 +1578,7 @@ u16 wlc_bmac_mhf_get(wlc_hw_info_t *wlc_hw, u8 idx, int bands)
return band->mhfs[idx];
}
-static void wlc_write_mhf(wlc_hw_info_t *wlc_hw, u16 *mhfs)
+static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs)
{
u8 idx;
u16 addr[] = {
@@ -1583,7 +1596,7 @@ static void wlc_write_mhf(wlc_hw_info_t *wlc_hw, u16 *mhfs)
/* set the maccontrol register to desired reset state and
* initialize the sw cache of the register
*/
-static void wlc_mctrl_reset(wlc_hw_info_t *wlc_hw)
+static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw)
{
/* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
wlc_hw->maccontrol = 0;
@@ -1594,7 +1607,7 @@ static void wlc_mctrl_reset(wlc_hw_info_t *wlc_hw)
}
/* set or clear maccontrol bits */
-void wlc_bmac_mctrl(wlc_hw_info_t *wlc_hw, u32 mask, u32 val)
+void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val)
{
u32 maccontrol;
u32 new_maccontrol;
@@ -1616,7 +1629,7 @@ void wlc_bmac_mctrl(wlc_hw_info_t *wlc_hw, u32 mask, u32 val)
}
/* write the software state of maccontrol and overrides to the maccontrol register */
-static void wlc_mctrl_write(wlc_hw_info_t *wlc_hw)
+static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw)
{
u32 maccontrol = wlc_hw->maccontrol;
@@ -1633,7 +1646,7 @@ static void wlc_mctrl_write(wlc_hw_info_t *wlc_hw)
W_REG(wlc_hw->osh, &wlc_hw->regs->maccontrol, maccontrol);
}
-void wlc_ucode_wake_override_set(wlc_hw_info_t *wlc_hw, u32 override_bit)
+void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw, u32 override_bit)
{
ASSERT((wlc_hw->wake_override & override_bit) == 0);
@@ -1650,7 +1663,7 @@ void wlc_ucode_wake_override_set(wlc_hw_info_t *wlc_hw, u32 override_bit)
return;
}
-void wlc_ucode_wake_override_clear(wlc_hw_info_t *wlc_hw, u32 override_bit)
+void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw, u32 override_bit)
{
ASSERT(wlc_hw->wake_override & override_bit);
@@ -1671,7 +1684,7 @@ void wlc_ucode_wake_override_clear(wlc_hw_info_t *wlc_hw, u32 override_bit)
* STA 0 1 <--- This will ensure no beacons
* IBSS 0 0
*/
-static void wlc_ucode_mute_override_set(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw)
{
wlc_hw->mute_override = 1;
@@ -1687,7 +1700,7 @@ static void wlc_ucode_mute_override_set(wlc_hw_info_t *wlc_hw)
}
/* Clear the override on AP and INFRA bits */
-static void wlc_ucode_mute_override_clear(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw)
{
if (wlc_hw->mute_override == 0)
return;
@@ -1707,16 +1720,16 @@ static void wlc_ucode_mute_override_clear(wlc_hw_info_t *wlc_hw)
* Write a MAC address to the rcmta structure
*/
void
-wlc_bmac_set_rcmta(wlc_hw_info_t *wlc_hw, int idx,
+wlc_bmac_set_rcmta(struct wlc_hw_info *wlc_hw, int idx,
const struct ether_addr *addr)
{
d11regs_t *regs = wlc_hw->regs;
volatile u16 *objdata16 = (volatile u16 *)&regs->objdata;
u32 mac_hm;
u16 mac_l;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
ASSERT(wlc_hw->corerev > 4);
@@ -1740,16 +1753,16 @@ wlc_bmac_set_rcmta(wlc_hw_info_t *wlc_hw, int idx,
* Write a MAC address to the given match reg offset in the RXE match engine.
*/
void
-wlc_bmac_set_addrmatch(wlc_hw_info_t *wlc_hw, int match_reg_offset,
+wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw, int match_reg_offset,
const struct ether_addr *addr)
{
d11regs_t *regs;
u16 mac_l;
u16 mac_m;
u16 mac_h;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: wlc_bmac_set_addrmatch\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_set_addrmatch\n", wlc_hw->unit);
ASSERT((match_reg_offset < RCM_SIZE) || (wlc_hw->corerev == 4));
@@ -1769,7 +1782,7 @@ wlc_bmac_set_addrmatch(wlc_hw_info_t *wlc_hw, int match_reg_offset,
}
void
-wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset, int len,
+wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset, int len,
void *buf)
{
d11regs_t *regs;
@@ -1778,9 +1791,9 @@ wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset, int len,
#ifdef IL_BIGENDIAN
volatile u16 *dptr = NULL;
#endif /* IL_BIGENDIAN */
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: wlc_bmac_write_template_ram\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_write_template_ram\n", wlc_hw->unit);
regs = wlc_hw->regs;
osh = wlc_hw->osh;
@@ -1812,9 +1825,9 @@ wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset, int len,
}
}
-void wlc_bmac_set_cwmin(wlc_hw_info_t *wlc_hw, u16 newmin)
+void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin)
{
- osl_t *osh;
+ struct osl_info *osh;
osh = wlc_hw->osh;
wlc_hw->band->CWmin = newmin;
@@ -1824,9 +1837,9 @@ void wlc_bmac_set_cwmin(wlc_hw_info_t *wlc_hw, u16 newmin)
W_REG(osh, &wlc_hw->regs->objdata, newmin);
}
-void wlc_bmac_set_cwmax(wlc_hw_info_t *wlc_hw, u16 newmax)
+void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax)
{
- osl_t *osh;
+ struct osl_info *osh;
osh = wlc_hw->osh;
wlc_hw->band->CWmax = newmax;
@@ -1836,7 +1849,7 @@ void wlc_bmac_set_cwmax(wlc_hw_info_t *wlc_hw, u16 newmax)
W_REG(osh, &wlc_hw->regs->objdata, newmax);
}
-void wlc_bmac_bw_set(wlc_hw_info_t *wlc_hw, u16 bw)
+void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw)
{
bool fastclk;
u32 tmp;
@@ -1861,7 +1874,7 @@ void wlc_bmac_bw_set(wlc_hw_info_t *wlc_hw, u16 bw)
}
static void
-wlc_write_hw_bcntemplate0(wlc_hw_info_t *wlc_hw, void *bcn, int len)
+wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn, int len)
{
d11regs_t *regs = wlc_hw->regs;
@@ -1875,7 +1888,7 @@ wlc_write_hw_bcntemplate0(wlc_hw_info_t *wlc_hw, void *bcn, int len)
}
static void
-wlc_write_hw_bcntemplate1(wlc_hw_info_t *wlc_hw, void *bcn, int len)
+wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn, int len)
{
d11regs_t *regs = wlc_hw->regs;
@@ -1890,7 +1903,7 @@ wlc_write_hw_bcntemplate1(wlc_hw_info_t *wlc_hw, void *bcn, int len)
/* mac is assumed to be suspended at this point */
void
-wlc_bmac_write_hw_bcntemplates(wlc_hw_info_t *wlc_hw, void *bcn, int len,
+wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw, void *bcn, int len,
bool both)
{
d11regs_t *regs = wlc_hw->regs;
@@ -1911,10 +1924,10 @@ wlc_bmac_write_hw_bcntemplates(wlc_hw_info_t *wlc_hw, void *bcn, int len,
}
}
-static void WLBANDINITFN(wlc_bmac_upd_synthpu) (wlc_hw_info_t *wlc_hw)
+static void WLBANDINITFN(wlc_bmac_upd_synthpu) (struct wlc_hw_info *wlc_hw)
{
u16 v;
- wlc_info_t *wlc = wlc_hw->wlc;
+ struct wlc_info *wlc = wlc_hw->wlc;
/* update SYNTHPU_DLY */
if (WLCISLCNPHY(wlc->band)) {
@@ -1930,12 +1943,12 @@ static void WLBANDINITFN(wlc_bmac_upd_synthpu) (wlc_hw_info_t *wlc_hw)
/* band-specific init */
static void
-WLBANDINITFN(wlc_bmac_bsinit) (wlc_info_t *wlc, chanspec_t chanspec)
+WLBANDINITFN(wlc_bmac_bsinit) (struct wlc_info *wlc, chanspec_t chanspec)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
- WL_TRACE(("wl%d: wlc_bmac_bsinit: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit));
+ WL_TRACE("wl%d: wlc_bmac_bsinit: bandunit %d\n",
+ wlc_hw->unit, wlc_hw->band->bandunit);
/* sanity check */
if (PHY_TYPE(R_REG(wlc_hw->osh, &wlc_hw->regs->phyversion)) !=
@@ -1969,9 +1982,9 @@ WLBANDINITFN(wlc_bmac_bsinit) (wlc_info_t *wlc, chanspec_t chanspec)
wlc_bmac_upd_synthpu(wlc_hw);
}
-void wlc_bmac_core_phy_clk(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk)
{
- WL_TRACE(("wl%d: wlc_bmac_core_phy_clk: clk %d\n", wlc_hw->unit, clk));
+ WL_TRACE("wl%d: wlc_bmac_core_phy_clk: clk %d\n", wlc_hw->unit, clk);
wlc_hw->phyclk = clk;
@@ -1994,9 +2007,9 @@ void wlc_bmac_core_phy_clk(wlc_hw_info_t *wlc_hw, bool clk)
}
/* Perform a soft reset of the PHY PLL */
-void wlc_bmac_core_phypll_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw)
{
- WL_TRACE(("wl%d: wlc_bmac_core_phypll_reset\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_core_phypll_reset\n", wlc_hw->unit);
si_corereg(wlc_hw->sih, SI_CC_IDX,
offsetof(chipcregs_t, chipcontrol_addr), ~0, 0);
@@ -2015,7 +2028,7 @@ void wlc_bmac_core_phypll_reset(wlc_hw_info_t *wlc_hw)
/* light way to turn on phy clock without reset for NPHY only
* refer to wlc_bmac_core_phy_clk for full version
*/
-void wlc_bmac_phyclk_fgc(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk)
{
/* support(necessary for NPHY and HYPHY) only */
if (!WLCISNPHY(wlc_hw->band))
@@ -2028,7 +2041,7 @@ void wlc_bmac_phyclk_fgc(wlc_hw_info_t *wlc_hw, bool clk)
}
-void wlc_bmac_macphyclk_set(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk)
{
if (ON == clk)
si_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
@@ -2036,13 +2049,13 @@ void wlc_bmac_macphyclk_set(wlc_hw_info_t *wlc_hw, bool clk)
si_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
}
-void wlc_bmac_phy_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
{
wlc_phy_t *pih = wlc_hw->band->pi;
u32 phy_bw_clkbits;
bool phy_in_reset = false;
- WL_TRACE(("wl%d: wlc_bmac_phy_reset\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_phy_reset\n", wlc_hw->unit);
if (pih == NULL)
return;
@@ -2080,9 +2093,9 @@ void wlc_bmac_phy_reset(wlc_hw_info_t *wlc_hw)
/* switch to and initialize new band */
static void
-WLBANDINITFN(wlc_bmac_setband) (wlc_hw_info_t *wlc_hw, uint bandunit,
+WLBANDINITFN(wlc_bmac_setband) (struct wlc_hw_info *wlc_hw, uint bandunit,
chanspec_t chanspec) {
- wlc_info_t *wlc = wlc_hw->wlc;
+ struct wlc_info *wlc = wlc_hw->wlc;
u32 macintmask;
ASSERT(NBANDS_HW(wlc_hw) > 1);
@@ -2122,9 +2135,9 @@ WLBANDINITFN(wlc_bmac_setband) (wlc_hw_info_t *wlc_hw, uint bandunit,
}
/* low-level band switch utility routine */
-void WLBANDINITFN(wlc_setxband) (wlc_hw_info_t *wlc_hw, uint bandunit)
+void WLBANDINITFN(wlc_setxband) (struct wlc_hw_info *wlc_hw, uint bandunit)
{
- WL_TRACE(("wl%d: wlc_setxband: bandunit %d\n", wlc_hw->unit, bandunit));
+ WL_TRACE("wl%d: wlc_setxband: bandunit %d\n", wlc_hw->unit, bandunit);
wlc_hw->band = wlc_hw->bandstate[bandunit];
@@ -2138,19 +2151,19 @@ void WLBANDINITFN(wlc_setxband) (wlc_hw_info_t *wlc_hw, uint bandunit)
}
}
-static bool wlc_isgoodchip(wlc_hw_info_t *wlc_hw)
+static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw)
{
/* reject unsupported corerev */
if (!VALID_COREREV(wlc_hw->corerev)) {
- WL_ERROR(("unsupported core rev %d\n", wlc_hw->corerev));
+ WL_ERROR("unsupported core rev %d\n", wlc_hw->corerev);
return false;
}
return true;
}
-static bool wlc_validboardtype(wlc_hw_info_t *wlc_hw)
+static bool wlc_validboardtype(struct wlc_hw_info *wlc_hw)
{
bool goodboard = true;
uint boardrev = wlc_hw->boardrev;
@@ -2174,7 +2187,7 @@ static bool wlc_validboardtype(wlc_hw_info_t *wlc_hw)
return goodboard;
}
-static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw)
+static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw)
{
const char *varname = "macaddr";
char *macaddr;
@@ -2191,7 +2204,8 @@ static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw)
macaddr = getvar(wlc_hw->vars, varname);
if (macaddr == NULL) {
- WL_ERROR(("wl%d: wlc_get_macaddr: macaddr getvar(%s) not found\n", wlc_hw->unit, varname));
+ WL_ERROR("wl%d: wlc_get_macaddr: macaddr getvar(%s) not found\n",
+ wlc_hw->unit, varname);
}
return macaddr;
@@ -2203,7 +2217,7 @@ static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw)
* this function could be called when driver is down and w/o clock
* it operates on different registers depending on corerev and boardflag.
*/
-bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw)
+bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw)
{
bool v, clk, xtal;
u32 resetbits = 0, flags = 0;
@@ -2226,9 +2240,9 @@ bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw)
flags |= SICF_PCLKE;
/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
- if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID) ||
- (CHIPID(wlc_hw->sih->chip) == BCM43421_CHIP_ID))
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43421_CHIP_ID))
wlc_hw->regs =
(d11regs_t *) si_setcore(wlc_hw->sih, D11_CORE_ID,
0);
@@ -2249,12 +2263,12 @@ bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw)
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
-void wlc_bmac_hw_up(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw)
{
if (wlc_hw->wlc->pub->hw_up)
return;
- WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
/*
* Enable pll and xtal, initialize the power control registers,
@@ -2264,13 +2278,13 @@ void wlc_bmac_hw_up(wlc_hw_info_t *wlc_hw)
si_clkctl_init(wlc_hw->sih);
wlc_clkctl_clk(wlc_hw, CLK_FAST);
- if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS) {
+ if (wlc_hw->sih->bustype == PCI_BUS) {
si_pci_fixcfg(wlc_hw->sih);
/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
- if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID) ||
- (CHIPID(wlc_hw->sih->chip) == BCM43421_CHIP_ID))
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43421_CHIP_ID))
wlc_hw->regs =
(d11regs_t *) si_setcore(wlc_hw->sih, D11_CORE_ID,
0);
@@ -2283,7 +2297,7 @@ void wlc_bmac_hw_up(wlc_hw_info_t *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (CHIPID(wlc_hw->sih->chip) == BCM4313_CHIP_ID)) {
+ && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -2291,10 +2305,10 @@ void wlc_bmac_hw_up(wlc_hw_info_t *wlc_hw)
}
}
-static bool wlc_dma_rxreset(wlc_hw_info_t *wlc_hw, uint fifo)
+static bool wlc_dma_rxreset(struct wlc_hw_info *wlc_hw, uint fifo)
{
- hnddma_t *di = wlc_hw->di[fifo];
- osl_t *osh;
+ struct hnddma_pub *di = wlc_hw->di[fifo];
+ struct osl_info *osh;
if (D11REV_LT(wlc_hw->corerev, 12)) {
bool rxidle = true;
@@ -2309,7 +2323,8 @@ static bool wlc_dma_rxreset(wlc_hw_info_t *wlc_hw, uint fifo)
50000);
if (!rxidle && (rcv_frm_cnt != 0))
- WL_ERROR(("wl%d: %s: rxdma[%d] not idle && rcv_frm_cnt(%d) not zero\n", wlc_hw->unit, __func__, fifo, rcv_frm_cnt));
+ WL_ERROR("wl%d: %s: rxdma[%d] not idle && rcv_frm_cnt(%d) not zero\n",
+ wlc_hw->unit, __func__, fifo, rcv_frm_cnt);
mdelay(2);
}
@@ -2324,7 +2339,7 @@ static bool wlc_dma_rxreset(wlc_hw_info_t *wlc_hw, uint fifo)
* clear software macintstatus for fresh new start
* one testing hack wlc_hw->noreset will bypass the d11/phy reset
*/
-void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags)
+void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
{
d11regs_t *regs;
uint i;
@@ -2334,7 +2349,7 @@ void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags)
if (flags == WLC_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
- WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
regs = wlc_hw->regs;
@@ -2347,17 +2362,20 @@ void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags)
if (si_iscoreup(wlc_hw->sih)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) {
- WL_ERROR(("wl%d: %s: dma_txreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, i));
+ WL_ERROR("wl%d: %s: dma_txreset[%d]: cannot stop dma\n",
+ wlc_hw->unit, __func__, i);
}
if ((wlc_hw->di[RX_FIFO])
&& (!wlc_dma_rxreset(wlc_hw, RX_FIFO))) {
- WL_ERROR(("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, RX_FIFO));
+ WL_ERROR("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n",
+ wlc_hw->unit, __func__, RX_FIFO);
}
if (D11REV_IS(wlc_hw->corerev, 4)
&& wlc_hw->di[RX_TXSTATUS_FIFO]
&& (!wlc_dma_rxreset(wlc_hw, RX_TXSTATUS_FIFO))) {
- WL_ERROR(("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, RX_TXSTATUS_FIFO));
+ WL_ERROR("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n",
+ wlc_hw->unit, __func__, RX_TXSTATUS_FIFO);
}
}
/* if noreset, just stop the psm and return */
@@ -2413,14 +2431,14 @@ void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags)
* txfifo sizes needs to be modified(increased) since the newer cores
* have more memory.
*/
-static void wlc_corerev_fifofixup(wlc_hw_info_t *wlc_hw)
+static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw)
{
d11regs_t *regs = wlc_hw->regs;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
u16 txfifo_cmd;
- osl_t *osh;
+ struct osl_info *osh;
if (D11REV_LT(wlc_hw->corerev, 9))
goto exit;
@@ -2473,22 +2491,22 @@ static void wlc_corerev_fifofixup(wlc_hw_info_t *wlc_hw)
* config other core registers
* init dma
*/
-static void wlc_coreinit(wlc_info_t *wlc)
+static void wlc_coreinit(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs;
u32 sflags;
uint bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
- osl_t *osh;
+ struct osl_info *osh;
int err = 0;
u16 buf[NFIFO];
regs = wlc_hw->regs;
osh = wlc_hw->osh;
- WL_TRACE(("wl%d: wlc_coreinit\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_coreinit\n", wlc_hw->unit);
/* reset PSM */
wlc_bmac_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
@@ -2511,8 +2529,8 @@ static void wlc_coreinit(wlc_info_t *wlc)
SPINWAIT(((R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD) == 0),
1000 * 1000);
if ((R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD) == 0)
- WL_ERROR(("wl%d: wlc_coreinit: ucode did not self-suspend!\n",
- wlc_hw->unit));
+ WL_ERROR("wl%d: wlc_coreinit: ucode did not self-suspend!\n",
+ wlc_hw->unit);
wlc_gpio_init(wlc);
@@ -2522,18 +2540,18 @@ static void wlc_coreinit(wlc_info_t *wlc)
if (WLCISNPHY(wlc_hw->band))
wlc_write_inits(wlc_hw, d11n0initvals16);
else
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (WLCISLCNPHY(wlc_hw->band)) {
wlc_write_inits(wlc_hw, d11lcn0initvals24);
} else {
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
} else {
- WL_ERROR(("%s: wl%d: unsupported corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
/* For old ucode, txfifo sizes needs to be modified(increased) for Corerev >= 9 */
@@ -2575,7 +2593,8 @@ static void wlc_coreinit(wlc_info_t *wlc)
err = -1;
}
if (err != 0) {
- WL_ERROR(("wlc_coreinit: txfifo mismatch: ucode size %d driver size %d index %d\n", buf[i], wlc_hw->xmtfifo_sz[i], i));
+ WL_ERROR("wlc_coreinit: txfifo mismatch: ucode size %d driver size %d index %d\n",
+ buf[i], wlc_hw->xmtfifo_sz[i], i);
/* DO NOT ASSERT corerev < 4 even there is a mismatch
* shmem, since driver don't overwrite those chip and
* ucode initialize data will be used.
@@ -2684,15 +2703,15 @@ static void wlc_coreinit(wlc_info_t *wlc)
* - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
*/
-void wlc_bmac_switch_macfreq(wlc_hw_info_t *wlc_hw, u8 spurmode)
+void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode)
{
d11regs_t *regs;
- osl_t *osh;
+ struct osl_info *osh;
regs = wlc_hw->regs;
osh = wlc_hw->osh;
- if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID)) {
+ if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
W_REG(osh, &regs->tsf_clk_frac_l, 0x2082);
W_REG(osh, &regs->tsf_clk_frac_h, 0x8);
@@ -2715,12 +2734,12 @@ void wlc_bmac_switch_macfreq(wlc_hw_info_t *wlc_hw, u8 spurmode)
}
/* Initialize GPIOs that are controlled by D11 core */
-static void wlc_gpio_init(wlc_info_t *wlc)
+static void wlc_gpio_init(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs;
u32 gc, gm;
- osl_t *osh;
+ struct osl_info *osh;
regs = wlc_hw->regs;
osh = wlc_hw->osh;
@@ -2780,9 +2799,9 @@ static void wlc_gpio_init(wlc_info_t *wlc)
si_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
}
-static void wlc_ucode_download(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_download(struct wlc_hw_info *wlc_hw)
{
- wlc_info_t *wlc;
+ struct wlc_info *wlc;
wlc = wlc_hw->wlc;
if (wlc_hw->ucode_loaded)
@@ -2794,30 +2813,30 @@ static void wlc_ucode_download(wlc_hw_info_t *wlc_hw)
bcm43xx_16_mimosz);
wlc_hw->ucode_loaded = true;
} else
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (WLCISLCNPHY(wlc_hw->band)) {
wlc_ucode_write(wlc_hw, bcm43xx_24_lcn,
bcm43xx_24_lcnsz);
wlc_hw->ucode_loaded = true;
} else {
- WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev));
+ WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
}
}
-static void wlc_ucode_write(wlc_hw_info_t *wlc_hw, const u32 ucode[],
+static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
const uint nbytes) {
- osl_t *osh;
+ struct osl_info *osh;
d11regs_t *regs = wlc_hw->regs;
uint i;
uint count;
osh = wlc_hw->osh;
- WL_TRACE(("wl%d: wlc_ucode_write\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_ucode_write\n", wlc_hw->unit);
ASSERT(IS_ALIGNED(nbytes, sizeof(u32)));
@@ -2829,13 +2848,13 @@ static void wlc_ucode_write(wlc_hw_info_t *wlc_hw, const u32 ucode[],
W_REG(osh, &regs->objdata, ucode[i]);
}
-static void wlc_write_inits(wlc_hw_info_t *wlc_hw, const d11init_t *inits)
+static void wlc_write_inits(struct wlc_hw_info *wlc_hw, const d11init_t *inits)
{
int i;
- osl_t *osh;
+ struct osl_info *osh;
volatile u8 *base;
- WL_TRACE(("wl%d: wlc_write_inits\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_write_inits\n", wlc_hw->unit);
osh = wlc_hw->osh;
base = (volatile u8 *)wlc_hw->regs;
@@ -2852,7 +2871,7 @@ static void wlc_write_inits(wlc_hw_info_t *wlc_hw, const d11init_t *inits)
}
}
-static void wlc_ucode_txant_set(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw)
{
u16 phyctl;
u16 phytxant = wlc_hw->bmac_phytxant;
@@ -2869,7 +2888,7 @@ static void wlc_ucode_txant_set(wlc_hw_info_t *wlc_hw)
wlc_bmac_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
}
-void wlc_bmac_txant_set(wlc_hw_info_t *wlc_hw, u16 phytxant)
+void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant)
{
/* update sw state */
wlc_hw->bmac_phytxant = phytxant;
@@ -2881,12 +2900,12 @@ void wlc_bmac_txant_set(wlc_hw_info_t *wlc_hw, u16 phytxant)
}
-u16 wlc_bmac_get_txant(wlc_hw_info_t *wlc_hw)
+u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw)
{
return (u16) wlc_hw->wlc->stf->txant;
}
-void wlc_bmac_antsel_type_set(wlc_hw_info_t *wlc_hw, u8 antsel_type)
+void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw, u8 antsel_type)
{
wlc_hw->antsel_type = antsel_type;
@@ -2894,7 +2913,7 @@ void wlc_bmac_antsel_type_set(wlc_hw_info_t *wlc_hw, u8 antsel_type)
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
-void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw)
{
bool fatal = false;
uint unit;
@@ -2911,44 +2930,45 @@ void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw)
if (!intstatus)
continue;
- WL_TRACE(("wl%d: wlc_bmac_fifoerrors: intstatus%d 0x%x\n", unit,
- idx, intstatus));
+ WL_TRACE("wl%d: wlc_bmac_fifoerrors: intstatus%d 0x%x\n",
+ unit, idx, intstatus);
if (intstatus & I_RO) {
- WL_ERROR(("wl%d: fifo %d: receive fifo overflow\n",
- unit, idx));
+ WL_ERROR("wl%d: fifo %d: receive fifo overflow\n",
+ unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->rxoflo);
fatal = true;
}
if (intstatus & I_PC) {
- WL_ERROR(("wl%d: fifo %d: descriptor error\n", unit,
- idx));
+ WL_ERROR("wl%d: fifo %d: descriptor error\n",
+ unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmade);
fatal = true;
}
if (intstatus & I_PD) {
- WL_ERROR(("wl%d: fifo %d: data error\n", unit, idx));
+ WL_ERROR("wl%d: fifo %d: data error\n", unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmada);
fatal = true;
}
if (intstatus & I_DE) {
- WL_ERROR(("wl%d: fifo %d: descriptor protocol error\n",
- unit, idx));
+ WL_ERROR("wl%d: fifo %d: descriptor protocol error\n",
+ unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmape);
fatal = true;
}
if (intstatus & I_RU) {
- WL_ERROR(("wl%d: fifo %d: receive descriptor underflow\n", unit, idx));
+ WL_ERROR("wl%d: fifo %d: receive descriptor underflow\n",
+ unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->rxuflo[idx]);
}
if (intstatus & I_XU) {
- WL_ERROR(("wl%d: fifo %d: transmit fifo underflow\n",
- idx, unit));
+ WL_ERROR("wl%d: fifo %d: transmit fifo underflow\n",
+ unit, idx);
WLCNTINCR(wlc_hw->wlc->pub->_cnt->txuflo);
fatal = true;
}
@@ -2962,9 +2982,9 @@ void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw)
}
}
-void wlc_intrson(wlc_info_t *wlc)
+void wlc_intrson(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
ASSERT(wlc->defmacintmask);
wlc->macintmask = wlc->defmacintmask;
W_REG(wlc_hw->osh, &wlc_hw->regs->macintmask, wlc->macintmask);
@@ -2975,7 +2995,7 @@ void wlc_intrson(wlc_info_t *wlc)
* but also because per-port code may require sync with valid interrupt.
*/
-static u32 wlc_wlintrsoff(wlc_info_t *wlc)
+static u32 wlc_wlintrsoff(struct wlc_info *wlc)
{
if (!wlc->hw->up)
return 0;
@@ -2983,7 +3003,7 @@ static u32 wlc_wlintrsoff(wlc_info_t *wlc)
return wl_intrsoff(wlc->wl);
}
-static void wlc_wlintrsrestore(wlc_info_t *wlc, u32 macintmask)
+static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask)
{
if (!wlc->hw->up)
return;
@@ -2991,9 +3011,9 @@ static void wlc_wlintrsrestore(wlc_info_t *wlc, u32 macintmask)
wl_intrsrestore(wlc->wl, macintmask);
}
-u32 wlc_intrsoff(wlc_info_t *wlc)
+u32 wlc_intrsoff(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
u32 macintmask;
if (!wlc_hw->clk)
@@ -3010,9 +3030,9 @@ u32 wlc_intrsoff(wlc_info_t *wlc)
return wlc->macintstatus ? 0 : macintmask;
}
-void wlc_intrsrestore(wlc_info_t *wlc, u32 macintmask)
+void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
if (!wlc_hw->clk)
return;
@@ -3020,7 +3040,7 @@ void wlc_intrsrestore(wlc_info_t *wlc, u32 macintmask)
W_REG(wlc_hw->osh, &wlc_hw->regs->macintmask, wlc->macintmask);
}
-void wlc_bmac_mute(wlc_hw_info_t *wlc_hw, bool on, mbool flags)
+void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags)
{
struct ether_addr null_ether_addr = { {0, 0, 0, 0, 0, 0} };
@@ -3056,12 +3076,12 @@ void wlc_bmac_mute(wlc_hw_info_t *wlc_hw, bool on, mbool flags)
wlc_ucode_mute_override_clear(wlc_hw);
}
-void wlc_bmac_set_deaf(wlc_hw_info_t *wlc_hw, bool user_flag)
+void wlc_bmac_set_deaf(struct wlc_hw_info *wlc_hw, bool user_flag)
{
wlc_phy_set_deaf(wlc_hw->band->pi, user_flag);
}
-int wlc_bmac_xmtfifo_sz_get(wlc_hw_info_t *wlc_hw, uint fifo, uint *blocks)
+int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo, uint *blocks)
{
if (fifo >= NFIFO)
return BCME_RANGE;
@@ -3071,7 +3091,7 @@ int wlc_bmac_xmtfifo_sz_get(wlc_hw_info_t *wlc_hw, uint fifo, uint *blocks)
return 0;
}
-int wlc_bmac_xmtfifo_sz_set(wlc_hw_info_t *wlc_hw, uint fifo, uint blocks)
+int wlc_bmac_xmtfifo_sz_set(struct wlc_hw_info *wlc_hw, uint fifo, uint blocks)
{
if (fifo >= NFIFO || blocks > 299)
return BCME_RANGE;
@@ -3091,7 +3111,7 @@ int wlc_bmac_xmtfifo_sz_set(wlc_hw_info_t *wlc_hw, uint fifo, uint blocks)
* be pulling data into a tx fifo, by the time the MAC acks the suspend
* request.
*/
-bool wlc_bmac_tx_fifo_suspended(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw, uint tx_fifo)
{
/* check that a suspend has been requested and is no longer pending */
@@ -3110,7 +3130,7 @@ bool wlc_bmac_tx_fifo_suspended(wlc_hw_info_t *wlc_hw, uint tx_fifo)
return false;
}
-void wlc_bmac_tx_fifo_suspend(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo)
{
u8 fifo = 1 << tx_fifo;
@@ -3141,7 +3161,7 @@ void wlc_bmac_tx_fifo_suspend(wlc_hw_info_t *wlc_hw, uint tx_fifo)
}
}
-void wlc_bmac_tx_fifo_resume(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo)
{
/* BMAC_NOTE: WLC_TX_FIFO_ENAB is done in wlc_dpc() for DMA case but need to be done
* here for PIO otherwise the watchdog will catch the inconsistency and fire
@@ -3169,20 +3189,20 @@ void wlc_bmac_tx_fifo_resume(wlc_hw_info_t *wlc_hw, uint tx_fifo)
* 0 if the interrupt is not for us, or we are in some special cases;
* device interrupt status bits otherwise.
*/
-static inline u32 wlc_intstatus(wlc_info_t *wlc, bool in_isr)
+static inline u32 wlc_intstatus(struct wlc_info *wlc, bool in_isr)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 macintstatus;
u32 intstatus_rxfifo, intstatus_txsfifo;
- osl_t *osh;
+ struct osl_info *osh;
osh = wlc_hw->osh;
/* macintstatus includes a DMA interrupt summary bit */
macintstatus = R_REG(osh, &regs->macintstatus);
- WL_TRACE(("wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus));
+ WL_TRACE("wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus);
/* detect cardbus removed, in power down(suspend) and in reset */
if (DEVICEREMOVED(wlc))
@@ -3207,9 +3227,7 @@ static inline u32 wlc_intstatus(wlc_info_t *wlc, bool in_isr)
*/
/* turn off the interrupts */
W_REG(osh, &regs->macintmask, 0);
-#ifndef BCMSDIO
(void)R_REG(osh, &regs->macintmask); /* sync readback */
-#endif
wlc->macintmask = 0;
/* clear device interrupts */
@@ -3224,7 +3242,9 @@ static inline u32 wlc_intstatus(wlc_info_t *wlc, bool in_isr)
R_REG(osh,
&regs->intctrlregs[RX_TXSTATUS_FIFO].
intstatus);
- WL_TRACE(("wl%d: intstatus_rxfifo 0x%x, intstatus_txsfifo 0x%x\n", wlc_hw->unit, intstatus_rxfifo, intstatus_txsfifo));
+ WL_TRACE("wl%d: intstatus_rxfifo 0x%x, intstatus_txsfifo 0x%x\n",
+ wlc_hw->unit,
+ intstatus_rxfifo, intstatus_txsfifo);
/* defer unsolicited interrupt hints */
intstatus_rxfifo &= DEF_RXINTMASK;
@@ -3261,7 +3281,7 @@ static inline u32 wlc_intstatus(wlc_info_t *wlc, bool in_isr)
/* Update wlc->macintstatus and wlc->intstatus[]. */
/* Return true if they are updated successfully. false otherwise */
-bool wlc_intrsupd(wlc_info_t *wlc)
+bool wlc_intrsupd(struct wlc_info *wlc)
{
u32 macintstatus;
@@ -3286,9 +3306,9 @@ bool wlc_intrsupd(wlc_info_t *wlc)
* *wantdpc will be set to true if further wlc_dpc() processing is required,
* false otherwise.
*/
-bool BCMFASTPATH wlc_isr(wlc_info_t *wlc, bool *wantdpc)
+bool BCMFASTPATH wlc_isr(struct wlc_info *wlc, bool *wantdpc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
u32 macintstatus;
*wantdpc = false;
@@ -3300,7 +3320,7 @@ bool BCMFASTPATH wlc_isr(wlc_info_t *wlc, bool *wantdpc)
macintstatus = wlc_intstatus(wlc, true);
if (macintstatus == 0xffffffff)
- WL_ERROR(("DEVICEREMOVED detected in the ISR code path.\n"));
+ WL_ERROR("DEVICEREMOVED detected in the ISR code path\n");
/* it is not for us */
if (macintstatus == 0)
@@ -3317,20 +3337,20 @@ bool BCMFASTPATH wlc_isr(wlc_info_t *wlc, bool *wantdpc)
}
/* process tx completion events for corerev < 5 */
-static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc_hw)
+static bool wlc_bmac_txstatus_corerev4(struct wlc_hw_info *wlc_hw)
{
- void *status_p;
+ struct sk_buff *status_p;
tx_status_t *txs;
- osl_t *osh;
+ struct osl_info *osh;
bool fatal = false;
- WL_TRACE(("wl%d: wlc_txstatusrecv\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_txstatusrecv\n", wlc_hw->unit);
osh = wlc_hw->osh;
while (!fatal && (status_p = dma_rx(wlc_hw->di[RX_TXSTATUS_FIFO]))) {
- txs = (tx_status_t *) PKTDATA(status_p);
+ txs = (tx_status_t *) status_p->data;
/* MAC uses little endian only */
ltoh16_buf((void *)txs, sizeof(tx_status_t));
@@ -3340,7 +3360,7 @@ static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc_hw)
fatal = wlc_bmac_dotxstatus(wlc_hw, txs, 0);
- PKTFREE(osh, status_p, false);
+ pkt_buf_free_skb(osh, status_p, false);
}
if (fatal)
@@ -3353,7 +3373,7 @@ static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc_hw)
}
static bool BCMFASTPATH
-wlc_bmac_dotxstatus(wlc_hw_info_t *wlc_hw, tx_status_t *txs, u32 s2)
+wlc_bmac_dotxstatus(struct wlc_hw_info *wlc_hw, tx_status_t *txs, u32 s2)
{
/* discard intermediate indications for ucode with one legitimate case:
* e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, but the subsequent
@@ -3372,12 +3392,12 @@ wlc_bmac_dotxstatus(wlc_hw_info_t *wlc_hw, tx_status_t *txs, u32 s2)
* Return true if more tx status need to be processed. false otherwise.
*/
static bool BCMFASTPATH
-wlc_bmac_txstatus(wlc_hw_info_t *wlc_hw, bool bound, bool *fatal)
+wlc_bmac_txstatus(struct wlc_hw_info *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
- wlc_info_t *wlc = wlc_hw->wlc;
+ struct wlc_info *wlc = wlc_hw->wlc;
- WL_TRACE(("wl%d: wlc_bmac_txstatus\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_txstatus\n", wlc_hw->unit);
if (D11REV_IS(wlc_hw->corerev, 4)) {
/* to retire soon */
@@ -3388,7 +3408,7 @@ wlc_bmac_txstatus(wlc_hw_info_t *wlc_hw, bool bound, bool *fatal)
} else {
/* corerev >= 5 */
d11regs_t *regs;
- osl_t *osh;
+ struct osl_info *osh;
tx_status_t txstatus, *txs;
u32 s1, s2;
uint n = 0;
@@ -3402,8 +3422,8 @@ wlc_bmac_txstatus(wlc_hw_info_t *wlc_hw, bool bound, bool *fatal)
&& (s1 = R_REG(osh, &regs->frmtxstatus)) & TXS_V) {
if (s1 == 0xffffffff) {
- WL_ERROR(("wl%d: %s: dead chip\n",
- wlc_hw->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n",
+ wlc_hw->unit, __func__);
ASSERT(s1 != 0xffffffff);
return morepending;
}
@@ -3436,15 +3456,15 @@ wlc_bmac_txstatus(wlc_hw_info_t *wlc_hw, bool bound, bool *fatal)
return morepending;
}
-void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
+void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 mc, mi;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: wlc_suspend_mac_and_wait: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit));
+ WL_TRACE("wl%d: wlc_suspend_mac_and_wait: bandunit %d\n",
+ wlc_hw->unit, wlc_hw->band->bandunit);
/*
* Track overlapping suspend requests
@@ -3461,7 +3481,7 @@ void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
mc = R_REG(osh, &regs->maccontrol);
if (mc == 0xffffffff) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
wl_down(wlc->wl);
return;
}
@@ -3471,7 +3491,7 @@ void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
mi = R_REG(osh, &regs->macintstatus);
if (mi == 0xffffffff) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
wl_down(wlc->wl);
return;
}
@@ -3483,15 +3503,18 @@ void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
WLC_MAX_MAC_SUSPEND);
if (!(R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD)) {
- WL_ERROR(("wl%d: wlc_suspend_mac_and_wait: waited %d uS and "
- "MI_MACSSPNDD is still not on.\n",
- wlc_hw->unit, WLC_MAX_MAC_SUSPEND));
- WL_ERROR(("wl%d: psmdebug 0x%08x, phydebug 0x%08x, psm_brc 0x%04x\n", wlc_hw->unit, R_REG(osh, &regs->psmdebug), R_REG(osh, &regs->phydebug), R_REG(osh, &regs->psm_brc)));
+ WL_ERROR("wl%d: wlc_suspend_mac_and_wait: waited %d uS and MI_MACSSPNDD is still not on.\n",
+ wlc_hw->unit, WLC_MAX_MAC_SUSPEND);
+ WL_ERROR("wl%d: psmdebug 0x%08x, phydebug 0x%08x, psm_brc 0x%04x\n",
+ wlc_hw->unit,
+ R_REG(osh, &regs->psmdebug),
+ R_REG(osh, &regs->phydebug),
+ R_REG(osh, &regs->psm_brc));
}
mc = R_REG(osh, &regs->maccontrol);
if (mc == 0xffffffff) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
wl_down(wlc->wl);
return;
}
@@ -3500,15 +3523,15 @@ void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
ASSERT(!(mc & MCTL_EN_MAC));
}
-void wlc_enable_mac(wlc_info_t *wlc)
+void wlc_enable_mac(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 mc, mi;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: wlc_enable_mac: bandunit %d\n", wlc_hw->unit,
- wlc->band->bandunit));
+ WL_TRACE("wl%d: wlc_enable_mac: bandunit %d\n",
+ wlc_hw->unit, wlc->band->bandunit);
/*
* Track overlapping suspend requests
@@ -3539,7 +3562,7 @@ void wlc_enable_mac(wlc_info_t *wlc)
wlc_ucode_wake_override_clear(wlc_hw, WLC_WAKE_OVERRIDE_MACSUSPEND);
}
-void wlc_bmac_ifsctl_edcrs_set(wlc_hw_info_t *wlc_hw, bool abie, bool isht)
+void wlc_bmac_ifsctl_edcrs_set(struct wlc_hw_info *wlc_hw, bool abie, bool isht)
{
if (!(WLCISNPHY(wlc_hw->band) && (D11REV_GE(wlc_hw->corerev, 16))))
return;
@@ -3575,7 +3598,7 @@ void wlc_bmac_ifsctl_edcrs_set(wlc_hw_info_t *wlc_hw, bool abie, bool isht)
}
}
-static void wlc_upd_ofdm_pctl1_table(wlc_hw_info_t *wlc_hw)
+static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw)
{
u8 rate;
u8 rates[8] = {
@@ -3609,7 +3632,7 @@ static void wlc_upd_ofdm_pctl1_table(wlc_hw_info_t *wlc_hw)
}
}
-static u16 wlc_bmac_ofdm_ratetable_offset(wlc_hw_info_t *wlc_hw, u8 rate)
+static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw, u8 rate)
{
uint i;
u8 plcp_rate = 0;
@@ -3642,7 +3665,7 @@ static u16 wlc_bmac_ofdm_ratetable_offset(wlc_hw_info_t *wlc_hw, u8 rate)
return 2 * wlc_bmac_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
}
-void wlc_bmac_band_stf_ss_set(wlc_hw_info_t *wlc_hw, u8 stf_mode)
+void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode)
{
wlc_hw->hw_stf_ss_opmode = stf_mode;
@@ -3651,7 +3674,7 @@ void wlc_bmac_band_stf_ss_set(wlc_hw_info_t *wlc_hw, u8 stf_mode)
}
void BCMFASTPATH
-wlc_bmac_read_tsf(wlc_hw_info_t *wlc_hw, u32 *tsf_l_ptr,
+wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
d11regs_t *regs = wlc_hw->regs;
@@ -3663,14 +3686,14 @@ wlc_bmac_read_tsf(wlc_hw_info_t *wlc_hw, u32 *tsf_l_ptr,
return;
}
-bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
+bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw)
{
d11regs_t *regs;
u32 w, val;
volatile u16 *reg16;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: validate_chip_access\n", wlc_hw->unit));
+ WL_TRACE("wl%d: validate_chip_access\n", wlc_hw->unit);
regs = wlc_hw->regs;
osh = wlc_hw->osh;
@@ -3690,7 +3713,8 @@ bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
(void)R_REG(osh, &regs->objaddr);
val = R_REG(osh, &regs->objdata);
if (val != (u32) 0xaa5555aa) {
- WL_ERROR(("wl%d: validate_chip_access: SHM = 0x%x, expected 0xaa5555aa\n", wlc_hw->unit, val));
+ WL_ERROR("wl%d: validate_chip_access: SHM = 0x%x, expected 0xaa5555aa\n",
+ wlc_hw->unit, val);
return false;
}
@@ -3702,7 +3726,8 @@ bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
(void)R_REG(osh, &regs->objaddr);
val = R_REG(osh, &regs->objdata);
if (val != (u32) 0x55aaaa55) {
- WL_ERROR(("wl%d: validate_chip_access: SHM = 0x%x, expected 0x55aaaa55\n", wlc_hw->unit, val));
+ WL_ERROR("wl%d: validate_chip_access: SHM = 0x%x, expected 0x55aaaa55\n",
+ wlc_hw->unit, val);
return false;
}
@@ -3732,12 +3757,14 @@ bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
/* verify with the 16 bit registers that have no side effects */
val = R_REG(osh, &regs->tsf_cfpstrt_l);
if (val != (uint) 0xBBBB) {
- WL_ERROR(("wl%d: validate_chip_access: tsf_cfpstrt_l = 0x%x, expected" " 0x%x\n", wlc_hw->unit, val, 0xBBBB));
+ WL_ERROR("wl%d: validate_chip_access: tsf_cfpstrt_l = 0x%x, expected 0x%x\n",
+ wlc_hw->unit, val, 0xBBBB);
return false;
}
val = R_REG(osh, &regs->tsf_cfpstrt_h);
if (val != (uint) 0xCCCC) {
- WL_ERROR(("wl%d: validate_chip_access: tsf_cfpstrt_h = 0x%x, expected" " 0x%x\n", wlc_hw->unit, val, 0xCCCC));
+ WL_ERROR("wl%d: validate_chip_access: tsf_cfpstrt_h = 0x%x, expected 0x%x\n",
+ wlc_hw->unit, val, 0xCCCC);
return false;
}
@@ -3749,7 +3776,10 @@ bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
w = R_REG(osh, &regs->maccontrol);
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
- WL_ERROR(("wl%d: validate_chip_access: maccontrol = 0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w, (MCTL_IHR_EN | MCTL_WAKE), (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE)));
+ WL_ERROR("wl%d: validate_chip_access: maccontrol = 0x%x, expected 0x%x or 0x%x\n",
+ wlc_hw->unit, w,
+ (MCTL_IHR_EN | MCTL_WAKE),
+ (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE));
return false;
}
@@ -3758,13 +3788,13 @@ bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
#define PHYPLL_WAIT_US 100000
-void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on)
+void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on)
{
d11regs_t *regs;
- osl_t *osh;
+ struct osl_info *osh;
u32 tmp;
- WL_TRACE(("wl%d: wlc_bmac_core_phypll_ctl\n", wlc_hw->unit));
+ WL_TRACE("wl%d: wlc_bmac_core_phypll_ctl\n", wlc_hw->unit);
tmp = 0;
regs = wlc_hw->regs;
@@ -3785,8 +3815,8 @@ void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on)
tmp = R_REG(osh, &regs->clk_ctl_st);
if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
(CCS_ERSRC_AVAIL_HT)) {
- WL_ERROR(("%s: turn on PHY PLL failed\n",
- __func__));
+ WL_ERROR("%s: turn on PHY PLL failed\n",
+ __func__);
ASSERT(0);
}
} else {
@@ -3803,8 +3833,8 @@ void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on)
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) {
- WL_ERROR(("%s: turn on PHY PLL failed\n",
- __func__));
+ WL_ERROR("%s: turn on PHY PLL failed\n",
+ __func__);
ASSERT(0);
}
}
@@ -3817,11 +3847,11 @@ void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on)
}
}
-void wlc_coredisable(wlc_hw_info_t *wlc_hw)
+void wlc_coredisable(struct wlc_hw_info *wlc_hw)
{
bool dev_gone;
- WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
ASSERT(!wlc_hw->up);
@@ -3857,9 +3887,9 @@ void wlc_coredisable(wlc_hw_info_t *wlc_hw)
}
/* power both the pll and external oscillator on/off */
-void wlc_bmac_xtal(wlc_hw_info_t *wlc_hw, bool want)
+void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want)
{
- WL_TRACE(("wl%d: wlc_bmac_xtal: want %d\n", wlc_hw->unit, want));
+ WL_TRACE("wl%d: wlc_bmac_xtal: want %d\n", wlc_hw->unit, want);
/* dont power down if plldown is false or we must poll hw radio disable */
if (!want && wlc_hw->pllreq)
@@ -3876,9 +3906,9 @@ void wlc_bmac_xtal(wlc_hw_info_t *wlc_hw, bool want)
}
}
-static void wlc_flushqueues(wlc_info_t *wlc)
+static void wlc_flushqueues(struct wlc_info *wlc)
{
- wlc_hw_info_t *wlc_hw = wlc->hw;
+ struct wlc_hw_info *wlc_hw = wlc->hw;
uint i;
wlc->txpend16165war = 0;
@@ -3888,8 +3918,8 @@ static void wlc_flushqueues(wlc_info_t *wlc)
if (wlc_hw->di[i]) {
dma_txreclaim(wlc_hw->di[i], HNDDMA_RANGE_ALL);
TXPKTPENDCLR(wlc, i);
- WL_TRACE(("wlc_flushqueues: pktpend fifo %d cleared\n",
- i));
+ WL_TRACE("wlc_flushqueues: pktpend fifo %d cleared\n",
+ i);
}
/* free any posted rx packets */
@@ -3898,12 +3928,12 @@ static void wlc_flushqueues(wlc_info_t *wlc)
dma_rxreclaim(wlc_hw->di[RX_TXSTATUS_FIFO]);
}
-u16 wlc_bmac_read_shm(wlc_hw_info_t *wlc_hw, uint offset)
+u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset)
{
return wlc_bmac_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
}
-void wlc_bmac_write_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v)
+void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v)
{
wlc_bmac_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
}
@@ -3912,7 +3942,7 @@ void wlc_bmac_write_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v)
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
*/
-void wlc_bmac_set_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v, int len)
+void wlc_bmac_set_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v, int len)
{
int i;
@@ -3929,7 +3959,7 @@ void wlc_bmac_set_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v, int len)
}
static u16
-wlc_bmac_read_objmem(wlc_hw_info_t *wlc_hw, uint offset, u32 sel)
+wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset, u32 sel)
{
d11regs_t *regs = wlc_hw->regs;
volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3950,7 +3980,7 @@ wlc_bmac_read_objmem(wlc_hw_info_t *wlc_hw, uint offset, u32 sel)
}
static void
-wlc_bmac_write_objmem(wlc_hw_info_t *wlc_hw, uint offset, u16 v, u32 sel)
+wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset, u16 v, u32 sel)
{
d11regs_t *regs = wlc_hw->regs;
volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3973,7 +4003,7 @@ wlc_bmac_write_objmem(wlc_hw_info_t *wlc_hw, uint offset, u16 v, u32 sel)
* 'sel' selects the type of memory
*/
void
-wlc_bmac_copyto_objmem(wlc_hw_info_t *wlc_hw, uint offset, const void *buf,
+wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw, uint offset, const void *buf,
int len, u32 sel)
{
u16 v;
@@ -3999,7 +4029,7 @@ wlc_bmac_copyto_objmem(wlc_hw_info_t *wlc_hw, uint offset, const void *buf,
* 'sel' selects the type of memory
*/
void
-wlc_bmac_copyfrom_objmem(wlc_hw_info_t *wlc_hw, uint offset, void *buf,
+wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset, void *buf,
int len, u32 sel)
{
u16 v;
@@ -4020,16 +4050,16 @@ wlc_bmac_copyfrom_objmem(wlc_hw_info_t *wlc_hw, uint offset, void *buf,
}
}
-void wlc_bmac_copyfrom_vars(wlc_hw_info_t *wlc_hw, char **buf, uint *len)
+void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf, uint *len)
{
- WL_TRACE(("wlc_bmac_copyfrom_vars, nvram vars totlen=%d\n",
- wlc_hw->vars_size));
+ WL_TRACE("wlc_bmac_copyfrom_vars, nvram vars totlen=%d\n",
+ wlc_hw->vars_size);
*buf = wlc_hw->vars;
*len = wlc_hw->vars_size;
}
-void wlc_bmac_retrylimit_upd(wlc_hw_info_t *wlc_hw, u16 SRL, u16 LRL)
+void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL, u16 LRL)
{
wlc_hw->SRL = SRL;
wlc_hw->LRL = LRL;
@@ -4047,17 +4077,17 @@ void wlc_bmac_retrylimit_upd(wlc_hw_info_t *wlc_hw, u16 SRL, u16 LRL)
}
}
-void wlc_bmac_set_noreset(wlc_hw_info_t *wlc_hw, bool noreset_flag)
+void wlc_bmac_set_noreset(struct wlc_hw_info *wlc_hw, bool noreset_flag)
{
wlc_hw->noreset = noreset_flag;
}
-void wlc_bmac_set_ucode_loaded(wlc_hw_info_t *wlc_hw, bool ucode_loaded)
+void wlc_bmac_set_ucode_loaded(struct wlc_hw_info *wlc_hw, bool ucode_loaded)
{
wlc_hw->ucode_loaded = ucode_loaded;
}
-void wlc_bmac_pllreq(wlc_hw_info_t *wlc_hw, bool set, mbool req_bit)
+void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
{
ASSERT(req_bit);
@@ -4088,7 +4118,7 @@ void wlc_bmac_pllreq(wlc_hw_info_t *wlc_hw, bool set, mbool req_bit)
return;
}
-void wlc_bmac_set_clk(wlc_hw_info_t *wlc_hw, bool on)
+void wlc_bmac_set_clk(struct wlc_hw_info *wlc_hw, bool on)
{
if (on) {
/* power up pll and oscillator */
@@ -4110,7 +4140,7 @@ void wlc_bmac_set_clk(wlc_hw_info_t *wlc_hw, bool on)
}
/* this will be true for all ai chips */
-bool wlc_bmac_taclear(wlc_hw_info_t *wlc_hw, bool ta_ok)
+bool wlc_bmac_taclear(struct wlc_hw_info *wlc_hw, bool ta_ok)
{
return true;
}
@@ -4118,7 +4148,7 @@ bool wlc_bmac_taclear(wlc_hw_info_t *wlc_hw, bool ta_ok)
/* Lower down relevant GPIOs like LED when going down w/o
* doing PCI config cycles or touching interrupts
*/
-void wlc_gpio_fast_deinit(wlc_hw_info_t *wlc_hw)
+void wlc_gpio_fast_deinit(struct wlc_hw_info *wlc_hw)
{
if ((wlc_hw == NULL) || (wlc_hw->sih == NULL))
return;
@@ -4126,17 +4156,17 @@ void wlc_gpio_fast_deinit(wlc_hw_info_t *wlc_hw)
/* Only chips with internal bus or PCIE cores or certain PCI cores
* are able to switch cores w/o disabling interrupts
*/
- if (!((BUSTYPE(wlc_hw->sih->bustype) == SI_BUS) ||
- ((BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS) &&
+ if (!((wlc_hw->sih->bustype == SI_BUS) ||
+ ((wlc_hw->sih->bustype == PCI_BUS) &&
((wlc_hw->sih->buscoretype == PCIE_CORE_ID) ||
(wlc_hw->sih->buscorerev >= 13)))))
return;
- WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
return;
}
-bool wlc_bmac_radio_hw(wlc_hw_info_t *wlc_hw, bool enable)
+bool wlc_bmac_radio_hw(struct wlc_hw_info *wlc_hw, bool enable)
{
/* Do not access Phy registers if core is not up */
if (si_iscoreup(wlc_hw->sih) == false)
@@ -4171,7 +4201,7 @@ bool wlc_bmac_radio_hw(wlc_hw_info_t *wlc_hw, bool enable)
return true;
}
-u16 wlc_bmac_rate_shm_offset(wlc_hw_info_t *wlc_hw, u8 rate)
+u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate)
{
u16 table_ptr;
u8 phy_rate, index;
@@ -4195,12 +4225,12 @@ u16 wlc_bmac_rate_shm_offset(wlc_hw_info_t *wlc_hw, u8 rate)
return 2 * wlc_bmac_read_shm(wlc_hw, table_ptr + (index * 2));
}
-void wlc_bmac_set_txpwr_percent(wlc_hw_info_t *wlc_hw, u8 val)
+void wlc_bmac_set_txpwr_percent(struct wlc_hw_info *wlc_hw, u8 val)
{
wlc_phy_txpwr_percent_set(wlc_hw->band->pi, val);
}
-void wlc_bmac_antsel_set(wlc_hw_info_t *wlc_hw, u32 antsel_avail)
+void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail)
{
wlc_hw->antsel_avail = antsel_avail;
}
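The wlc_bmac.c hunks above convert the double-parenthesis WL_TRACE()/WL_ERROR() call sites into plain printf-style calls. A minimal sketch of the two macro shapes involved (illustrative only; the names and the printk level are assumptions, not the driver's actual wl_dbg.h definitions):

/* Illustrative sketch only -- not the actual wl_dbg.h contents. */

/* Old shape: the caller parenthesizes the whole argument list, so a
 * non-variadic macro can expand it into a single printf call:
 *     WL_ERROR(("wl%d: dead chip\n", unit));
 */
#define WL_ERROR_OLD(args)		do { printk args; } while (0)

/* New shape: a variadic macro, so call sites use normal printf syntax:
 *     WL_ERROR("wl%d: dead chip\n", unit);
 */
#define WL_ERROR_NEW(fmt, args...)	printk(KERN_ERR fmt, ##args)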
diff --git a/drivers/staging/brcm80211/sys/wlc_bmac.h b/drivers/staging/brcm80211/sys/wlc_bmac.h
index 872bc8d866d2..98150aaff3a3 100644
--- a/drivers/staging/brcm80211/sys/wlc_bmac.h
+++ b/drivers/staging/brcm80211/sys/wlc_bmac.h
@@ -57,7 +57,8 @@ typedef struct wlc_bmac_revinfo {
} band[MAXBANDS];
} wlc_bmac_revinfo_t;
-/* dup state between BMAC(wlc_hw_info_t) and HIGH(wlc_info_t) driver */
+/* dup state between BMAC(struct wlc_hw_info) and HIGH(struct wlc_info)
+ driver */
typedef struct wlc_bmac_state {
u32 machwcap; /* mac hw capibility */
u32 preamble_ovr; /* preamble override */
@@ -130,148 +131,143 @@ typedef enum {
WLCHW_STATE_LAST
} wlc_bmac_state_id_t;
-extern int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, osl_t *osh, void *regsva,
- uint bustype, void *btparam);
-extern int wlc_bmac_detach(wlc_info_t *wlc);
+extern int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device,
+ uint unit, bool piomode, struct osl_info *osh,
+ void *regsva, uint bustype, void *btparam);
+extern int wlc_bmac_detach(struct wlc_info *wlc);
extern void wlc_bmac_watchdog(void *arg);
-extern void wlc_bmac_info_init(wlc_hw_info_t *wlc_hw);
+extern void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw);
/* up/down, reset, clk */
-#ifdef WLC_LOW
-extern void wlc_bmac_xtal(wlc_hw_info_t *wlc_hw, bool want);
-#endif
+extern void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want);
-extern void wlc_bmac_copyto_objmem(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw,
uint offset, const void *buf, int len,
u32 sel);
-extern void wlc_bmac_copyfrom_objmem(wlc_hw_info_t *wlc_hw, uint offset,
+extern void wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset,
void *buf, int len, u32 sel);
#define wlc_bmac_copyfrom_shm(wlc_hw, offset, buf, len) \
wlc_bmac_copyfrom_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
#define wlc_bmac_copyto_shm(wlc_hw, offset, buf, len) \
wlc_bmac_copyto_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
-extern void wlc_bmac_core_phy_clk(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_core_phypll_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on);
-extern void wlc_bmac_phyclk_fgc(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_macphyclk_set(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_phy_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags);
-extern void wlc_bmac_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_init(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+extern void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on);
+extern void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
+extern void wlc_bmac_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
bool mute);
-extern int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_up_finish(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_down_prep(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags);
-extern void wlc_bmac_switch_macfreq(wlc_hw_info_t *wlc_hw, u8 spurmode);
+extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
+extern void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode);
/* chanspec, ucode interface */
-extern int wlc_bmac_bandtype(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_set_chanspec(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+extern int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw,
+ chanspec_t chanspec,
bool mute, struct txpwr_limits *txpwr);
-extern void wlc_bmac_txfifo(wlc_hw_info_t *wlc_hw, uint fifo, void *p,
+extern void wlc_bmac_txfifo(struct wlc_hw_info *wlc_hw, uint fifo, void *p,
bool commit, u16 frameid, u8 txpktpend);
-extern int wlc_bmac_xmtfifo_sz_get(wlc_hw_info_t *wlc_hw, uint fifo,
+extern int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo,
uint *blocks);
-extern void wlc_bmac_mhf(wlc_hw_info_t *wlc_hw, u8 idx, u16 mask,
+extern void wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask,
u16 val, int bands);
-extern void wlc_bmac_mctrl(wlc_hw_info_t *wlc_hw, u32 mask, u32 val);
-extern u16 wlc_bmac_mhf_get(wlc_hw_info_t *wlc_hw, u8 idx, int bands);
-extern int wlc_bmac_xmtfifo_sz_set(wlc_hw_info_t *wlc_hw, uint fifo,
+extern void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val);
+extern u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands);
+extern int wlc_bmac_xmtfifo_sz_set(struct wlc_hw_info *wlc_hw, uint fifo,
uint blocks);
-extern void wlc_bmac_txant_set(wlc_hw_info_t *wlc_hw, u16 phytxant);
-extern u16 wlc_bmac_get_txant(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_antsel_type_set(wlc_hw_info_t *wlc_hw, u8 antsel_type);
-extern int wlc_bmac_revinfo_get(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant);
+extern u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw,
+ u8 antsel_type);
+extern int wlc_bmac_revinfo_get(struct wlc_hw_info *wlc_hw,
wlc_bmac_revinfo_t *revinfo);
-extern int wlc_bmac_state_get(wlc_hw_info_t *wlc_hw, wlc_bmac_state_t *state);
-extern void wlc_bmac_write_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v);
-extern u16 wlc_bmac_read_shm(wlc_hw_info_t *wlc_hw, uint offset);
-extern void wlc_bmac_set_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v,
+extern int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw,
+ wlc_bmac_state_t *state);
+extern void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v);
+extern u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset);
+extern void wlc_bmac_set_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v,
int len);
-extern void wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset,
+extern void wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset,
int len, void *buf);
-extern void wlc_bmac_copyfrom_vars(wlc_hw_info_t *wlc_hw, char **buf,
+extern void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf,
uint *len);
-extern void wlc_bmac_process_ps_switch(wlc_hw_info_t *wlc,
+extern void wlc_bmac_process_ps_switch(struct wlc_hw_info *wlc,
struct ether_addr *ea, s8 ps_on);
-extern void wlc_bmac_hw_etheraddr(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw,
struct ether_addr *ea);
-extern void wlc_bmac_set_hw_etheraddr(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_set_hw_etheraddr(struct wlc_hw_info *wlc_hw,
struct ether_addr *ea);
-extern bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw);
+extern bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw);
-extern bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_set_shortslot(wlc_hw_info_t *wlc_hw, bool shortslot);
-extern void wlc_bmac_mute(wlc_hw_info_t *wlc_hw, bool want, mbool flags);
-extern void wlc_bmac_set_deaf(wlc_hw_info_t *wlc_hw, bool user_flag);
-extern void wlc_bmac_band_stf_ss_set(wlc_hw_info_t *wlc_hw, u8 stf_mode);
+extern bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot);
+extern void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags);
+extern void wlc_bmac_set_deaf(struct wlc_hw_info *wlc_hw, bool user_flag);
+extern void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode);
-extern void wlc_bmac_wait_for_wake(wlc_hw_info_t *wlc_hw);
-extern bool wlc_bmac_tx_fifo_suspended(wlc_hw_info_t *wlc_hw, uint tx_fifo);
-extern void wlc_bmac_tx_fifo_suspend(wlc_hw_info_t *wlc_hw, uint tx_fifo);
-extern void wlc_bmac_tx_fifo_resume(wlc_hw_info_t *wlc_hw, uint tx_fifo);
+extern void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw);
+extern bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw,
+ uint tx_fifo);
+extern void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo);
+extern void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo);
-extern void wlc_ucode_wake_override_set(wlc_hw_info_t *wlc_hw,
+extern void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw,
u32 override_bit);
-extern void wlc_ucode_wake_override_clear(wlc_hw_info_t *wlc_hw,
+extern void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw,
u32 override_bit);
-extern void wlc_bmac_set_rcmta(wlc_hw_info_t *wlc_hw, int idx,
+extern void wlc_bmac_set_rcmta(struct wlc_hw_info *wlc_hw, int idx,
const struct ether_addr *addr);
-extern void wlc_bmac_set_addrmatch(wlc_hw_info_t *wlc_hw, int match_reg_offset,
+extern void wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw,
+ int match_reg_offset,
const struct ether_addr *addr);
-extern void wlc_bmac_write_hw_bcntemplates(wlc_hw_info_t *wlc_hw, void *bcn,
- int len, bool both);
+extern void wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw,
+ void *bcn, int len, bool both);
-extern void wlc_bmac_read_tsf(wlc_hw_info_t *wlc_hw, u32 *tsf_l_ptr,
+extern void wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr);
-extern void wlc_bmac_set_cwmin(wlc_hw_info_t *wlc_hw, u16 newmin);
-extern void wlc_bmac_set_cwmax(wlc_hw_info_t *wlc_hw, u16 newmax);
-extern void wlc_bmac_set_noreset(wlc_hw_info_t *wlc, bool noreset_flag);
-extern void wlc_bmac_set_ucode_loaded(wlc_hw_info_t *wlc, bool ucode_loaded);
+extern void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin);
+extern void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax);
+extern void wlc_bmac_set_noreset(struct wlc_hw_info *wlc, bool noreset_flag);
+extern void wlc_bmac_set_ucode_loaded(struct wlc_hw_info *wlc,
+ bool ucode_loaded);
-extern void wlc_bmac_retrylimit_upd(wlc_hw_info_t *wlc_hw, u16 SRL,
+extern void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL,
u16 LRL);
-extern void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw);
+extern void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw);
-#ifdef WLC_HIGH_ONLY
-extern void wlc_bmac_dngl_reboot(rpc_info_t *);
-extern void wlc_bmac_dngl_rpc_agg(rpc_info_t *, u16 agg);
-extern void wlc_bmac_dngl_rpc_msglevel(rpc_info_t *, u16 level);
-extern void wlc_bmac_dngl_rpc_txq_wm_set(rpc_info_t *rpc, u32 wm);
-extern void wlc_bmac_dngl_rpc_txq_wm_get(rpc_info_t *rpc, u32 *wm);
-extern void wlc_bmac_dngl_rpc_agg_limit_set(rpc_info_t *rpc, u32 val);
-extern void wlc_bmac_dngl_rpc_agg_limit_get(rpc_info_t *rpc, u32 *pval);
-extern int wlc_bmac_debug_template(wlc_hw_info_t *wlc_hw);
-#endif
/* API for BMAC driver (e.g. wlc_phy.c etc) */
-extern void wlc_bmac_bw_set(wlc_hw_info_t *wlc_hw, u16 bw);
-extern void wlc_bmac_pllreq(wlc_hw_info_t *wlc_hw, bool set, mbool req_bit);
-extern void wlc_bmac_set_clk(wlc_hw_info_t *wlc_hw, bool on);
-extern bool wlc_bmac_taclear(wlc_hw_info_t *wlc_hw, bool ta_ok);
+extern void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw);
+extern void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set,
+ mbool req_bit);
+extern void wlc_bmac_set_clk(struct wlc_hw_info *wlc_hw, bool on);
+extern bool wlc_bmac_taclear(struct wlc_hw_info *wlc_hw, bool ta_ok);
extern void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_dump(wlc_hw_info_t *wlc_hw, struct bcmstrbuf *b,
+extern void wlc_bmac_dump(struct wlc_hw_info *wlc_hw, struct bcmstrbuf *b,
wlc_bmac_dump_id_t dump_id);
-extern void wlc_gpio_fast_deinit(wlc_hw_info_t *wlc_hw);
+extern void wlc_gpio_fast_deinit(struct wlc_hw_info *wlc_hw);
-extern bool wlc_bmac_radio_hw(wlc_hw_info_t *wlc_hw, bool enable);
-extern u16 wlc_bmac_rate_shm_offset(wlc_hw_info_t *wlc_hw, u8 rate);
+extern bool wlc_bmac_radio_hw(struct wlc_hw_info *wlc_hw, bool enable);
+extern u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate);
-extern void wlc_bmac_assert_type_set(wlc_hw_info_t *wlc_hw, u32 type);
-extern void wlc_bmac_set_txpwr_percent(wlc_hw_info_t *wlc_hw, u8 val);
-extern void wlc_bmac_blink_sync(wlc_hw_info_t *wlc_hw, u32 led_pins);
-extern void wlc_bmac_ifsctl_edcrs_set(wlc_hw_info_t *wlc_hw, bool abie,
+extern void wlc_bmac_assert_type_set(struct wlc_hw_info *wlc_hw, u32 type);
+extern void wlc_bmac_set_txpwr_percent(struct wlc_hw_info *wlc_hw, u8 val);
+extern void wlc_bmac_blink_sync(struct wlc_hw_info *wlc_hw, u32 led_pins);
+extern void wlc_bmac_ifsctl_edcrs_set(struct wlc_hw_info *wlc_hw, bool abie,
bool isht);
-extern void wlc_bmac_antsel_set(wlc_hw_info_t *wlc_hw, u32 antsel_avail);
+extern void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail);
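The wlc_bmac.h hunk above applies the same mechanical rename to the prototypes: the wlc_hw_info_t/wlc_info_t typedefs become plain struct tags, in line with kernel coding style (which discourages typedefs for plain structs), and a forward declaration is then enough for pointer parameters. A small sketch of the pattern, using a hypothetical prototype name:

/* Illustrative only; example_phy_reset is a hypothetical prototype. */
struct wlc_hw_info;	/* forward declaration suffices for pointer params */

/* before:  typedef struct wlc_hw_info wlc_hw_info_t;
 *          extern void example_phy_reset(wlc_hw_info_t *wlc_hw);
 */
extern void example_phy_reset(struct wlc_hw_info *wlc_hw);	/* after */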
diff --git a/drivers/staging/brcm80211/sys/wlc_bsscfg.h b/drivers/staging/brcm80211/sys/wlc_bsscfg.h
index ae5542ab0334..d6a1971c69a0 100644
--- a/drivers/staging/brcm80211/sys/wlc_bsscfg.h
+++ b/drivers/staging/brcm80211/sys/wlc_bsscfg.h
@@ -34,7 +34,8 @@ typedef struct wlc_bsscfg wlc_bsscfg_t;
#define MAXMACLIST 64 /* max # source MAC matches */
#define BCN_TEMPLATE_COUNT 2
-/* Iterator for "associated" STA bss configs: (wlc_info_t *wlc, int idx, wlc_bsscfg_t *cfg) */
+/* Iterator for "associated" STA bss configs:
+ (struct wlc_info *wlc, int idx, wlc_bsscfg_t *cfg) */
#define FOREACH_AS_STA(wlc, idx, cfg) \
for (idx = 0; (int) idx < WLC_MAXBSSCFG; idx++) \
if ((cfg = (wlc)->bsscfg[idx]) && BSSCFG_STA(cfg) && cfg->associated)
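The wlc_bsscfg.h change only rewraps the comment describing FOREACH_AS_STA's parameters; the iterator itself is untouched. Because the macro expands to a for/if pair, a single-statement body works at call sites. A hedged usage sketch (the counting function is hypothetical, not taken from the driver):

/* Hypothetical caller -- shows the iterator's calling convention only. */
static int count_associated_stas(struct wlc_info *wlc)
{
	int idx;
	wlc_bsscfg_t *cfg;
	int n = 0;

	FOREACH_AS_STA(wlc, idx, cfg)
		n++;	/* cfg is an associated STA bss config here */

	return n;
}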
diff --git a/drivers/staging/brcm80211/sys/wlc_cfg.h b/drivers/staging/brcm80211/sys/wlc_cfg.h
index a415e1fd2c05..3decb7d1a5e5 100644
--- a/drivers/staging/brcm80211/sys/wlc_cfg.h
+++ b/drivers/staging/brcm80211/sys/wlc_cfg.h
@@ -23,14 +23,6 @@
#define IS_SINGLEBAND_5G(device) 0
-/* Keep WLC_HIGH_ONLY, WLC_SPLIT for USB extension later on */
-#if !defined(WLC_LOW)
-#define WLC_HIGH_ONLY
-#endif
-#if !defined(WLC_LOW)
-#define WLC_SPLIT
-#endif
-
/* **** Core type/rev defaults **** */
#define D11_DEFAULT 0x0fffffb0 /* Supported D11 revs: 4, 5, 7-27
* also need to update wlc.h MAXCOREREV
@@ -61,22 +53,6 @@
* 3 5356a0
*/
-#ifdef BCMSDIO
-#define D11CONF 0x100000
-#define SSLPNCONF 2
-#define GCCONF 0
-#define ACCONF 0
-#define NCONF 0
-#define LPCONF 0
-#define LCNCONF 0
-#define NTXD 32
-#define NRXD 16
-#define NRXBUFPOST 8
-#define WLC_DATAHIWAT 32
-#define RXBND 8
-#define MAXPKTCB 64
-#define AMPDU_NUM_MPDU 8
-#endif
/* For undefined values, use defaults */
#ifndef D11CONF
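The wlc_cfg.h hunk drops the WLC_HIGH_ONLY/WLC_SPLIT plumbing and the BCMSDIO-only tuning block, so symbols such as NRXBUFPOST and RXBND presumably always come from the generic "for undefined values, use defaults" section that starts just above. The defaulting pattern, shown with a placeholder value rather than the header's real one:

/* Defaulting pattern only; the numeric value here is a placeholder. */
#ifndef NRXBUFPOST
#define NRXBUFPOST	32	/* used when no bus-specific block (such as
				 * the removed BCMSDIO one) set it earlier
				 */
#endif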
diff --git a/drivers/staging/brcm80211/sys/wlc_channel.c b/drivers/staging/brcm80211/sys/wlc_channel.c
index 509280337e34..a35c15214880 100644
--- a/drivers/staging/brcm80211/sys/wlc_channel.c
+++ b/drivers/staging/brcm80211/sys/wlc_channel.c
@@ -19,16 +19,21 @@
#include <bcmdefs.h>
#include <wlc_cfg.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmutils.h>
#include <siutils.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <wlioctl.h>
#include <wlc_pub.h>
#include <wlc_key.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_bmac.h>
#include <wlc_stf.h>
#include <wlc_channel.h>
+#include <wl_dbg.h>
typedef struct wlc_cm_band {
u8 locale_flags; /* locale_info_t flags */
@@ -39,8 +44,8 @@ typedef struct wlc_cm_band {
} wlc_cm_band_t;
struct wlc_cm_info {
- wlc_pub_t *pub;
- wlc_info_t *wlc;
+ struct wlc_pub *pub;
+ struct wlc_info *wlc;
char srom_ccode[WLC_CNTRY_BUF_SZ]; /* Country Code in SROM */
uint srom_regrev; /* Regulatory Rev for the SROM ccode */
const country_info_t *country; /* current country def */
@@ -377,7 +382,7 @@ void wlc_locale_get_channels(const locale_info_t *locale, chanvec_t *channels)
{
u8 i;
- bzero(channels, sizeof(chanvec_t));
+ memset(channels, 0, sizeof(chanvec_t));
for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
if (locale->valid_channels & (1 << i)) {
@@ -562,8 +567,8 @@ struct chan20_info chan20_info[] = {
const locale_info_t *wlc_get_locale_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_locale_2g_table)) {
- WL_ERROR(("%s: locale 2g index size out of range %d\n",
- __func__, locale_idx));
+ WL_ERROR("%s: locale 2g index size out of range %d\n",
+ __func__, locale_idx);
ASSERT(locale_idx < ARRAY_SIZE(g_locale_2g_table));
return NULL;
}
@@ -573,8 +578,8 @@ const locale_info_t *wlc_get_locale_2g(u8 locale_idx)
const locale_info_t *wlc_get_locale_5g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_locale_5g_table)) {
- WL_ERROR(("%s: locale 5g index size out of range %d\n",
- __func__, locale_idx));
+ WL_ERROR("%s: locale 5g index size out of range %d\n",
+ __func__, locale_idx);
ASSERT(locale_idx < ARRAY_SIZE(g_locale_5g_table));
return NULL;
}
@@ -584,8 +589,8 @@ const locale_info_t *wlc_get_locale_5g(u8 locale_idx)
const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) {
- WL_ERROR(("%s: mimo 2g index size out of range %d\n", __func__,
- locale_idx));
+ WL_ERROR("%s: mimo 2g index size out of range %d\n",
+ __func__, locale_idx);
return NULL;
}
return g_mimo_2g_table[locale_idx];
@@ -594,26 +599,26 @@ const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx)
const locale_mimo_info_t *wlc_get_mimo_5g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) {
- WL_ERROR(("%s: mimo 5g index size out of range %d\n", __func__,
- locale_idx));
+ WL_ERROR("%s: mimo 5g index size out of range %d\n",
+ __func__, locale_idx);
return NULL;
}
return g_mimo_5g_table[locale_idx];
}
-wlc_cm_info_t *wlc_channel_mgr_attach(wlc_info_t *wlc)
+wlc_cm_info_t *wlc_channel_mgr_attach(struct wlc_info *wlc)
{
wlc_cm_info_t *wlc_cm;
char country_abbrev[WLC_CNTRY_BUF_SZ];
const country_info_t *country;
- wlc_pub_t *pub = wlc->pub;
+ struct wlc_pub *pub = wlc->pub;
char *ccode;
- WL_TRACE(("wl%d: wlc_channel_mgr_attach\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_channel_mgr_attach\n", wlc->pub->unit);
wlc_cm = kzalloc(sizeof(wlc_cm_info_t), GFP_ATOMIC);
if (wlc_cm == NULL) {
- WL_ERROR(("wl%d: %s: out of memory", pub->unit, __func__));
+ WL_ERROR("wl%d: %s: out of memory", pub->unit, __func__);
return NULL;
}
wlc_cm->pub = pub;
@@ -624,12 +629,13 @@ wlc_cm_info_t *wlc_channel_mgr_attach(wlc_info_t *wlc)
ccode = getvar(wlc->pub->vars, "ccode");
if (ccode) {
strncpy(wlc->pub->srom_ccode, ccode, WLC_CNTRY_BUF_SZ - 1);
- WL_NONE(("%s: SROM country code is %c%c\n", __func__,
- wlc->pub->srom_ccode[0], wlc->pub->srom_ccode[1]));
+ WL_NONE("%s: SROM country code is %c%c\n",
+ __func__,
+ wlc->pub->srom_ccode[0], wlc->pub->srom_ccode[1]);
}
/* internal country information which must match regulatory constraints in firmware */
- bzero(country_abbrev, WLC_CNTRY_BUF_SZ);
+ memset(country_abbrev, 0, WLC_CNTRY_BUF_SZ);
strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
country = wlc_country_lookup(wlc, country_abbrev);
@@ -659,7 +665,7 @@ const char *wlc_channel_country_abbrev(wlc_cm_info_t *wlc_cm)
u8 wlc_channel_locale_flags(wlc_cm_info_t *wlc_cm)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
return wlc_cm->bandstate[wlc->band->bandunit].locale_flags;
}
@@ -711,7 +717,9 @@ wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
char mapped_ccode[WLC_CNTRY_BUF_SZ];
uint mapped_regrev;
- WL_NONE(("%s: (country_abbrev \"%s\", ccode \"%s\", regrev %d) SPROM \"%s\"/%u\n", __func__, country_abbrev, ccode, regrev, wlc_cm->srom_ccode, wlc_cm->srom_regrev));
+ WL_NONE("%s: (country_abbrev \"%s\", ccode \"%s\", regrev %d) SPROM \"%s\"/%u\n",
+ __func__, country_abbrev, ccode, regrev,
+ wlc_cm->srom_ccode, wlc_cm->srom_regrev);
/* if regrev is -1, lookup the mapped country code,
* otherwise use the ccode and regrev directly
@@ -750,7 +758,7 @@ wlc_set_country_common(wlc_cm_info_t *wlc_cm,
{
const locale_mimo_info_t *li_mimo;
const locale_info_t *locale;
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
char prev_country_abbrev[WLC_CNTRY_BUF_SZ];
ASSERT(country != NULL);
@@ -758,7 +766,7 @@ wlc_set_country_common(wlc_cm_info_t *wlc_cm,
/* save current country state */
wlc_cm->country = country;
- bzero(&prev_country_abbrev, WLC_CNTRY_BUF_SZ);
+ memset(&prev_country_abbrev, 0, WLC_CNTRY_BUF_SZ);
strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
WLC_CNTRY_BUF_SZ - 1);
@@ -814,7 +822,7 @@ static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
char *mapped_ccode,
uint *mapped_regrev)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
const country_info_t *country;
uint srom_regrev = wlc_cm->srom_regrev;
const char *srom_ccode = wlc_cm->srom_ccode;
@@ -822,8 +830,8 @@ static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
/* check for currently supported ccode size */
if (strlen(ccode) > (WLC_CNTRY_BUF_SZ - 1)) {
- WL_ERROR(("wl%d: %s: ccode \"%s\" too long for match\n",
- wlc->pub->unit, __func__, ccode));
+ WL_ERROR("wl%d: %s: ccode \"%s\" too long for match\n",
+ wlc->pub->unit, __func__, ccode);
return NULL;
}
@@ -838,7 +846,7 @@ static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
if (!strcmp(srom_ccode, ccode)) {
*mapped_regrev = srom_regrev;
mapped = 0;
- WL_ERROR(("srom_code == ccode %s\n", __func__));
+ WL_ERROR("srom_code == ccode %s\n", __func__);
ASSERT(0);
} else {
mapped =
@@ -890,7 +898,7 @@ static const country_info_t *wlc_country_lookup_direct(const char *ccode,
}
}
- WL_ERROR(("%s: Returning NULL\n", __func__));
+ WL_ERROR("%s: Returning NULL\n", __func__);
ASSERT(0);
return NULL;
}
@@ -898,9 +906,9 @@ static const country_info_t *wlc_country_lookup_direct(const char *ccode,
static int
wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
uint i, j;
- wlcband_t *band;
+ struct wlcband *band;
const locale_info_t *li;
chanvec_t sup_chan;
const locale_mimo_info_t *li_mimo;
@@ -952,7 +960,7 @@ wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
*/
static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
uint chan;
struct txpwr_limits txpwr;
@@ -969,7 +977,9 @@ static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
if (chan == INVCHANNEL) {
/* country/locale with no valid channels, set the radio disable bit */
mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- WL_ERROR(("wl%d: %s: no valid channel for \"%s\" nbands %d bandlocked %d\n", wlc->pub->unit, __func__, wlc_cm->country_abbrev, NBANDS(wlc), wlc->bandlocked));
+ WL_ERROR("wl%d: %s: no valid channel for \"%s\" nbands %d bandlocked %d\n",
+ wlc->pub->unit, __func__,
+ wlc_cm->country_abbrev, NBANDS(wlc), wlc->bandlocked);
} else
if (mboolisset(wlc->pub->radio_disabled,
WL_RADIO_COUNTRY_DISABLE)) {
@@ -998,12 +1008,12 @@ static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
/* reset the quiet channels vector to the union of the restricted and radar channel sets */
void wlc_quiet_channels_reset(wlc_cm_info_t *wlc_cm)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
uint i, j;
- wlcband_t *band;
+ struct wlcband *band;
const chanvec_t *chanvec;
- bzero(&wlc_cm->quiet_channels, sizeof(chanvec_t));
+ memset(&wlc_cm->quiet_channels, 0, sizeof(chanvec_t));
band = wlc->band;
for (i = 0; i < NBANDS(wlc);
@@ -1036,7 +1046,7 @@ bool wlc_quiet_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chspec)
*/
bool wlc_valid_channel20_db(wlc_cm_info_t *wlc_cm, uint val)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
return VALID_CHANNEL20(wlc, val) ||
(!wlc->bandlocked
@@ -1054,7 +1064,7 @@ wlc_valid_channel20_in_band(wlc_cm_info_t *wlc_cm, uint bandunit, uint val)
/* Is the channel valid for the current locale and current band? */
bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
return ((val < MAXCHANNEL) &&
isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
@@ -1064,7 +1074,7 @@ bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val)
/* Is the 40 MHz allowed for the current locale and specified band? */
bool wlc_valid_40chanspec_in_band(wlc_cm_info_t *wlc_cm, uint bandunit)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
return (((wlc_cm->bandstate[bandunit].
locale_flags & (WLC_NO_MIMO | WLC_NO_40MHZ)) == 0)
@@ -1162,7 +1172,7 @@ void
wlc_channel_set_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
u8 local_constraint_qdbm)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
struct txpwr_limits txpwr;
wlc_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -1179,7 +1189,7 @@ int
wlc_channel_set_txpower_limit(wlc_cm_info_t *wlc_cm,
u8 local_constraint_qdbm)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
struct txpwr_limits txpwr;
wlc_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
@@ -1299,13 +1309,13 @@ void
wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
txpwr_limits_t *txpwr)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
uint i;
uint chan;
int maxpwr;
int delta;
const country_info_t *country;
- wlcband_t *band;
+ struct wlcband *band;
const locale_info_t *li;
int conducted_max;
int conducted_ofdm_max;
@@ -1314,7 +1324,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
int maxpwr_idx;
uint j;
- bzero(txpwr, sizeof(txpwr_limits_t));
+ memset(txpwr, 0, sizeof(txpwr_limits_t));
if (!wlc_valid_chanspec_db(wlc_cm, chanspec)) {
country = wlc_country_lookup(wlc, wlc->autocountry_default);
@@ -1528,13 +1538,13 @@ static bool wlc_japan_ccode(const char *ccode)
static bool
wlc_valid_chanspec_ext(wlc_cm_info_t *wlc_cm, chanspec_t chspec, bool dualband)
{
- wlc_info_t *wlc = wlc_cm->wlc;
+ struct wlc_info *wlc = wlc_cm->wlc;
u8 channel = CHSPEC_CHANNEL(chspec);
/* check the chanspec */
if (wf_chspec_malformed(chspec)) {
- WL_ERROR(("wl%d: malformed chanspec 0x%x\n", wlc->pub->unit,
- chspec));
+ WL_ERROR("wl%d: malformed chanspec 0x%x\n",
+ wlc->pub->unit, chspec);
ASSERT(0);
return false;
}
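The call-site changes above, where WL_ERROR(("fmt", ...)) becomes WL_ERROR("fmt", ...), imply the debug macros in wl_dbg.h moved from a single parenthesised argument to a variadic form. The actual definitions are not part of this hunk, so the sketch below only illustrates the shape of the change; the _OLD/_NEW names are illustrative, not the driver's:

/* old shape: one parenthesised argument, hence the double parens removed above */
#define WL_ERROR_OLD(args)		printk args

/* new shape: variadic macro, so callers pass the format string directly */
#define WL_ERROR_NEW(fmt, ...)		printk(KERN_ERR fmt, ##__VA_ARGS__)

The same hunks also replace bzero(p, n) with the kernel's memset(p, 0, n), a drop-in equivalent.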
diff --git a/drivers/staging/brcm80211/sys/wlc_event.c b/drivers/staging/brcm80211/sys/wlc_event.c
index 7e1bf0e2ecdd..dabd7094cd73 100644
--- a/drivers/staging/brcm80211/sys/wlc_event.c
+++ b/drivers/staging/brcm80211/sys/wlc_event.c
@@ -16,9 +16,13 @@
#include <linux/kernel.h>
#include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <wlioctl.h>
#include <wlc_cfg.h>
#include <wlc_pub.h>
@@ -32,6 +36,7 @@
#ifdef MSGTRACE
#include <msgtrace.h>
#endif
+#include <wl_dbg.h>
/* Local prototypes */
static void wlc_timer_cb(void *arg);
@@ -42,7 +47,7 @@ struct wlc_eventq {
wlc_event_t *tail;
struct wlc_info *wlc;
void *wl;
- wlc_pub_t *pub;
+ struct wlc_pub *pub;
bool tpending;
bool workpending;
struct wl_timer *timer;
@@ -53,7 +58,8 @@ struct wlc_eventq {
/*
* Export functions
*/
-wlc_eventq_t *wlc_eventq_attach(wlc_pub_t *pub, struct wlc_info *wlc, void *wl,
+wlc_eventq_t *wlc_eventq_attach(struct wlc_pub *pub, struct wlc_info *wlc,
+ void *wl,
wlc_eventq_cb_t cb)
{
wlc_eventq_t *eq;
@@ -69,8 +75,8 @@ wlc_eventq_t *wlc_eventq_attach(wlc_pub_t *pub, struct wlc_info *wlc, void *wl,
eq->timer = wl_init_timer(eq->wl, wlc_timer_cb, eq, "eventq");
if (!eq->timer) {
- WL_ERROR(("wl%d: wlc_eventq_attach: timer failed\n",
- pub->unit));
+ WL_ERROR("wl%d: wlc_eventq_attach: timer failed\n",
+ pub->unit);
kfree(eq);
return NULL;
}
diff --git a/drivers/staging/brcm80211/sys/wlc_event.h b/drivers/staging/brcm80211/sys/wlc_event.h
index e443dae258b7..e75582dcdd93 100644
--- a/drivers/staging/brcm80211/sys/wlc_event.h
+++ b/drivers/staging/brcm80211/sys/wlc_event.h
@@ -21,7 +21,8 @@ typedef struct wlc_eventq wlc_eventq_t;
typedef void (*wlc_eventq_cb_t) (void *arg);
-extern wlc_eventq_t *wlc_eventq_attach(wlc_pub_t *pub, struct wlc_info *wlc,
+extern wlc_eventq_t *wlc_eventq_attach(struct wlc_pub *pub,
+ struct wlc_info *wlc,
void *wl, wlc_eventq_cb_t cb);
extern int wlc_eventq_detach(wlc_eventq_t *eq);
extern int wlc_eventq_down(wlc_eventq_t *eq);
@@ -38,7 +39,7 @@ extern int wlc_eventq_query_ind(wlc_eventq_t *eq, void *bitvect);
extern int wlc_eventq_test_ind(wlc_eventq_t *eq, int et);
extern int wlc_eventq_set_ind(wlc_eventq_t *eq, uint et, bool on);
extern void wlc_eventq_flush(wlc_eventq_t *eq);
-extern void wlc_assign_event_msg(wlc_info_t *wlc, wl_event_msg_t *msg,
+extern void wlc_assign_event_msg(struct wlc_info *wlc, wl_event_msg_t *msg,
const wlc_event_t *e, u8 *data,
u32 len);
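This header now takes the explicit struct tags (struct wlc_pub, struct wlc_info) instead of the typedefs. Since the parameters are pointers, forward declarations are enough for the prototypes to compile without pulling in the full definitions; a minimal sketch, assuming the structs are defined elsewhere in the driver:

struct wlc_pub;		/* full definitions live elsewhere */
struct wlc_info;

extern wlc_eventq_t *wlc_eventq_attach(struct wlc_pub *pub,
				       struct wlc_info *wlc,
				       void *wl, wlc_eventq_cb_t cb);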
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index feaffcc64ec6..e37e8058e2b8 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -15,9 +15,10 @@
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
+#include <linux/etherdevice.h>
#include <bcmdefs.h>
+#include <bcmdevs.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmwifi.h>
@@ -27,7 +28,7 @@
#include <pcicfg.h>
#include <bcmsrom.h>
#include <wlioctl.h>
-#include <epivers.h>
+#include <sbhndpio.h>
#include <sbhnddma.h>
#include <hnddma.h>
#include <hndpmu.h>
@@ -37,6 +38,7 @@
#include <wlc_key.h>
#include <wlc_bsscfg.h>
#include <wlc_channel.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_bmac.h>
#include <wlc_scb.h>
@@ -47,27 +49,11 @@
#include <wlc_ampdu.h>
#include <wlc_event.h>
#include <wl_export.h>
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#else
#include "d11ucode_ext.h"
-#endif
-#ifdef WLC_HIGH_ONLY
-#include <bcm_rpc_tp.h>
-#include <bcm_rpc.h>
-#include <bcm_xdr.h>
-#include <wlc_rpc.h>
-#include <wlc_rpctx.h>
-#endif /* WLC_HIGH_ONLY */
#include <wlc_alloc.h>
#include <net/mac80211.h>
+#include <wl_dbg.h>
-#ifdef WLC_HIGH_ONLY
-#undef R_REG
-#undef W_REG
-#define R_REG(osh, r) RPC_READ_REG(osh, r)
-#define W_REG(osh, r, v) RPC_WRITE_REG(osh, r, v)
-#endif
/*
* buffer length needed for wlc_format_ssid
@@ -107,12 +93,8 @@
/* To inform the ucode of the last mcast frame posted so that it can clear moredata bit */
#define BCMCFID(wlc, fid) wlc_bmac_write_shm((wlc)->hw, M_BCMC_FID, (fid))
-#ifndef WLC_HIGH_ONLY
-#define WLC_WAR16165(wlc) (BUSTYPE(wlc->pub->sih->bustype) == PCI_BUS && \
+#define WLC_WAR16165(wlc) (wlc->pub->sih->bustype == PCI_BUS && \
(!AP_ENAB(wlc->pub)) && (wlc->war16165))
-#else
-#define WLC_WAR16165(wlc) (false)
-#endif /* WLC_HIGH_ONLY */
/* debug/trace */
uint wl_msg_level =
@@ -135,9 +117,11 @@ uint wl_msg_level =
#define SCAN_IN_PROGRESS(x) 0
+#define EPI_VERSION_NUM 0x054b0b00
+
#ifdef BCMDBG
/* pointer to most recently allocated wl/wlc */
-static wlc_info_t *wlc_info_dbg = (wlc_info_t *) (NULL);
+static struct wlc_info *wlc_info_dbg = (struct wlc_info *) (NULL);
#endif
/* IOVar table */
@@ -238,91 +222,91 @@ static const u8 acbitmap2maxprio[] = {
#define WLC_REPLAY_CNTRS_VALUE WPA_CAP_16_REPLAY_CNTRS
/* local prototypes */
-extern void wlc_txq_enq(void *ctx, struct scb *scb, void *sdu, uint prec);
-static u16 BCMFASTPATH wlc_d11hdrs_mac80211(wlc_info_t *wlc,
- struct ieee80211_hw *hw, void *p,
+static u16 BCMFASTPATH wlc_d11hdrs_mac80211(struct wlc_info *wlc,
+ struct ieee80211_hw *hw,
+ struct sk_buff *p,
struct scb *scb, uint frag,
uint nfrags, uint queue,
uint next_frag_len,
wsec_key_t *key,
ratespec_t rspec_override);
-bool wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw);
-void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend);
-static void wlc_bss_default_init(wlc_info_t *wlc);
-static void wlc_ucode_mac_upd(wlc_info_t *wlc);
-static ratespec_t mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band,
- u32 int_val);
-static void wlc_tx_prec_map_init(wlc_info_t *wlc);
+
+static void wlc_bss_default_init(struct wlc_info *wlc);
+static void wlc_ucode_mac_upd(struct wlc_info *wlc);
+static ratespec_t mac80211_wlc_set_nrate(struct wlc_info *wlc,
+ struct wlcband *cur_band, u32 int_val);
+static void wlc_tx_prec_map_init(struct wlc_info *wlc);
static void wlc_watchdog(void *arg);
static void wlc_watchdog_by_timer(void *arg);
-static int wlc_set_rateset(wlc_info_t *wlc, wlc_rateset_t *rs_arg);
-static int wlc_iovar_rangecheck(wlc_info_t *wlc, u32 val,
+static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg);
+static int wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val,
const bcm_iovar_t *vi);
-static u8 wlc_local_constraint_qdbm(wlc_info_t *wlc);
+static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc);
/* send and receive */
-static wlc_txq_info_t *wlc_txq_alloc(wlc_info_t *wlc, osl_t *osh);
-static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi);
-static void wlc_txflowcontrol_signal(wlc_info_t *wlc, wlc_txq_info_t *qi,
+static wlc_txq_info_t *wlc_txq_alloc(struct wlc_info *wlc,
+ struct osl_info *osh);
+static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
+ wlc_txq_info_t *qi);
+static void wlc_txflowcontrol_signal(struct wlc_info *wlc, wlc_txq_info_t *qi,
bool on, int prio);
-static void wlc_txflowcontrol_reset(wlc_info_t *wlc);
-static u16 wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec,
+static void wlc_txflowcontrol_reset(struct wlc_info *wlc);
+static u16 wlc_compute_airtime(struct wlc_info *wlc, ratespec_t rspec,
uint length);
static void wlc_compute_cck_plcp(ratespec_t rate, uint length, u8 *plcp);
static void wlc_compute_ofdm_plcp(ratespec_t rate, uint length, u8 *plcp);
static void wlc_compute_mimo_plcp(ratespec_t rate, uint length, u8 *plcp);
-static u16 wlc_compute_frame_dur(wlc_info_t *wlc, ratespec_t rate,
+static u16 wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate,
u8 preamble_type, uint next_frag_len);
-static void wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh,
- void *p);
-static uint wlc_calc_frame_len(wlc_info_t *wlc, ratespec_t rate,
+static void wlc_recvctl(struct wlc_info *wlc, struct osl_info *osh,
+ d11rxhdr_t *rxh, struct sk_buff *p);
+static uint wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t rate,
u8 preamble_type, uint dur);
-static uint wlc_calc_ack_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rate,
u8 preamble_type);
-static uint wlc_calc_cts_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rate,
u8 preamble_type);
/* interrupt, up/down, band */
-static void wlc_setband(wlc_info_t *wlc, uint bandunit);
-static chanspec_t wlc_init_chanspec(wlc_info_t *wlc);
-static void wlc_bandinit_ordered(wlc_info_t *wlc, chanspec_t chanspec);
-static void wlc_bsinit(wlc_info_t *wlc);
-static int wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
+static void wlc_setband(struct wlc_info *wlc, uint bandunit);
+static chanspec_t wlc_init_chanspec(struct wlc_info *wlc);
+static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec);
+static void wlc_bsinit(struct wlc_info *wlc);
+static int wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
bool writeToShm);
-static void wlc_radio_hwdisable_upd(wlc_info_t *wlc);
-static bool wlc_radio_monitor_start(wlc_info_t *wlc);
+static void wlc_radio_hwdisable_upd(struct wlc_info *wlc);
+static bool wlc_radio_monitor_start(struct wlc_info *wlc);
static void wlc_radio_timer(void *arg);
-static void wlc_radio_enable(wlc_info_t *wlc);
-static void wlc_radio_upd(wlc_info_t *wlc);
+static void wlc_radio_enable(struct wlc_info *wlc);
+static void wlc_radio_upd(struct wlc_info *wlc);
/* scan, association, BSS */
-static uint wlc_calc_ba_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rate,
u8 preamble_type);
-static void wlc_update_mimo_band_bwcap(wlc_info_t *wlc, u8 bwcap);
-static void wlc_ht_update_sgi_rx(wlc_info_t *wlc, int val);
-void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode);
-static void wlc_ht_update_ldpc(wlc_info_t *wlc, s8 val);
-static void wlc_war16165(wlc_info_t *wlc, bool tx);
+static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap);
+static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val);
+static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val);
+static void wlc_war16165(struct wlc_info *wlc, bool tx);
static void wlc_process_eventq(void *arg);
-static void wlc_wme_retries_write(wlc_info_t *wlc);
-static bool wlc_attach_stf_ant_init(wlc_info_t *wlc);
-static uint wlc_attach_module(wlc_info_t *wlc);
-static void wlc_detach_module(wlc_info_t *wlc);
-static void wlc_timers_deinit(wlc_info_t *wlc);
-static void wlc_down_led_upd(wlc_info_t *wlc);
-static uint wlc_down_del_timer(wlc_info_t *wlc);
-static void wlc_ofdm_rateset_war(wlc_info_t *wlc);
-static int _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len,
+static void wlc_wme_retries_write(struct wlc_info *wlc);
+static bool wlc_attach_stf_ant_init(struct wlc_info *wlc);
+static uint wlc_attach_module(struct wlc_info *wlc);
+static void wlc_detach_module(struct wlc_info *wlc);
+static void wlc_timers_deinit(struct wlc_info *wlc);
+static void wlc_down_led_upd(struct wlc_info *wlc);
+static uint wlc_down_del_timer(struct wlc_info *wlc);
+static void wlc_ofdm_rateset_war(struct wlc_info *wlc);
+static int _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
struct wlc_if *wlcif);
#if defined(BCMDBG)
-void wlc_get_rcmta(wlc_info_t *wlc, int idx, struct ether_addr *addr)
+void wlc_get_rcmta(struct wlc_info *wlc, int idx, struct ether_addr *addr)
{
d11regs_t *regs = wlc->regs;
u32 v32;
- osl_t *osh;
+ struct osl_info *osh;
- WL_TRACE(("wl%d: %s\n", WLCWLUNIT(wlc), __func__));
+ WL_TRACE("wl%d: %s\n", WLCWLUNIT(wlc), __func__);
ASSERT(wlc->pub->corerev > 4);
@@ -344,14 +328,14 @@ void wlc_get_rcmta(wlc_info_t *wlc, int idx, struct ether_addr *addr)
#endif /* defined(BCMDBG) */
/* keep the chip awake if needed */
-bool wlc_stay_awake(wlc_info_t *wlc)
+bool wlc_stay_awake(struct wlc_info *wlc)
{
return true;
}
/* conditions under which the PM bit should be set in outgoing frames and STAY_AWAKE is meaningful
*/
-bool wlc_ps_allowed(wlc_info_t *wlc)
+bool wlc_ps_allowed(struct wlc_info *wlc)
{
int idx;
wlc_bsscfg_t *cfg;
@@ -378,9 +362,9 @@ bool wlc_ps_allowed(wlc_info_t *wlc)
return true;
}
-void wlc_reset(wlc_info_t *wlc)
+void wlc_reset(struct wlc_info *wlc)
{
- WL_TRACE(("wl%d: wlc_reset\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_reset\n", wlc->pub->unit);
wlc->check_for_unaligned_tbtt = false;
@@ -389,34 +373,19 @@ void wlc_reset(wlc_info_t *wlc)
wlc_statsupd(wlc);
/* reset our snapshot of macstat counters */
- bzero((char *)wlc->core->macstat_snapshot, sizeof(macstat_t));
+ memset((char *)wlc->core->macstat_snapshot, 0,
+ sizeof(macstat_t));
}
wlc_bmac_reset(wlc->hw);
wlc_ampdu_reset(wlc->ampdu);
wlc->txretried = 0;
-#ifdef WLC_HIGH_ONLY
- /* Need to set a flag(to be cleared asynchronously by BMAC driver with high call)
- * in order to prevent wlc_rpctx_txreclaim() from screwing wlc_rpctx_getnexttxp(),
- * which could be invoked by already QUEUED high call(s) from BMAC driver before
- * wlc_bmac_reset() finishes.
- * It's not needed before in monolithic driver model because d11core interrupts would
- * have been cleared instantly in wlc_bmac_reset() and no txstatus interrupt
- * will come to driver to fetch those flushed dma pkt pointers.
- */
- wlc->reset_bmac_pending = true;
-
- wlc_rpctx_txreclaim(wlc->rpctx);
-
- wlc_stf_phy_txant_upd(wlc);
- wlc_phy_ant_rxdiv_set(wlc->band->pi, wlc->stf->ant_rx_ovr);
-#endif
}
-void wlc_fatal_error(wlc_info_t *wlc)
+void wlc_fatal_error(struct wlc_info *wlc)
{
- WL_ERROR(("wl%d: fatal error, reinitializing\n", wlc->pub->unit));
+ WL_ERROR("wl%d: fatal error, reinitializing\n", wlc->pub->unit);
wl_init(wlc->wl);
}
@@ -425,7 +394,7 @@ void wlc_fatal_error(wlc_info_t *wlc)
* if other configurations are in conflict (bandlocked, 11n mode disabled,
* invalid channel for current country, etc.)
*/
-static chanspec_t wlc_init_chanspec(wlc_info_t *wlc)
+static chanspec_t wlc_init_chanspec(struct wlc_info *wlc)
{
chanspec_t chanspec =
1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
@@ -441,7 +410,7 @@ static chanspec_t wlc_init_chanspec(wlc_info_t *wlc)
struct scb global_scb;
-static void wlc_init_scb(wlc_info_t *wlc, struct scb *scb)
+static void wlc_init_scb(struct wlc_info *wlc, struct scb *scb)
{
int i;
scb->flags = SCB_WMECAP | SCB_HTCAP;
@@ -449,7 +418,7 @@ static void wlc_init_scb(wlc_info_t *wlc, struct scb *scb)
scb->seqnum[i] = 0;
}
-void wlc_init(wlc_info_t *wlc)
+void wlc_init(struct wlc_info *wlc)
{
d11regs_t *regs;
chanspec_t chanspec;
@@ -457,7 +426,7 @@ void wlc_init(wlc_info_t *wlc)
wlc_bsscfg_t *bsscfg;
bool mute = false;
- WL_TRACE(("wl%d: wlc_init\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_init\n", wlc->pub->unit);
regs = wlc->regs;
@@ -583,13 +552,13 @@ void wlc_init(wlc_info_t *wlc)
}
}
-void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc)
+void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc)
{
wlc->bcnmisc_monitor = promisc;
wlc_mac_bcn_promisc(wlc);
}
-void wlc_mac_bcn_promisc(wlc_info_t *wlc)
+void wlc_mac_bcn_promisc(struct wlc_info *wlc)
{
if ((AP_ENAB(wlc->pub) && (N_ENAB(wlc->pub) || wlc->band->gmode)) ||
wlc->bcnmisc_ibss || wlc->bcnmisc_scan || wlc->bcnmisc_monitor)
@@ -599,7 +568,7 @@ void wlc_mac_bcn_promisc(wlc_info_t *wlc)
}
/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-void wlc_mac_promisc(wlc_info_t *wlc)
+void wlc_mac_promisc(struct wlc_info *wlc)
{
u32 promisc_bits = 0;
@@ -621,7 +590,7 @@ void wlc_mac_promisc(wlc_info_t *wlc)
}
/* check if hps and wake states of sw and hw are in sync */
-bool wlc_ps_check(wlc_info_t *wlc)
+bool wlc_ps_check(struct wlc_info *wlc)
{
bool res = true;
bool hps, wake;
@@ -636,8 +605,8 @@ bool wlc_ps_check(wlc_info_t *wlc)
* to avoid assert
*/
if (tmp == 0xffffffff) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__));
+ WL_ERROR("wl%d: %s: dead chip\n",
+ wlc->pub->unit, __func__);
return DEVICEREMOVED(wlc);
}
@@ -646,7 +615,8 @@ bool wlc_ps_check(wlc_info_t *wlc)
if (hps != ((tmp & MCTL_HPS) != 0)) {
int idx;
wlc_bsscfg_t *cfg;
- WL_ERROR(("wl%d: hps not sync, sw %d, maccontrol 0x%x\n", wlc->pub->unit, hps, tmp));
+ WL_ERROR("wl%d: hps not sync, sw %d, maccontrol 0x%x\n",
+ wlc->pub->unit, hps, tmp);
FOREACH_BSS(wlc, idx, cfg) {
if (!BSSCFG_STA(cfg))
continue;
@@ -654,23 +624,14 @@ bool wlc_ps_check(wlc_info_t *wlc)
res = false;
}
-#ifdef WLC_LOW
/* For a monolithic build the wake check can be exact since it looks at wake
* override bits. The MCTL_WAKE bit should match the 'wake' value.
*/
wake = STAY_AWAKE(wlc) || wlc->hw->wake_override;
wake_ok = (wake == ((tmp & MCTL_WAKE) != 0));
-#else
- /* For a split build we will not have access to any wake overrides from the low
- * level. The check can only make sure the MCTL_WAKE bit is on if the high
- * level 'wake' value is true. If the high level 'wake' is false, the MCTL_WAKE
- * may be either true or false due to the low level override.
- */
- wake = STAY_AWAKE(wlc);
- wake_ok = (wake && ((tmp & MCTL_WAKE) != 0)) || !wake;
-#endif
if (hps && !wake_ok) {
- WL_ERROR(("wl%d: wake not sync, sw %d maccontrol 0x%x\n", wlc->pub->unit, wake, tmp));
+ WL_ERROR("wl%d: wake not sync, sw %d maccontrol 0x%x\n",
+ wlc->pub->unit, wake, tmp);
res = false;
}
}
@@ -679,7 +640,7 @@ bool wlc_ps_check(wlc_info_t *wlc)
}
/* push sw hps and wake state through hardware */
-void wlc_set_ps_ctrl(wlc_info_t *wlc)
+void wlc_set_ps_ctrl(struct wlc_info *wlc)
{
u32 v1, v2;
bool hps, wake;
@@ -688,8 +649,8 @@ void wlc_set_ps_ctrl(wlc_info_t *wlc)
hps = PS_ALLOWED(wlc);
wake = hps ? (STAY_AWAKE(wlc)) : true;
- WL_TRACE(("wl%d: wlc_set_ps_ctrl: hps %d wake %d\n", wlc->pub->unit,
- hps, wake));
+ WL_TRACE("wl%d: wlc_set_ps_ctrl: hps %d wake %d\n",
+ wlc->pub->unit, hps, wake);
v1 = R_REG(wlc->osh, &wlc->regs->maccontrol);
v2 = 0;
@@ -714,7 +675,7 @@ void wlc_set_ps_ctrl(wlc_info_t *wlc)
int wlc_set_mac(wlc_bsscfg_t *cfg)
{
int err = 0;
- wlc_info_t *wlc = cfg->wlc;
+ struct wlc_info *wlc = cfg->wlc;
if (cfg == wlc->cfg) {
/* enter the MAC addr into the RXE match registers */
@@ -731,7 +692,7 @@ int wlc_set_mac(wlc_bsscfg_t *cfg)
*/
void wlc_set_bssid(wlc_bsscfg_t *cfg)
{
- wlc_info_t *wlc = cfg->wlc;
+ struct wlc_info *wlc = cfg->wlc;
/* if primary config, we need to update BSSID in RXE match registers */
if (cfg == wlc->cfg) {
@@ -748,7 +709,7 @@ void wlc_set_bssid(wlc_bsscfg_t *cfg)
* Suspend the the MAC and update the slot timing
* for standard 11b/g (20us slots) or shortslot 11g (9us slots).
*/
-void wlc_switch_shortslot(wlc_info_t *wlc, bool shortslot)
+void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot)
{
int idx;
wlc_bsscfg_t *cfg;
@@ -776,7 +737,7 @@ void wlc_switch_shortslot(wlc_info_t *wlc, bool shortslot)
wlc_bmac_set_shortslot(wlc->hw, shortslot);
}
-static u8 wlc_local_constraint_qdbm(wlc_info_t *wlc)
+static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc)
{
u8 local;
s16 local_max;
@@ -803,7 +764,7 @@ static u8 wlc_local_constraint_qdbm(wlc_info_t *wlc)
}
/* propagate home chanspec to all bsscfgs in case bsscfg->current_bss->chanspec is referenced */
-void wlc_set_home_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
{
if (wlc->home_chanspec != chanspec) {
int idx;
@@ -821,7 +782,7 @@ void wlc_set_home_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
}
}
-static void wlc_set_phy_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+static void wlc_set_phy_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
{
/* Save our copy of the chanspec */
wlc->chanspec = chanspec;
@@ -840,15 +801,15 @@ static void wlc_set_phy_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
}
-void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
{
uint bandunit;
bool switchband = false;
chanspec_t old_chanspec = wlc->chanspec;
if (!wlc_valid_chanspec_db(wlc->cmi, chanspec)) {
- WL_ERROR(("wl%d: %s: Bad channel %d\n",
- wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)));
+ WL_ERROR("wl%d: %s: Bad channel %d\n",
+ wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
ASSERT(wlc_valid_chanspec_db(wlc->cmi, chanspec));
return;
}
@@ -859,7 +820,9 @@ void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
switchband = true;
if (wlc->bandlocked) {
- WL_ERROR(("wl%d: %s: chspec %d band is locked!\n", wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)));
+ WL_ERROR("wl%d: %s: chspec %d band is locked!\n",
+ wlc->pub->unit, __func__,
+ CHSPEC_CHANNEL(chanspec));
return;
}
/* BMAC_NOTE: should the setband call come after the wlc_bmac_chanspec() ?
@@ -895,7 +858,7 @@ void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
}
#if defined(BCMDBG)
-static int wlc_get_current_txpwr(wlc_info_t *wlc, void *pwr, uint len)
+static int wlc_get_current_txpwr(struct wlc_info *wlc, void *pwr, uint len)
{
txpwr_limits_t txpwr;
tx_power_t power;
@@ -909,7 +872,7 @@ static int wlc_get_current_txpwr(wlc_info_t *wlc, void *pwr, uint len)
else if (len < sizeof(tx_power_t))
return BCME_BUFTOOSHORT;
- bzero(&power, sizeof(tx_power_t));
+ memset(&power, 0, sizeof(tx_power_t));
power.chanspec = WLC_BAND_PI_RADIO_CHANSPEC;
if (wlc->pub->associated)
@@ -1031,7 +994,7 @@ static int wlc_get_current_txpwr(wlc_info_t *wlc, void *pwr, uint len)
} else {
int band_idx = CHSPEC_IS2G(power.chanspec) ? 0 : 1;
- bzero(old_power, sizeof(tx_power_legacy_t));
+ memset(old_power, 0, sizeof(tx_power_legacy_t));
old_power->txpwr_local_max = power.local_max;
old_power->txpwr_local_constraint = power.local_constraint;
@@ -1064,7 +1027,7 @@ static int wlc_get_current_txpwr(wlc_info_t *wlc, void *pwr, uint len)
}
#endif /* defined(BCMDBG) */
-static u32 wlc_watchdog_backup_bi(wlc_info_t *wlc)
+static u32 wlc_watchdog_backup_bi(struct wlc_info *wlc)
{
u32 bi;
bi = 2 * wlc->cfg->current_bss->dtim_period *
@@ -1083,7 +1046,7 @@ static u32 wlc_watchdog_backup_bi(wlc_info_t *wlc)
/* Change to run the watchdog either from a periodic timer or from tbtt handler.
* Call watchdog from tbtt handler if tbtt is true, watchdog timer otherwise.
*/
-void wlc_watchdog_upd(wlc_info_t *wlc, bool tbtt)
+void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt)
{
/* make sure changing watchdog driver is allowed */
if (!wlc->pub->up || !wlc->pub->align_wd_tbtt)
@@ -1112,7 +1075,7 @@ void wlc_watchdog_upd(wlc_info_t *wlc, bool tbtt)
}
}
-ratespec_t wlc_lowest_basic_rspec(wlc_info_t *wlc, wlc_rateset_t *rs)
+ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc, wlc_rateset_t *rs)
{
ratespec_t lowest_basic_rspec;
uint i;
@@ -1140,7 +1103,7 @@ ratespec_t wlc_lowest_basic_rspec(wlc_info_t *wlc, wlc_rateset_t *rs)
* ratespec CCK ant = wlc->stf->txant
* OFDM ant = 3
*/
-void wlc_beacon_phytxctl_txant_upd(wlc_info_t *wlc, ratespec_t bcn_rspec)
+void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc, ratespec_t bcn_rspec)
{
u16 phyctl;
u16 phytxant = wlc->stf->phytxant;
@@ -1159,9 +1122,9 @@ void wlc_beacon_phytxctl_txant_upd(wlc_info_t *wlc, ratespec_t bcn_rspec)
/* centralized protection config change function to simplify debugging, no consistency checking
* this should be called only on changes to avoid overhead in periodic function
*/
-void wlc_protection_upd(wlc_info_t *wlc, uint idx, int val)
+void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val)
{
- WL_TRACE(("wlc_protection_upd: idx %d, val %d\n", idx, val));
+ WL_TRACE("wlc_protection_upd: idx %d, val %d\n", idx, val);
switch (idx) {
case WLC_PROT_G_SPEC:
@@ -1205,7 +1168,7 @@ void wlc_protection_upd(wlc_info_t *wlc, uint idx, int val)
}
-static void wlc_ht_update_sgi_rx(wlc_info_t *wlc, int val)
+static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val)
{
wlc->ht_cap.cap &= ~(HT_CAP_SHORT_GI_20 | HT_CAP_SHORT_GI_40);
wlc->ht_cap.cap |= (val & WLC_N_SGI_20) ? HT_CAP_SHORT_GI_20 : 0;
@@ -1217,7 +1180,7 @@ static void wlc_ht_update_sgi_rx(wlc_info_t *wlc, int val)
}
}
-static void wlc_ht_update_ldpc(wlc_info_t *wlc, s8 val)
+static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val)
{
wlc->stf->ldpc = val;
@@ -1236,7 +1199,7 @@ static void wlc_ht_update_ldpc(wlc_info_t *wlc, s8 val)
* ucode, hwmac update
* Channel dependent updates for ucode and hw
*/
-static void wlc_ucode_mac_upd(wlc_info_t *wlc)
+static void wlc_ucode_mac_upd(struct wlc_info *wlc)
{
/* enable or disable any active IBSSs depending on whether or not
* we are on the home channel
@@ -1263,13 +1226,13 @@ static void wlc_ucode_mac_upd(wlc_info_t *wlc)
wlc_mac_promisc(wlc);
}
-static void wlc_bandinit_ordered(wlc_info_t *wlc, chanspec_t chanspec)
+static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec)
{
wlc_rateset_t default_rateset;
uint parkband;
uint i, band_order[2];
- WL_TRACE(("wl%d: wlc_bandinit_ordered\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_bandinit_ordered\n", wlc->pub->unit);
/*
* We might have been bandlocked during down and the chip power-cycled (hibernate).
* figure out the right band to park on
@@ -1310,10 +1273,10 @@ static void wlc_bandinit_ordered(wlc_info_t *wlc, chanspec_t chanspec)
}
/* band-specific init */
-static void WLBANDINITFN(wlc_bsinit) (wlc_info_t *wlc)
+static void WLBANDINITFN(wlc_bsinit) (struct wlc_info *wlc)
{
- WL_TRACE(("wl%d: wlc_bsinit: bandunit %d\n", wlc->pub->unit,
- wlc->band->bandunit));
+ WL_TRACE("wl%d: wlc_bsinit: bandunit %d\n",
+ wlc->pub->unit, wlc->band->bandunit);
/* write ucode ACK/CTS rate table */
wlc_set_ratetable(wlc);
@@ -1328,7 +1291,7 @@ static void WLBANDINITFN(wlc_bsinit) (wlc_info_t *wlc)
}
/* switch to and initialize new band */
-static void WLBANDINITFN(wlc_setband) (wlc_info_t *wlc, uint bandunit)
+static void WLBANDINITFN(wlc_setband) (struct wlc_info *wlc, uint bandunit)
{
int idx;
wlc_bsscfg_t *cfg;
@@ -1353,7 +1316,7 @@ static void WLBANDINITFN(wlc_setband) (wlc_info_t *wlc, uint bandunit)
}
/* Initialize a WME Parameter Info Element with default STA parameters from WMM Spec, Table 12 */
-void wlc_wme_initparams_sta(wlc_info_t *wlc, wme_param_ie_t *pe)
+void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe)
{
static const wme_param_ie_t stadef = {
WME_OUI,
@@ -1378,7 +1341,7 @@ void wlc_wme_initparams_sta(wlc_info_t *wlc, wme_param_ie_t *pe)
memcpy(pe, &stadef, sizeof(*pe));
}
-void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
+void wlc_wme_setparams(struct wlc_info *wlc, u16 aci, void *arg, bool suspend)
{
int i;
shm_acparams_t acp_shm;
@@ -1389,7 +1352,7 @@ void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
/* Only apply params if the core is out of reset and has clocks */
if (!wlc->clk) {
- WL_ERROR(("wl%d: %s : no-clock\n", wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s : no-clock\n", wlc->pub->unit, __func__);
return;
}
@@ -1402,7 +1365,7 @@ void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
wlc->wme_admctl = 0;
do {
- bzero((char *)&acp_shm, sizeof(shm_acparams_t));
+ memset((char *)&acp_shm, 0, sizeof(shm_acparams_t));
/* find out which ac this set of params applies to */
ASSERT(aci < AC_COUNT);
/* set the admission control policy for this AC */
@@ -1421,8 +1384,8 @@ void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
if (acp_shm.aifs < EDCF_AIFSN_MIN
|| acp_shm.aifs > EDCF_AIFSN_MAX) {
- WL_ERROR(("wl%d: wlc_edcf_setparams: bad aifs %d\n",
- wlc->pub->unit, acp_shm.aifs));
+ WL_ERROR("wl%d: wlc_edcf_setparams: bad aifs %d\n",
+ wlc->pub->unit, acp_shm.aifs);
continue;
}
@@ -1459,7 +1422,7 @@ void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend)
{
- wlc_info_t *wlc = cfg->wlc;
+ struct wlc_info *wlc = cfg->wlc;
uint aci, i, j;
edcf_acparam_t *edcf_acp;
shm_acparams_t acp_shm;
@@ -1483,7 +1446,7 @@ void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend)
wlc->wme_admctl = 0;
for (i = 0; i < AC_COUNT; i++, edcf_acp++) {
- bzero((char *)&acp_shm, sizeof(shm_acparams_t));
+ memset((char *)&acp_shm, 0, sizeof(shm_acparams_t));
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
ASSERT(aci < AC_COUNT);
@@ -1505,8 +1468,8 @@ void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend)
if (acp_shm.aifs < EDCF_AIFSN_MIN
|| acp_shm.aifs > EDCF_AIFSN_MAX) {
- WL_ERROR(("wl%d: wlc_edcf_setparams: bad aifs %d\n",
- wlc->pub->unit, acp_shm.aifs));
+ WL_ERROR("wl%d: wlc_edcf_setparams: bad aifs %d\n",
+ wlc->pub->unit, acp_shm.aifs);
continue;
}
@@ -1548,20 +1511,19 @@ void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend)
}
-bool wlc_timers_init(wlc_info_t *wlc, int unit)
+bool wlc_timers_init(struct wlc_info *wlc, int unit)
{
wlc->wdtimer = wl_init_timer(wlc->wl, wlc_watchdog_by_timer,
wlc, "watchdog");
if (!wlc->wdtimer) {
- WL_ERROR(("wl%d: wl_init_timer for wdtimer failed\n", unit));
+ WL_ERROR("wl%d: wl_init_timer for wdtimer failed\n", unit);
goto fail;
}
wlc->radio_timer = wl_init_timer(wlc->wl, wlc_radio_timer,
wlc, "radio");
if (!wlc->radio_timer) {
- WL_ERROR(("wl%d: wl_init_timer for radio_timer failed\n",
- unit));
+ WL_ERROR("wl%d: wl_init_timer for radio_timer failed\n", unit);
goto fail;
}
@@ -1575,7 +1537,7 @@ bool wlc_timers_init(wlc_info_t *wlc, int unit)
* Initialize wlc_info default values ...
* may get overrides later in this function
*/
-void wlc_info_init(wlc_info_t *wlc, int unit)
+void wlc_info_init(struct wlc_info *wlc, int unit)
{
int i;
/* Assume the device is there until proven otherwise */
@@ -1686,7 +1648,7 @@ void wlc_info_init(wlc_info_t *wlc, int unit)
wlc->pr80838_war = true;
}
-static bool wlc_state_bmac_sync(wlc_info_t *wlc)
+static bool wlc_state_bmac_sync(struct wlc_info *wlc)
{
wlc_bmac_state_t state_bmac;
@@ -1700,7 +1662,7 @@ static bool wlc_state_bmac_sync(wlc_info_t *wlc)
return true;
}
-static uint wlc_attach_module(wlc_info_t *wlc)
+static uint wlc_attach_module(struct wlc_info *wlc)
{
uint err = 0;
uint unit;
@@ -1708,15 +1670,14 @@ static uint wlc_attach_module(wlc_info_t *wlc)
wlc->asi = wlc_antsel_attach(wlc, wlc->osh, wlc->pub, wlc->hw);
if (wlc->asi == NULL) {
- WL_ERROR(("wl%d: wlc_attach: wlc_antsel_attach failed\n",
- unit));
+ WL_ERROR("wl%d: wlc_attach: wlc_antsel_attach failed\n", unit);
err = 44;
goto fail;
}
wlc->ampdu = wlc_ampdu_attach(wlc);
if (wlc->ampdu == NULL) {
- WL_ERROR(("wl%d: wlc_attach: wlc_ampdu_attach failed\n", unit));
+ WL_ERROR("wl%d: wlc_attach: wlc_ampdu_attach failed\n", unit);
err = 50;
goto fail;
}
@@ -1725,13 +1686,13 @@ static uint wlc_attach_module(wlc_info_t *wlc)
wlc->eventq =
wlc_eventq_attach(wlc->pub, wlc, wlc->wl, wlc_process_eventq);
if (wlc->eventq == NULL) {
- WL_ERROR(("wl%d: wlc_attach: wlc_eventq_attachfailed\n", unit));
+ WL_ERROR("wl%d: wlc_attach: wlc_eventq_attachfailed\n", unit);
err = 57;
goto fail;
}
if ((wlc_stf_attach(wlc) != 0)) {
- WL_ERROR(("wl%d: wlc_attach: wlc_stf_attach failed\n", unit));
+ WL_ERROR("wl%d: wlc_attach: wlc_stf_attach failed\n", unit);
err = 68;
goto fail;
}
@@ -1739,9 +1700,9 @@ static uint wlc_attach_module(wlc_info_t *wlc)
return err;
}
-wlc_pub_t *wlc_pub(void *wlc)
+struct wlc_pub *wlc_pub(void *wlc)
{
- return ((wlc_info_t *) wlc)->pub;
+ return ((struct wlc_info *) wlc)->pub;
}
#define CHIP_SUPPORTS_11N(wlc) 1
@@ -1750,25 +1711,25 @@ wlc_pub_t *wlc_pub(void *wlc)
* The common driver entry routine. Error codes should be unique
*/
void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
- osl_t *osh, void *regsva, uint bustype, void *btparam,
- uint *perr)
+ struct osl_info *osh, void *regsva, uint bustype,
+ void *btparam, uint *perr)
{
- wlc_info_t *wlc;
+ struct wlc_info *wlc;
uint err = 0;
uint j;
- wlc_pub_t *pub;
+ struct wlc_pub *pub;
wlc_txq_info_t *qi;
uint n_disabled;
- WL_NONE(("wl%d: %s: vendor 0x%x device 0x%x\n", unit, __func__, vendor,
- device));
+ WL_NONE("wl%d: %s: vendor 0x%x device 0x%x\n",
+ unit, __func__, vendor, device);
ASSERT(WSEC_MAX_RCMTA_KEYS <= WSEC_MAX_KEYS);
ASSERT(WSEC_MAX_DEFAULT_KEYS == WLC_DEFAULT_KEYS);
/* some code depends on packed structures */
- ASSERT(sizeof(struct ether_addr) == ETHER_ADDR_LEN);
- ASSERT(sizeof(struct ether_header) == ETHER_HDR_LEN);
+ ASSERT(sizeof(struct ether_addr) == ETH_ALEN);
+ ASSERT(sizeof(struct ether_header) == ETH_HLEN);
ASSERT(sizeof(d11regs_t) == SI_CORE_SIZE);
ASSERT(sizeof(ofdm_phy_hdr_t) == D11_PHY_HDR_LEN);
ASSERT(sizeof(cck_phy_hdr_t) == D11_PHY_HDR_LEN);
@@ -1780,8 +1741,10 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
ASSERT(sizeof(struct dot11_bcn_prb) == DOT11_BCN_PRB_LEN);
ASSERT(sizeof(tx_status_t) == TXSTATUS_LEN);
ASSERT(sizeof(ht_cap_ie_t) == HT_CAP_IE_LEN);
+#ifdef BRCM_FULLMAC
ASSERT(offsetof(wl_scan_params_t, channel_list) ==
WL_SCAN_PARAMS_FIXED_SIZE);
+#endif
ASSERT(IS_ALIGNED(offsetof(wsec_key_t, data), sizeof(u32)));
ASSERT(ISPOWEROF2(MA_WINDOW_SZ));
@@ -1797,8 +1760,8 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
|| (WPA_CAP_4_REPLAY_CNTRS == WLC_REPLAY_CNTRS_VALUE
&& 4 == WLC_NUMRXIVS));
- /* allocate wlc_info_t state and its substructures */
- wlc = (wlc_info_t *) wlc_attach_malloc(osh, unit, &err, device);
+ /* allocate struct wlc_info state and its substructures */
+ wlc = (struct wlc_info *) wlc_attach_malloc(osh, unit, &err, device);
if (wlc == NULL)
goto fail;
wlc->osh = osh;
@@ -1819,7 +1782,7 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
/* By default restrict TKIP associations from 11n STA's */
wlc->ht_wsec_restriction = WLC_HT_TKIP_RESTRICT;
- /* populate wlc_info_t with default values */
+ /* populate struct wlc_info with default values */
wlc_info_init(wlc, unit);
/* update sta/ap related parameters */
@@ -1851,10 +1814,6 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
/* propagate *vars* from BMAC driver to high driver */
wlc_bmac_copyfrom_vars(wlc->hw, &pub->vars, &wlc->vars_size);
-#ifdef WLC_HIGH_ONLY
- WL_TRACE(("nvram : vars %p , vars_size %d\n", pub->vars,
- wlc->vars_size));
-#endif
/* set maximum allowed duty cycle */
wlc->tx_duty_cycle_ofdm =
@@ -1872,19 +1831,17 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain,
wlc->stf->hw_rxchain);
-#ifdef WLC_LOW
/* pull up some info resulting from the low attach */
{
int i;
for (i = 0; i < NFIFO; i++)
wlc->core->txavail[i] = wlc->hw->txavail[i];
}
-#endif /* WLC_LOW */
wlc_bmac_hw_etheraddr(wlc->hw, &wlc->perm_etheraddr);
bcopy((char *)&wlc->perm_etheraddr, (char *)&pub->cur_etheraddr,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
for (j = 0; j < NBANDS(wlc); j++) {
/* Use band 1 for single band 11a */
@@ -1942,7 +1899,7 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
goto fail;
if (!wlc_timers_init(wlc, unit)) {
- WL_ERROR(("wl%d: %s: wlc_init_timer failed\n", unit, __func__));
+ WL_ERROR("wl%d: %s: wlc_init_timer failed\n", unit, __func__);
err = 32;
goto fail;
}
@@ -1950,8 +1907,8 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
/* depend on rateset, gmode */
wlc->cmi = wlc_channel_mgr_attach(wlc);
if (!wlc->cmi) {
- WL_ERROR(("wl%d: %s: wlc_channel_mgr_attach failed\n", unit,
- __func__));
+ WL_ERROR("wl%d: %s: wlc_channel_mgr_attach failed\n",
+ unit, __func__);
err = 33;
goto fail;
}
@@ -1966,8 +1923,8 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
/* allocate our initial queue */
qi = wlc_txq_alloc(wlc, osh);
if (qi == NULL) {
- WL_ERROR(("wl%d: %s: failed to malloc tx queue\n", unit,
- __func__));
+ WL_ERROR("wl%d: %s: failed to malloc tx queue\n",
+ unit, __func__);
err = 100;
goto fail;
}
@@ -2037,7 +1994,7 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
wlc_radio_mpc_upd(wlc);
if (WLANTSEL_ENAB(wlc)) {
- if ((CHIPID(wlc->pub->sih->chip)) == BCM43235_CHIP_ID) {
+ if ((wlc->pub->sih->chip) == BCM43235_CHIP_ID) {
if ((getintvar(wlc->pub->vars, "aa2g") == 7) ||
(getintvar(wlc->pub->vars, "aa5g") == 7)) {
wlc_bmac_antsel_set(wlc->hw, 1);
@@ -2053,7 +2010,7 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
return (void *)wlc;
fail:
- WL_ERROR(("wl%d: %s: failed with err %d\n", unit, __func__, err));
+ WL_ERROR("wl%d: %s: failed with err %d\n", unit, __func__, err);
if (wlc)
wlc_detach(wlc);
@@ -2062,7 +2019,7 @@ void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
return NULL;
}
-static void wlc_attach_antgain_init(wlc_info_t *wlc)
+static void wlc_attach_antgain_init(struct wlc_info *wlc)
{
uint unit;
unit = wlc->pub->unit;
@@ -2071,7 +2028,8 @@ static void wlc_attach_antgain_init(wlc_info_t *wlc)
/* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
wlc->band->antgain = 8;
} else if (wlc->band->antgain == -1) {
- WL_ERROR(("wl%d: %s: Invalid antennas available in srom, using 2dB\n", unit, __func__));
+ WL_ERROR("wl%d: %s: Invalid antennas available in srom, using 2dB\n",
+ unit, __func__);
wlc->band->antgain = 8;
} else {
s8 gain, fract;
@@ -2093,7 +2051,7 @@ static void wlc_attach_antgain_init(wlc_info_t *wlc)
}
}
-static bool wlc_attach_stf_ant_init(wlc_info_t *wlc)
+static bool wlc_attach_stf_ant_init(struct wlc_info *wlc)
{
int aa;
uint unit;
@@ -2110,7 +2068,8 @@ static bool wlc_attach_stf_ant_init(wlc_info_t *wlc)
aa = (s8) getintvar(vars,
(BAND_5G(bandtype) ? "aa1" : "aa0"));
if ((aa < 1) || (aa > 15)) {
- WL_ERROR(("wl%d: %s: Invalid antennas available in srom (0x%x), using 3.\n", unit, __func__, aa));
+ WL_ERROR("wl%d: %s: Invalid antennas available in srom (0x%x), using 3\n",
+ unit, __func__, aa);
aa = 3;
}
@@ -2132,136 +2091,8 @@ static bool wlc_attach_stf_ant_init(wlc_info_t *wlc)
return true;
}
-#ifdef WLC_HIGH_ONLY
-/* HIGH_ONLY bmac_attach, which sync over LOW_ONLY bmac_attach states */
-int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
- bool piomode, osl_t *osh, void *regsva, uint bustype,
- void *btparam)
-{
- wlc_bmac_revinfo_t revinfo;
- uint idx = 0;
- rpc_info_t *rpc = (rpc_info_t *) btparam;
-
- ASSERT(bustype == RPC_BUS);
-
- /* install the rpc handle in the various state structures used by stub RPC functions */
- wlc->rpc = rpc;
- wlc->hw->rpc = rpc;
- wlc->hw->osh = osh;
- wlc->regs = 0;
-
- wlc->rpctx = wlc_rpctx_attach(wlc->pub, wlc);
- if (wlc->rpctx == NULL)
- return -1;
-
- /*
- * FIFO 0
- * TX: TX_AC_BK_FIFO (TX AC Background data packets)
- */
- /* Always initialized */
- ASSERT(NRPCTXBUFPOST <= NTXD);
- wlc_rpctx_fifoinit(wlc->rpctx, TX_DATA_FIFO, NRPCTXBUFPOST);
- wlc_rpctx_fifoinit(wlc->rpctx, TX_CTL_FIFO, NRPCTXBUFPOST);
- wlc_rpctx_fifoinit(wlc->rpctx, TX_BCMC_FIFO, NRPCTXBUFPOST);
-
- /* VI and BK inited only if WME */
- if (WME_ENAB(wlc->pub)) {
- wlc_rpctx_fifoinit(wlc->rpctx, TX_AC_BK_FIFO, NRPCTXBUFPOST);
- wlc_rpctx_fifoinit(wlc->rpctx, TX_AC_VI_FIFO, NRPCTXBUFPOST);
- }
-
- /* Allocate SB handle */
- wlc->pub->sih = osl_malloc(wlc->osh, sizeof(si_t));
- if (!wlc->pub->sih)
- return -1;
- bzero(wlc->pub->sih, sizeof(si_t));
-
- /* sync up revinfo with BMAC */
- bzero(&revinfo, sizeof(wlc_bmac_revinfo_t));
- if (wlc_bmac_revinfo_get(wlc->hw, &revinfo) != 0)
- return -1;
- wlc->vendorid = (u16) revinfo.vendorid;
- wlc->deviceid = (u16) revinfo.deviceid;
-
- wlc->pub->boardrev = (u16) revinfo.boardrev;
- wlc->pub->corerev = revinfo.corerev;
- wlc->pub->sromrev = (u8) revinfo.sromrev;
- wlc->pub->sih->chiprev = revinfo.chiprev;
- wlc->pub->sih->chip = revinfo.chip;
- wlc->pub->sih->chippkg = revinfo.chippkg;
- wlc->pub->sih->boardtype = revinfo.boardtype;
- wlc->pub->sih->boardvendor = revinfo.boardvendor;
- wlc->pub->sih->bustype = revinfo.bustype;
- wlc->pub->sih->buscoretype = revinfo.buscoretype;
- wlc->pub->sih->buscorerev = revinfo.buscorerev;
- wlc->pub->sih->issim = (bool) revinfo.issim;
- wlc->pub->sih->rpc = rpc;
-
- if (revinfo.nbands == 0 || revinfo.nbands > 2)
- return -1;
- wlc->pub->_nbands = revinfo.nbands;
-
- for (idx = 0; idx < wlc->pub->_nbands; idx++) {
- uint bandunit, bandtype; /* To access bandstate */
- wlc_phy_t *pi = osl_malloc(wlc->osh, sizeof(wlc_phy_t));
-
- if (!pi)
- return -1;
- bzero(pi, sizeof(wlc_phy_t));
- pi->rpc = rpc;
-
- bandunit = revinfo.band[idx].bandunit;
- bandtype = revinfo.band[idx].bandtype;
- wlc->bandstate[bandunit]->radiorev =
- (u8) revinfo.band[idx].radiorev;
- wlc->bandstate[bandunit]->phytype =
- (u16) revinfo.band[idx].phytype;
- wlc->bandstate[bandunit]->phyrev =
- (u16) revinfo.band[idx].phyrev;
- wlc->bandstate[bandunit]->radioid =
- (u16) revinfo.band[idx].radioid;
- wlc->bandstate[bandunit]->abgphy_encore =
- revinfo.band[idx].abgphy_encore;
-
- wlc->bandstate[bandunit]->pi = pi;
- wlc->bandstate[bandunit]->bandunit = bandunit;
- wlc->bandstate[bandunit]->bandtype = bandtype;
- }
-
- /* misc stuff */
-
- return 0;
-}
-
-/* Free the convenience handles */
-int wlc_bmac_detach(wlc_info_t *wlc)
-{
- uint idx;
-
- if (wlc->pub->sih) {
- osl_mfree(wlc->osh, (void *)wlc->pub->sih, sizeof(si_t));
- wlc->pub->sih = NULL;
- }
-
- for (idx = 0; idx < MAXBANDS; idx++)
- if (wlc->bandstate[idx]->pi) {
- kfree(wlc->bandstate[idx]->pi);
- wlc->bandstate[idx]->pi = NULL;
- }
-
- if (wlc->rpctx) {
- wlc_rpctx_detach(wlc->rpctx);
- wlc->rpctx = NULL;
- }
-
- return 0;
-
-}
-
-#endif /* WLC_HIGH_ONLY */
-
-static void wlc_timers_deinit(wlc_info_t *wlc)
+static void wlc_timers_deinit(struct wlc_info *wlc)
{
/* free timer state */
if (wlc->wdtimer) {
@@ -2274,7 +2105,7 @@ static void wlc_timers_deinit(wlc_info_t *wlc)
}
}
-static void wlc_detach_module(wlc_info_t *wlc)
+static void wlc_detach_module(struct wlc_info *wlc)
{
if (wlc->asi) {
wlc_antsel_detach(wlc->asi);
@@ -2297,7 +2128,7 @@ static void wlc_detach_module(wlc_info_t *wlc)
* One exception is sb register access, which is possible if crystal is turned on
* After "down" state, driver should avoid software timer with the exception of radio_monitor.
*/
-uint wlc_detach(wlc_info_t *wlc)
+uint wlc_detach(struct wlc_info *wlc)
{
uint i;
uint callbacks = 0;
@@ -2305,7 +2136,7 @@ uint wlc_detach(wlc_info_t *wlc)
if (wlc == NULL)
return 0;
- WL_TRACE(("wl%d: %s\n", wlc->pub->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc->pub->unit, __func__);
ASSERT(!wlc->pub->up);
@@ -2328,15 +2159,6 @@ uint wlc_detach(wlc_info_t *wlc)
/* free other state */
-#ifdef WLC_HIGH_ONLY
- /* High-Only driver has an allocated copy of vars, monolithic just
- * references the wlc->hw->vars which is freed in wlc_bmac_detach()
- */
- if (wlc->pub->vars) {
- kfree(wlc->pub->vars);
- wlc->pub->vars = NULL;
- }
-#endif
#ifdef BCMDBG
if (wlc->country_ie_override) {
@@ -2360,13 +2182,6 @@ uint wlc_detach(wlc_info_t *wlc)
/* Detach from iovar manager */
wlc_module_unregister(wlc->pub, "wlc_iovars", wlc);
- /*
- if (wlc->ap) {
- wlc_ap_detach(wlc->ap);
- wlc->ap = NULL;
- }
- */
-
while (wlc->tx_queues != NULL) {
wlc_txq_free(wlc, wlc->osh, wlc->tx_queues);
}
@@ -2383,7 +2198,7 @@ uint wlc_detach(wlc_info_t *wlc)
}
/* update state that depends on the current value of "ap" */
-void wlc_ap_upd(wlc_info_t *wlc)
+void wlc_ap_upd(struct wlc_info *wlc)
{
if (AP_ENAB(wlc->pub))
wlc->PLCPHdr_override = WLC_PLCP_AUTO; /* AP: short not allowed, but not enforced */
@@ -2398,7 +2213,7 @@ void wlc_ap_upd(wlc_info_t *wlc)
}
/* read hwdisable state and propagate to wlc flag */
-static void wlc_radio_hwdisable_upd(wlc_info_t *wlc)
+static void wlc_radio_hwdisable_upd(struct wlc_info *wlc)
{
if (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO || wlc->pub->hw_off)
return;
@@ -2411,17 +2226,17 @@ static void wlc_radio_hwdisable_upd(wlc_info_t *wlc)
}
/* return true if Minimum Power Consumption should be entered, false otherwise */
-bool wlc_is_non_delay_mpc(wlc_info_t *wlc)
+bool wlc_is_non_delay_mpc(struct wlc_info *wlc)
{
return false;
}
-bool wlc_ismpc(wlc_info_t *wlc)
+bool wlc_ismpc(struct wlc_info *wlc)
{
return (wlc->mpc_delay_off == 0) && (wlc_is_non_delay_mpc(wlc));
}
-void wlc_radio_mpc_upd(wlc_info_t *wlc)
+void wlc_radio_mpc_upd(struct wlc_info *wlc)
{
bool mpc_radio, radio_state;
@@ -2477,7 +2292,7 @@ void wlc_radio_mpc_upd(wlc_info_t *wlc)
* centralized radio disable/enable function,
* invoke radio enable/disable after updating hwradio status
*/
-static void wlc_radio_upd(wlc_info_t *wlc)
+static void wlc_radio_upd(struct wlc_info *wlc)
{
if (wlc->pub->radio_disabled)
wlc_radio_disable(wlc);
@@ -2486,7 +2301,7 @@ static void wlc_radio_upd(wlc_info_t *wlc)
}
/* maintain LED behavior in down state */
-static void wlc_down_led_upd(wlc_info_t *wlc)
+static void wlc_down_led_upd(struct wlc_info *wlc)
{
ASSERT(!wlc->pub->up);
@@ -2499,7 +2314,7 @@ static void wlc_down_led_upd(wlc_info_t *wlc)
}
}
-void wlc_radio_disable(wlc_info_t *wlc)
+void wlc_radio_disable(struct wlc_info *wlc)
{
if (!wlc->pub->up) {
wlc_down_led_upd(wlc);
@@ -2510,7 +2325,7 @@ void wlc_radio_disable(wlc_info_t *wlc)
wl_down(wlc->wl);
}
-static void wlc_radio_enable(wlc_info_t *wlc)
+static void wlc_radio_enable(struct wlc_info *wlc)
{
if (wlc->pub->up)
return;
@@ -2526,10 +2341,10 @@ static void wlc_radio_enable(wlc_info_t *wlc)
/* periodical query hw radio button while driver is "down" */
static void wlc_radio_timer(void *arg)
{
- wlc_info_t *wlc = (wlc_info_t *) arg;
+ struct wlc_info *wlc = (struct wlc_info *) arg;
if (DEVICEREMOVED(wlc)) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
wl_down(wlc->wl);
return;
}
@@ -2544,7 +2359,7 @@ static void wlc_radio_timer(void *arg)
wlc_radio_upd(wlc);
}
-static bool wlc_radio_monitor_start(wlc_info_t *wlc)
+static bool wlc_radio_monitor_start(struct wlc_info *wlc)
{
/* Don't start the timer if HWRADIO feature is disabled */
if (wlc->radio_monitor || (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO))
@@ -2556,7 +2371,7 @@ static bool wlc_radio_monitor_start(wlc_info_t *wlc)
return true;
}
-bool wlc_radio_monitor_stop(wlc_info_t *wlc)
+bool wlc_radio_monitor_stop(struct wlc_info *wlc)
{
if (!wlc->radio_monitor)
return true;
@@ -2570,7 +2385,7 @@ bool wlc_radio_monitor_stop(wlc_info_t *wlc)
}
/* bring the driver down, but don't reset hardware */
-void wlc_out(wlc_info_t *wlc)
+void wlc_out(struct wlc_info *wlc)
{
wlc_bmac_set_noreset(wlc->hw, true);
wlc_radio_upd(wlc);
@@ -2591,7 +2406,7 @@ void wlc_out(wlc_info_t *wlc)
* if there is no packet pending for the FIFO, then the corresponding prec bits should be set
* in prec_map. Of course, ignore this rule when block_datafifo is set
*/
-static bool wlc_tx_prec_map_verify(wlc_info_t *wlc)
+static bool wlc_tx_prec_map_verify(struct wlc_info *wlc)
{
/* For non-WME, both fifos have overlapping prec_map. So it's an error only if both
* fail the check.
@@ -2613,7 +2428,7 @@ static bool wlc_tx_prec_map_verify(wlc_info_t *wlc)
static void wlc_watchdog_by_timer(void *arg)
{
- wlc_info_t *wlc = (wlc_info_t *) arg;
+ struct wlc_info *wlc = (struct wlc_info *) arg;
wlc_watchdog(arg);
if (WLC_WATCHDOG_TBTT(wlc)) {
/* set to normal osl watchdog period */
@@ -2626,17 +2441,17 @@ static void wlc_watchdog_by_timer(void *arg)
/* common watchdog code */
static void wlc_watchdog(void *arg)
{
- wlc_info_t *wlc = (wlc_info_t *) arg;
+ struct wlc_info *wlc = (struct wlc_info *) arg;
int i;
wlc_bsscfg_t *cfg;
- WL_TRACE(("wl%d: wlc_watchdog\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_watchdog\n", wlc->pub->unit);
if (!wlc->pub->up)
return;
if (DEVICEREMOVED(wlc)) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
wl_down(wlc->wl);
return;
}
@@ -2667,13 +2482,7 @@ static void wlc_watchdog(void *arg)
if (wlc->pub->radio_disabled)
return;
-#ifdef WLC_LOW
wlc_bmac_watchdog(wlc);
-#endif
-#ifdef WLC_HIGH_ONLY
- /* maintenance */
- wlc_bmac_rpc_watchdog(wlc);
-#endif
/* occasionally sample mac stat counters to detect 16-bit counter wrap */
if ((WLC_UPDATE_STATS(wlc))
@@ -2702,10 +2511,8 @@ static void wlc_watchdog(void *arg)
wlc->tempsense_lasttime = wlc->pub->now;
wlc_tempsense_upd(wlc);
}
-#ifdef WLC_LOW
/* BMAC_NOTE: for the HIGH_ONLY driver, this seems to be called after the RPC bus has failed */
ASSERT(wlc_bmac_taclear(wlc->hw, true));
-#endif
/* Verify that tx_prec_map and fifos are in sync to avoid lock ups */
ASSERT(wlc_tx_prec_map_verify(wlc));
@@ -2714,9 +2521,9 @@ static void wlc_watchdog(void *arg)
}
/* make interface operational */
-int wlc_up(wlc_info_t *wlc)
+int wlc_up(struct wlc_info *wlc)
{
- WL_TRACE(("wl%d: %s:\n", wlc->pub->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc->pub->unit, __func__);
/* HW is turned off so don't try to access it */
if (wlc->pub->hw_off || DEVICEREMOVED(wlc))
@@ -2728,7 +2535,7 @@ int wlc_up(wlc_info_t *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (CHIPID(wlc->pub->sih->chip) == BCM4313_CHIP_ID)) {
+ && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT)) {
wlc_mhf(wlc, MHF5, MHF5_4313_GPIOCTRL,
@@ -2761,7 +2568,8 @@ int wlc_up(wlc_info_t *wlc)
if (!BSSCFG_STA(bsscfg)
|| !bsscfg->enable || !bsscfg->BSS)
continue;
- WL_ERROR(("wl%d.%d: wlc_up: rfdisable -> " "wlc_bsscfg_disable()\n", wlc->pub->unit, idx));
+ WL_ERROR("wl%d.%d: wlc_up: rfdisable -> " "wlc_bsscfg_disable()\n",
+ wlc->pub->unit, idx);
}
}
} else
@@ -2822,10 +2630,10 @@ int wlc_up(wlc_info_t *wlc)
}
/* Initialize the base precedence map for dequeueing from txq based on WME settings */
-static void wlc_tx_prec_map_init(wlc_info_t *wlc)
+static void wlc_tx_prec_map_init(struct wlc_info *wlc)
{
wlc->tx_prec_map = WLC_PREC_BMP_ALL;
- bzero(wlc->fifo2prec_map, sizeof(u16) * NFIFO);
+ memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
/* For non-WME, both fifos have overlapping MAXPRIO. So just disable all precedences
* if either is full.
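/*
 * Illustrative sketch, not taken from this patch: bzero(dst, len) is
 * equivalent to memset(dst, 0, len), and the kernel only provides memset(),
 * which is why every bzero() call is rewritten as above.  The helper below
 * is hypothetical.
 */
#include <linux/types.h>
#include <linux/string.h>

static void example_clear_map(u16 *map, unsigned int nelems)
{
	/* was: bzero(map, sizeof(u16) * nelems); */
	memset(map, 0, nelems * sizeof(*map));
}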
@@ -2841,7 +2649,7 @@ static void wlc_tx_prec_map_init(wlc_info_t *wlc)
}
}
-static uint wlc_down_del_timer(wlc_info_t *wlc)
+static uint wlc_down_del_timer(struct wlc_info *wlc)
{
uint callbacks = 0;
@@ -2853,7 +2661,7 @@ static uint wlc_down_del_timer(wlc_info_t *wlc)
* disable the hardware, free any transient buffer state.
* Return a count of the number of driver callbacks still pending.
*/
-uint wlc_down(wlc_info_t *wlc)
+uint wlc_down(struct wlc_info *wlc)
{
uint callbacks = 0;
@@ -2861,12 +2669,12 @@ uint wlc_down(wlc_info_t *wlc)
bool dev_gone = false;
wlc_txq_info_t *qi;
- WL_TRACE(("wl%d: %s:\n", wlc->pub->unit, __func__));
+ WL_TRACE("wl%d: %s:\n", wlc->pub->unit, __func__);
/* check if we are already in the going down path */
if (wlc->going_down) {
- WL_ERROR(("wl%d: %s: Driver going down so return\n",
- wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s: Driver going down so return\n",
+ wlc->pub->unit, __func__);
return 0;
}
if (!wlc->pub->up)
@@ -2922,14 +2730,11 @@ uint wlc_down(wlc_info_t *wlc)
/* wlc_bmac_down_finish has done wlc_coredisable(), so the clk is off */
wlc->clk = false;
-#ifdef WLC_HIGH_ONLY
- wlc_rpctx_txreclaim(wlc->rpctx);
-#endif
/* Verify all packets are flushed from the driver */
- if (PKTALLOCED(wlc->osh) != 0) {
- WL_ERROR(("%d packets not freed at wlc_down!!!!!!\n",
- PKTALLOCED(wlc->osh)));
+ if (wlc->osh->pktalloced != 0) {
+ WL_ERROR("%d packets not freed at wlc_down!!!!!!\n",
+ wlc->osh->pktalloced);
}
#ifdef BCMDBG
/* Since all the packets should have been freed,
@@ -2943,7 +2748,7 @@ uint wlc_down(wlc_info_t *wlc)
}
/* Set the current gmode configuration */
-int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
+int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
{
int ret = 0;
uint i;
@@ -2958,7 +2763,7 @@ int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
bool preamble_restrict = false; /* Restrict association to stations that support short
* preambles
*/
- wlcband_t *band;
+ struct wlcband *band;
/* if N-support is enabled, allow Gmode set as long as requested
* Gmode is not GMODE_LEGACY_B
@@ -2985,10 +2790,10 @@ int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
wlc_protection_upd(wlc, WLC_PROT_G_USER, gmode);
/* Clear supported rates filter */
- bzero(&wlc->sup_rates_override, sizeof(wlc_rateset_t));
+ memset(&wlc->sup_rates_override, 0, sizeof(wlc_rateset_t));
/* Clear rateset override */
- bzero(&rs, sizeof(wlc_rateset_t));
+ memset(&rs, 0, sizeof(wlc_rateset_t));
switch (gmode) {
case GMODE_LEGACY_B:
@@ -3026,8 +2831,8 @@ int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
default:
/* Error */
- WL_ERROR(("wl%d: %s: invalid gmode %d\n", wlc->pub->unit,
- __func__, gmode));
+ WL_ERROR("wl%d: %s: invalid gmode %d\n",
+ wlc->pub->unit, __func__, gmode);
return BCME_UNSUPPORTED;
}
@@ -3093,7 +2898,7 @@ int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
return ret;
}
-static int wlc_nmode_validate(wlc_info_t *wlc, s32 nmode)
+static int wlc_nmode_validate(struct wlc_info *wlc, s32 nmode)
{
int err = 0;
@@ -3117,7 +2922,7 @@ static int wlc_nmode_validate(wlc_info_t *wlc, s32 nmode)
return err;
}
-int wlc_set_nmode(wlc_info_t *wlc, s32 nmode)
+int wlc_set_nmode(struct wlc_info *wlc, s32 nmode)
{
uint i;
int err;
@@ -3176,7 +2981,7 @@ int wlc_set_nmode(wlc_info_t *wlc, s32 nmode)
return err;
}
-static int wlc_set_rateset(wlc_info_t *wlc, wlc_rateset_t *rs_arg)
+static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
{
wlc_rateset_t rs, new;
uint bandunit;
@@ -3219,18 +3024,18 @@ static int wlc_set_rateset(wlc_info_t *wlc, wlc_rateset_t *rs_arg)
}
/* simplified integer set interface for common ioctl handler */
-int wlc_set(wlc_info_t *wlc, int cmd, int arg)
+int wlc_set(struct wlc_info *wlc, int cmd, int arg)
{
return wlc_ioctl(wlc, cmd, (void *)&arg, sizeof(arg), NULL);
}
/* simplified integer get interface for common ioctl handler */
-int wlc_get(wlc_info_t *wlc, int cmd, int *arg)
+int wlc_get(struct wlc_info *wlc, int cmd, int *arg)
{
return wlc_ioctl(wlc, cmd, arg, sizeof(int), NULL);
}
-static void wlc_ofdm_rateset_war(wlc_info_t *wlc)
+static void wlc_ofdm_rateset_war(struct wlc_info *wlc)
{
u8 r;
bool war = false;
@@ -3246,14 +3051,16 @@ static void wlc_ofdm_rateset_war(wlc_info_t *wlc)
}
int
-wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
+wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
+ struct wlc_if *wlcif)
{
return _wlc_ioctl(wlc, cmd, arg, len, wlcif);
}
/* common ioctl handler. return: 0=ok, -1=error, positive=particular error */
static int
-_wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
+_wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
+ struct wlc_if *wlcif)
{
int val, *pval;
bool bool_val;
@@ -3265,7 +3072,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
uint band;
rw_reg_t *r;
wlc_bsscfg_t *bsscfg;
- osl_t *osh;
+ struct osl_info *osh;
wlc_bss_info_t *current_bss;
/* update bsscfg pointer */
@@ -3280,7 +3087,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
/* If the device is turned off, then it's not "removed" */
if (!wlc->pub->hw_off && DEVICEREMOVED(wlc)) {
- WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
wl_down(wlc->wl);
return BCME_ERROR;
}
@@ -3300,8 +3107,8 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
bool_val = val != 0;
if (cmd != WLC_SET_CHANNEL)
- WL_NONE(("WLC_IOCTL: cmd %d val 0x%x (%d) len %d\n", cmd,
- (uint) val, val, len));
+ WL_NONE("WLC_IOCTL: cmd %d val 0x%x (%d) len %d\n",
+ cmd, (uint)val, val, len);
bcmerror = 0;
regs = wlc->regs;
@@ -3321,8 +3128,8 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
default:
if ((arg == NULL) || (len <= 0)) {
- WL_ERROR(("wl%d: %s: Command %d needs arguments\n",
- wlc->pub->unit, __func__, cmd));
+ WL_ERROR("wl%d: %s: Command %d needs arguments\n",
+ wlc->pub->unit, __func__, cmd);
bcmerror = BCME_BADARG;
goto done;
}
@@ -3388,10 +3195,6 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
wlc_set_chanspec(wlc, chspec);
wlc_enable_mac(wlc);
}
-#ifdef WLC_HIGH_ONLY
- /* delay for channel change */
- msleep(50);
-#endif
break;
}
@@ -3659,8 +3462,8 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
/* 4322 supports antdiv in phy, no need to set it to ucode */
if (WLCISNPHY(wlc->band)
&& D11REV_IS(wlc->pub->corerev, 16)) {
- WL_ERROR(("wl%d: can't set ucantdiv for 4322\n",
- wlc->pub->unit));
+ WL_ERROR("wl%d: can't set ucantdiv for 4322\n",
+ wlc->pub->unit);
bcmerror = BCME_UNSUPPORTED;
} else
wlc_mhf(wlc, MHF1, MHF1_ANTDIV,
@@ -3757,8 +3560,8 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
if ((radiomask == 0) || (radiomask & ~validbits)
|| (radioval & ~validbits)
|| ((radioval & ~radiomask) != 0)) {
- WL_ERROR(("SET_RADIO with wrong bits 0x%x\n",
- val));
+ WL_ERROR("SET_RADIO with wrong bits 0x%x\n",
+ val);
bcmerror = BCME_RANGE;
break;
}
@@ -3788,7 +3591,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
break;
}
- bzero((char *)&key, sizeof(key));
+ memset((char *)&key, 0, sizeof(key));
if (src_key) {
key.index = src_key->id;
key.len = src_key->len;
@@ -3800,7 +3603,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
key.flags |= WL_PRIMARY_KEY;
bcopy(src_key->ea.octet, key.ea.octet,
- ETHER_ADDR_LEN);
+ ETH_ALEN);
}
bcopy((char *)&key, arg, sizeof(key));
@@ -3835,8 +3638,8 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
u16 lo;
u32 hi;
/* group keys in WPA-NONE (IBSS only, AES and TKIP) use a global TXIV */
- if ((bsscfg->WPA_auth & WPA_AUTH_NONE)
- && ETHER_ISNULLADDR(&key->ea)) {
+ if ((bsscfg->WPA_auth & WPA_AUTH_NONE) &&
+ is_zero_ether_addr(key->ea.octet)) {
lo = bsscfg->wpa_none_txiv.lo;
hi = bsscfg->wpa_none_txiv.hi;
} else {
@@ -3885,7 +3688,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
wlc_rateset_t rs;
wl_rateset_t *ret_rs = (wl_rateset_t *) arg;
- bzero(&rs, sizeof(wlc_rateset_t));
+ memset(&rs, 0, sizeof(wlc_rateset_t));
wlc_default_rateset(wlc, (wlc_rateset_t *) &rs);
if (len < (int)(rs.count + sizeof(rs.count))) {
@@ -3913,7 +3716,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
break;
}
- bzero(&rs, sizeof(wlc_rateset_t));
+ memset(&rs, 0, sizeof(wlc_rateset_t));
/* Copy only legacy rateset section */
rs.count = in_rs->count;
@@ -4247,7 +4050,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
/* check for an empty rateset to clear the override */
if (rs.count == 0) {
- bzero(&wlc->sup_rates_override,
+ memset(&wlc->sup_rates_override, 0,
sizeof(wlc_rateset_t));
break;
}
@@ -4394,7 +4197,7 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
#endif
case WLC_LAST:
- WL_ERROR(("%s: WLC_LAST\n", __func__));
+ WL_ERROR("%s: WLC_LAST\n", __func__);
}
done:
@@ -4406,21 +4209,19 @@ _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
}
}
-#ifdef WLC_LOW
/* BMAC_NOTE: for the HIGH_ONLY driver, this seems to be called after the RPC bus has failed */
/* In hw_off condition, IOCTLs that reach here are deemed safe but taclear would
* certainly result in getting -1 for register reads. So skip ta_clear altogether
*/
if (!(wlc->pub->hw_off))
ASSERT(wlc_bmac_taclear(wlc->hw, ta_ok) || !ta_ok);
-#endif
return bcmerror;
}
#if defined(BCMDBG)
/* consolidated register access ioctl error checking */
-int wlc_iocregchk(wlc_info_t *wlc, uint band)
+int wlc_iocregchk(struct wlc_info *wlc, uint band)
{
/* if band is specified, it must be the current band */
if ((band != WLC_BAND_AUTO) && (band != (uint) wlc->band->bandtype))
@@ -4440,7 +4241,7 @@ int wlc_iocregchk(wlc_info_t *wlc, uint band)
#if defined(BCMDBG)
/* For some ioctls, make sure that the pi pointer matches the current phy */
-int wlc_iocpichk(wlc_info_t *wlc, uint phytype)
+int wlc_iocpichk(struct wlc_info *wlc, uint phytype)
{
if (wlc->band->phytype != phytype)
return BCME_BADBAND;
@@ -4474,21 +4275,21 @@ static const bcm_iovar_t *wlc_iovar_lookup(const bcm_iovar_t *table,
}
/* simplified integer get interface for common WLC_GET_VAR ioctl handler */
-int wlc_iovar_getint(wlc_info_t *wlc, const char *name, int *arg)
+int wlc_iovar_getint(struct wlc_info *wlc, const char *name, int *arg)
{
return wlc_iovar_op(wlc, name, NULL, 0, arg, sizeof(s32), IOV_GET,
NULL);
}
/* simplified integer set interface for common WLC_SET_VAR ioctl handler */
-int wlc_iovar_setint(wlc_info_t *wlc, const char *name, int arg)
+int wlc_iovar_setint(struct wlc_info *wlc, const char *name, int arg)
{
return wlc_iovar_op(wlc, name, NULL, 0, (void *)&arg, sizeof(arg),
IOV_SET, NULL);
}
/* simplified s8 get interface for common WLC_GET_VAR ioctl handler */
-int wlc_iovar_gets8(wlc_info_t *wlc, const char *name, s8 *arg)
+int wlc_iovar_gets8(struct wlc_info *wlc, const char *name, s8 *arg)
{
int iovar_int;
int err;
@@ -4507,11 +4308,11 @@ int wlc_iovar_gets8(wlc_info_t *wlc, const char *name, s8 *arg)
* calling function must keep 'iovars' until wlc_module_unregister is called.
* 'iovars' must have its last entry's name field set to NULL as the terminator.
*/
-int wlc_module_register(wlc_pub_t *pub, const bcm_iovar_t *iovars,
+int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
const char *name, void *hdl, iovar_fn_t i_fn,
watchdog_fn_t w_fn, down_fn_t d_fn)
{
- wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+ struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
int i;
ASSERT(name != NULL);
@@ -4537,9 +4338,9 @@ int wlc_module_register(wlc_pub_t *pub, const bcm_iovar_t *iovars,
}
/* unregister module callbacks */
-int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl)
+int wlc_module_unregister(struct wlc_pub *pub, const char *name, void *hdl)
{
- wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+ struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
int i;
if (wlc == NULL)
@@ -4550,7 +4351,7 @@ int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl)
for (i = 0; i < WLC_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
- bzero(&wlc->modulecb[i], sizeof(modulecb_t));
+ memset(&wlc->modulecb[i], 0, sizeof(modulecb_t));
return 0;
}
}
@@ -4560,7 +4361,7 @@ int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl)
}
/* Write WME tunable parameters for retransmit/max rate from wlc struct to ucode */
-static void wlc_wme_retries_write(wlc_info_t *wlc)
+static void wlc_wme_retries_write(struct wlc_info *wlc)
{
int ac;
@@ -4582,7 +4383,7 @@ static void wlc_wme_retries_write(wlc_info_t *wlc)
* All pointers may point into the same buffer.
*/
int
-wlc_iovar_op(wlc_info_t *wlc, const char *name,
+wlc_iovar_op(struct wlc_info *wlc, const char *name,
void *params, int p_len, void *arg, int len,
bool set, struct wlc_if *wlcif)
{
@@ -4606,8 +4407,8 @@ wlc_iovar_op(wlc_info_t *wlc, const char *name,
if (!set && (len == sizeof(int)) &&
!(IS_ALIGNED((unsigned long)(arg), (uint) sizeof(int)))) {
- WL_ERROR(("wl%d: %s unaligned get ptr for %s\n",
- wlc->pub->unit, __func__, name));
+ WL_ERROR("wl%d: %s unaligned get ptr for %s\n",
+ wlc->pub->unit, __func__, name);
ASSERT(0);
}
@@ -4622,11 +4423,6 @@ wlc_iovar_op(wlc_info_t *wlc, const char *name,
/* iovar name not found */
if (i >= WLC_MAXMODULES) {
err = BCME_UNSUPPORTED;
-#ifdef WLC_HIGH_ONLY
- err =
- bcmsdh_iovar_op(wlc->btparam, name, params, p_len, arg, len,
- set);
-#endif
goto exit;
}
@@ -4658,10 +4454,10 @@ wlc_iovar_op(wlc_info_t *wlc, const char *name,
}
int
-wlc_iovar_check(wlc_pub_t *pub, const bcm_iovar_t *vi, void *arg, int len,
+wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi, void *arg, int len,
bool set)
{
- wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+ struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
int err = 0;
s32 int_val = 0;
@@ -4729,7 +4525,7 @@ wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
const char *name, void *params, uint p_len, void *arg, int len,
int val_size, struct wlc_if *wlcif)
{
- wlc_info_t *wlc = hdl;
+ struct wlc_info *wlc = hdl;
wlc_bsscfg_t *bsscfg;
int err = 0;
s32 int_val = 0;
@@ -4739,7 +4535,7 @@ wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
bool bool_val2;
wlc_bss_info_t *current_bss;
- WL_TRACE(("wl%d: %s\n", wlc->pub->unit, __func__));
+ WL_TRACE("wl%d: %s\n", wlc->pub->unit, __func__);
bsscfg = NULL;
current_bss = NULL;
@@ -4762,8 +4558,8 @@ wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
bool_val = (int_val != 0) ? true : false;
bool_val2 = (int_val2 != 0) ? true : false;
- WL_TRACE(("wl%d: %s: id %d\n", wlc->pub->unit, __func__,
- IOV_ID(actionid)));
+ WL_TRACE("wl%d: %s: id %d\n",
+ wlc->pub->unit, __func__, IOV_ID(actionid));
/* Do the actual parameter implementation */
switch (actionid) {
@@ -4821,7 +4617,7 @@ wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
break;
default:
- WL_ERROR(("wl%d: %s: unsupported\n", wlc->pub->unit, __func__));
+ WL_ERROR("wl%d: %s: unsupported\n", wlc->pub->unit, __func__);
err = BCME_UNSUPPORTED;
break;
}
@@ -4833,7 +4629,7 @@ wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
}
static int
-wlc_iovar_rangecheck(wlc_info_t *wlc, u32 val, const bcm_iovar_t *vi)
+wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val, const bcm_iovar_t *vi)
{
int err = 0;
u32 min_val = 0;
@@ -4930,7 +4726,7 @@ void wlc_print_txstatus(tx_status_t *txs)
#define MACSTATUPD(name) \
wlc_ctrupd_cache(macstats.name, &wlc->core->macstat_snapshot->name, &wlc->pub->_cnt->name)
-void wlc_statsupd(wlc_info_t *wlc)
+void wlc_statsupd(struct wlc_info *wlc)
{
int i;
#ifdef BCMDBG
@@ -4956,8 +4752,8 @@ void wlc_statsupd(wlc_info_t *wlc)
/* check for rx fifo 0 overflow */
delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
if (delta)
- WL_ERROR(("wl%d: %u rx fifo 0 overflows!\n", wlc->pub->unit,
- delta));
+ WL_ERROR("wl%d: %u rx fifo 0 overflows!\n",
+ wlc->pub->unit, delta);
/* check for tx fifo underflows */
for (i = 0; i < NFIFO; i++) {
@@ -4965,8 +4761,8 @@ void wlc_statsupd(wlc_info_t *wlc)
(u16) (wlc->core->macstat_snapshot->txfunfl[i] -
txfunfl[i]);
if (delta)
- WL_ERROR(("wl%d: %u tx fifo %d underflows!\n",
- wlc->pub->unit, delta, i));
+ WL_ERROR("wl%d: %u tx fifo %d underflows!\n",
+ wlc->pub->unit, delta, i);
}
#endif /* BCMDBG */
@@ -5015,7 +4811,7 @@ void wlc_statsupd(wlc_info_t *wlc)
bool wlc_chipmatch(u16 vendor, u16 device)
{
if (vendor != VENDOR_BROADCOM) {
- WL_ERROR(("wlc_chipmatch: unknown vendor id %04x\n", vendor));
+ WL_ERROR("wlc_chipmatch: unknown vendor id %04x\n", vendor);
return false;
}
@@ -5027,7 +4823,7 @@ bool wlc_chipmatch(u16 vendor, u16 device)
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
- WL_ERROR(("wlc_chipmatch: unknown device id %04x\n", device));
+ WL_ERROR("wlc_chipmatch: unknown device id %04x\n", device);
return false;
}
@@ -5182,20 +4978,12 @@ int wlc_format_ssid(char *buf, const unsigned char ssid[], uint ssid_len)
}
#endif /* defined(BCMDBG) */
-u16 wlc_rate_shm_offset(wlc_info_t *wlc, u8 rate)
+u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate)
{
return wlc_bmac_rate_shm_offset(wlc->hw, rate);
}
/* Callback for device removed */
-#if defined(WLC_HIGH_ONLY)
-void wlc_device_removed(void *arg)
-{
- wlc_info_t *wlc = (wlc_info_t *) arg;
-
- wlc->device_present = false;
-}
-#endif /* WLC_HIGH_ONLY */
/*
* Attempts to queue a packet onto a multiple-precedence queue,
@@ -5207,16 +4995,16 @@ void wlc_device_removed(void *arg)
* Returns true if packet consumed (queued), false if not.
*/
bool BCMFASTPATH
-wlc_prec_enq(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec)
+wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt, int prec)
{
return wlc_prec_enq_head(wlc, q, pkt, prec, false);
}
bool BCMFASTPATH
-wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
- bool head)
+wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q, struct sk_buff *pkt,
+ int prec, bool head)
{
- void *p;
+ struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
/* Determine precedence from which to evict packet, if any */
@@ -5226,8 +5014,8 @@ wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
p = pktq_peek_tail(q, &eprec);
ASSERT(p != NULL);
if (eprec > prec) {
- WL_ERROR(("%s: Failing: eprec %d > prec %d\n", __func__,
- eprec, prec));
+ WL_ERROR("%s: Failing: eprec %d > prec %d\n",
+ __func__, eprec, prec);
return false;
}
}
@@ -5243,8 +5031,8 @@ wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
/* Refuse newer packet unless configured to discard oldest */
if (eprec == prec && !discard_oldest) {
- WL_ERROR(("%s: No where to go, prec == %d\n", __func__,
- prec));
+ WL_ERROR("%s: No where to go, prec == %d\n",
+ __func__, prec);
return false;
}
@@ -5256,14 +5044,14 @@ wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
/* Increment wme stats */
if (WME_ENAB(wlc->pub)) {
WLCNTINCR(wlc->pub->_wme_cnt->
- tx_failed[WME_PRIO2AC(PKTPRIO(p))].packets);
+ tx_failed[WME_PRIO2AC(p->priority)].packets);
WLCNTADD(wlc->pub->_wme_cnt->
- tx_failed[WME_PRIO2AC(PKTPRIO(p))].bytes,
+ tx_failed[WME_PRIO2AC(p->priority)].bytes,
pkttotlen(wlc->osh, p));
}
ASSERT(0);
- PKTFREE(wlc->osh, p, true);
+ pkt_buf_free_skb(wlc->osh, p, true);
WLCNTINCR(wlc->pub->_cnt->txnobuf);
}
@@ -5277,25 +5065,26 @@ wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
return true;
}
-void BCMFASTPATH wlc_txq_enq(void *ctx, struct scb *scb, void *sdu, uint prec)
+void BCMFASTPATH wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+ uint prec)
{
- wlc_info_t *wlc = (wlc_info_t *) ctx;
+ struct wlc_info *wlc = (struct wlc_info *) ctx;
wlc_txq_info_t *qi = wlc->active_queue; /* Check me */
struct pktq *q = &qi->q;
int prio;
- prio = PKTPRIO(sdu);
+ prio = sdu->priority;
ASSERT(pktq_max(q) >= wlc->pub->tunables->datahiwat);
if (!wlc_prec_enq(wlc, q, sdu, prec)) {
if (!EDCF_ENAB(wlc->pub)
|| (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL))
- WL_ERROR(("wl%d: wlc_txq_enq: txq overflow\n",
- wlc->pub->unit));
+ WL_ERROR("wl%d: wlc_txq_enq: txq overflow\n",
+ wlc->pub->unit);
/* ASSERT(9 == 8); *//* XXX we might hit this condition in case of packet flooding from the mac80211 stack */
- PKTFREE(wlc->osh, sdu, true);
+ pkt_buf_free_skb(wlc->osh, sdu, true);
WLCNTINCR(wlc->pub->_cnt->txnobuf);
}
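/*
 * Illustrative sketch, not taken from this patch: the enqueue paths above
 * assume the osl packet macros map directly onto struct sk_buff, roughly
 * PKTDATA(p) -> p->data, PKTLEN(p) -> p->len, PKTPRIO(p) -> p->priority,
 * and PKTFREE() -> pkt_buf_free_skb().  The helper below is hypothetical
 * and only shows the native sk_buff calls in use.
 */
#include <linux/skbuff.h>

static void example_drop_on_overflow(struct sk_buff *sdu, bool queued)
{
	if (queued)
		return;

	/* the driver frees via pkt_buf_free_skb(); assuming that wrapper
	 * ends in a regular skb free, this is the generic equivalent */
	dev_kfree_skb_any(sdu);
}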
@@ -5317,13 +5106,14 @@ void BCMFASTPATH wlc_txq_enq(void *ctx, struct scb *scb, void *sdu, uint prec)
}
bool BCMFASTPATH
-wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw)
+wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw)
{
u8 prio;
uint fifo;
void *pkt;
struct scb *scb = &global_scb;
- struct dot11_header *d11_header = (struct dot11_header *)PKTDATA(sdu);
+ struct dot11_header *d11_header = (struct dot11_header *)(sdu->data);
u16 type, fc;
ASSERT(sdu);
@@ -5332,13 +5122,12 @@ wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw)
type = FC_TYPE(fc);
/* 802.11 standard requires management traffic to go at highest priority */
- prio = (type == FC_TYPE_DATA ? PKTPRIO(sdu) : MAXPRIO);
+ prio = (type == FC_TYPE_DATA ? sdu->priority : MAXPRIO);
fifo = prio2fifo[prio];
- ASSERT((uint) PKTHEADROOM(sdu) >= TXOFF);
- ASSERT(!PKTSHARED(sdu));
- ASSERT(!PKTNEXT(sdu));
- ASSERT(!PKTLINK(sdu));
+ ASSERT((uint) skb_headroom(sdu) >= TXOFF);
+ ASSERT(!(sdu->next));
+ ASSERT(!(sdu->prev));
ASSERT(fifo < NFIFO);
pkt = sdu;
@@ -5352,9 +5141,9 @@ wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw)
return 0;
}
-void BCMFASTPATH wlc_send_q(wlc_info_t *wlc, wlc_txq_info_t *qi)
+void BCMFASTPATH wlc_send_q(struct wlc_info *wlc, wlc_txq_info_t *qi)
{
- void *pkt[DOT11_MAXNUMFRAGS];
+ struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
int prec;
u16 prec_map;
int err = 0, i, count;
@@ -5427,7 +5216,7 @@ void BCMFASTPATH wlc_send_q(wlc_info_t *wlc, wlc_txq_info_t *qi)
* for MC frames so is used as part of the sequence number.
*/
static inline u16
-bcmc_fid_generate(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg, d11txh_t *txh)
+bcmc_fid_generate(struct wlc_info *wlc, wlc_bsscfg_t *bsscfg, d11txh_t *txh)
{
u16 frameid;
@@ -5441,13 +5230,14 @@ bcmc_fid_generate(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg, d11txh_t *txh)
}
void BCMFASTPATH
-wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit, s8 txpktpend)
+wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p, bool commit,
+ s8 txpktpend)
{
u16 frameid = INVALIDFID;
d11txh_t *txh;
ASSERT(fifo < NFIFO);
- txh = (d11txh_t *) PKTDATA(p);
+ txh = (d11txh_t *) (p->data);
/* When a BC/MC frame is being committed to the BCMC fifo via DMA (NOT PIO), update
* ucode or BSS info as appropriate.
@@ -5460,21 +5250,14 @@ wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit, s8 txpktpend)
if (WLC_WAR16165(wlc))
wlc_war16165(wlc, true);
-#ifdef WLC_HIGH_ONLY
- if (RPCTX_ENAB(wlc->pub)) {
- (void)wlc_rpctx_tx(wlc->rpctx, fifo, p, commit, frameid,
- txpktpend);
- return;
- }
-#else
/* Bump up the pending count if not using rpc. If rpc is used, this will be handled
* in wlc_bmac_txfifo()
*/
if (commit) {
TXPKTPENDINC(wlc, fifo, txpktpend);
- WL_TRACE(("wlc_txfifo, pktpend inc %d to %d\n", txpktpend,
- TXPKTPENDGET(wlc, fifo)));
+ WL_TRACE("wlc_txfifo, pktpend inc %d to %d\n",
+ txpktpend, TXPKTPENDGET(wlc, fifo));
}
/* Commit BCMC sequence number in the SHM frame ID location */
@@ -5482,13 +5265,12 @@ wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit, s8 txpktpend)
BCMCFID(wlc, frameid);
if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0) {
- WL_ERROR(("wlc_txfifo: fatal, toss frames !!!\n"));
+ WL_ERROR("wlc_txfifo: fatal, toss frames !!!\n");
}
-#endif /* WLC_HIGH_ONLY */
}
static u16
-wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec, uint length)
+wlc_compute_airtime(struct wlc_info *wlc, ratespec_t rspec, uint length)
{
u16 usec = 0;
uint mac_rate = RSPEC2RATE(rspec);
@@ -5523,7 +5305,8 @@ wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec, uint length)
usec = (length << 3) / 11;
break;
default:
- WL_ERROR(("wl%d: wlc_compute_airtime: unsupported rspec 0x%x\n", wlc->pub->unit, rspec));
+ WL_ERROR("wl%d: wlc_compute_airtime: unsupported rspec 0x%x\n",
+ wlc->pub->unit, rspec);
ASSERT((const char *)"Bad phy_rate" == NULL);
break;
}
@@ -5533,7 +5316,7 @@ wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec, uint length)
}
void BCMFASTPATH
-wlc_compute_plcp(wlc_info_t *wlc, ratespec_t rspec, uint length, u8 *plcp)
+wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rspec, uint length, u8 *plcp)
{
if (IS_MCS(rspec)) {
wlc_compute_mimo_plcp(rspec, length, plcp);
@@ -5574,7 +5357,7 @@ wlc_compute_ofdm_plcp(ratespec_t rspec, u32 length, u8 *plcp)
rate_signal = rate_info[rate] & RATE_MASK;
ASSERT(rate_signal != 0);
- bzero(plcp, D11_PHY_HDR_LEN);
+ memset(plcp, 0, D11_PHY_HDR_LEN);
D11A_PHY_HDR_SRATE((ofdm_phy_hdr_t *) plcp, rate_signal);
tmp = (length & 0xfff) << 5;
@@ -5619,7 +5402,7 @@ static void wlc_cck_plcp_set(int rate_500, uint length, u8 *plcp)
break;
default:
- WL_ERROR(("wlc_cck_plcp_set: unsupported rate %d\n", rate_500));
+ WL_ERROR("wlc_cck_plcp_set: unsupported rate %d\n", rate_500);
rate_500 = WLC_RATE_1M;
usec = length << 3;
break;
@@ -5657,7 +5440,7 @@ static void wlc_compute_cck_plcp(ratespec_t rspec, uint length, u8 *plcp)
* preamble_type use short/GF or long/MM PLCP header
*/
static u16 BCMFASTPATH
-wlc_compute_frame_dur(wlc_info_t *wlc, ratespec_t rate, u8 preamble_type,
+wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate, u8 preamble_type,
uint next_frag_len)
{
u16 dur, sifs;
@@ -5691,7 +5474,7 @@ wlc_compute_frame_dur(wlc_info_t *wlc, ratespec_t rate, u8 preamble_type,
* frame_len next MPDU frame length in bytes
*/
u16 BCMFASTPATH
-wlc_compute_rtscts_dur(wlc_info_t *wlc, bool cts_only, ratespec_t rts_rate,
+wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only, ratespec_t rts_rate,
ratespec_t frame_rate, u8 rts_preamble_type,
u8 frame_preamble_type, uint frame_len, bool ba)
{
@@ -5722,7 +5505,7 @@ wlc_compute_rtscts_dur(wlc_info_t *wlc, bool cts_only, ratespec_t rts_rate,
return dur;
}
-static bool wlc_phy_rspec_check(wlc_info_t *wlc, u16 bw, ratespec_t rspec)
+static bool wlc_phy_rspec_check(struct wlc_info *wlc, u16 bw, ratespec_t rspec)
{
if (IS_MCS(rspec)) {
uint mcs = rspec & RSPEC_RATE_MASK;
@@ -5748,7 +5531,7 @@ static bool wlc_phy_rspec_check(wlc_info_t *wlc, u16 bw, ratespec_t rspec)
return true;
}
-u16 BCMFASTPATH wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec)
+u16 BCMFASTPATH wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
{
u16 phyctl1 = 0;
u16 bw;
@@ -5759,7 +5542,8 @@ u16 BCMFASTPATH wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec)
bw = RSPEC_GET_BW(rspec);
/* 10Mhz is not supported yet */
if (bw < PHY_TXC1_BW_20MHZ) {
- WL_ERROR(("wlc_phytxctl1_calc: bw %d is not supported yet, set to 20L\n", bw));
+ WL_ERROR("wlc_phytxctl1_calc: bw %d is not supported yet, set to 20L\n",
+ bw);
bw = PHY_TXC1_BW_20MHZ;
}
@@ -5784,7 +5568,7 @@ u16 BCMFASTPATH wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec)
/* get the phyctl byte from rate phycfg table */
phycfg = wlc_rate_legacy_phyctl(RSPEC2RATE(rspec));
if (phycfg == -1) {
- WL_ERROR(("wlc_phytxctl1_calc: wrong legacy OFDM/CCK rate\n"));
+ WL_ERROR("wlc_phytxctl1_calc: wrong legacy OFDM/CCK rate\n");
ASSERT(0);
phycfg = 0;
}
@@ -5798,16 +5582,14 @@ u16 BCMFASTPATH wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec)
/* phy clock must support 40Mhz if tx descriptor uses it */
if ((phyctl1 & PHY_TXC1_BW_MASK) >= PHY_TXC1_BW_40MHZ) {
ASSERT(CHSPEC_WLC_BW(wlc->chanspec) == WLC_40_MHZ);
-#ifndef WLC_HIGH_ONLY
ASSERT(wlc->chanspec == wlc_phy_chanspec_get(wlc->band->pi));
-#endif
}
#endif /* BCMDBG */
return phyctl1;
}
ratespec_t BCMFASTPATH
-wlc_rspec_to_rts_rspec(wlc_info_t *wlc, ratespec_t rspec, bool use_rspec,
+wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
u16 mimo_ctlchbw)
{
ratespec_t rts_rspec = 0;
@@ -5863,15 +5645,15 @@ wlc_rspec_to_rts_rspec(wlc_info_t *wlc, ratespec_t rspec, bool use_rspec,
*
*/
static u16 BCMFASTPATH
-wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
- void *p, struct scb *scb, uint frag,
+wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
+ struct sk_buff *p, struct scb *scb, uint frag,
uint nfrags, uint queue, uint next_frag_len,
wsec_key_t *key, ratespec_t rspec_override)
{
struct dot11_header *h;
d11txh_t *txh;
u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
- osl_t *osh;
+ struct osl_info *osh;
int len, phylen, rts_phylen;
u16 fc, type, frameid, mch, phyctl, xfts, mainrates;
u16 seq = 0, mcl = 0, status = 0;
@@ -5911,7 +5693,7 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
osh = wlc->osh;
/* locate 802.11 MAC header */
- h = (struct dot11_header *)PKTDATA(p);
+ h = (struct dot11_header *)(p->data);
fc = ltoh16(h->fc);
type = FC_TYPE(fc);
@@ -5935,29 +5717,29 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
ASSERT(tx_info);
/* add PLCP */
- plcp = PKTPUSH(p, D11_PHY_HDR_LEN);
+ plcp = skb_push(p, D11_PHY_HDR_LEN);
/* add Broadcom tx descriptor header */
- txh = (d11txh_t *) PKTPUSH(p, D11_TXH_LEN);
- bzero((char *)txh, D11_TXH_LEN);
+ txh = (d11txh_t *) skb_push(p, D11_TXH_LEN);
+ memset((char *)txh, 0, D11_TXH_LEN);
/* setup frameid */
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
/* non-AP STA should never use BCMC queue */
ASSERT(queue != TX_BCMC_FIFO);
if (queue == TX_BCMC_FIFO) {
- WL_ERROR(("wl%d: %s: ASSERT queue == TX_BCMC!\n",
- WLCWLUNIT(wlc), __func__));
+ WL_ERROR("wl%d: %s: ASSERT queue == TX_BCMC!\n",
+ WLCWLUNIT(wlc), __func__);
frameid = bcmc_fid_generate(wlc, NULL, txh);
} else {
/* Increment the counter for first fragment */
if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
- SCB_SEQNUM(scb, PKTPRIO(p))++;
+ SCB_SEQNUM(scb, p->priority)++;
}
/* extract fragment number from frame first */
seq = ltoh16(seq) & FRAGNUM_MASK;
- seq |= (SCB_SEQNUM(scb, PKTPRIO(p)) << SEQNUM_SHIFT);
+ seq |= (SCB_SEQNUM(scb, p->priority) << SEQNUM_SHIFT);
h->seq = htol16(seq);
frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
@@ -5981,13 +5763,6 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
if (txrate[1]->idx < 0) {
txrate[1] = txrate[0];
}
-#ifdef WLC_HIGH_ONLY
- /* Double protection , just in case */
- if (txrate[0]->idx > HIGHEST_SINGLE_STREAM_MCS)
- txrate[0]->idx = HIGHEST_SINGLE_STREAM_MCS;
- if (txrate[1]->idx > HIGHEST_SINGLE_STREAM_MCS)
- txrate[1]->idx = HIGHEST_SINGLE_STREAM_MCS;
-#endif
for (k = 0; k < hw->max_rates; k++) {
is_mcs[k] =
@@ -6034,7 +5809,8 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
ASSERT(RSPEC_ACTIVE(rspec[k]));
rspec[k] = WLC_RATE_1M;
} else {
- if (WLANTSEL_ENAB(wlc) && !ETHER_ISMULTI(&h->a1)) {
+ if (WLANTSEL_ENAB(wlc) &&
+ !is_multicast_ether_addr(h->a1.octet)) {
/* set tx antenna config */
wlc_antsel_antcfg_get(wlc->asi, false, false, 0,
0, &antcfg, &fbantcfg);
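/*
 * Illustrative sketch, not taken from this patch: the address tests above
 * assume the generic helpers from <linux/etherdevice.h>, which operate on a
 * raw 6-byte address (hence the explicit .octet member) rather than the
 * driver's ether_addr wrapper.  The helper below is hypothetical.
 */
#include <linux/etherdevice.h>

static bool example_valid_unicast_src(const u8 *addr)
{
	/* reject all-zero and multicast source addresses alike */
	return !is_zero_ether_addr(addr) && !is_multicast_ether_addr(addr);
}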
@@ -6131,7 +5907,8 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!IS_MCS(rspec[k]))) {
- WL_ERROR(("wl%d: %s: IEEE80211_TX_RC_MCS != IS_MCS(rspec)\n", WLCWLUNIT(wlc), __func__));
+ WL_ERROR("wl%d: %s: IEEE80211_TX_RC_MCS != IS_MCS(rspec)\n",
+ WLCWLUNIT(wlc), __func__);
ASSERT(0 && "Rate mismatch");
}
@@ -6195,7 +5972,8 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
plcp[0];
/* DUR field for main rate */
- if ((fc != FC_PS_POLL) && !ETHER_ISMULTI(&h->a1) && !use_rifs) {
+ if ((fc != FC_PS_POLL) &&
+ !is_multicast_ether_addr(h->a1.octet) && !use_rifs) {
durid =
wlc_compute_frame_dur(wlc, rspec[0], preamble_type[0],
next_frag_len);
@@ -6213,7 +5991,7 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
/* DUR field for fallback rate */
if (fc == FC_PS_POLL)
txh->FragDurFallback = h->durid;
- else if (ETHER_ISMULTI(&h->a1) || use_rifs)
+ else if (is_multicast_ether_addr(h->a1.octet) || use_rifs)
txh->FragDurFallback = 0;
else {
durid = wlc_compute_frame_dur(wlc, rspec[1],
@@ -6225,7 +6003,7 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
if (frag == 0)
mcl |= TXC_STARTMSDU;
- if (!ETHER_ISMULTI(&h->a1))
+ if (!is_multicast_ether_addr(h->a1.octet))
mcl |= TXC_IMMEDACK;
if (BAND_5G(wlc->band->bandtype))
@@ -6260,7 +6038,7 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
txh->TxFesTimeFallback = htol16(0);
/* TxFrameRA */
- bcopy((char *)&h->a1, (char *)&txh->TxFrameRA, ETHER_ADDR_LEN);
+ bcopy((char *)&h->a1, (char *)&txh->TxFrameRA, ETH_ALEN);
/* TxFrameID */
txh->TxFrameID = htol16(frameid);
@@ -6347,11 +6125,11 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
if (use_cts) {
rts->fc = htol16(FC_CTS);
- bcopy((char *)&h->a2, (char *)&rts->ra, ETHER_ADDR_LEN);
+ bcopy((char *)&h->a2, (char *)&rts->ra, ETH_ALEN);
} else {
rts->fc = htol16((u16) FC_RTS);
bcopy((char *)&h->a1, (char *)&rts->ra,
- 2 * ETHER_ADDR_LEN);
+ 2 * ETH_ALEN);
}
/* mainrate
@@ -6362,9 +6140,10 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
D11A_PHY_HDR_GRATE((ofdm_phy_hdr_t *) rts_plcp) :
rts_plcp[0]) << 8;
} else {
- bzero((char *)txh->RTSPhyHeader, D11_PHY_HDR_LEN);
- bzero((char *)&txh->rts_frame, sizeof(struct dot11_rts_frame));
- bzero((char *)txh->RTSPLCPFallback,
+ memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+ memset((char *)&txh->rts_frame, 0,
+ sizeof(struct dot11_rts_frame));
+ memset((char *)txh->RTSPLCPFallback, 0,
sizeof(txh->RTSPLCPFallback));
txh->RTSDurFallback = 0;
}
@@ -6453,7 +6232,7 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
if (SCB_WME(scb) && qos && wlc->edcf_txop[ac]) {
uint frag_dur, dur, dur_fallback;
- ASSERT(!ETHER_ISMULTI(&h->a1));
+ ASSERT(!is_multicast_ether_addr(h->a1.octet));
/* WME: Update TXOP threshold */
if ((!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) && (frag == 0)) {
@@ -6523,19 +6302,23 @@ wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
}
}
} else
- WL_ERROR(("wl%d: %s txop invalid for rate %d\n",
- wlc->pub->unit, fifo_names[queue],
- RSPEC2RATE(rspec[0])));
+ WL_ERROR("wl%d: %s txop invalid for rate %d\n",
+ wlc->pub->unit, fifo_names[queue],
+ RSPEC2RATE(rspec[0]));
if (dur > wlc->edcf_txop[ac])
- WL_ERROR(("wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n", wlc->pub->unit, __func__, fifo_names[queue], phylen, wlc->fragthresh[queue], dur, wlc->edcf_txop[ac]));
+ WL_ERROR("wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+ wlc->pub->unit, __func__,
+ fifo_names[queue],
+ phylen, wlc->fragthresh[queue],
+ dur, wlc->edcf_txop[ac]);
}
}
return 0;
}
-void wlc_tbtt(wlc_info_t *wlc, d11regs_t *regs)
+void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs)
{
wlc_bsscfg_t *cfg = wlc->cfg;
@@ -6571,19 +6354,19 @@ void wlc_tbtt(wlc_info_t *wlc, d11regs_t *regs)
}
/* GP timer is a freerunning 32 bit counter, decrements at 1 us rate */
-void wlc_hwtimer_gptimer_set(wlc_info_t *wlc, uint us)
+void wlc_hwtimer_gptimer_set(struct wlc_info *wlc, uint us)
{
ASSERT(wlc->pub->corerev >= 3); /* no gptimer in earlier revs */
W_REG(wlc->osh, &wlc->regs->gptimer, us);
}
-void wlc_hwtimer_gptimer_abort(wlc_info_t *wlc)
+void wlc_hwtimer_gptimer_abort(struct wlc_info *wlc)
{
ASSERT(wlc->pub->corerev >= 3);
W_REG(wlc->osh, &wlc->regs->gptimer, 0);
}
-static void wlc_hwtimer_gptimer_cb(wlc_info_t *wlc)
+static void wlc_hwtimer_gptimer_cb(struct wlc_info *wlc)
{
/* when the interrupt is generated, the counter is loaded with the last value
* written and continues to decrement, so it has to be cleared first
@@ -6596,7 +6379,7 @@ static void wlc_hwtimer_gptimer_cb(wlc_info_t *wlc)
* POLICY: no macintstatus change, no bounding loop.
* All dpc bounding should be handled in BMAC dpc, like txstatus and rxint
*/
-void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
+void wlc_high_dpc(struct wlc_info *wlc, u32 macintstatus)
{
d11regs_t *regs = wlc->regs;
#ifdef BCMDBG
@@ -6634,8 +6417,8 @@ void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
if (macintstatus & ~(MI_TBTT | MI_TXSTOP)) {
bcm_format_flags(int_flags, macintstatus, flagstr,
sizeof(flagstr));
- WL_TRACE(("wl%d: macintstatus 0x%x %s\n", wlc->pub->unit,
- macintstatus, flagstr));
+ WL_TRACE("wl%d: macintstatus 0x%x %s\n",
+ wlc->pub->unit, macintstatus, flagstr);
}
#endif /* BCMDBG */
@@ -6650,11 +6433,12 @@ void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
wlc_tbtt(wlc, regs);
if (macintstatus & MI_GP0) {
- WL_ERROR(("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n", wlc->pub->unit, wlc->pub->now));
+ WL_ERROR("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n",
+ wlc->pub->unit, wlc->pub->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, CHIPID(wlc->pub->sih->chip),
- CHIPREV(wlc->pub->sih->chiprev));
+ __func__, wlc->pub->sih->chip,
+ wlc->pub->sih->chiprev);
WLCNTINCR(wlc->pub->_cnt->psmwds);
@@ -6668,7 +6452,9 @@ void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
}
if (macintstatus & MI_RFDISABLE) {
- WL_ERROR(("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n", wlc->pub->unit, R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD));
+ WL_ERROR("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n",
+ wlc->pub->unit,
+ R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD);
/* delay the cleanup to wl_down in IBSS case */
if ((R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD)) {
int idx;
@@ -6677,7 +6463,8 @@ void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
if (!BSSCFG_STA(bsscfg) || !bsscfg->enable
|| !bsscfg->BSS)
continue;
- WL_ERROR(("wl%d: wlc_dpc: rfdisable -> wlc_bsscfg_disable()\n", wlc->pub->unit));
+ WL_ERROR("wl%d: wlc_dpc: rfdisable -> wlc_bsscfg_disable()\n",
+ wlc->pub->unit);
}
}
}
@@ -6686,14 +6473,12 @@ void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
if (!pktq_empty(&wlc->active_queue->q))
wlc_send_q(wlc, wlc->active_queue);
-#ifndef WLC_HIGH_ONLY
ASSERT(wlc_ps_check(wlc));
-#endif
}
-static void *wlc_15420war(wlc_info_t *wlc, uint queue)
+static void *wlc_15420war(struct wlc_info *wlc, uint queue)
{
- hnddma_t *di;
+ struct hnddma_pub *di;
void *p;
ASSERT(queue < NFIFO);
@@ -6715,13 +6500,14 @@ static void *wlc_15420war(wlc_info_t *wlc, uint queue)
if (dma_txactive(wlc->hw->di[queue]) == 0) {
WLCNTINCR(wlc->pub->_cnt->txdmawar);
if (!dma_txreset(di))
- WL_ERROR(("wl%d: %s: dma_txreset[%d]: cannot stop dma\n", wlc->pub->unit, __func__, queue));
+ WL_ERROR("wl%d: %s: dma_txreset[%d]: cannot stop dma\n",
+ wlc->pub->unit, __func__, queue);
dma_txinit(di);
}
return p;
}
-static void wlc_war16165(wlc_info_t *wlc, bool tx)
+static void wlc_war16165(struct wlc_info *wlc, bool tx)
{
if (tx) {
/* the post-increment is used in STAY_AWAKE macro */
@@ -6737,14 +6523,14 @@ static void wlc_war16165(wlc_info_t *wlc, bool tx)
/* process an individual tx_status_t */
/* WLC_HIGH_API */
bool BCMFASTPATH
-wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
+wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
{
- void *p;
+ struct sk_buff *p;
uint queue;
d11txh_t *txh;
struct scb *scb = NULL;
bool free_pdu;
- osl_t *osh;
+ struct osl_info *osh;
int tx_rts, tx_frame_count, tx_rts_count;
uint totlen, supr_status;
bool lastframe;
@@ -6768,7 +6554,7 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
((txs->
status & TX_STATUS_FRM_RTX_MASK) >>
TX_STATUS_FRM_RTX_SHIFT));
- WL_ERROR(("%s: INTERMEDIATE but not AMPDU\n", __func__));
+ WL_ERROR("%s: INTERMEDIATE but not AMPDU\n", __func__);
return false;
}
@@ -6789,12 +6575,12 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
if (p == NULL)
goto fatal;
- txh = (d11txh_t *) PKTDATA(p);
+ txh = (d11txh_t *) (p->data);
mcl = ltoh16(txh->MacTxControlLow);
if (txs->phyerr) {
- WL_ERROR(("phyerr 0x%x, rate 0x%x\n", txs->phyerr,
- txh->MainRates));
+ WL_ERROR("phyerr 0x%x, rate 0x%x\n",
+ txs->phyerr, txh->MainRates);
wlc_print_txdesc(txh);
wlc_print_txstatus(txs);
}
@@ -6825,8 +6611,8 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
supr_status = txs->status & TX_STATUS_SUPR_MASK;
if (supr_status == TX_STATUS_SUPR_BADCH)
- WL_NONE(("%s: Pkt tx suppressed, possibly channel %d\n",
- __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)));
+ WL_NONE("%s: Pkt tx suppressed, possibly channel %d\n",
+ __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
tx_rts = htol16(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
@@ -6837,7 +6623,7 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
lastframe = (fc & FC_MOREFRAG) == 0;
if (!lastframe) {
- WL_ERROR(("Not last frame!\n"));
+ WL_ERROR("Not last frame!\n");
} else {
u16 sfbl, lfbl;
ieee80211_tx_info_clear_status(tx_info);
@@ -6879,17 +6665,17 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
wlc_txfifo_complete(wlc, queue, 1);
if (lastframe) {
- PKTSETNEXT(p, NULL);
- PKTSETLINK(p, NULL);
+ p->next = NULL;
+ p->prev = NULL;
wlc->txretried = 0;
/* remove PLCP & Broadcom tx descriptor header */
- PKTPULL(p, D11_PHY_HDR_LEN);
- PKTPULL(p, D11_TXH_LEN);
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
WLCNTINCR(wlc->pub->_cnt->ieee_tx_status);
} else {
- WL_ERROR(("%s: Not last frame => not calling tx_status\n",
- __func__));
+ WL_ERROR("%s: Not last frame => not calling tx_status\n",
+ __func__);
}
return false;
@@ -6897,24 +6683,18 @@ wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
fatal:
ASSERT(0);
if (p)
- PKTFREE(osh, p, true);
+ pkt_buf_free_skb(osh, p, true);
-#ifdef WLC_HIGH_ONLY
- /* If this is a split driver, do the big-hammer here.
- * If this is a monolithic driver, wlc_bmac.c:wlc_dpc() will do the big-hammer.
- */
- wl_init(wlc->wl);
-#endif
return true;
}
void BCMFASTPATH
-wlc_txfifo_complete(wlc_info_t *wlc, uint fifo, s8 txpktpend)
+wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend)
{
TXPKTPENDDEC(wlc, fifo, txpktpend);
- WL_TRACE(("wlc_txfifo_complete, pktpend dec %d to %d\n", txpktpend,
- TXPKTPENDGET(wlc, fifo)));
+ WL_TRACE("wlc_txfifo_complete, pktpend dec %d to %d\n",
+ txpktpend, TXPKTPENDGET(wlc, fifo));
/* There is more room; mark precedences related to this FIFO sendable */
WLC_TX_FIFO_ENAB(wlc, fifo);
@@ -7014,7 +6794,7 @@ u32 wlc_calc_tbtt_offset(u32 bp, u32 tsf_h, u32 tsf_l)
}
/* Update beacon listen interval in shared memory */
-void wlc_bcn_li_upd(wlc_info_t *wlc)
+void wlc_bcn_li_upd(struct wlc_info *wlc)
{
if (AP_ENAB(wlc->pub))
return;
@@ -7028,7 +6808,7 @@ void wlc_bcn_li_upd(wlc_info_t *wlc)
}
static void
-prep_mac80211_status(wlc_info_t *wlc, d11rxhdr_t *rxh, void *p,
+prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
struct ieee80211_rx_status *rx_status)
{
u32 tsf_l, tsf_h;
@@ -7061,7 +6841,7 @@ prep_mac80211_status(wlc_info_t *wlc, d11rxhdr_t *rxh, void *p,
/* qual */
rx_status->antenna = (rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0; /* ant */
- plcp = PKTDATA(p);
+ plcp = p->data;
rspec = wlc_compute_rspec(rxh, plcp);
if (IS_MCS(rspec)) {
@@ -7108,19 +6888,19 @@ prep_mac80211_status(wlc_info_t *wlc, d11rxhdr_t *rxh, void *p,
rx_status->rate_idx = 11;
break;
default:
- WL_ERROR(("%s: Unknown rate\n", __func__));
+ WL_ERROR("%s: Unknown rate\n", __func__);
}
/* Determine short preamble and rate_idx */
preamble = 0;
if (IS_CCK(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
- WL_ERROR(("Short CCK\n"));
+ WL_ERROR("Short CCK\n");
rx_status->flag |= RX_FLAG_SHORTPRE;
} else if (IS_OFDM(rspec)) {
rx_status->flag |= RX_FLAG_SHORTPRE;
} else {
- WL_ERROR(("%s: Unknown modulation\n", __func__));
+ WL_ERROR("%s: Unknown modulation\n", __func__);
}
}
@@ -7129,16 +6909,17 @@ prep_mac80211_status(wlc_info_t *wlc, d11rxhdr_t *rxh, void *p,
if (rxh->RxStatus1 & RXS_DECERR) {
rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
- WL_ERROR(("%s: RX_FLAG_FAILED_PLCP_CRC\n", __func__));
+ WL_ERROR("%s: RX_FLAG_FAILED_PLCP_CRC\n", __func__);
}
if (rxh->RxStatus1 & RXS_FCSERR) {
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- WL_ERROR(("%s: RX_FLAG_FAILED_FCS_CRC\n", __func__));
+ WL_ERROR("%s: RX_FLAG_FAILED_FCS_CRC\n", __func__);
}
}
static void
-wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh, void *p)
+wlc_recvctl(struct wlc_info *wlc, struct osl_info *osh, d11rxhdr_t *rxh,
+ struct sk_buff *p)
{
int len_mpdu;
struct ieee80211_rx_status rx_status;
@@ -7155,12 +6936,12 @@ wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh, void *p)
prep_mac80211_status(wlc, rxh, p, &rx_status);
/* mac header+body length, exclude CRC and plcp header */
- len_mpdu = PKTLEN(p) - D11_PHY_HDR_LEN - DOT11_FCS_LEN;
- PKTPULL(p, D11_PHY_HDR_LEN);
- PKTSETLEN(p, len_mpdu);
+ len_mpdu = p->len - D11_PHY_HDR_LEN - DOT11_FCS_LEN;
+ skb_pull(p, D11_PHY_HDR_LEN);
+ __skb_trim(p, len_mpdu);
- ASSERT(!PKTNEXT(p));
- ASSERT(!PKTLINK(p));
+ ASSERT(!(p->next));
+ ASSERT(!(p->prev));
ASSERT(IS_ALIGNED((unsigned long)skb->data, 2));
@@ -7168,17 +6949,17 @@ wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh, void *p)
ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
WLCNTINCR(wlc->pub->_cnt->ieee_rx);
- PKTUNALLOC(osh);
+ osh->pktalloced--;
return;
}
-void wlc_bss_list_free(wlc_info_t *wlc, wlc_bss_list_t *bss_list)
+void wlc_bss_list_free(struct wlc_info *wlc, wlc_bss_list_t *bss_list)
{
uint index;
wlc_bss_info_t *bi;
if (!bss_list) {
- WL_ERROR(("%s: Attempting to free NULL list\n", __func__));
+ WL_ERROR("%s: Attempting to free NULL list\n", __func__);
return;
}
/* inspect all BSS descriptor */
@@ -7201,48 +6982,48 @@ void wlc_bss_list_free(wlc_info_t *wlc, wlc_bss_list_t *bss_list)
* Param 'bound' indicates the max. # of frames to process before breaking out.
*/
/* WLC_HIGH_API */
-void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
+void BCMFASTPATH wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
{
d11rxhdr_t *rxh;
struct dot11_header *h;
- osl_t *osh;
+ struct osl_info *osh;
u16 fc;
uint len;
bool is_amsdu;
- WL_TRACE(("wl%d: wlc_recv\n", wlc->pub->unit));
+ WL_TRACE("wl%d: wlc_recv\n", wlc->pub->unit);
osh = wlc->osh;
/* frame starts with rxhdr */
- rxh = (d11rxhdr_t *) PKTDATA(p);
+ rxh = (d11rxhdr_t *) (p->data);
/* strip off rxhdr */
- PKTPULL(p, wlc->hwrxoff);
+ skb_pull(p, wlc->hwrxoff);
/* fixup rx header endianness */
ltoh16_buf((void *)rxh, sizeof(d11rxhdr_t));
/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
if (rxh->RxStatus1 & RXS_PBPRES) {
- if (PKTLEN(p) < 2) {
+ if (p->len < 2) {
WLCNTINCR(wlc->pub->_cnt->rxrunt);
- WL_ERROR(("wl%d: wlc_recv: rcvd runt of len %d\n",
- wlc->pub->unit, PKTLEN(p)));
+ WL_ERROR("wl%d: wlc_recv: rcvd runt of len %d\n",
+ wlc->pub->unit, p->len);
goto toss;
}
- PKTPULL(p, 2);
+ skb_pull(p, 2);
}
- h = (struct dot11_header *)(PKTDATA(p) + D11_PHY_HDR_LEN);
- len = PKTLEN(p);
+ h = (struct dot11_header *)(p->data + D11_PHY_HDR_LEN);
+ len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- WL_ERROR(("FCSERR while scanning******* - tossing\n"));
+ WL_ERROR("FCSERR while scanning******* - tossing\n");
goto toss;
} else {
- WL_ERROR(("RCSERR!!!\n"));
+ WL_ERROR("RCSERR!!!\n");
goto toss;
}
}
@@ -7261,10 +7042,10 @@ void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
if (!is_amsdu) {
/* CTS and ACK CTL frames are w/o a2 */
if (FC_TYPE(fc) == FC_TYPE_DATA || FC_TYPE(fc) == FC_TYPE_MNG) {
- if ((ETHER_ISNULLADDR(&h->a2) || ETHER_ISMULTI(&h->a2))) {
- WL_ERROR(("wl%d: %s: dropping a frame with "
- "invalid src mac address, a2: %pM\n",
- wlc->pub->unit, __func__, &h->a2));
+ if ((is_zero_ether_addr(h->a2.octet) ||
+ is_multicast_ether_addr(h->a2.octet))) {
+ WL_ERROR("wl%d: %s: dropping a frame with invalid src mac address, a2: %pM\n",
+ wlc->pub->unit, __func__, &h->a2);
WLCNTINCR(wlc->pub->_cnt->rxbadsrcmac);
goto toss;
}
@@ -7279,7 +7060,7 @@ void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
}
if (is_amsdu) {
- WL_ERROR(("%s: is_amsdu causing toss\n", __func__));
+ WL_ERROR("%s: is_amsdu causing toss\n", __func__);
goto toss;
}
@@ -7287,7 +7068,7 @@ void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
return;
toss:
- PKTFREE(osh, p, false);
+ pkt_buf_free_skb(osh, p, false);
}
/* calculate frame duration for Mixed-mode L-SIG spoofing, return
@@ -7297,12 +7078,12 @@ void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
* len = 3(nsyms + nstream + 3) - 3
*/
u16 BCMFASTPATH
-wlc_calc_lsig_len(wlc_info_t *wlc, ratespec_t ratespec, uint mac_len)
+wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec, uint mac_len)
{
uint nsyms, len = 0, kNdps;
- WL_TRACE(("wl%d: wlc_calc_lsig_len: rate %d, len%d\n", wlc->pub->unit,
- RSPEC2RATE(ratespec), mac_len));
+ WL_TRACE("wl%d: wlc_calc_lsig_len: rate %d, len%d\n",
+ wlc->pub->unit, RSPEC2RATE(ratespec), mac_len);
if (IS_MCS(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7338,7 +7119,7 @@ wlc_calc_lsig_len(wlc_info_t *wlc, ratespec_t ratespec, uint mac_len)
/* calculate frame duration of a given rate and length, return time in usec unit */
uint BCMFASTPATH
-wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
+wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
@@ -7346,11 +7127,12 @@ wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
if (rate == 0) {
ASSERT(0);
- WL_ERROR(("wl%d: WAR: using rate of 1 mbps\n", wlc->pub->unit));
+ WL_ERROR("wl%d: WAR: using rate of 1 mbps\n", wlc->pub->unit);
rate = WLC_RATE_1M;
}
- WL_TRACE(("wl%d: wlc_calc_frame_time: rspec 0x%x, preamble_type %d, len%d\n", wlc->pub->unit, ratespec, preamble_type, mac_len));
+ WL_TRACE("wl%d: wlc_calc_frame_time: rspec 0x%x, preamble_type %d, len%d\n",
+ wlc->pub->unit, ratespec, preamble_type, mac_len);
if (IS_MCS(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7408,13 +7190,14 @@ wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
/* The opposite of wlc_calc_frame_time */
static uint
-wlc_calc_frame_len(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
+wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
uint dur)
{
uint nsyms, mac_len, Ndps, kNdps;
uint rate = RSPEC2RATE(ratespec);
- WL_TRACE(("wl%d: wlc_calc_frame_len: rspec 0x%x, preamble_type %d, dur %d\n", wlc->pub->unit, ratespec, preamble_type, dur));
+ WL_TRACE("wl%d: wlc_calc_frame_len: rspec 0x%x, preamble_type %d, dur %d\n",
+ wlc->pub->unit, ratespec, preamble_type, dur);
if (IS_MCS(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7454,10 +7237,10 @@ wlc_calc_frame_len(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
}
static uint
-wlc_calc_ba_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
{
- WL_TRACE(("wl%d: wlc_calc_ba_time: rspec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type));
+ WL_TRACE("wl%d: wlc_calc_ba_time: rspec 0x%x, preamble_type %d\n",
+ wlc->pub->unit, rspec, preamble_type);
/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
* or equal to the rate of the immediately previous frame in the FES
*/
@@ -7471,12 +7254,12 @@ wlc_calc_ba_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
}
static uint BCMFASTPATH
-wlc_calc_ack_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
{
uint dur = 0;
- WL_TRACE(("wl%d: wlc_calc_ack_time: rspec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type));
+ WL_TRACE("wl%d: wlc_calc_ack_time: rspec 0x%x, preamble_type %d\n",
+ wlc->pub->unit, rspec, preamble_type);
/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
* or equal to the rate of the immediately previous frame in the FES
*/
@@ -7491,15 +7274,15 @@ wlc_calc_ack_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
}
static uint
-wlc_calc_cts_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
{
- WL_TRACE(("wl%d: wlc_calc_cts_time: ratespec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type));
+ WL_TRACE("wl%d: wlc_calc_cts_time: ratespec 0x%x, preamble_type %d\n",
+ wlc->pub->unit, rspec, preamble_type);
return wlc_calc_ack_time(wlc, rspec, preamble_type);
}
/* derive wlc->band->basic_rate[] table from 'rateset' */
-void wlc_rate_lookup_init(wlc_info_t *wlc, wlc_rateset_t *rateset)
+void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
{
u8 rate;
u8 mandatory;
@@ -7509,7 +7292,7 @@ void wlc_rate_lookup_init(wlc_info_t *wlc, wlc_rateset_t *rateset)
uint i;
/* incoming rates are in 500kbps units as in 802.11 Supported Rates */
- bzero(br, WLC_MAXRATE + 1);
+ memset(br, 0, WLC_MAXRATE + 1);
/* For each basic rate in the rates list, make an entry in the
* best basic lookup.
@@ -7523,7 +7306,8 @@ void wlc_rate_lookup_init(wlc_info_t *wlc, wlc_rateset_t *rateset)
rate = (rateset->rates[i] & RATE_MASK);
if (rate > WLC_MAXRATE) {
- WL_ERROR(("wlc_rate_lookup_init: invalid rate 0x%X in rate set\n", rateset->rates[i]));
+ WL_ERROR("wlc_rate_lookup_init: invalid rate 0x%X in rate set\n",
+ rateset->rates[i]);
continue;
}
@@ -7588,7 +7372,7 @@ void wlc_rate_lookup_init(wlc_info_t *wlc, wlc_rateset_t *rateset)
}
}
-static void wlc_write_rate_shm(wlc_info_t *wlc, u8 rate, u8 basic_rate)
+static void wlc_write_rate_shm(struct wlc_info *wlc, u8 rate, u8 basic_rate)
{
u8 phy_rate, index;
u8 basic_phy_rate, basic_index;
@@ -7621,7 +7405,7 @@ static void wlc_write_rate_shm(wlc_info_t *wlc, u8 rate, u8 basic_rate)
wlc_write_shm(wlc, (basic_table + index * 2), basic_ptr);
}
-static const wlc_rateset_t *wlc_rateset_get_hwrs(wlc_info_t *wlc)
+static const wlc_rateset_t *wlc_rateset_get_hwrs(struct wlc_info *wlc)
{
const wlc_rateset_t *rs_dflt;
@@ -7638,7 +7422,7 @@ static const wlc_rateset_t *wlc_rateset_get_hwrs(wlc_info_t *wlc)
return rs_dflt;
}
-void wlc_set_ratetable(wlc_info_t *wlc)
+void wlc_set_ratetable(struct wlc_info *wlc)
{
const wlc_rateset_t *rs_dflt;
wlc_rateset_t rs;
@@ -7674,7 +7458,8 @@ void wlc_set_ratetable(wlc_info_t *wlc)
* Return true if the specified rate is supported by the specified band.
* WLC_BAND_AUTO indicates the current band.
*/
-bool wlc_valid_rate(wlc_info_t *wlc, ratespec_t rspec, int band, bool verbose)
+bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rspec, int band,
+ bool verbose)
{
wlc_rateset_t *hw_rateset;
uint i;
@@ -7701,16 +7486,17 @@ bool wlc_valid_rate(wlc_info_t *wlc, ratespec_t rspec, int band, bool verbose)
return true;
error:
if (verbose) {
- WL_ERROR(("wl%d: wlc_valid_rate: rate spec 0x%x not in hw_rateset\n", wlc->pub->unit, rspec));
+ WL_ERROR("wl%d: wlc_valid_rate: rate spec 0x%x not in hw_rateset\n",
+ wlc->pub->unit, rspec);
}
return false;
}
-static void wlc_update_mimo_band_bwcap(wlc_info_t *wlc, u8 bwcap)
+static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap)
{
uint i;
- wlcband_t *band;
+ struct wlcband *band;
for (i = 0; i < NBANDS(wlc); i++) {
if (IS_SINGLEBAND_5G(wlc->deviceid))
@@ -7734,7 +7520,7 @@ static void wlc_update_mimo_band_bwcap(wlc_info_t *wlc, u8 bwcap)
wlc->mimo_band_bwcap = bwcap;
}
-void wlc_mod_prb_rsp_rate_table(wlc_info_t *wlc, uint frame_len)
+void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len)
{
const wlc_rateset_t *rs_dflt;
wlc_rateset_t rs;
@@ -7777,14 +7563,14 @@ void wlc_mod_prb_rsp_rate_table(wlc_info_t *wlc, uint frame_len)
}
u16
-wlc_compute_bcntsfoff(wlc_info_t *wlc, ratespec_t rspec, bool short_preamble,
- bool phydelay)
+wlc_compute_bcntsfoff(struct wlc_info *wlc, ratespec_t rspec,
+ bool short_preamble, bool phydelay)
{
uint bcntsfoff = 0;
if (IS_MCS(rspec)) {
- WL_ERROR(("wl%d: recd beacon with mcs rate; rspec 0x%x\n",
- wlc->pub->unit, rspec));
+ WL_ERROR("wl%d: recd beacon with mcs rate; rspec 0x%x\n",
+ wlc->pub->unit, rspec);
} else if (IS_OFDM(rspec)) {
/* tx delay from MAC through phy to air (2.1 usec) +
* phy header time (preamble + PLCP SIGNAL == 20 usec) +
@@ -7824,7 +7610,7 @@ wlc_compute_bcntsfoff(wlc_info_t *wlc, ratespec_t rspec, bool short_preamble,
* and included up to, but not including, the 4 byte FCS.
*/
static void
-wlc_bcn_prb_template(wlc_info_t *wlc, uint type, ratespec_t bcn_rspec,
+wlc_bcn_prb_template(struct wlc_info *wlc, uint type, ratespec_t bcn_rspec,
wlc_bsscfg_t *cfg, u16 *buf, int *len)
{
cck_phy_hdr_t *plcp;
@@ -7843,7 +7629,7 @@ wlc_bcn_prb_template(wlc_info_t *wlc, uint type, ratespec_t bcn_rspec,
*len = hdr_len + body_len; /* return actual size */
/* format PHY and MAC headers */
- bzero((char *)buf, hdr_len);
+ memset((char *)buf, 0, hdr_len);
plcp = (cck_phy_hdr_t *) buf;
@@ -7872,9 +7658,9 @@ wlc_bcn_prb_template(wlc_info_t *wlc, uint type, ratespec_t bcn_rspec,
/* A1 filled in by MAC for prb resp, broadcast for bcn */
if (type == FC_BEACON)
bcopy((const char *)&ether_bcast, (char *)&h->da,
- ETHER_ADDR_LEN);
- bcopy((char *)&cfg->cur_etheraddr, (char *)&h->sa, ETHER_ADDR_LEN);
- bcopy((char *)&cfg->BSSID, (char *)&h->bssid, ETHER_ADDR_LEN);
+ ETH_ALEN);
+ bcopy((char *)&cfg->cur_etheraddr, (char *)&h->sa, ETH_ALEN);
+ bcopy((char *)&cfg->BSSID, (char *)&h->bssid, ETH_ALEN);
/* SEQ filled in by MAC */
@@ -7891,7 +7677,7 @@ int wlc_get_header_len()
* template updated.
* Otherwise, it updates the hardware template.
*/
-void wlc_bss_update_beacon(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
+void wlc_bss_update_beacon(struct wlc_info *wlc, wlc_bsscfg_t *cfg)
{
int len = BCN_TMPL_LEN;
@@ -7907,7 +7693,7 @@ void wlc_bss_update_beacon(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
u16 bcn[BCN_TMPL_LEN / 2];
u32 both_valid = MCMD_BCN0VLD | MCMD_BCN1VLD;
d11regs_t *regs = wlc->regs;
- osl_t *osh = NULL;
+ struct osl_info *osh = NULL;
osh = wlc->osh;
@@ -7944,7 +7730,7 @@ void wlc_bss_update_beacon(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
/*
* Update all beacons for the system.
*/
-void wlc_update_beacon(wlc_info_t *wlc)
+void wlc_update_beacon(struct wlc_info *wlc)
{
int idx;
wlc_bsscfg_t *bsscfg;
@@ -7957,14 +7743,14 @@ void wlc_update_beacon(wlc_info_t *wlc)
}
/* Write ssid into shared memory */
-void wlc_shm_ssid_upd(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
+void wlc_shm_ssid_upd(struct wlc_info *wlc, wlc_bsscfg_t *cfg)
{
u8 *ssidptr = cfg->SSID;
u16 base = M_SSID;
u8 ssidbuf[DOT11_MAX_SSID_LEN];
/* pad the ssid with zeroes and copy it into shm */
- bzero(ssidbuf, DOT11_MAX_SSID_LEN);
+ memset(ssidbuf, 0, DOT11_MAX_SSID_LEN);
bcopy(ssidptr, ssidbuf, cfg->SSID_len);
wlc_copyto_shm(wlc, base, ssidbuf, DOT11_MAX_SSID_LEN);
@@ -7973,7 +7759,7 @@ void wlc_shm_ssid_upd(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
wlc_write_shm(wlc, M_SSIDLEN, (u16) cfg->SSID_len);
}
-void wlc_update_probe_resp(wlc_info_t *wlc, bool suspend)
+void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend)
{
int idx;
wlc_bsscfg_t *bsscfg;
@@ -7986,7 +7772,7 @@ void wlc_update_probe_resp(wlc_info_t *wlc, bool suspend)
}
void
-wlc_bss_update_probe_resp(wlc_info_t *wlc, wlc_bsscfg_t *cfg, bool suspend)
+wlc_bss_update_probe_resp(struct wlc_info *wlc, wlc_bsscfg_t *cfg, bool suspend)
{
u16 prb_resp[BCN_TMPL_LEN / 2];
int len = BCN_TMPL_LEN;
@@ -8027,9 +7813,9 @@ wlc_bss_update_probe_resp(wlc_info_t *wlc, wlc_bsscfg_t *cfg, bool suspend)
}
/* prepares pdu for transmission. returns BCM error codes */
-int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifop)
+int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifop)
{
- osl_t *osh;
+ struct osl_info *osh;
uint fifo;
d11txh_t *txh;
struct dot11_header *h;
@@ -8039,7 +7825,7 @@ int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifop)
osh = wlc->osh;
ASSERT(pdu);
- txh = (d11txh_t *) PKTDATA(pdu);
+ txh = (d11txh_t *) (pdu->data);
ASSERT(txh);
h = (struct dot11_header *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
ASSERT(h);
@@ -8066,7 +7852,7 @@ int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifop)
}
/* init tx reported rate mechanism */
-void wlc_reprate_init(wlc_info_t *wlc)
+void wlc_reprate_init(struct wlc_info *wlc)
{
int i;
wlc_bsscfg_t *bsscfg;
@@ -8080,13 +7866,13 @@ void wlc_reprate_init(wlc_info_t *wlc)
void wlc_bsscfg_reprate_init(wlc_bsscfg_t *bsscfg)
{
bsscfg->txrspecidx = 0;
- bzero((char *)bsscfg->txrspec, sizeof(bsscfg->txrspec));
+ memset((char *)bsscfg->txrspec, 0, sizeof(bsscfg->txrspec));
}
/* Retrieve a consolidated set of revision information,
* typically for the WLC_GET_REVINFO ioctl
*/
-int wlc_get_revision_info(wlc_info_t *wlc, void *buf, uint len)
+int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len)
{
wlc_rev_info_t *rinfo = (wlc_rev_info_t *) buf;
@@ -8120,7 +7906,7 @@ int wlc_get_revision_info(wlc_info_t *wlc, void *buf, uint len)
return BCME_OK;
}
-void wlc_default_rateset(wlc_info_t *wlc, wlc_rateset_t *rs)
+void wlc_default_rateset(struct wlc_info *wlc, wlc_rateset_t *rs)
{
wlc_rateset_default(rs, NULL, wlc->band->phytype, wlc->band->bandtype,
false, RATE_MASK_FULL, (bool) N_ENAB(wlc->pub),
@@ -8128,14 +7914,14 @@ void wlc_default_rateset(wlc_info_t *wlc, wlc_rateset_t *rs)
wlc->stf->txstreams);
}
-static void wlc_bss_default_init(wlc_info_t *wlc)
+static void wlc_bss_default_init(struct wlc_info *wlc)
{
chanspec_t chanspec;
- wlcband_t *band;
+ struct wlcband *band;
wlc_bss_info_t *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
- bzero((char *)(bi), sizeof(wlc_bss_info_t));
+ memset((char *)(bi), 0, sizeof(wlc_bss_info_t));
bi->beacon_period = ISSIM_ENAB(wlc->pub->sih) ? BEACON_INTERVAL_DEF_QT :
BEACON_INTERVAL_DEFAULT;
bi->dtim_period = ISSIM_ENAB(wlc->pub->sih) ? DTIM_INTERVAL_DEF_QT :
@@ -8166,7 +7952,7 @@ static void wlc_bss_default_init(wlc_info_t *wlc)
/* Deferred event processing */
static void wlc_process_eventq(void *arg)
{
- wlc_info_t *wlc = (wlc_info_t *) arg;
+ struct wlc_info *wlc = (struct wlc_info *) arg;
wlc_event_t *etmp;
while ((etmp = wlc_eventq_deq(wlc->eventq))) {
@@ -8192,7 +7978,8 @@ wlc_uint64_sub(u32 *a_high, u32 *a_low, u32 b_high, u32 b_low)
}
static ratespec_t
-mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
+mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
+ u32 int_val)
{
u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
u8 rate = int_val & NRATE_RATE_MASK;
@@ -8211,8 +7998,8 @@ mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
if (N_ENAB(wlc->pub) && ismcs) {
/* mcs only allowed when nmode */
if (stf > PHY_TXC1_MODE_SDM) {
- WL_ERROR(("wl%d: %s: Invalid stf\n", WLCWLUNIT(wlc),
- __func__));
+ WL_ERROR("wl%d: %s: Invalid stf\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
@@ -8222,8 +8009,8 @@ mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
if (!CHSPEC_IS40(wlc->home_chanspec) ||
((stf != PHY_TXC1_MODE_SISO)
&& (stf != PHY_TXC1_MODE_CDD))) {
- WL_ERROR(("wl%d: %s: Invalid mcs 32\n",
- WLCWLUNIT(wlc), __func__));
+ WL_ERROR("wl%d: %s: Invalid mcs 32\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
@@ -8231,7 +8018,8 @@ mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
/* mcs > 7 must use stf SDM */
if (stf != PHY_TXC1_MODE_SDM) {
- WL_TRACE(("wl%d: %s: enabling SDM mode for mcs %d\n", WLCWLUNIT(wlc), __func__, rate));
+ WL_TRACE("wl%d: %s: enabling SDM mode for mcs %d\n",
+ WLCWLUNIT(wlc), __func__, rate);
stf = PHY_TXC1_MODE_SDM;
}
} else {
@@ -8239,37 +8027,37 @@ mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
if ((stf > PHY_TXC1_MODE_STBC) ||
(!WLC_STBC_CAP_PHY(wlc)
&& (stf == PHY_TXC1_MODE_STBC))) {
- WL_ERROR(("wl%d: %s: Invalid STBC\n",
- WLCWLUNIT(wlc), __func__));
+ WL_ERROR("wl%d: %s: Invalid STBC\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
}
} else if (IS_OFDM(rate)) {
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
- WL_ERROR(("wl%d: %s: Invalid OFDM\n", WLCWLUNIT(wlc),
- __func__));
+ WL_ERROR("wl%d: %s: Invalid OFDM\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
} else if (IS_CCK(rate)) {
if ((cur_band->bandtype != WLC_BAND_2G)
|| (stf != PHY_TXC1_MODE_SISO)) {
- WL_ERROR(("wl%d: %s: Invalid CCK\n", WLCWLUNIT(wlc),
- __func__));
+ WL_ERROR("wl%d: %s: Invalid CCK\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
} else {
- WL_ERROR(("wl%d: %s: Unknown rate type\n", WLCWLUNIT(wlc),
- __func__));
+ WL_ERROR("wl%d: %s: Unknown rate type\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
- WL_ERROR(("wl%d: %s: SISO antenna but !SISO request\n",
- WLCWLUNIT(wlc), __func__));
+ WL_ERROR("wl%d: %s: SISO antenna but !SISO request\n",
+ WLCWLUNIT(wlc), __func__);
bcmerror = BCME_RANGE;
goto done;
}
@@ -8300,13 +8088,13 @@ mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
return rspec;
done:
- WL_ERROR(("Hoark\n"));
+ WL_ERROR("Hoark\n");
return rate;
}
/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
static int
-wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
+wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
bool writeToShm)
{
int idle_busy_ratio_x_16 = 0;
@@ -8314,8 +8102,7 @@ wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
M_TX_IDLE_BUSY_RATIO_X_16_CCK;
if (duty_cycle > 100 || duty_cycle < 0) {
- WL_ERROR(("wl%d: duty cycle value off limit\n",
- wlc->pub->unit));
+ WL_ERROR("wl%d: duty cycle value off limit\n", wlc->pub->unit);
return BCME_RANGE;
}
if (duty_cycle)
@@ -8335,7 +8122,7 @@ wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
/* Read a single u16 from shared memory.
* SHM 'offset' needs to be an even address
*/
-u16 wlc_read_shm(wlc_info_t *wlc, uint offset)
+u16 wlc_read_shm(struct wlc_info *wlc, uint offset)
{
return wlc_bmac_read_shm(wlc->hw, offset);
}
@@ -8343,7 +8130,7 @@ u16 wlc_read_shm(wlc_info_t *wlc, uint offset)
/* Write a single u16 to shared memory.
* SHM 'offset' needs to be an even address
*/
-void wlc_write_shm(wlc_info_t *wlc, uint offset, u16 v)
+void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v)
{
wlc_bmac_write_shm(wlc->hw, offset, v);
}
@@ -8352,7 +8139,7 @@ void wlc_write_shm(wlc_info_t *wlc, uint offset, u16 v)
* SHM 'offset' needs to be an even address and
* Range length 'len' must be an even number of bytes
*/
-void wlc_set_shm(wlc_info_t *wlc, uint offset, u16 v, int len)
+void wlc_set_shm(struct wlc_info *wlc, uint offset, u16 v, int len)
{
/* offset and len need to be even */
ASSERT((offset & 1) == 0);
@@ -8368,7 +8155,7 @@ void wlc_set_shm(wlc_info_t *wlc, uint offset, u16 v, int len)
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
*/
-void wlc_copyto_shm(wlc_info_t *wlc, uint offset, const void *buf, int len)
+void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf, int len)
{
/* offset and len need to be even */
ASSERT((offset & 1) == 0);
@@ -8384,7 +8171,7 @@ void wlc_copyto_shm(wlc_info_t *wlc, uint offset, const void *buf, int len)
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
*/
-void wlc_copyfrom_shm(wlc_info_t *wlc, uint offset, void *buf, int len)
+void wlc_copyfrom_shm(struct wlc_info *wlc, uint offset, void *buf, int len)
{
/* offset and len need to be even */
ASSERT((offset & 1) == 0);
@@ -8397,71 +8184,73 @@ void wlc_copyfrom_shm(wlc_info_t *wlc, uint offset, void *buf, int len)
}
/* wrapper BMAC functions for HIGH driver access */
-void wlc_mctrl(wlc_info_t *wlc, u32 mask, u32 val)
+void wlc_mctrl(struct wlc_info *wlc, u32 mask, u32 val)
{
wlc_bmac_mctrl(wlc->hw, mask, val);
}
-void wlc_corereset(wlc_info_t *wlc, u32 flags)
+void wlc_corereset(struct wlc_info *wlc, u32 flags)
{
wlc_bmac_corereset(wlc->hw, flags);
}
-void wlc_mhf(wlc_info_t *wlc, u8 idx, u16 mask, u16 val, int bands)
+void wlc_mhf(struct wlc_info *wlc, u8 idx, u16 mask, u16 val, int bands)
{
wlc_bmac_mhf(wlc->hw, idx, mask, val, bands);
}
-u16 wlc_mhf_get(wlc_info_t *wlc, u8 idx, int bands)
+u16 wlc_mhf_get(struct wlc_info *wlc, u8 idx, int bands)
{
return wlc_bmac_mhf_get(wlc->hw, idx, bands);
}
-int wlc_xmtfifo_sz_get(wlc_info_t *wlc, uint fifo, uint *blocks)
+int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks)
{
return wlc_bmac_xmtfifo_sz_get(wlc->hw, fifo, blocks);
}
-void wlc_write_template_ram(wlc_info_t *wlc, int offset, int len, void *buf)
+void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
+ void *buf)
{
wlc_bmac_write_template_ram(wlc->hw, offset, len, buf);
}
-void wlc_write_hw_bcntemplates(wlc_info_t *wlc, void *bcn, int len, bool both)
+void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
+ bool both)
{
wlc_bmac_write_hw_bcntemplates(wlc->hw, bcn, len, both);
}
void
-wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
+wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
const struct ether_addr *addr)
{
wlc_bmac_set_addrmatch(wlc->hw, match_reg_offset, addr);
}
-void wlc_set_rcmta(wlc_info_t *wlc, int idx, const struct ether_addr *addr)
+void wlc_set_rcmta(struct wlc_info *wlc, int idx, const struct ether_addr *addr)
{
wlc_bmac_set_rcmta(wlc->hw, idx, addr);
}
-void wlc_read_tsf(wlc_info_t *wlc, u32 *tsf_l_ptr, u32 *tsf_h_ptr)
+void wlc_read_tsf(struct wlc_info *wlc, u32 *tsf_l_ptr, u32 *tsf_h_ptr)
{
wlc_bmac_read_tsf(wlc->hw, tsf_l_ptr, tsf_h_ptr);
}
-void wlc_set_cwmin(wlc_info_t *wlc, u16 newmin)
+void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin)
{
wlc->band->CWmin = newmin;
wlc_bmac_set_cwmin(wlc->hw, newmin);
}
-void wlc_set_cwmax(wlc_info_t *wlc, u16 newmax)
+void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax)
{
wlc->band->CWmax = newmax;
wlc_bmac_set_cwmax(wlc->hw, newmax);
}
-void wlc_fifoerrors(wlc_info_t *wlc)
+void wlc_fifoerrors(struct wlc_info *wlc)
{
wlc_bmac_fifoerrors(wlc->hw);
@@ -8469,19 +8258,16 @@ void wlc_fifoerrors(wlc_info_t *wlc)
/* Search mem rw utilities */
-void wlc_pllreq(wlc_info_t *wlc, bool set, mbool req_bit)
+void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit)
{
wlc_bmac_pllreq(wlc->hw, set, req_bit);
}
-void wlc_reset_bmac_done(wlc_info_t *wlc)
+void wlc_reset_bmac_done(struct wlc_info *wlc)
{
-#ifdef WLC_HIGH_ONLY
- wlc->reset_bmac_pending = false;
-#endif
}
-void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode)
+void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode)
{
wlc->ht_cap.cap &= ~HT_CAP_MIMO_PS_MASK;
wlc->ht_cap.cap |= (mimops_mode << HT_CAP_MIMO_PS_SHIFT);
@@ -8494,7 +8280,7 @@ void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode)
/* check for the particular priority flow control bit being set */
bool
-wlc_txflowcontrol_prio_isset(wlc_info_t *wlc, wlc_txq_info_t *q, int prio)
+wlc_txflowcontrol_prio_isset(struct wlc_info *wlc, wlc_txq_info_t *q, int prio)
{
uint prio_mask;
@@ -8509,12 +8295,13 @@ wlc_txflowcontrol_prio_isset(wlc_info_t *wlc, wlc_txq_info_t *q, int prio)
}
/* propagate the flow control to all interfaces using the given tx queue */
-void wlc_txflowcontrol(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on, int prio)
+void wlc_txflowcontrol(struct wlc_info *wlc, wlc_txq_info_t *qi,
+ bool on, int prio)
{
uint prio_bits;
uint cur_bits;
- WL_ERROR(("%s: flow contro kicks in\n", __func__));
+ WL_ERROR("%s: flow control kicks in\n", __func__);
if (prio == ALLPRIO) {
prio_bits = TXQ_STOP_FOR_PRIOFC_MASK;
@@ -8551,7 +8338,7 @@ void wlc_txflowcontrol(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on, int prio)
}
void
-wlc_txflowcontrol_override(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+wlc_txflowcontrol_override(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
uint override)
{
uint prev_override;
@@ -8598,7 +8385,7 @@ wlc_txflowcontrol_override(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
}
}
-static void wlc_txflowcontrol_reset(wlc_info_t *wlc)
+static void wlc_txflowcontrol_reset(struct wlc_info *wlc)
{
wlc_txq_info_t *qi;
@@ -8611,10 +8398,10 @@ static void wlc_txflowcontrol_reset(wlc_info_t *wlc)
}
static void
-wlc_txflowcontrol_signal(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+wlc_txflowcontrol_signal(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
int prio)
{
- wlc_if_t *wlcif;
+ struct wlc_if *wlcif;
for (wlcif = wlc->wlcif_list; wlcif != NULL; wlcif = wlcif->next) {
if (wlcif->qi == qi && wlcif->flags & WLC_IF_LINKED)
@@ -8622,7 +8409,7 @@ wlc_txflowcontrol_signal(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
}
}
-static wlc_txq_info_t *wlc_txq_alloc(wlc_info_t *wlc, osl_t *osh)
+static wlc_txq_info_t *wlc_txq_alloc(struct wlc_info *wlc, struct osl_info *osh)
{
wlc_txq_info_t *qi, *p;
@@ -8652,7 +8439,8 @@ static wlc_txq_info_t *wlc_txq_alloc(wlc_info_t *wlc, osl_t *osh)
return qi;
}
-static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi)
+static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
+ wlc_txq_info_t *qi)
{
wlc_txq_info_t *p;
@@ -8673,3 +8461,16 @@ static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi)
kfree(qi);
}
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void wlc_scan_start(struct wlc_info *wlc)
+{
+ wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void wlc_scan_stop(struct wlc_info *wlc)
+{
+ wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.h b/drivers/staging/brcm80211/sys/wlc_mac80211.h
index 6a77591234b7..5df996b78911 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.h
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.h
@@ -17,19 +17,10 @@
#ifndef _wlc_h_
#define _wlc_h_
-#include <wlc_types.h>
-
-#include <wl_dbg.h>
#include <wlioctl.h>
-#include <wlc_event.h>
#include <wlc_phy_hal.h>
#include <wlc_channel.h>
-#ifdef WLC_SPLIT
-#include <bcm_rpc.h>
-#endif
-
#include <wlc_bsscfg.h>
-
#include <wlc_scb.h>
#define MA_WINDOW_SZ 8 /* moving average window size */
@@ -220,15 +211,11 @@ extern const u8 prio2fifo[];
* (some platforms return all 0).
* If clocks are present, call the sb routine which will figure out if the device is removed.
*/
-#ifdef WLC_HIGH_ONLY
-#define DEVICEREMOVED(wlc) (!wlc->device_present)
-#else
#define DEVICEREMOVED(wlc) \
((wlc->hw->clk) ? \
((R_REG(wlc->hw->osh, &wlc->hw->regs->maccontrol) & \
(MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN) : \
(si_deviceremoved(wlc->hw->sih)))
-#endif /* WLC_HIGH_ONLY */
#define WLCWLUNIT(wlc) ((wlc)->pub->unit)
@@ -315,22 +302,20 @@ typedef struct wlc_stf {
/*
* core state (mac)
*/
-typedef struct wlccore {
-#ifdef WLC_LOW
+struct wlccore {
uint coreidx; /* # sb enumerated core */
/* fifo */
uint *txavail[NFIFO]; /* # tx descriptors available */
s16 txpktpend[NFIFO]; /* tx admission control */
-#endif /* WLC_LOW */
macstat_t *macstat_snapshot; /* mac hw prev read values */
-} wlccore_t;
+};
/*
* band state (phy+ana+radio)
*/
-typedef struct wlcband {
+struct wlcband {
int bandtype; /* WLC_BAND_2G, WLC_BAND_5G */
uint bandunit; /* bandstate[] index */
@@ -359,13 +344,13 @@ typedef struct wlcband {
u16 CWmin; /* The minimum size of contention window, in units of aSlotTime */
u16 CWmax; /* The maximum size of contention window, in units of aSlotTime */
u16 bcntsfoff; /* beacon tsf offset */
-} wlcband_t;
+};
/* generic function callback takes just one arg */
typedef void (*cb_fn_t) (void *);
/* tx completion callback takes 3 args */
-typedef void (*pkcb_fn_t) (wlc_info_t *wlc, uint txstatus, void *arg);
+typedef void (*pkcb_fn_t) (struct wlc_info *wlc, uint txstatus, void *arg);
typedef struct pkt_cb {
pkcb_fn_t fn; /* function to call when tx frame completes */
@@ -398,14 +383,14 @@ typedef struct dumpcb_s {
/* virtual interface */
struct wlc_if {
- wlc_if_t *next;
+ struct wlc_if *next;
u8 type; /* WLC_IFTYPE_BSS or WLC_IFTYPE_WDS */
u8 index; /* assigned in wl_add_if(), index of the wlif if any,
* not necessarily corresponding to bsscfg._idx or
* AID2PVBMAP(scb).
*/
u8 flags; /* flags for the interface */
- wl_if_t *wlif; /* pointer to wlif */
+ struct wl_if *wlif; /* pointer to wlif */
struct wlc_txq_info *qi; /* pointer to associated tx queue */
union {
struct scb *scb; /* pointer to scb if WLC_IFTYPE_WDS */
@@ -416,7 +401,6 @@ struct wlc_if {
/* flags for the interface */
#define WLC_IF_LINKED 0x02 /* this interface is linked to a wl_if */
-#ifdef WLC_LOW
typedef struct wlc_hwband {
int bandtype; /* WLC_BAND_2G, WLC_BAND_5G */
uint bandunit; /* bandstate[] index */
@@ -433,20 +417,15 @@ typedef struct wlc_hwband {
wlc_phy_t *pi; /* pointer to phy specific information */
bool abgphy_encore;
} wlc_hwband_t;
-#endif /* WLC_LOW */
struct wlc_hw_info {
-#ifdef WLC_SPLIT
- rpc_info_t *rpc; /* Handle to RPC module */
-#endif
- osl_t *osh; /* pointer to os handle */
+ struct osl_info *osh; /* pointer to os handle */
bool _piomode; /* true if pio mode */
- wlc_info_t *wlc;
+ struct wlc_info *wlc;
/* fifo */
- hnddma_t *di[NFIFO]; /* hnddma handles, per fifo */
+ struct hnddma_pub *di[NFIFO]; /* hnddma handles, per fifo */
-#ifdef WLC_LOW
uint unit; /* device instance number */
/* version info */
@@ -497,31 +476,21 @@ struct wlc_hw_info {
bool forcefastclk; /* true if the h/w is forcing the use of fast clk */
bool clk; /* core is out of reset and has clock */
bool sbclk; /* sb has clock */
- bmac_pmq_t *bmac_pmq; /* bmac PM states derived from ucode PMQ */
+ struct bmac_pmq *bmac_pmq; /* bmac PM states derived from ucode PMQ */
bool phyclk; /* phy is out of reset and has clock */
bool dma_lpbk; /* core is in DMA loopback */
-#ifdef BCMSDIO
- void *sdh;
-#endif
bool ucode_loaded; /* true after ucode downloaded */
-#ifdef WLC_LOW_ONLY
- struct wl_timer *wdtimer; /* timer for watchdog routine */
- struct ether_addr orig_etheraddr; /* original hw ethernet address */
- u16 rpc_dngl_agg; /* rpc agg control for dongle */
- u32 mem_required_def; /* memory required to replenish RX DMA ring */
- u32 mem_required_lower; /* memory required with lower RX bound */
- u32 mem_required_least; /* minimum memory requirement to handle RX */
-
-#endif /* WLC_LOW_ONLY */
u8 hw_stf_ss_opmode; /* STF single stream operation mode */
u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
* 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
*/
- u32 antsel_avail; /* put antsel_info_t here if more info is needed */
-#endif /* WLC_LOW */
+ u32 antsel_avail; /*
+ * put struct antsel_info here if more info is
+ * needed
+ */
};
/* TX Queue information
@@ -541,15 +510,12 @@ typedef struct wlc_txq_info {
* Principal common (os-independent) software data structure.
*/
struct wlc_info {
- wlc_pub_t *pub; /* pointer to wlc public state */
- osl_t *osh; /* pointer to os handle */
+ struct wlc_pub *pub; /* pointer to wlc public state */
+ struct osl_info *osh; /* pointer to os handle */
struct wl_info *wl; /* pointer to os-specific private state */
d11regs_t *regs; /* pointer to device registers */
- wlc_hw_info_t *hw; /* HW related state used primarily by BMAC */
-#ifdef WLC_SPLIT
- rpc_info_t *rpc; /* Handle to RPC module */
-#endif
+ struct wlc_hw_info *hw; /* HW related state used primarily by BMAC */
/* clock */
int clkreq_override; /* setting for clkreq for PCIE : Auto, 0, 1 */
@@ -566,10 +532,11 @@ struct wlc_info {
bool clk; /* core is out of reset and has clock */
/* multiband */
- wlccore_t *core; /* pointer to active io core */
- wlcband_t *band; /* pointer to active per-band state */
- wlccore_t *corestate; /* per-core state (one per hw core) */
- wlcband_t *bandstate[MAXBANDS]; /* per-band state (one per phy/radio) */
+ struct wlccore *core; /* pointer to active io core */
+ struct wlcband *band; /* pointer to active per-band state */
+ struct wlccore *corestate; /* per-core state (one per hw core) */
+ /* per-band state (one per phy/radio): */
+ struct wlcband *bandstate[MAXBANDS];
bool war16165; /* PCI slow clock 16165 war flag */
@@ -584,15 +551,9 @@ struct wlc_info {
s8 txpwr_local_max; /* regulatory local txpwr max */
u8 txpwr_local_constraint; /* local power constraint in dB */
-#ifdef WLC_HIGH_ONLY
- rpctx_info_t *rpctx; /* RPC TX module */
- bool reset_bmac_pending; /* bmac reset is in progressing */
- u32 rpc_agg; /* host agg: bit 16-31, bmac agg: bit 0-15 */
- u32 rpc_msglevel; /* host rpc: bit 16-31, bmac rpc: bit 0-15 */
-#endif
- ampdu_info_t *ampdu; /* ampdu module handler */
- antsel_info_t *asi; /* antsel module handler */
+ struct ampdu_info *ampdu; /* ampdu module handler */
+ struct antsel_info *asi; /* antsel module handler */
wlc_cm_info_t *cmi; /* channel manager module handler */
void *btparam; /* bus type specific cookie */
@@ -792,8 +753,6 @@ struct wlc_info {
ac_bitmap_t apsd_trigger_ac; /* Permissible Access Category in which APSD Null
* Trigger frames can be sent
*/
- wlc_ap_info_t *ap;
-
u8 htphy_membership; /* HT PHY membership */
bool _regulatory_domain; /* 802.11d enabled? */
@@ -811,7 +770,7 @@ struct wlc_info {
u16 next_bsscfg_ID;
- wlc_if_t *wlcif_list; /* linked list of wlc_if structs */
+ struct wlc_if *wlcif_list; /* linked list of wlc_if structs */
wlc_txq_info_t *active_queue; /* txq for the currently active transmit context */
u32 mpc_dur; /* total time (ms) in mpc mode except for the
* portion since radio is turned off last time
@@ -825,8 +784,8 @@ struct wlc_info {
/* antsel module specific state */
struct antsel_info {
- wlc_info_t *wlc; /* pointer to main wlc structure */
- wlc_pub_t *pub; /* pointer to public fn */
+ struct wlc_info *wlc; /* pointer to main wlc structure */
+ struct wlc_pub *pub; /* pointer to public fn */
u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
* 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
*/
@@ -842,23 +801,9 @@ struct antsel_info {
#define IS_MBAND_UNLOCKED(wlc) \
((NBANDS(wlc) > 1) && !(wlc)->bandlocked)
-#ifdef WLC_LOW
#define WLC_BAND_PI_RADIO_CHANSPEC wlc_phy_chanspec_get(wlc->band->pi)
-#else
-#define WLC_BAND_PI_RADIO_CHANSPEC (wlc->chanspec)
-#endif
/* sum the individual fifo tx pending packet counts */
-#if defined(WLC_HIGH_ONLY)
-#define TXPKTPENDTOT(wlc) (wlc_rpctx_txpktpend((wlc)->rpctx, 0, true))
-#define TXPKTPENDGET(wlc, fifo) (wlc_rpctx_txpktpend((wlc)->rpctx, (fifo), false))
-#define TXPKTPENDINC(wlc, fifo, val) (wlc_rpctx_txpktpendinc((wlc)->rpctx, (fifo), (val)))
-#define TXPKTPENDDEC(wlc, fifo, val) (wlc_rpctx_txpktpenddec((wlc)->rpctx, (fifo), (val)))
-#define TXPKTPENDCLR(wlc, fifo) (wlc_rpctx_txpktpendclr((wlc)->rpctx, (fifo)))
-#define TXAVAIL(wlc, fifo) (wlc_rpctx_txavail((wlc)->rpctx, (fifo)))
-#define GETNEXTTXP(wlc, _queue) (wlc_rpctx_getnexttxp((wlc)->rpctx, (_queue)))
-
-#else
#define TXPKTPENDTOT(wlc) ((wlc)->core->txpktpend[0] + (wlc)->core->txpktpend[1] + \
(wlc)->core->txpktpend[2] + (wlc)->core->txpktpend[3])
#define TXPKTPENDGET(wlc, fifo) ((wlc)->core->txpktpend[(fifo)])
@@ -868,48 +813,49 @@ struct antsel_info {
#define TXAVAIL(wlc, fifo) (*(wlc)->core->txavail[(fifo)])
#define GETNEXTTXP(wlc, _queue) \
dma_getnexttxp((wlc)->hw->di[(_queue)], HNDDMA_RANGE_TRANSMITTED)
-#endif /* WLC_HIGH_ONLY */
#define WLC_IS_MATCH_SSID(wlc, ssid1, ssid2, len1, len2) \
- ((len1 == len2) && !bcmp(ssid1, ssid2, len1))
-
-/* API shared by both WLC_HIGH and WLC_LOW driver */
-extern void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus);
-extern void wlc_fatal_error(wlc_info_t *wlc);
-extern void wlc_bmac_rpc_watchdog(wlc_info_t *wlc);
-extern void wlc_recv(wlc_info_t *wlc, void *p);
-extern bool wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2);
-extern void wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit,
- s8 txpktpend);
-extern void wlc_txfifo_complete(wlc_info_t *wlc, uint fifo, s8 txpktpend);
-extern void wlc_info_init(wlc_info_t *wlc, int unit);
+ ((len1 == len2) && !memcmp(ssid1, ssid2, len1))
+
+extern void wlc_high_dpc(struct wlc_info *wlc, u32 macintstatus);
+extern void wlc_fatal_error(struct wlc_info *wlc);
+extern void wlc_bmac_rpc_watchdog(struct wlc_info *wlc);
+extern void wlc_recv(struct wlc_info *wlc, struct sk_buff *p);
+extern bool wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2);
+extern void wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p,
+ bool commit, s8 txpktpend);
+extern void wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend);
+extern void wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+ uint prec);
+extern void wlc_info_init(struct wlc_info *wlc, int unit);
extern void wlc_print_txstatus(tx_status_t *txs);
-extern int wlc_xmtfifo_sz_get(wlc_info_t *wlc, uint fifo, uint *blocks);
-extern void wlc_write_template_ram(wlc_info_t *wlc, int offset, int len,
+extern int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks);
+extern void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
void *buf);
-extern void wlc_write_hw_bcntemplates(wlc_info_t *wlc, void *bcn, int len,
+extern void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
bool both);
#if defined(BCMDBG)
-extern void wlc_get_rcmta(wlc_info_t *wlc, int idx, struct ether_addr *addr);
+extern void wlc_get_rcmta(struct wlc_info *wlc, int idx,
+ struct ether_addr *addr);
#endif
-extern void wlc_set_rcmta(wlc_info_t *wlc, int idx,
+extern void wlc_set_rcmta(struct wlc_info *wlc, int idx,
const struct ether_addr *addr);
-extern void wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
+extern void wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
const struct ether_addr *addr);
-extern void wlc_read_tsf(wlc_info_t *wlc, u32 *tsf_l_ptr,
+extern void wlc_read_tsf(struct wlc_info *wlc, u32 *tsf_l_ptr,
u32 *tsf_h_ptr);
-extern void wlc_set_cwmin(wlc_info_t *wlc, u16 newmin);
-extern void wlc_set_cwmax(wlc_info_t *wlc, u16 newmax);
-extern void wlc_fifoerrors(wlc_info_t *wlc);
-extern void wlc_pllreq(wlc_info_t *wlc, bool set, mbool req_bit);
-extern void wlc_reset_bmac_done(wlc_info_t *wlc);
-extern void wlc_protection_upd(wlc_info_t *wlc, uint idx, int val);
-extern void wlc_hwtimer_gptimer_set(wlc_info_t *wlc, uint us);
-extern void wlc_hwtimer_gptimer_abort(wlc_info_t *wlc);
+extern void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin);
+extern void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax);
+extern void wlc_fifoerrors(struct wlc_info *wlc);
+extern void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit);
+extern void wlc_reset_bmac_done(struct wlc_info *wlc);
+extern void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val);
+extern void wlc_hwtimer_gptimer_set(struct wlc_info *wlc, uint us);
+extern void wlc_hwtimer_gptimer_abort(struct wlc_info *wlc);
#if defined(BCMDBG)
extern void wlc_print_rxh(d11rxhdr_t *rxh);
-extern void wlc_print_hdrs(wlc_info_t *wlc, const char *prefix, u8 *frame,
+extern void wlc_print_hdrs(struct wlc_info *wlc, const char *prefix, u8 *frame,
d11txh_t *txh, d11rxhdr_t *rxh, uint len);
extern void wlc_print_txdesc(d11txh_t *txh);
#endif
@@ -917,124 +863,126 @@ extern void wlc_print_txdesc(d11txh_t *txh);
extern void wlc_print_dot11_mac_hdr(u8 *buf, int len);
#endif
-#ifdef WLC_LOW
-extern void wlc_setxband(wlc_hw_info_t *wlc_hw, uint bandunit);
-extern void wlc_coredisable(wlc_hw_info_t *wlc_hw);
-#endif
+extern void wlc_setxband(struct wlc_hw_info *wlc_hw, uint bandunit);
+extern void wlc_coredisable(struct wlc_hw_info *wlc_hw);
-extern bool wlc_valid_rate(wlc_info_t *wlc, ratespec_t rate, int band,
+extern bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rate, int band,
bool verbose);
-extern void wlc_ap_upd(wlc_info_t *wlc);
+extern void wlc_ap_upd(struct wlc_info *wlc);
/* helper functions */
-extern void wlc_shm_ssid_upd(wlc_info_t *wlc, wlc_bsscfg_t *cfg);
-extern int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config);
+extern void wlc_shm_ssid_upd(struct wlc_info *wlc, wlc_bsscfg_t *cfg);
+extern int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config);
-extern void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc);
-extern void wlc_mac_bcn_promisc(wlc_info_t *wlc);
-extern void wlc_mac_promisc(wlc_info_t *wlc);
-extern void wlc_txflowcontrol(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
+extern void wlc_mac_bcn_promisc(struct wlc_info *wlc);
+extern void wlc_mac_promisc(struct wlc_info *wlc);
+extern void wlc_txflowcontrol(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
int prio);
-extern void wlc_txflowcontrol_override(wlc_info_t *wlc, wlc_txq_info_t *qi,
+extern void wlc_txflowcontrol_override(struct wlc_info *wlc, wlc_txq_info_t *qi,
bool on, uint override);
-extern bool wlc_txflowcontrol_prio_isset(wlc_info_t *wlc, wlc_txq_info_t *qi,
- int prio);
-extern void wlc_send_q(wlc_info_t *wlc, wlc_txq_info_t *qi);
-extern int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifo);
+extern bool wlc_txflowcontrol_prio_isset(struct wlc_info *wlc,
+ wlc_txq_info_t *qi, int prio);
+extern void wlc_send_q(struct wlc_info *wlc, wlc_txq_info_t *qi);
+extern int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifo);
-extern u16 wlc_calc_lsig_len(wlc_info_t *wlc, ratespec_t ratespec,
+extern u16 wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec,
uint mac_len);
-extern ratespec_t wlc_rspec_to_rts_rspec(wlc_info_t *wlc, ratespec_t rspec,
+extern ratespec_t wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec,
bool use_rspec, u16 mimo_ctlchbw);
-extern u16 wlc_compute_rtscts_dur(wlc_info_t *wlc, bool cts_only,
+extern u16 wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only,
ratespec_t rts_rate, ratespec_t frame_rate,
u8 rts_preamble_type,
u8 frame_preamble_type, uint frame_len,
bool ba);
-extern void wlc_tbtt(wlc_info_t *wlc, d11regs_t *regs);
+extern void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs);
#if defined(BCMDBG)
-extern void wlc_dump_ie(wlc_info_t *wlc, bcm_tlv_t *ie, struct bcmstrbuf *b);
+extern void wlc_dump_ie(struct wlc_info *wlc, bcm_tlv_t *ie,
+ struct bcmstrbuf *b);
#endif
-extern bool wlc_ps_check(wlc_info_t *wlc);
-extern void wlc_reprate_init(wlc_info_t *wlc);
+extern bool wlc_ps_check(struct wlc_info *wlc);
+extern void wlc_reprate_init(struct wlc_info *wlc);
extern void wlc_bsscfg_reprate_init(wlc_bsscfg_t *bsscfg);
extern void wlc_uint64_sub(u32 *a_high, u32 *a_low, u32 b_high,
u32 b_low);
extern u32 wlc_calc_tbtt_offset(u32 bi, u32 tsf_h, u32 tsf_l);
/* Shared memory access */
-extern void wlc_write_shm(wlc_info_t *wlc, uint offset, u16 v);
-extern u16 wlc_read_shm(wlc_info_t *wlc, uint offset);
-extern void wlc_set_shm(wlc_info_t *wlc, uint offset, u16 v, int len);
-extern void wlc_copyto_shm(wlc_info_t *wlc, uint offset, const void *buf,
+extern void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v);
+extern u16 wlc_read_shm(struct wlc_info *wlc, uint offset);
+extern void wlc_set_shm(struct wlc_info *wlc, uint offset, u16 v, int len);
+extern void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf,
int len);
-extern void wlc_copyfrom_shm(wlc_info_t *wlc, uint offset, void *buf, int len);
+extern void wlc_copyfrom_shm(struct wlc_info *wlc, uint offset, void *buf,
+ int len);
-extern void wlc_update_beacon(wlc_info_t *wlc);
-extern void wlc_bss_update_beacon(wlc_info_t *wlc, struct wlc_bsscfg *bsscfg);
+extern void wlc_update_beacon(struct wlc_info *wlc);
+extern void wlc_bss_update_beacon(struct wlc_info *wlc,
+ struct wlc_bsscfg *bsscfg);
-extern void wlc_update_probe_resp(wlc_info_t *wlc, bool suspend);
-extern void wlc_bss_update_probe_resp(wlc_info_t *wlc, wlc_bsscfg_t *cfg,
+extern void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend);
+extern void wlc_bss_update_probe_resp(struct wlc_info *wlc, wlc_bsscfg_t *cfg,
bool suspend);
-extern bool wlc_ismpc(wlc_info_t *wlc);
-extern bool wlc_is_non_delay_mpc(wlc_info_t *wlc);
-extern void wlc_radio_mpc_upd(wlc_info_t *wlc);
-extern bool wlc_prec_enq(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec);
-extern bool wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt,
- int prec, bool head);
-extern u16 wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec);
-extern void wlc_compute_plcp(wlc_info_t *wlc, ratespec_t rate, uint length,
+extern bool wlc_ismpc(struct wlc_info *wlc);
+extern bool wlc_is_non_delay_mpc(struct wlc_info *wlc);
+extern void wlc_radio_mpc_upd(struct wlc_info *wlc);
+extern bool wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt,
+ int prec);
+extern bool wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q,
+ struct sk_buff *pkt, int prec, bool head);
+extern u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec);
+extern void wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rate, uint length,
u8 *plcp);
-extern uint wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec,
+extern uint wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec,
u8 preamble_type, uint mac_len);
-extern void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec);
+extern void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
-extern bool wlc_timers_init(wlc_info_t *wlc, int unit);
+extern bool wlc_timers_init(struct wlc_info *wlc, int unit);
extern const bcm_iovar_t wlc_iovars[];
extern int wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
const char *name, void *params, uint p_len, void *arg,
- int len, int val_size, wlc_if_t *wlcif);
+ int len, int val_size, struct wlc_if *wlcif);
#if defined(BCMDBG)
-extern void wlc_print_ies(wlc_info_t *wlc, u8 *ies, uint ies_len);
+extern void wlc_print_ies(struct wlc_info *wlc, u8 *ies, uint ies_len);
#endif
-extern int wlc_set_nmode(wlc_info_t *wlc, s32 nmode);
-extern void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode);
-extern void wlc_mimops_action_ht_send(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg,
- u8 mimops_mode);
+extern int wlc_set_nmode(struct wlc_info *wlc, s32 nmode);
+extern void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode);
+extern void wlc_mimops_action_ht_send(struct wlc_info *wlc,
+ wlc_bsscfg_t *bsscfg, u8 mimops_mode);
-extern void wlc_switch_shortslot(wlc_info_t *wlc, bool shortslot);
+extern void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot);
extern void wlc_set_bssid(wlc_bsscfg_t *cfg);
extern void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend);
-extern void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg,
- bool suspend);
-extern void wlc_set_ratetable(wlc_info_t *wlc);
+extern void wlc_set_ratetable(struct wlc_info *wlc);
extern int wlc_set_mac(wlc_bsscfg_t *cfg);
-extern void wlc_beacon_phytxctl_txant_upd(wlc_info_t *wlc,
+extern void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc,
ratespec_t bcn_rate);
-extern void wlc_mod_prb_rsp_rate_table(wlc_info_t *wlc, uint frame_len);
-extern ratespec_t wlc_lowest_basic_rspec(wlc_info_t *wlc, wlc_rateset_t *rs);
-extern u16 wlc_compute_bcntsfoff(wlc_info_t *wlc, ratespec_t rspec,
+extern void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len);
+extern ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc,
+ wlc_rateset_t *rs);
+extern u16 wlc_compute_bcntsfoff(struct wlc_info *wlc, ratespec_t rspec,
bool short_preamble, bool phydelay);
-extern void wlc_radio_disable(wlc_info_t *wlc);
-extern void wlc_bcn_li_upd(wlc_info_t *wlc);
-
-extern int wlc_get_revision_info(wlc_info_t *wlc, void *buf, uint len);
-extern void wlc_out(wlc_info_t *wlc);
-extern void wlc_set_home_chanspec(wlc_info_t *wlc, chanspec_t chanspec);
-extern void wlc_watchdog_upd(wlc_info_t *wlc, bool tbtt);
-extern bool wlc_ps_allowed(wlc_info_t *wlc);
-extern bool wlc_stay_awake(wlc_info_t *wlc);
-extern void wlc_wme_initparams_sta(wlc_info_t *wlc, wme_param_ie_t *pe);
-
-extern void wlc_bss_list_free(wlc_info_t *wlc, wlc_bss_list_t *bss_list);
+extern void wlc_radio_disable(struct wlc_info *wlc);
+extern void wlc_bcn_li_upd(struct wlc_info *wlc);
+
+extern int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len);
+extern void wlc_out(struct wlc_info *wlc);
+extern void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
+extern void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt);
+extern bool wlc_ps_allowed(struct wlc_info *wlc);
+extern bool wlc_stay_awake(struct wlc_info *wlc);
+extern void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe);
+
+extern void wlc_bss_list_free(struct wlc_info *wlc, wlc_bss_list_t *bss_list);
+extern void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode);
#endif /* _wlc_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_phy_shim.c b/drivers/staging/brcm80211/sys/wlc_phy_shim.c
index bf8e2e1a15f6..8bd4ede4c92a 100644
--- a/drivers/staging/brcm80211/sys/wlc_phy_shim.c
+++ b/drivers/staging/brcm80211/sys/wlc_phy_shim.c
@@ -24,9 +24,10 @@
#include <linux/kernel.h>
#include <bcmdefs.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
-#include <bcmutils.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <osl.h>
+#include <bcmutils.h>
#include <proto/802.11.h>
#include <bcmwifi.h>
@@ -46,6 +47,7 @@
#include <wlc_channel.h>
#include <bcmsrom.h>
#include <wlc_key.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
@@ -53,21 +55,23 @@
#include <wlc_phy_shim.h>
#include <wlc_phy_hal.h>
#include <wl_export.h>
+#include <wl_dbg.h>
/* PHY SHIM module specific state */
struct wlc_phy_shim_info {
- wlc_hw_info_t *wlc_hw; /* pointer to main wlc_hw structure */
+ struct wlc_hw_info *wlc_hw; /* pointer to main wlc_hw structure */
void *wlc; /* pointer to main wlc structure */
void *wl; /* pointer to os-specific private state */
};
-wlc_phy_shim_info_t *wlc_phy_shim_attach(wlc_hw_info_t *wlc_hw,
+wlc_phy_shim_info_t *wlc_phy_shim_attach(struct wlc_hw_info *wlc_hw,
void *wl, void *wlc) {
wlc_phy_shim_info_t *physhim = NULL;
physhim = kzalloc(sizeof(wlc_phy_shim_info_t), GFP_ATOMIC);
if (!physhim) {
- WL_ERROR(("wl%d: wlc_phy_shim_attach: out of mem\n", wlc_hw->unit));
+ WL_ERROR("wl%d: wlc_phy_shim_attach: out of mem\n",
+ wlc_hw->unit);
return NULL;
}
physhim->wlc_hw = wlc_hw;
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index a6a8c33483c9..aff413001b70 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -251,7 +251,7 @@ typedef int (*iovar_fn_t) (void *handle, const bcm_iovar_t *vi,
* Public portion of "common" os-independent state structure.
* The wlc handle points at this.
*/
-typedef struct wlc_pub {
+struct wlc_pub {
void *wlc;
struct ieee80211_hw *ieee_hw;
@@ -260,7 +260,7 @@ typedef struct wlc_pub {
uint mac80211_state;
uint unit; /* device instance number */
uint corerev; /* core revision */
- osl_t *osh; /* pointer to os handle */
+ struct osl_info *osh; /* pointer to os handle */
si_t *sih; /* SB handle (cookie for siutils calls) */
char *vars; /* "environment" name=value */
bool up; /* interface up and running */
@@ -318,9 +318,6 @@ typedef struct wlc_pub {
* is implemented properly in osl of that port
* when it enables this Power Save feature.
*/
-#ifdef BCMSDIO
- uint sdiod_drive_strength; /* SDIO drive strength */
-#endif /* BCMSDIO */
u16 boardrev; /* version # of particular board */
u8 sromrev; /* version # of the srom */
@@ -333,7 +330,7 @@ typedef struct wlc_pub {
bool _lmacproto; /* lmac protocol module included and enabled */
bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
bool _ampdumac; /* mac assist ampdu enabled or not */
-} wlc_pub_t;
+};
/* wl_monitor rx status per packet */
typedef struct wl_rxsts {
@@ -437,17 +434,13 @@ struct wlc_if;
#define EDCF_ENAB(pub) (WME_ENAB(pub))
#define QOS_ENAB(pub) (WME_ENAB(pub) || N_ENAB(pub))
-#define MONITOR_ENAB(wlc) (bcmspace && (wlc)->monitor)
-
-#define PROMISC_ENAB(wlc) (bcmspace && (wlc)->promisc)
-
-extern void wlc_pkttag_info_move(wlc_pub_t *pub, void *pkt_from, void *pkt_to);
+#define MONITOR_ENAB(wlc) ((wlc)->monitor)
-#define WLPKTTAGSCB(p) (WLPKTTAG(p)->_scb)
+#define PROMISC_ENAB(wlc) ((wlc)->promisc)
#define WLC_PREC_COUNT 16 /* Max precedence level implemented */
-/* pri is PKTPRIO encoded in the packet. This maps the Packet priority to
+/* pri is priority encoded in the packet. This maps the Packet priority to
* enqueue precedence as defined in wlc_prec_map
*/
extern const u8 wlc_prio2prec_map[];
@@ -497,8 +490,8 @@ extern const u8 wme_fifo2ac[];
/* common functions for every port */
extern void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit,
- bool piomode, osl_t *osh, void *regsva, uint bustype,
- void *btparam, uint *perr);
+ bool piomode, struct osl_info *osh, void *regsva,
+ uint bustype, void *btparam, uint *perr);
extern uint wlc_detach(struct wlc_info *wlc);
extern int wlc_up(struct wlc_info *wlc);
extern uint wlc_down(struct wlc_info *wlc);
@@ -517,8 +510,10 @@ extern void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask);
extern bool wlc_intrsupd(struct wlc_info *wlc);
extern bool wlc_isr(struct wlc_info *wlc, bool *wantdpc);
extern bool wlc_dpc(struct wlc_info *wlc, bool bounded);
-extern bool wlc_send80211_raw(struct wlc_info *wlc, wlc_if_t *wlcif, void *p,
- uint ac);
+extern bool wlc_send80211_raw(struct wlc_info *wlc, struct wlc_if *wlcif,
+ void *p, uint ac);
+extern bool wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
extern int wlc_iovar_op(struct wlc_info *wlc, const char *name, void *params,
int p_len, void *arg, int len, bool set,
struct wlc_if *wlcif);
@@ -527,8 +522,13 @@ extern int wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
/* helper functions */
extern void wlc_statsupd(struct wlc_info *wlc);
extern int wlc_get_header_len(void);
+extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
+extern void wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
+ const struct ether_addr *addr);
+extern void wlc_wme_setparams(struct wlc_info *wlc, u16 aci, void *arg,
+ bool suspend);
-extern wlc_pub_t *wlc_pub(void *wlc);
+extern struct wlc_pub *wlc_pub(void *wlc);
/* common functions for every port */
extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
@@ -554,13 +554,15 @@ extern void wlc_scb_ratesel_init_all(struct wlc_info *wlc);
/* ioctl */
extern int wlc_iovar_gets8(struct wlc_info *wlc, const char *name,
s8 *arg);
-extern int wlc_iovar_check(wlc_pub_t *pub, const bcm_iovar_t *vi, void *arg,
+extern int wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi,
+ void *arg,
int len, bool set);
-extern int wlc_module_register(wlc_pub_t *pub, const bcm_iovar_t *iovars,
+extern int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
const char *name, void *hdl, iovar_fn_t iovar_fn,
watchdog_fn_t watchdog_fn, down_fn_t down_fn);
-extern int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl);
+extern int wlc_module_unregister(struct wlc_pub *pub, const char *name,
+ void *hdl);
extern void wlc_event_if(struct wlc_info *wlc, struct wlc_bsscfg *cfg,
wlc_event_t *e, const struct ether_addr *addr);
extern void wlc_suspend_mac_and_wait(struct wlc_info *wlc);
@@ -568,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
+extern void wlc_scan_start(struct wlc_info *wlc);
+extern void wlc_scan_stop(struct wlc_info *wlc);
static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
uint *arg)
@@ -617,10 +621,6 @@ extern void wlc_pmkid_event(struct wlc_bsscfg *cfg);
#define BAND_2G_NAME "2.4G"
#define BAND_5G_NAME "5G"
-#if defined(BCMSDIO) || defined(WLC_HIGH_ONLY)
-void wlc_device_removed(void *arg);
-#endif
-
/* BMAC RPC: 7 u32 params: pkttotlen, fifo, commit, fid, txpktpend, pktflag, rpc_id */
#define WLC_RPCTX_PARAMS 32
diff --git a/drivers/staging/brcm80211/sys/wlc_rate.c b/drivers/staging/brcm80211/sys/wlc_rate.c
index d2d72568756d..ab7d0bed3c0a 100644
--- a/drivers/staging/brcm80211/sys/wlc_rate.c
+++ b/drivers/staging/brcm80211/sys/wlc_rate.c
@@ -17,12 +17,14 @@
#include <bcmdefs.h>
#include <wlc_cfg.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmendian.h>
#include <wlioctl.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <proto/802.11.h>
#include <d11.h>
#include <wlc_rate.h>
@@ -297,7 +299,7 @@ wlc_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
uint count;
uint i;
- bzero(rateset, sizeof(rateset));
+ memset(rateset, 0, sizeof(rateset));
count = rs->count;
for (i = 0; i < count; i++) {
diff --git a/drivers/staging/brcm80211/sys/wlc_rpc.h b/drivers/staging/brcm80211/sys/wlc_rpc.h
deleted file mode 100644
index db39645ccbdc..000000000000
--- a/drivers/staging/brcm80211/sys/wlc_rpc.h
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _WLC_RPC_H_
-#define _WLC_RPC_H_
-
-#include <wlc_types.h>
-
-/* RPC IDs, reordering is OK. This needs to be in sync with RPC_ID_TABLE below */
-typedef enum {
- WLRPC_NULL_ID = 0,
- WLRPC_WLC_REG_READ_ID,
- WLRPC_WLC_REG_WRITE_ID,
- WLRPC_WLC_MHF_SET_ID,
- WLRPC_WLC_MHF_GET_ID,
- WLRPC_WLC_BMAC_UP_PREP_ID,
- WLRPC_WLC_BMAC_UP_FINISH_ID,
- WLRPC_WLC_BMAC_DOWN_PREP_ID,
- WLRPC_WLC_BMAC_DOWN_FINISH_ID,
- WLRPC_WLC_BMAC_WRITE_HW_BCNTEMPLATES_ID,
- WLRPC_WLC_BMAC_RESET_ID,
- WLRPC_WLC_DNGL_REBOOT_ID,
- WLRPC_WLC_BMAC_RPC_TXQ_WM_SET_ID,
- WLRPC_WLC_BMAC_RPC_TXQ_WM_GET_ID,
- WLRPC_WLC_BMAC_RPC_AGG_SET_ID,
- WLRPC_WLC_BMAC_RPC_MSGLEVEL_SET_ID,
- WLRPC_WLC_BMAC_RPC_AGG_LIMIT_SET_ID,
- WLRPC_WLC_BMAC_RPC_AGG_LIMIT_GET_ID,
- WLRPC_WLC_BMAC_INIT_ID,
- WLRPC_WLC_BMAC_SET_CWMIN_ID,
- WLRPC_WLC_BMAC_MUTE_ID,
- WLRPC_WLC_PHY_DOIOVAR_ID,
- WLRPC_WLC_PHY_HOLD_UPD_ID,
- WLRPC_WLC_PHY_MUTE_UPD_ID,
- WLRPC_WLC_PHY_CLEAR_TSSI_ID,
- WLRPC_WLC_PHY_ANT_RXDIV_GET_ID,
- WLRPC_WLC_PHY_ANT_RXDIV_SET_ID,
- WLRPC_WLC_PHY_PREAMBLE_SET_ID,
- WLRPC_WLC_PHY_FREQTRACK_END_ID,
- WLRPC_WLC_PHY_FREQTRACK_START_ID,
- WLRPC_WLC_PHY_IOCTL_ID,
- WLRPC_WLC_PHY_NOISE_SAMPLE_REQUEST_ID,
- WLRPC_WLC_PHY_CAL_PERICAL_ID,
- WLRPC_WLC_PHY_TXPOWER_GET_ID,
- WLRPC_WLC_PHY_TXPOWER_SET_ID,
- WLRPC_WLC_PHY_TXPOWER_SROMLIMIT_ID,
- WLRPC_WLC_PHY_RADAR_DETECT_ENABLE_ID,
- WLRPC_WLC_PHY_RADAR_DETECT_RUN_ID,
- WLRPC_WLC_PHY_TEST_ISON_ID,
- WLRPC_WLC_BMAC_COPYFROM_OBJMEM_ID,
- WLRPC_WLC_BMAC_COPYTO_OBJMEM_ID,
- WLRPC_WLC_ENABLE_MAC_ID,
- WLRPC_WLC_MCTRL_ID,
- WLRPC_WLC_CORERESET_ID,
- WLRPC_WLC_BMAC_READ_SHM_ID,
- WLRPC_WLC_BMAC_READ_TSF_ID,
- WLRPC_WLC_BMAC_SET_ADDRMATCH_ID,
- WLRPC_WLC_BMAC_SET_CWMAX_ID,
- WLRPC_WLC_BMAC_SET_RCMTA_ID,
- WLRPC_WLC_BMAC_SET_SHM_ID,
- WLRPC_WLC_SUSPEND_MAC_AND_WAIT_ID,
- WLRPC_WLC_BMAC_WRITE_SHM_ID,
- WLRPC_WLC_BMAC_WRITE_TEMPLATE_RAM_ID,
- WLRPC_WLC_TX_FIFO_SUSPEND_ID,
- WLRPC_WLC_TX_FIFO_RESUME_ID,
- WLRPC_WLC_TX_FIFO_SUSPENDED_ID,
- WLRPC_WLC_HW_ETHERADDR_ID,
- WLRPC_WLC_SET_HW_ETHERADDR_ID,
- WLRPC_WLC_BMAC_CHANSPEC_SET_ID,
- WLRPC_WLC_BMAC_TXANT_SET_ID,
- WLRPC_WLC_BMAC_ANTSEL_TYPE_SET_ID,
- WLRPC_WLC_BMAC_TXFIFO_ID,
- WLRPC_WLC_RADIO_READ_HWDISABLED_ID,
- WLRPC_WLC_RM_CCA_MEASURE_ID,
- WLRPC_WLC_SET_SHORTSLOT_ID,
- WLRPC_WLC_WAIT_FOR_WAKE_ID,
- WLRPC_WLC_PHY_TXPOWER_GET_CURRENT_ID,
- WLRPC_WLC_PHY_TXPOWER_HW_CTRL_GET_ID,
- WLRPC_WLC_PHY_TXPOWER_HW_CTRL_SET_ID,
- WLRPC_WLC_PHY_BSSINIT_ID,
- WLRPC_WLC_BAND_STF_SS_SET_ID,
- WLRPC_WLC_PHY_BAND_FIRST_CHANSPEC_ID,
- WLRPC_WLC_PHY_TXPOWER_LIMIT_SET_ID,
- WLRPC_WLC_PHY_BAND_CHANNELS_ID,
- WLRPC_WLC_BMAC_REVINFO_GET_ID,
- WLRPC_WLC_BMAC_STATE_GET_ID,
- WLRPC_WLC_BMAC_XMTFIFO_SZ_GET_ID,
- WLRPC_WLC_BMAC_XMTFIFO_SZ_SET_ID,
- WLRPC_WLC_BMAC_VALIDATE_CHIP_ACCESS_ID,
- WLRPC_WLC_RM_CCA_COMPLETE_ID,
- WLRPC_WLC_RECV_ID,
- WLRPC_WLC_DOTXSTATUS_ID,
- WLRPC_WLC_HIGH_DPC_ID,
- WLRPC_WLC_FATAL_ERROR_ID,
- WLRPC_WLC_PHY_SET_CHANNEL_14_WIDE_FILTER_ID,
- WLRPC_WLC_PHY_NOISE_AVG_ID,
- WLRPC_WLC_PHYCHAIN_INIT_ID,
- WLRPC_WLC_PHYCHAIN_SET_ID,
- WLRPC_WLC_PHYCHAIN_GET_ID,
- WLRPC_WLC_PHY_TKIP_RIFS_WAR_ID,
- WLRPC_WLC_BMAC_COPYFROM_VARS_ID,
- WLRPC_WLC_BMAC_RETRYLIMIT_UPD_ID,
- WLRPC_WLC_BMAC_BTC_MODE_SET_ID,
- WLRPC_WLC_BMAC_BTC_MODE_GET_ID,
- WLRPC_WLC_BMAC_BTC_WIRE_SET_ID,
- WLRPC_WLC_BMAC_BTC_WIRE_GET_ID,
- WLRPC_WLC_BMAC_SET_NORESET_ID,
- WLRPC_WLC_AMPDU_TXSTATUS_COMPLETE_ID,
- WLRPC_WLC_BMAC_FIFOERRORS_ID,
- WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MIN_ID,
- WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MAX_ID,
- WLRPC_WLC_NOISE_CB_ID,
- WLRPC_WLC_BMAC_LED_HW_DEINIT_ID,
- WLRPC_WLC_BMAC_LED_HW_MASK_INIT_ID,
- WLRPC_WLC_PLLREQ_ID,
- WLRPC_WLC_BMAC_TACLEAR_ID,
- WLRPC_WLC_BMAC_SET_CLK_ID,
- WLRPC_WLC_PHY_OFDM_RATESET_WAR_ID,
- WLRPC_WLC_PHY_BF_PREEMPT_ENABLE_ID,
- WLRPC_WLC_BMAC_DOIOVARS_ID,
- WLRPC_WLC_BMAC_DUMP_ID,
- WLRPC_WLC_CISWRITE_ID,
- WLRPC_WLC_CISDUMP_ID,
- WLRPC_WLC_UPDATE_PHY_MODE_ID,
- WLRPC_WLC_RESET_BMAC_DONE_ID,
- WLRPC_WLC_BMAC_LED_BLINK_EVENT_ID,
- WLRPC_WLC_BMAC_LED_SET_ID,
- WLRPC_WLC_BMAC_LED_BLINK_ID,
- WLRPC_WLC_BMAC_LED_ID,
- WLRPC_WLC_BMAC_RATE_SHM_OFFSET_ID,
- WLRPC_SI_ISCORE_UP_ID,
- WLRPC_WLC_BMAC_PS_SWITCH_ID,
- WLRPC_WLC_PHY_STF_SSMODE_GET_ID,
- WLRPC_WLC_BMAC_DEBUG_ID,
- WLRPC_WLC_EXTLOG_MSG_ID,
- WLRPC_WLC_EXTLOG_CFG_ID,
- WLRPC_BCM_ASSERT_LOG_ID,
- WLRPC_BCM_ASSERT_TYPE_ID,
- WLRPC_WLC_BMAC_SET_PHYCAL_CACHE_FLAG_ID,
- WLRPC_WLC_BMAC_GET_PHYCAL_CACHE_FLAG_ID,
- WLRPC_WLC_PHY_CAL_CACHE_INIT_ID,
- WLRPC_WLC_PHY_CAL_CACHE_DEINIT_ID,
- WLRPC_WLC_BMAC_HW_UP_ID,
- WLRPC_WLC_BMAC_SET_TXPWR_PERCENT_ID,
- WLRPC_WLC_PHYCHAIN_ACTIVE_GET_ID,
- WLRPC_WLC_BMAC_BLINK_SYNC_ID,
- WLRPC_WLC_BMAC_UCODE_DBGSEL_SET_ID,
- WLRPC_WLC_BMAC_UCODE_DBGSEL_GET_ID,
- WLRPC_WLC_PHY_RADAR_DETECT_MODE_SET_ID,
- WLRPC_WLC_PHY_ACIM_NOISEM_RESET_NPHY_ID,
- WLRPC_WLC_PHY_INTERFER_SET_NPHY_ID,
- WLRPC_WLC_BMAC_IFSCTL_EDCRS_SET_ID,
- WLRPC_WLC_PKTENGTX,
- WLRPC_WLC_BMAC_SET_DEAF,
- WLRPC_WLC_BMAC_CLEAR_DEAF,
- WLRPC_WLC_BMAC_BTC_FLAGS_SET_ID,
- WLRPC_WLC_BMAC_BTC_FLAGS_GET_ID,
- WLRPC_WLC_BMAC_SET_RCMTA_TYPE_ID,
- WLRPC_WLC_BMAC_BTC_FLAGS_UPD_ID,
- WLRPC_WLC_BMAC_BTC_STUCKWAR_ID,
- WLRPC_WLC_BMAC_CCA_STATS_READ_ID,
- WLRPC_WLC_BMAC_ANTSEL_SET_ID,
- WLRPC_WLC_BMAC_SET_UCODE_LOADED,
- WLRPC_WLC_PHY_LDPC_SET_ID,
-
- WLRPC_LAST
-} wlc_rpc_id_t;
-
-#if defined(BCMDBG) | 0
-struct name_entry {
- int id;
- char *name;
-};
-
-#define NAME_ENTRY(x) {x, #x}
-
-#define RPC_ID_TABLE { \
- NAME_ENTRY(WLRPC_WLC_REG_READ_ID), \
- NAME_ENTRY(WLRPC_WLC_REG_WRITE_ID), \
- NAME_ENTRY(WLRPC_WLC_MHF_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_MHF_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_UP_PREP_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_UP_FINISH_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_DOWN_PREP_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_DOWN_FINISH_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_HW_BCNTEMPLATES_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RESET_ID), \
- NAME_ENTRY(WLRPC_WLC_DNGL_REBOOT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_TXQ_WM_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_TXQ_WM_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_MSGLEVEL_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_LIMIT_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_LIMIT_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_INIT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_CWMIN_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_MUTE_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_DOIOVAR_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_HOLD_UPD_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_MUTE_UPD_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_CLEAR_TSSI_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_ANT_RXDIV_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_ANT_RXDIV_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_PREAMBLE_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_FREQTRACK_END_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_FREQTRACK_START_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_IOCTL_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_NOISE_SAMPLE_REQUEST_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_CAL_PERICAL_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_SROMLIMIT_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_ENABLE_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_RUN_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TEST_ISON_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_COPYFROM_OBJMEM_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_COPYTO_OBJMEM_ID), \
- NAME_ENTRY(WLRPC_WLC_ENABLE_MAC_ID), \
- NAME_ENTRY(WLRPC_WLC_MCTRL_ID), \
- NAME_ENTRY(WLRPC_WLC_CORERESET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_READ_SHM_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_READ_TSF_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_ADDRMATCH_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_CWMAX_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_RCMTA_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_SHM_ID), \
- NAME_ENTRY(WLRPC_WLC_SUSPEND_MAC_AND_WAIT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_SHM_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_TEMPLATE_RAM_ID), \
- NAME_ENTRY(WLRPC_WLC_TX_FIFO_SUSPEND_ID), \
- NAME_ENTRY(WLRPC_WLC_TX_FIFO_RESUME_ID), \
- NAME_ENTRY(WLRPC_WLC_TX_FIFO_SUSPENDED_ID), \
- NAME_ENTRY(WLRPC_WLC_HW_ETHERADDR_ID), \
- NAME_ENTRY(WLRPC_WLC_SET_HW_ETHERADDR_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_CHANSPEC_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_TXANT_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_ANTSEL_TYPE_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_TXFIFO_ID), \
- NAME_ENTRY(WLRPC_WLC_RADIO_READ_HWDISABLED_ID), \
- NAME_ENTRY(WLRPC_WLC_RM_CCA_MEASURE_ID), \
- NAME_ENTRY(WLRPC_WLC_SET_SHORTSLOT_ID), \
- NAME_ENTRY(WLRPC_WLC_WAIT_FOR_WAKE_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_CURRENT_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_HW_CTRL_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_HW_CTRL_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_BSSINIT_ID), \
- NAME_ENTRY(WLRPC_WLC_BAND_STF_SS_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_BAND_FIRST_CHANSPEC_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_LIMIT_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_BAND_CHANNELS_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_REVINFO_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_STATE_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_XMTFIFO_SZ_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_XMTFIFO_SZ_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_VALIDATE_CHIP_ACCESS_ID), \
- NAME_ENTRY(WLRPC_WLC_RM_CCA_COMPLETE_ID), \
- NAME_ENTRY(WLRPC_WLC_RECV_ID), \
- NAME_ENTRY(WLRPC_WLC_DOTXSTATUS_ID), \
- NAME_ENTRY(WLRPC_WLC_HIGH_DPC_ID), \
- NAME_ENTRY(WLRPC_WLC_FATAL_ERROR_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_SET_CHANNEL_14_WIDE_FILTER_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_NOISE_AVG_ID), \
- NAME_ENTRY(WLRPC_WLC_PHYCHAIN_INIT_ID), \
- NAME_ENTRY(WLRPC_WLC_PHYCHAIN_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHYCHAIN_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TKIP_RIFS_WAR_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_COPYFROM_VARS_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RETRYLIMIT_UPD_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_MODE_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_MODE_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_WIRE_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_WIRE_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_NORESET_ID), \
- NAME_ENTRY(WLRPC_WLC_AMPDU_TXSTATUS_COMPLETE_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_FIFOERRORS_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MIN_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MAX_ID), \
- NAME_ENTRY(WLRPC_WLC_NOISE_CB_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_HW_DEINIT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_HW_MASK_INIT_ID), \
- NAME_ENTRY(WLRPC_WLC_PLLREQ_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_TACLEAR_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_CLK_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_OFDM_RATESET_WAR_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_BF_PREEMPT_ENABLE_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_DOIOVARS_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_DUMP_ID), \
- NAME_ENTRY(WLRPC_WLC_CISWRITE_ID), \
- NAME_ENTRY(WLRPC_WLC_CISDUMP_ID), \
- NAME_ENTRY(WLRPC_WLC_UPDATE_PHY_MODE_ID), \
- NAME_ENTRY(WLRPC_WLC_RESET_BMAC_DONE_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_BLINK_EVENT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_BLINK_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_LED_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_RATE_SHM_OFFSET_ID), \
- NAME_ENTRY(WLRPC_SI_ISCORE_UP_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_PS_SWITCH_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_STF_SSMODE_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_DEBUG_ID), \
- NAME_ENTRY(WLRPC_WLC_EXTLOG_MSG_ID), \
- NAME_ENTRY(WLRPC_WLC_EXTLOG_CFG_ID), \
- NAME_ENTRY(WLRPC_BCM_ASSERT_LOG_ID), \
- NAME_ENTRY(WLRPC_BCM_ASSERT_TYPE_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_PHYCAL_CACHE_FLAG_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_GET_PHYCAL_CACHE_FLAG_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_CAL_CACHE_INIT_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_CAL_CACHE_DEINIT_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_HW_UP_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_TXPWR_PERCENT_ID), \
- NAME_ENTRY(WLRPC_WLC_PHYCHAIN_ACTIVE_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BLINK_SYNC_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_UCODE_DBGSEL_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_UCODE_DBGSEL_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_MODE_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_ACIM_NOISEM_RESET_NPHY_ID), \
- NAME_ENTRY(WLRPC_WLC_PHY_INTERFER_SET_NPHY_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_IFSCTL_EDCRS_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_PKTENGTX), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_DEAF), \
- NAME_ENTRY(WLRPC_WLC_BMAC_CLEAR_DEAF), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_FLAGS_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_BTC_FLAGS_GET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_RCMTA_TYPE_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_CCA_STATS_READ_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_ANTSEL_SET_ID), \
- NAME_ENTRY(WLRPC_WLC_BMAC_SET_UCODE_LOADED), \
- NAME_ENTRY(WLRPC_WLC_PHY_LDPC_SET_ID), \
- {0, NULL} \
- }
-
-static __inline char *_wlc_rpc_id_lookup(const struct name_entry *tbl, int _id)
-{
- const struct name_entry *elt = tbl;
- static char __unknown[64];
- for (; elt->name != NULL; elt++) {
- if (_id == elt->id)
- break;
- }
- if (_id == elt->id)
- strncpy(__unknown, elt->name, sizeof(__unknown));
- else
- snprintf(__unknown, sizeof(__unknown), "ID:%d", _id);
- return __unknown;
-}
-
-#define WLC_RPC_ID_LOOKUP(tbl, _id) (_wlc_rpc_id_lookup(tbl, _id))
-
-#endif /* BCMDBG */
-
-/* refer to txpwr_limits_t for each element; mcs32 is at the end as 1 byte */
-#define TXPOWER_XDR_SZ (roundup(WLC_NUM_RATES_CCK, 4) + roundup(WLC_NUM_RATES_OFDM, 4) * 4 + \
- roundup(WLC_NUM_RATES_MCS_1_STREAM, 4) * 6 + roundup(WLC_NUM_RATES_MCS_2_STREAM, 4) * 2 + \
- roundup(1, 4))
-
-#define wlc_rpc_txpwr_limits(b, txpwr, op, err) \
- do { \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->cck, WLC_NUM_RATES_CCK); \
- ASSERT(!(err)); \
- \
- /* 20 MHz Legacy OFDM rates with SISO transmission */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm, WLC_NUM_RATES_OFDM); \
- ASSERT(!(err)); \
- \
- /* 20 MHz Legacy OFDM rates with CDD transmission */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_cdd, WLC_NUM_RATES_OFDM); \
- ASSERT(!(err)); \
- \
- /* 40 MHz Legacy OFDM rates with SISO transmission */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_40_siso, WLC_NUM_RATES_OFDM); \
- ASSERT(!(err)); \
- \
- /* 40 MHz Legacy OFDM rates with CDD transmission */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_40_cdd, WLC_NUM_RATES_OFDM); \
- ASSERT(!(err)); \
- \
- /* 20MHz MCS rates SISO/CDD/STBC/SDM */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_siso, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_cdd, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_stbc, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_mimo, WLC_NUM_RATES_MCS_2_STREAM); \
- ASSERT(!(err)); \
- \
- /* 40MHz MCS rates SISO/CDD/STBC/SDM */ \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_siso, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_cdd, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_stbc, WLC_NUM_RATES_MCS_1_STREAM); \
- ASSERT(!(err)); \
- \
- (err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_mimo, WLC_NUM_RATES_MCS_2_STREAM); \
- ASSERT(!(err)); \
- } while (0)
-
-typedef struct wlc_rpc_ctx {
- rpc_info_t *rpc;
- wlc_info_t *wlc;
- wlc_hw_info_t *wlc_hw;
-} wlc_rpc_ctx_t;
-
-static inline rpc_buf_t *wlc_rpc_buf_alloc(rpc_info_t *rpc, bcm_xdr_buf_t *b,
- uint len, wlc_rpc_id_t rpc_id)
-{
- rpc_buf_t *rpc_buf;
-
- rpc_buf = bcm_rpc_buf_alloc(rpc, len + sizeof(u32));
-
- if (!rpc_buf)
- return NULL;
-
- bcm_xdr_buf_init(b, bcm_rpc_buf_data(bcm_rpc_tp_get(rpc), rpc_buf),
- len + sizeof(u32));
-
- bcm_xdr_pack_u32(b, rpc_id);
-
- return rpc_buf;
-}
-
-#if defined(BCMDBG)
-static __inline wlc_rpc_id_t
-wlc_rpc_id_get(struct rpc_info *rpc, rpc_buf_t *buf)
-{
- wlc_rpc_id_t rpc_id;
- bcm_xdr_buf_t b;
-
- bcm_xdr_buf_init(&b, bcm_rpc_buf_data(bcm_rpc_tp_get(rpc), buf),
- sizeof(u32));
-
- bcm_xdr_unpack_u32(&b, (u32 *)((unsigned long) & rpc_id));
- return rpc_id;
-}
-#endif
-
-static __inline int _wlc_rpc_call(struct rpc_info *rpc, rpc_buf_t *send)
-{
- int _err = 0;
-#if defined(BCMDBG)
- wlc_rpc_id_t rpc_id = wlc_rpc_id_get(rpc, send);
- /* const struct name_entry rpc_name_tbl[] = RPC_ID_TABLE; */
- static struct name_entry rpc_name_tbl[] = RPC_ID_TABLE;
- WL_TRACE(("%s: Called id %s\n", __func__,
- WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-#endif
- _err = bcm_rpc_call(rpc, send);
- if (_err) {
-#if defined(BCMDBG)
- WL_ERROR(("%s: Call id %s FAILED\n", __func__,
- WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-#endif
- _err = 0;
- }
- return _err;
-}
-
-#define wlc_rpc_call(rpc, send) (_wlc_rpc_call(rpc, send))
-
-#include <sbhnddma.h>
-#include <sbhndpio.h>
-#include <d11.h>
-
-#ifdef WLC_LOW
-extern void wlc_rpc_bmac_dispatch(wlc_rpc_ctx_t *rpc_ctx, struct rpc_buf *buf);
-extern void wlc_rpc_bmac_dump_txfifohist(wlc_hw_info_t *wlc_hw,
- bool dump_clear);
-#else
-extern void wlc_rpc_high_dispatch(wlc_rpc_ctx_t *ctx, struct rpc_buf *buf);
-#endif
-
-/* Packed structure for ease of transport across RPC bus along u32 boundary */
-typedef struct wlc_rpc_txstatus {
- u32 PAD_framelen;
- u32 status_frameid;
- u32 sequence_lasttxtime;
- u32 ackphyrxsh_phyerr;
-} wlc_rpc_txstatus_t;
-
-static inline
- void txstatus2rpc_txstatus(tx_status_t *txstatus,
- wlc_rpc_txstatus_t *rpc_txstatus)
-{
- rpc_txstatus->PAD_framelen = txstatus->framelen;
- rpc_txstatus->status_frameid =
- (txstatus->status << 16) | txstatus->frameid;
- rpc_txstatus->sequence_lasttxtime =
- (txstatus->sequence << 16) | txstatus->lasttxtime;
- rpc_txstatus->ackphyrxsh_phyerr =
- (txstatus->ackphyrxsh << 16) | txstatus->phyerr;
-}
-
-static inline
- void rpc_txstatus2txstatus(wlc_rpc_txstatus_t *rpc_txstatus,
- tx_status_t *txstatus)
-{
- txstatus->framelen = rpc_txstatus->PAD_framelen & 0xffff;
- txstatus->status = (rpc_txstatus->status_frameid >> 16) & 0xffff;
- txstatus->frameid = rpc_txstatus->status_frameid & 0xffff;
- txstatus->sequence = (rpc_txstatus->sequence_lasttxtime >> 16) & 0xffff;
- txstatus->lasttxtime = rpc_txstatus->sequence_lasttxtime & 0xffff;
- txstatus->ackphyrxsh = (rpc_txstatus->ackphyrxsh_phyerr >> 16) & 0xffff;
- txstatus->phyerr = rpc_txstatus->ackphyrxsh_phyerr & 0xffff;
-}
-
-extern void wlc_bmac_dngl_reboot(rpc_info_t *rpc);
-
-#endif /* WLC_RPC_H */
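
As an aside on the deleted wlc_rpc.h above: the txstatus2rpc_txstatus()/rpc_txstatus2txstatus() pair folds pairs of 16-bit tx-status fields into u32 words so they cross the RPC bus on word boundaries. The following is a standalone sketch of that round trip, compiled as ordinary user-space C; the structs are hypothetical stand-ins, not the driver's tx_status_t or wlc_rpc_txstatus_t.

/* pack two u16 fields per u32 word and recover them, mirroring the
 * shift/mask pattern of the deleted inline functions above */
#include <assert.h>
#include <stdint.h>

struct txs {				/* simplified stand-in for tx_status_t */
	uint16_t status, frameid;
	uint16_t sequence, lasttxtime;
};

struct txs_wire {			/* two u32 words on the RPC bus */
	uint32_t status_frameid;
	uint32_t sequence_lasttxtime;
};

static void txs_pack(const struct txs *t, struct txs_wire *w)
{
	w->status_frameid = ((uint32_t)t->status << 16) | t->frameid;
	w->sequence_lasttxtime = ((uint32_t)t->sequence << 16) | t->lasttxtime;
}

static void txs_unpack(const struct txs_wire *w, struct txs *t)
{
	t->status = (w->status_frameid >> 16) & 0xffff;
	t->frameid = w->status_frameid & 0xffff;
	t->sequence = (w->sequence_lasttxtime >> 16) & 0xffff;
	t->lasttxtime = w->sequence_lasttxtime & 0xffff;
}

int main(void)
{
	struct txs in = { 0x1234, 0xabcd, 0x0042, 0xbeef }, out;
	struct txs_wire w;

	txs_pack(&in, &w);
	txs_unpack(&w, &out);
	assert(out.status == in.status && out.frameid == in.frameid);
	assert(out.sequence == in.sequence && out.lasttxtime == in.lasttxtime);
	return 0;
}
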
diff --git a/drivers/staging/brcm80211/sys/wlc_rpctx.h b/drivers/staging/brcm80211/sys/wlc_rpctx.h
deleted file mode 100644
index 7427154a4bd4..000000000000
--- a/drivers/staging/brcm80211/sys/wlc_rpctx.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_rpctx_h_
-#define _wlc_rpctx_h_
-
-/* forward declaration */
-struct wlc_info;
-
-/* This controls how many packets are given to the dongle. This is required as
- * NTXD needs to be a power of 2, but we may not have enough memory to absorb
- * that large a number of frames.
- */
-#ifndef NRPCTXBUFPOST
-#define NRPCTXBUFPOST NTXD
-#endif
-
-#if defined(WLC_HIGH_ONLY)
-
-struct wlc_rpc_phy {
- struct rpc_info *rpc;
-};
-
-#define RPCTX_ENAB(pub) (true)
-extern rpctx_info_t *wlc_rpctx_attach(wlc_pub_t *pub, struct wlc_info *wlc);
-extern int wlc_rpctx_fifoinit(rpctx_info_t *rpctx, uint fifo, uint ntxd);
-extern void wlc_rpctx_detach(rpctx_info_t *rpctx);
-extern int wlc_rpctx_dump(rpctx_info_t *rpctx, struct bcmstrbuf *b);
-extern void *wlc_rpctx_getnexttxp(rpctx_info_t *rpctx, uint fifo);
-extern void wlc_rpctx_txreclaim(rpctx_info_t *rpctx);
-extern uint wlc_rpctx_txavail(rpctx_info_t *rpctx, uint fifo);
-extern int wlc_rpctx_pkteng(rpctx_info_t *rpctx, uint fifo, void *p);
-extern int wlc_rpctx_tx(rpctx_info_t *rpctx, uint fifo, void *p, bool commit,
- u16 frameid, u8 txpktpend);
-extern void wlc_rpctx_txpktpendinc(rpctx_info_t *rpctx, uint fifo, u8 val);
-extern void wlc_rpctx_txpktpenddec(rpctx_info_t *rpctx, uint fifo, u8 val);
-extern void wlc_rpctx_txpktpendclr(rpctx_info_t *rpctx, uint fifo);
-extern int wlc_rpctx_txpktpend(rpctx_info_t *rpctx, uint fifo, bool all);
-
-#else
-#define RPCTX_ENAB(pub) (false)
-#define wlc_rpctx_attach(pub, wlc) (NULL)
-#define wlc_rpctx_fifoinit(rpctx, fifo, ntxd) (0)
-#define wlc_rpctx_detach(rpctx) ASSERT(0)
-#define wlc_rpctx_txavail(rpctx, f) (false)
-#define wlc_rpctx_dump(rpctx, b) (0)
-#define wlc_rpctx_getnexttxp(rpctx, f) (NULL)
-#define wlc_rpctx_txreclaim(rpctx) ASSERT(0)
-#define wlc_rpctx_pkteng(rpctx, fifo, p) do { } while (0)
-#define wlc_rpctx_tx(rpctx, f, p, c, fid, t) (0)
-#define wlc_rpctx_txpktpendinc(rpctx, f, val) do { } while (0)
-#define wlc_rpctx_txpktpenddec(rpctx, f, val) do { } while (0)
-#define wlc_rpctx_txpktpendclr(rpctx, f) do { } while (0)
-#define wlc_rpctx_txpktpend(rpctx, f, all) (0)
-
-#endif /* WLC_HIGH_ONLY */
-
-#endif /* _wlc_rpctx_h_ */
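
wlc_rpctx.h, deleted above, is a small example of the compile-out idiom: when WLC_HIGH_ONLY is not defined, the whole RPC-TX API collapses to no-op macros so callers never need their own #ifdefs. A minimal sketch of the same idiom follows; FEATURE_FOO and foo_tx() are illustrative names, not driver symbols.

#include <stdio.h>

#ifdef FEATURE_FOO
#define FOO_ENAB()	1
static inline int foo_tx(void *pkt)	/* real work when the feature is on */
{
	return pkt != NULL;
}
#else
#define FOO_ENAB()	0
#define foo_tx(pkt)	(0)		/* compiles away when the feature is off */
#endif

int main(void)
{
	void *pkt = NULL;

	(void)pkt;	/* the disabled macro drops its argument */
	if (FOO_ENAB())
		printf("foo_tx -> %d\n", foo_tx(pkt));
	else
		printf("feature compiled out; foo_tx() is a no-op\n");
	return 0;
}
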
diff --git a/drivers/staging/brcm80211/sys/wlc_scb.h b/drivers/staging/brcm80211/sys/wlc_scb.h
index ce26c740e6c1..fe84e993b52a 100644
--- a/drivers/staging/brcm80211/sys/wlc_scb.h
+++ b/drivers/staging/brcm80211/sys/wlc_scb.h
@@ -19,7 +19,7 @@
#include <proto/802.1d.h>
-extern bool wlc_aggregatable(wlc_info_t *wlc, u8 tid);
+extern bool wlc_aggregatable(struct wlc_info *wlc, u8 tid);
#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
/* structure to store per-tid state for the ampdu initiator */
diff --git a/drivers/staging/brcm80211/sys/wlc_stf.c b/drivers/staging/brcm80211/sys/wlc_stf.c
index 4728ad90e295..8975b09a7438 100644
--- a/drivers/staging/brcm80211/sys/wlc_stf.c
+++ b/drivers/staging/brcm80211/sys/wlc_stf.c
@@ -15,8 +15,8 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <wlc_cfg.h>
-#include <linuxver.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
@@ -25,29 +25,33 @@
#include <proto/802.11.h>
#include <wlioctl.h>
#include <bcmwifi.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
#include <d11.h>
#include <wlc_rate.h>
#include <wlc_pub.h>
#include <wlc_key.h>
#include <wlc_channel.h>
#include <wlc_bsscfg.h>
+#include <wlc_event.h>
#include <wlc_mac80211.h>
#include <wlc_scb.h>
#include <wl_export.h>
#include <wlc_bmac.h>
#include <wlc_stf.h>
+#include <wl_dbg.h>
#define WLC_STF_SS_STBC_RX(wlc) (WLCISNPHY(wlc->band) && \
NREV_GT(wlc->band->phyrev, 3) && NREV_LE(wlc->band->phyrev, 6))
-static s8 wlc_stf_stbc_rx_get(wlc_info_t *wlc);
-static bool wlc_stf_stbc_tx_set(wlc_info_t *wlc, s32 int_val);
-static int wlc_stf_txcore_set(wlc_info_t *wlc, u8 Nsts, u8 val);
-static int wlc_stf_spatial_policy_set(wlc_info_t *wlc, int val);
-static void wlc_stf_stbc_rx_ht_update(wlc_info_t *wlc, int val);
+static s8 wlc_stf_stbc_rx_get(struct wlc_info *wlc);
+static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val);
+static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 val);
+static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val);
+static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val);
-static void _wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-static u16 _wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec);
+static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
#define NSTS_1 1
#define NSTS_2 2
@@ -61,7 +65,7 @@ const u8 txcore_default[5] = {
(0x0f) /* For Nsts = 4, enable all cores */
};
-static void wlc_stf_stbc_rx_ht_update(wlc_info_t *wlc, int val)
+static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val)
{
ASSERT((val == HT_CAP_RX_STBC_NO)
|| (val == HT_CAP_RX_STBC_ONE_STREAM));
@@ -82,7 +86,7 @@ static void wlc_stf_stbc_rx_ht_update(wlc_info_t *wlc, int val)
}
/* check temperature every WLC_TEMPSENSE_PERIOD seconds to decide whether to turn the txchain on/off */
-void wlc_tempsense_upd(wlc_info_t *wlc)
+void wlc_tempsense_upd(struct wlc_info *wlc)
{
wlc_phy_t *pi = wlc->band->pi;
uint active_chains, txchain;
@@ -106,7 +110,7 @@ void wlc_tempsense_upd(wlc_info_t *wlc)
}
void
-wlc_stf_ss_algo_channel_get(wlc_info_t *wlc, u16 *ss_algo_channel,
+wlc_stf_ss_algo_channel_get(struct wlc_info *wlc, u16 *ss_algo_channel,
chanspec_t chanspec)
{
tx_power_t power;
@@ -147,12 +151,12 @@ wlc_stf_ss_algo_channel_get(wlc_info_t *wlc, u16 *ss_algo_channel,
setbit(ss_algo_channel, PHY_TXC1_MODE_STBC);
}
-static s8 wlc_stf_stbc_rx_get(wlc_info_t *wlc)
+static s8 wlc_stf_stbc_rx_get(struct wlc_info *wlc)
{
return (wlc->ht_cap.cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
}
-static bool wlc_stf_stbc_tx_set(wlc_info_t *wlc, s32 int_val)
+static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val)
{
if ((int_val != AUTO) && (int_val != OFF) && (int_val != ON)) {
return false;
@@ -173,7 +177,7 @@ static bool wlc_stf_stbc_tx_set(wlc_info_t *wlc, s32 int_val)
return true;
}
-bool wlc_stf_stbc_rx_set(wlc_info_t *wlc, s32 int_val)
+bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val)
{
if ((int_val != HT_CAP_RX_STBC_NO)
&& (int_val != HT_CAP_RX_STBC_ONE_STREAM)) {
@@ -190,10 +194,10 @@ bool wlc_stf_stbc_rx_set(wlc_info_t *wlc, s32 int_val)
return true;
}
-static int wlc_stf_txcore_set(wlc_info_t *wlc, u8 Nsts, u8 core_mask)
+static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 core_mask)
{
- WL_TRACE(("wl%d: %s: Nsts %d core_mask %x\n",
- wlc->pub->unit, __func__, Nsts, core_mask));
+ WL_TRACE("wl%d: %s: Nsts %d core_mask %x\n",
+ wlc->pub->unit, __func__, Nsts, core_mask);
ASSERT((Nsts > 0) && (Nsts <= MAX_STREAMS_SUPPORTED));
@@ -227,12 +231,12 @@ static int wlc_stf_txcore_set(wlc_info_t *wlc, u8 Nsts, u8 core_mask)
return BCME_OK;
}
-static int wlc_stf_spatial_policy_set(wlc_info_t *wlc, int val)
+static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val)
{
int i;
u8 core_mask = 0;
- WL_TRACE(("wl%d: %s: val %x\n", wlc->pub->unit, __func__, val));
+ WL_TRACE("wl%d: %s: val %x\n", wlc->pub->unit, __func__, val);
wlc->stf->spatial_policy = (s8) val;
for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
@@ -243,7 +247,7 @@ static int wlc_stf_spatial_policy_set(wlc_info_t *wlc, int val)
return BCME_OK;
}
-int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force)
+int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force)
{
u8 txchain = (u8) int_val;
u8 txstreams;
@@ -274,13 +278,15 @@ int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force)
if (RSPEC_STF(wlc->bandstate[i]->rspec_override)
!= PHY_TXC1_MODE_SISO) {
wlc->bandstate[i]->rspec_override = 0;
- WL_ERROR(("%s(): temp sense override non-SISO" " rspec_override.\n", __func__));
+ WL_ERROR("%s(): temp sense override non-SISO rspec_override\n",
+ __func__);
}
if (RSPEC_STF
(wlc->bandstate[i]->mrspec_override) !=
PHY_TXC1_MODE_SISO) {
wlc->bandstate[i]->mrspec_override = 0;
- WL_ERROR(("%s(): temp sense override non-SISO" " mrspec_override.\n", __func__));
+ WL_ERROR("%s(): temp sense override non-SISO mrspec_override\n",
+ __func__);
}
}
}
@@ -303,7 +309,7 @@ int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force)
return BCME_OK;
}
-int wlc_stf_rxchain_set(wlc_info_t *wlc, s32 int_val)
+int wlc_stf_rxchain_set(struct wlc_info *wlc, s32 int_val)
{
u8 rxchain_cnt;
u8 rxchain = (u8) int_val;
@@ -367,7 +373,7 @@ int wlc_stf_rxchain_set(wlc_info_t *wlc, s32 int_val)
}
/* update wlc->stf->ss_opmode which represents the operational stf_ss mode we're using */
-int wlc_stf_ss_update(wlc_info_t *wlc, wlcband_t *band)
+int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band)
{
int ret_code = 0;
u8 prev_stf_ss;
@@ -402,7 +408,7 @@ int wlc_stf_ss_update(wlc_info_t *wlc, wlcband_t *band)
return ret_code;
}
-int wlc_stf_attach(wlc_info_t *wlc)
+int wlc_stf_attach(struct wlc_info *wlc)
{
wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_SISO;
wlc->bandstate[BAND_5G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_CDD;
@@ -425,11 +431,11 @@ int wlc_stf_attach(wlc_info_t *wlc)
return 0;
}
-void wlc_stf_detach(wlc_info_t *wlc)
+void wlc_stf_detach(struct wlc_info *wlc)
{
}
-int wlc_stf_ant_txant_validate(wlc_info_t *wlc, s8 val)
+int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val)
{
int bcmerror = BCME_OK;
@@ -476,7 +482,7 @@ int wlc_stf_ant_txant_validate(wlc_info_t *wlc, s8 val)
* do tx-antenna selection for SISO transmissions
* for NREV>=7, bit 6 and bit 7 mean antenna 0 and 1 respectively; bit6+bit7 means both cores active
*/
-static void _wlc_stf_phy_txant_upd(wlc_info_t *wlc)
+static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc)
{
s8 txant;
@@ -517,12 +523,12 @@ static void _wlc_stf_phy_txant_upd(wlc_info_t *wlc)
wlc_bmac_txant_set(wlc->hw, wlc->stf->phytxant);
}
-void wlc_stf_phy_txant_upd(wlc_info_t *wlc)
+void wlc_stf_phy_txant_upd(struct wlc_info *wlc)
{
_wlc_stf_phy_txant_upd(wlc);
}
-void wlc_stf_phy_chain_calc(wlc_info_t *wlc)
+void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
{
/* get available rx/tx chains */
wlc->stf->hw_txchain = (u8) getintvar(wlc->pub->vars, "txchain");
@@ -559,7 +565,7 @@ void wlc_stf_phy_chain_calc(wlc_info_t *wlc)
wlc_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
}
-static u16 _wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec)
+static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
{
u16 phytxant = wlc->stf->phytxant;
@@ -572,12 +578,12 @@ static u16 _wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec)
return phytxant;
}
-u16 wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec)
+u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
{
return _wlc_stf_phytxchain_sel(wlc, rspec);
}
-u16 wlc_stf_d11hdrs_phyctl_txant(wlc_info_t *wlc, ratespec_t rspec)
+u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec)
{
u16 phytxant = wlc->stf->phytxant;
u16 mask = PHY_TXC_ANT_MASK;
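
The wlc_stf.c hunks above also convert WL_TRACE/WL_ERROR calls from the old double-parenthesis form to a plain variadic call. The sketch below shows why the extra parentheses were needed and what the variadic form looks like; TRACE_OLD/TRACE_NEW are illustrative macros, not the driver's.

#include <stdio.h>

/* old style: the macro takes one parenthesised argument list and forwards
 * it wholesale, so callers must write TRACE_OLD(("fmt", args)); */
#define TRACE_OLD(args)		do { printf args; } while (0)

/* new style: a variadic macro takes the format and arguments directly */
#define TRACE_NEW(fmt, ...)	printf(fmt, ##__VA_ARGS__)

int main(void)
{
	int unit = 0;

	TRACE_OLD(("wl%d: old-style trace\n", unit));
	TRACE_NEW("wl%d: new-style trace\n", unit);
	return 0;
}
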
diff --git a/drivers/staging/brcm80211/sys/wlc_stf.h b/drivers/staging/brcm80211/sys/wlc_stf.h
index ee9b02a119bb..8de6382e620d 100644
--- a/drivers/staging/brcm80211/sys/wlc_stf.h
+++ b/drivers/staging/brcm80211/sys/wlc_stf.h
@@ -20,23 +20,24 @@
#define MIN_SPATIAL_EXPANSION 0
#define MAX_SPATIAL_EXPANSION 1
-extern int wlc_stf_attach(wlc_info_t *wlc);
-extern void wlc_stf_detach(wlc_info_t *wlc);
+extern int wlc_stf_attach(struct wlc_info *wlc);
+extern void wlc_stf_detach(struct wlc_info *wlc);
-extern void wlc_tempsense_upd(wlc_info_t *wlc);
-extern void wlc_stf_ss_algo_channel_get(wlc_info_t *wlc,
+extern void wlc_tempsense_upd(struct wlc_info *wlc);
+extern void wlc_stf_ss_algo_channel_get(struct wlc_info *wlc,
u16 *ss_algo_channel,
chanspec_t chanspec);
-extern int wlc_stf_ss_update(wlc_info_t *wlc, struct wlcband *band);
-extern void wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-extern int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force);
-extern int wlc_stf_rxchain_set(wlc_info_t *wlc, s32 int_val);
-extern bool wlc_stf_stbc_rx_set(wlc_info_t *wlc, s32 int_val);
+extern int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band);
+extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+extern int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force);
+extern int wlc_stf_rxchain_set(struct wlc_info *wlc, s32 int_val);
+extern bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val);
-extern int wlc_stf_ant_txant_validate(wlc_info_t *wlc, s8 val);
-extern void wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-extern void wlc_stf_phy_chain_calc(wlc_info_t *wlc);
-extern u16 wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec);
-extern u16 wlc_stf_d11hdrs_phyctl_txant(wlc_info_t *wlc, ratespec_t rspec);
-extern u16 wlc_stf_spatial_expansion_get(wlc_info_t *wlc, ratespec_t rspec);
+extern int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val);
+extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+extern void wlc_stf_phy_chain_calc(struct wlc_info *wlc);
+extern u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
+extern u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec);
+extern u16 wlc_stf_spatial_expansion_get(struct wlc_info *wlc,
+ ratespec_t rspec);
#endif /* _wlc_stf_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_types.h b/drivers/staging/brcm80211/sys/wlc_types.h
index 33047ebab979..df6e04c6ac58 100644
--- a/drivers/staging/brcm80211/sys/wlc_types.h
+++ b/drivers/staging/brcm80211/sys/wlc_types.h
@@ -19,34 +19,19 @@
/* forward declarations */
-typedef struct wlc_info wlc_info_t;
-typedef struct wlc_hw_info wlc_hw_info_t;
-typedef struct wlc_if wlc_if_t;
-typedef struct wl_if wl_if_t;
-typedef struct led_info led_info_t;
-typedef struct bmac_led bmac_led_t;
-typedef struct bmac_led_info bmac_led_info_t;
-typedef struct scb_module scb_module_t;
-typedef struct ba_info ba_info_t;
-typedef struct ampdu_info ampdu_info_t;
-typedef struct ratesel_info ratesel_info_t;
-typedef struct wlc_ap_info wlc_ap_info_t;
-typedef struct wlc_auth_info wlc_auth_info_t;
-typedef struct supplicant supplicant_t;
-typedef struct authenticator authenticator_t;
-typedef struct antsel_info antsel_info_t;
-#if !defined(WLC_LOW)
-typedef struct rpctx_info rpctx_info_t;
-#endif
-#ifdef WLC_LOW
-typedef struct bmac_pmq bmac_pmq_t;
-#endif
+struct wlc_info;
+struct wlc_hw_info;
+struct wlc_if;
+struct wl_if;
+struct ampdu_info;
+struct antsel_info;
+struct bmac_pmq;
struct d11init;
#ifndef _hnddma_pub_
#define _hnddma_pub_
-typedef const struct hnddma_pub hnddma_t;
+struct hnddma_pub;
#endif /* _hnddma_pub_ */
#endif /* _wlc_types_h_ */
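
wlc_types.h now carries bare forward declarations instead of typedefs, which is enough for any file that only stores or passes pointers; only the code that dereferences the structure needs the full definition. A self-contained sketch of that opaque-pointer split, using hypothetical names (struct widget is not a driver type):

#include <stdio.h>
#include <stdlib.h>

/* header side: forward declaration plus an API that traffics in pointers */
struct widget;
struct widget *widget_new(int id);
int widget_id(const struct widget *w);
void widget_free(struct widget *w);

/* implementation side: the only place the layout is visible */
struct widget {
	int id;
};

struct widget *widget_new(int id)
{
	struct widget *w = malloc(sizeof(*w));

	if (w)
		w->id = id;
	return w;
}

int widget_id(const struct widget *w)
{
	return w->id;
}

void widget_free(struct widget *w)
{
	free(w);
}

int main(void)
{
	struct widget *w = widget_new(7);

	if (w) {
		printf("id %d\n", widget_id(w));
		widget_free(w);
	}
	return 0;
}
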
diff --git a/drivers/staging/brcm80211/util/aiutils.c b/drivers/staging/brcm80211/util/aiutils.c
index 75a7e3a5c009..ddd2f9d64c20 100644
--- a/drivers/staging/brcm80211/util/aiutils.c
+++ b/drivers/staging/brcm80211/util/aiutils.c
@@ -14,11 +14,16 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
@@ -26,8 +31,8 @@
#include <pcicfg.h>
#include <bcmdevs.h>
-#define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
- (CHIPREV(sih->chiprev) == 0) && \
+#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
+ (sih->chiprev == 0) && \
(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
/* EROM parsing */
@@ -115,7 +120,7 @@ void ai_scan(si_t *sih, void *regs, uint devid)
erombase = R_REG(sii->osh, &cc->eromptr);
- switch (BUSTYPE(sih->bustype)) {
+ switch (sih->bustype) {
case SI_BUS:
eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
break;
@@ -125,7 +130,7 @@ void ai_scan(si_t *sih, void *regs, uint devid)
sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
/* Now point the window at the erom */
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, erombase);
eromptr = regs;
break;
@@ -330,7 +335,7 @@ void *ai_setcoreidx(si_t *sih, uint coreidx)
ASSERT((sii->intrsenabled_fn == NULL)
|| !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));
- switch (BUSTYPE(sih->bustype)) {
+ switch (sih->bustype) {
case SI_BUS:
/* map new one */
if (!sii->regs[coreidx]) {
@@ -347,10 +352,10 @@ void *ai_setcoreidx(si_t *sih, uint coreidx)
case PCI_BUS:
/* point bar0 window */
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+ pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, addr);
regs = sii->curmap;
/* point bar0 2nd 4KB window */
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+ pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN2, wrap);
break;
#ifdef BCMSDIO
@@ -504,7 +509,7 @@ uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
if (coreidx >= SI_MAXCORES)
return 0;
- if (BUSTYPE(sih->bustype) == SI_BUS) {
+ if (sih->bustype == SI_BUS) {
/* If internal bus, we can always get at everything */
fast = true;
/* map if does not exist */
@@ -514,7 +519,7 @@ uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
ASSERT(GOODREGS(sii->regs[coreidx]));
}
r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
- } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ } else if (sih->bustype == PCI_BUS) {
/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
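
The aiutils.c hunks replace the OSL_PCI_WRITE_CONFIG() wrapper with the standard Linux helper, which takes the struct pci_dev, a config-space offset and a 32-bit value. A minimal kernel-side sketch of the call as used above; MY_BAR0_WIN is a hypothetical offset name, and the helper's return status (ignored here, as in the driver) could be checked.

#include <linux/pci.h>

#define MY_BAR0_WIN	0x80	/* hypothetical config-space offset */

/* retarget the BAR0 window at the requested backplane address */
static void point_bar0_window(struct pci_dev *pdev, u32 core_addr)
{
	pci_write_config_dword(pdev, MY_BAR0_WIN, core_addr);
}
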
diff --git a/drivers/staging/brcm80211/util/bcmotp.c b/drivers/staging/brcm80211/util/bcmotp.c
index c909832c7ee1..d820e7b9e970 100644
--- a/drivers/staging/brcm80211/util/bcmotp.c
+++ b/drivers/staging/brcm80211/util/bcmotp.c
@@ -14,11 +14,13 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmdevs.h>
#include <bcmutils.h>
#include <siutils.h>
@@ -77,7 +79,7 @@ typedef struct {
uint ccrev; /* chipc revision */
otp_fn_t *fn; /* OTP functions */
si_t *sih; /* Saved sb handle */
- osl_t *osh;
+ struct osl_info *osh;
#ifdef BCMIPXOTP
/* IPX OTP section */
@@ -221,7 +223,7 @@ static int ipxotp_max_rgnsz(si_t *sih, int osizew)
{
int ret = 0;
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -271,8 +273,8 @@ static void _ipxotp_init(otpinfo_t *oi, chipcregs_t *cc)
/* Read OTP lock bits and subregion programmed indication bits */
oi->status = R_REG(oi->osh, &cc->otpstatus);
- if ((CHIPID(oi->sih->chip) == BCM43224_CHIP_ID)
- || (CHIPID(oi->sih->chip) == BCM43225_CHIP_ID)) {
+ if ((oi->sih->chip == BCM43224_CHIP_ID)
+ || (oi->sih->chip == BCM43225_CHIP_ID)) {
u32 p_bits;
p_bits =
(ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
@@ -569,7 +571,7 @@ static int hndotp_size(void *oh)
static u16 hndotp_otpr(void *oh, chipcregs_t *cc, uint wn)
{
otpinfo_t *oi = (otpinfo_t *) oh;
- osl_t *osh;
+ struct osl_info *osh;
volatile u16 *ptr;
ASSERT(wn < ((oi->size / 2) + OTP_RC_LIM_OFF));
@@ -584,7 +586,7 @@ static u16 hndotp_otpr(void *oh, chipcregs_t *cc, uint wn)
static u16 hndotp_otproff(void *oh, chipcregs_t *cc, int woff)
{
otpinfo_t *oi = (otpinfo_t *) oh;
- osl_t *osh;
+ struct osl_info *osh;
volatile u16 *ptr;
ASSERT(woff >= (-((int)oi->size / 2)));
@@ -603,7 +605,7 @@ static u16 hndotp_read_bit(void *oh, chipcregs_t *cc, uint idx)
otpinfo_t *oi = (otpinfo_t *) oh;
uint k, row, col;
u32 otpp, st;
- osl_t *osh;
+ struct osl_info *osh;
osh = si_osh(oi->sih);
row = idx / 65;
@@ -636,7 +638,7 @@ static void *hndotp_init(si_t *sih)
otpinfo_t *oi;
u32 cap = 0, clkdiv, otpdiv = 0;
void *ret = NULL;
- osl_t *osh;
+ struct osl_info *osh;
oi = &otpinfo;
@@ -900,7 +902,7 @@ void *otp_init(si_t *sih)
void *ret = NULL;
oi = &otpinfo;
- bzero(oi, sizeof(otpinfo_t));
+ memset(oi, 0, sizeof(otpinfo_t));
oi->ccrev = sih->ccrev;
diff --git a/drivers/staging/brcm80211/util/bcmsrom.c b/drivers/staging/brcm80211/util/bcmsrom.c
index 1282ef7eb922..19d45026a5ee 100644
--- a/drivers/staging/brcm80211/util/bcmsrom.c
+++ b/drivers/staging/brcm80211/util/bcmsrom.c
@@ -15,9 +15,11 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/etherdevice.h>
#include <bcmdefs.h>
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <stdarg.h>
#include <bcmutils.h>
#include <hndsoc.h>
@@ -66,29 +68,30 @@ extern uint _varsz;
#define SROM_CIS_SINGLE 1
-static int initvars_srom_si(si_t *sih, osl_t *osh, void *curmap, char **vars,
- uint *count);
+static int initvars_srom_si(si_t *sih, struct osl_info *osh, void *curmap,
+ char **vars, uint *count);
static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off,
varbuf_t *b);
static int initvars_srom_pci(si_t *sih, void *curmap, char **vars,
uint *count);
static int initvars_flash_si(si_t *sih, char **vars, uint *count);
#ifdef BCMSDIO
-static int initvars_cis_sdio(osl_t *osh, char **vars, uint *count);
-static int sprom_cmd_sdio(osl_t *osh, u8 cmd);
-static int sprom_read_sdio(osl_t *osh, u16 addr, u16 *data);
+static int initvars_cis_sdio(struct osl_info *osh, char **vars, uint *count);
+static int sprom_cmd_sdio(struct osl_info *osh, u8 cmd);
+static int sprom_read_sdio(struct osl_info *osh, u16 addr, u16 *data);
#endif /* BCMSDIO */
-static int sprom_read_pci(osl_t *osh, si_t *sih, u16 *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc);
+static int sprom_read_pci(struct osl_info *osh, si_t *sih, u16 *sprom,
+ uint wordoff, u16 *buf, uint nwords, bool check_crc);
#if defined(BCMNVRAMR)
-static int otp_read_pci(osl_t *osh, si_t *sih, u16 *buf, uint bufsz);
+static int otp_read_pci(struct osl_info *osh, si_t *sih, u16 *buf, uint bufsz);
#endif
-static u16 srom_cc_cmd(si_t *sih, osl_t *osh, void *ccregs, u32 cmd,
+static u16 srom_cc_cmd(si_t *sih, struct osl_info *osh, void *ccregs, u32 cmd,
uint wordoff, u16 data);
-static int initvars_table(osl_t *osh, char *start, char *end, char **vars,
- uint *count);
-static int initvars_flash(si_t *sih, osl_t *osh, char **vp, uint len);
+static int initvars_table(struct osl_info *osh, char *start, char *end,
+ char **vars, uint *count);
+static int initvars_flash(si_t *sih, struct osl_info *osh, char **vp,
+ uint len);
/* Initialization of varbuf structure */
static void varbuf_init(varbuf_t *b, char *buf, uint size)
@@ -129,7 +132,7 @@ static int varbuf_append(varbuf_t *b, const char *fmt, ...)
if (s != NULL) {
len = (size_t) (s - b->buf);
for (s = b->base; s < b->buf;) {
- if ((bcmp(s, b->buf, len) == 0) && s[len] == '=') {
+ if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
len = strlen(s) + 1;
memmove(s, (s + len),
((b->buf + r + 1) - (s + len)));
@@ -155,21 +158,21 @@ static int varbuf_append(varbuf_t *b, const char *fmt, ...)
* Initialize local vars from the right source for this platform.
* Return 0 on success, nonzero on error.
*/
-int srom_var_init(si_t *sih, uint bustype, void *curmap, osl_t *osh,
+int srom_var_init(si_t *sih, uint bustype, void *curmap, struct osl_info *osh,
char **vars, uint *count)
{
uint len;
len = 0;
- ASSERT(bustype == BUSTYPE(bustype));
+ ASSERT(bustype == bustype);
if (vars == NULL || count == NULL)
return 0;
*vars = NULL;
*count = 0;
- switch (BUSTYPE(bustype)) {
+ switch (bustype) {
case SI_BUS:
case JTAG_BUS:
return initvars_srom_si(sih, osh, curmap, vars, count);
@@ -194,7 +197,7 @@ int srom_var_init(si_t *sih, uint bustype, void *curmap, osl_t *osh,
/* support only 16-bit word read from srom */
int
-srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
+srom_read(si_t *sih, uint bustype, void *curmap, struct osl_info *osh,
uint byteoff, uint nbytes, u16 *buf, bool check_crc)
{
uint off, nw;
@@ -202,7 +205,7 @@ srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
uint i;
#endif /* BCMSDIO */
- ASSERT(bustype == BUSTYPE(bustype));
+ ASSERT(bustype == bustype);
/* check input - 16-bit access only */
if (byteoff & 1 || nbytes & 1 || (byteoff + nbytes) > SROM_MAX)
@@ -211,7 +214,7 @@ srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
off = byteoff / 2;
nw = nbytes / 2;
- if (BUSTYPE(bustype) == PCI_BUS) {
+ if (bustype == PCI_BUS) {
if (!curmap)
return 1;
@@ -233,7 +236,7 @@ srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
}
#endif
#ifdef BCMSDIO
- } else if (BUSTYPE(bustype) == SDIO_BUS) {
+ } else if (bustype == SDIO_BUS) {
off = byteoff / 2;
nw = nbytes / 2;
for (i = 0; i < nw; i++) {
@@ -242,7 +245,7 @@ srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
return 1;
}
#endif /* BCMSDIO */
- } else if (BUSTYPE(bustype) == SI_BUS) {
+ } else if (bustype == SI_BUS) {
return 1;
} else {
return 1;
@@ -376,7 +379,8 @@ u8 patch_pair;
/* For dongle HW, accept partial calibration parameters */
#define BCMDONGLECASE(n)
-int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
+int srom_parsecis(struct osl_info *osh, u8 *pcis[], uint ciscnt, char **vars,
+ uint *count)
{
char eabuf[32];
char *base;
@@ -402,7 +406,7 @@ int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
return -2;
varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
- bzero(base, MAXSZ_NVRAM_VARS);
+ memset(base, 0, MAXSZ_NVRAM_VARS);
eabuf[0] = '\0';
for (cisnum = 0; cisnum < ciscnt; cisnum++) {
cis = *pcis++;
@@ -496,12 +500,12 @@ int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
break;
default:
/* set macaddr if HNBU_MACADDR not seen yet */
- if (eabuf[0] == '\0'
- && cis[i] == LAN_NID
- && !(ETHER_ISNULLADDR(&cis[i + 2]))
- && !(ETHER_ISMULTI(&cis[i + 2]))) {
+ if (eabuf[0] == '\0' &&
+ cis[i] == LAN_NID &&
+ !is_zero_ether_addr(&cis[i + 2]) &&
+ !is_multicast_ether_addr(&cis[i + 2])) {
ASSERT(cis[i + 1] ==
- ETHER_ADDR_LEN);
+ ETH_ALEN);
snprintf(eabuf, sizeof(eabuf),
"%pM", &cis[i + 2]);
@@ -970,8 +974,8 @@ int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
break;
case HNBU_MACADDR:
- if (!(ETHER_ISNULLADDR(&cis[i + 1])) &&
- !(ETHER_ISMULTI(&cis[i + 1]))) {
+ if (!is_zero_ether_addr(&cis[i + 1]) &&
+ !is_multicast_ether_addr(&cis[i + 1])) {
snprintf(eabuf, sizeof(eabuf),
"%pM", &cis[i + 1]);
@@ -1405,8 +1409,8 @@ int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
* not in the bus cores.
*/
static u16
-srom_cc_cmd(si_t *sih, osl_t *osh, void *ccregs, u32 cmd, uint wordoff,
- u16 data)
+srom_cc_cmd(si_t *sih, struct osl_info *osh, void *ccregs, u32 cmd,
+ uint wordoff, u16 data)
{
chipcregs_t *cc = (chipcregs_t *) ccregs;
uint wait_cnt = 1000;
@@ -1439,7 +1443,7 @@ srom_cc_cmd(si_t *sih, osl_t *osh, void *ccregs, u32 cmd, uint wordoff,
* Return 0 on success, nonzero on error.
*/
static int
-sprom_read_pci(osl_t *osh, si_t *sih, u16 *sprom, uint wordoff,
+sprom_read_pci(struct osl_info *osh, si_t *sih, u16 *sprom, uint wordoff,
u16 *buf, uint nwords, bool check_crc)
{
int err = 0;
@@ -1499,7 +1503,7 @@ sprom_read_pci(osl_t *osh, si_t *sih, u16 *sprom, uint wordoff,
}
#if defined(BCMNVRAMR)
-static int otp_read_pci(osl_t *osh, si_t *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct osl_info *osh, si_t *sih, u16 *buf, uint bufsz)
{
u8 *otp;
uint sz = OTP_SZ_MAX / 2; /* size in words */
@@ -1547,8 +1551,8 @@ static int otp_read_pci(osl_t *osh, si_t *sih, u16 *buf, uint bufsz)
* Create variable table from memory.
* Return 0 on success, nonzero on error.
*/
-static int initvars_table(osl_t *osh, char *start, char *end, char **vars,
- uint *count)
+static int initvars_table(struct osl_info *osh, char *start, char *end,
+ char **vars, uint *count)
{
int c = (int)(end - start);
@@ -1574,7 +1578,8 @@ static int initvars_table(osl_t *osh, char *start, char *end, char **vars,
* of the table upon enter and to the end of the table upon exit when success.
* Return 0 on success, nonzero on error.
*/
-static int initvars_flash(si_t *sih, osl_t *osh, char **base, uint len)
+static int initvars_flash(si_t *sih, struct osl_info *osh, char **base,
+ uint len)
{
char *vp = *base;
char *flash;
@@ -1634,7 +1639,7 @@ static int initvars_flash(si_t *sih, osl_t *osh, char **base, uint len)
*/
static int initvars_flash_si(si_t *sih, char **vars, uint *count)
{
- osl_t *osh = si_osh(sih);
+ struct osl_info *osh = si_osh(sih);
char *vp, *base;
int err;
@@ -1845,7 +1850,7 @@ static int initvars_srom_pci(si_t *sih, void *curmap, char **vars, uint *count)
u32 sr;
varbuf_t b;
char *vp, *base = NULL;
- osl_t *osh = si_osh(sih);
+ struct osl_info *osh = si_osh(sih);
bool flash = false;
int err = 0;
@@ -1986,7 +1991,7 @@ static int initvars_srom_pci(si_t *sih, void *curmap, char **vars, uint *count)
* Read the SDIO cis and call parsecis to initialize the vars.
* Return 0 on success, nonzero on error.
*/
-static int initvars_cis_sdio(osl_t *osh, char **vars, uint *count)
+static int initvars_cis_sdio(struct osl_info *osh, char **vars, uint *count)
{
u8 *cis[SBSDIO_NUM_FUNCTION + 1];
uint fn, numfn;
@@ -2020,7 +2025,7 @@ static int initvars_cis_sdio(osl_t *osh, char **vars, uint *count)
}
/* set SDIO sprom command register */
-static int sprom_cmd_sdio(osl_t *osh, u8 cmd)
+static int sprom_cmd_sdio(struct osl_info *osh, u8 cmd)
{
u8 status = 0;
uint wait_cnt = 1000;
@@ -2040,7 +2045,7 @@ static int sprom_cmd_sdio(osl_t *osh, u8 cmd)
}
/* read a word from the SDIO srom */
-static int sprom_read_sdio(osl_t *osh, u16 addr, u16 *data)
+static int sprom_read_sdio(struct osl_info *osh, u16 addr, u16 *data)
{
u8 addr_l, addr_h, data_l, data_h;
@@ -2068,8 +2073,8 @@ static int sprom_read_sdio(osl_t *osh, u16 addr, u16 *data)
}
#endif /* BCMSDIO */
-static int initvars_srom_si(si_t *sih, osl_t *osh, void *curmap, char **vars,
- uint *varsz)
+static int initvars_srom_si(si_t *sih, struct osl_info *osh, void *curmap,
+ char **vars, uint *varsz)
{
/* Search flash nvram section for srom variables */
return initvars_flash_si(sih, vars, varsz);
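
bcmsrom.c switches its MAC-address sanity checks to the helpers from <linux/etherdevice.h>, which operate on a 6-byte (ETH_ALEN) buffer. A small kernel-side sketch of the combined test used above; the wrapper name is illustrative, and the same pair of checks is essentially what is_valid_ether_addr() performs.

#include <linux/etherdevice.h>
#include <linux/types.h>

/* accept only a programmed, unicast MAC address */
static bool usable_macaddr(const u8 *ea)
{
	return !is_zero_ether_addr(ea) && !is_multicast_ether_addr(ea);
}
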
diff --git a/drivers/staging/brcm80211/util/bcmutils.c b/drivers/staging/brcm80211/util/bcmutils.c
index 9789ea45ecd6..fd30cc6fb7f8 100644
--- a/drivers/staging/brcm80211/util/bcmutils.c
+++ b/drivers/staging/brcm80211/util/bcmutils.c
@@ -19,8 +19,10 @@
#include <linux/string.h>
#include <bcmdefs.h>
#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
#include <osl.h>
-#include <linuxver.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmnvram.h>
@@ -30,26 +32,26 @@
#include <proto/802.1d.h>
#include <proto/802.11.h>
-
/* copy a buffer into a pkt buffer chain */
-uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, unsigned char *buf)
+uint pktfrombuf(struct osl_info *osh, struct sk_buff *p, uint offset, int len,
+ unsigned char *buf)
{
uint n, ret = 0;
/* skip 'offset' bytes */
- for (; p && offset; p = PKTNEXT(p)) {
- if (offset < (uint) PKTLEN(p))
+ for (; p && offset; p = p->next) {
+ if (offset < (uint) (p->len))
break;
- offset -= PKTLEN(p);
+ offset -= p->len;
}
if (!p)
return 0;
/* copy the data */
- for (; p && len; p = PKTNEXT(p)) {
- n = min((uint) PKTLEN(p) - offset, (uint) len);
- bcopy(buf, PKTDATA(p) + offset, n);
+ for (; p && len; p = p->next) {
+ n = min((uint) (p->len) - offset, (uint) len);
+ bcopy(buf, p->data + offset, n);
buf += n;
len -= n;
ret += n;
@@ -59,13 +61,13 @@ uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, unsigned char *buf)
return ret;
}
/* return total length of buffer chain */
-uint BCMFASTPATH pkttotlen(osl_t *osh, void *p)
+uint BCMFASTPATH pkttotlen(struct osl_info *osh, struct sk_buff *p)
{
uint total;
total = 0;
- for (; p; p = PKTNEXT(p))
- total += PKTLEN(p);
+ for (; p; p = p->next)
+ total += p->len;
return total;
}
@@ -73,12 +75,13 @@ uint BCMFASTPATH pkttotlen(osl_t *osh, void *p)
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
*/
-void *BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p)
+struct sk_buff *BCMFASTPATH pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p)
{
struct pktq_prec *q;
ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+ ASSERT(p->prev == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
@@ -86,7 +89,7 @@ void *BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p)
q = &pq->q[prec];
if (q->head)
- PKTSETLINK(q->tail, p);
+ q->tail->prev = p;
else
q->head = p;
@@ -101,12 +104,13 @@ void *BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p)
return p;
}
-void *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p)
+struct sk_buff *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p)
{
struct pktq_prec *q;
ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+ ASSERT(p->prev == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
@@ -116,7 +120,7 @@ void *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p)
if (q->head == NULL)
q->tail = p;
- PKTSETLINK(p, q->head);
+ p->prev = q->head;
q->head = p;
q->len++;
@@ -128,10 +132,10 @@ void *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p)
return p;
}
-void *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
+struct sk_buff *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
{
struct pktq_prec *q;
- void *p;
+ struct sk_buff *p;
ASSERT(prec >= 0 && prec < pq->num_prec);
@@ -141,7 +145,7 @@ void *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
if (p == NULL)
return NULL;
- q->head = PKTLINK(p);
+ q->head = p->prev;
if (q->head == NULL)
q->tail = NULL;
@@ -149,15 +153,15 @@ void *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
pq->len--;
- PKTSETLINK(p, NULL);
+ p->prev = NULL;
return p;
}
-void *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
+struct sk_buff *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
{
struct pktq_prec *q;
- void *p, *prev;
+ struct sk_buff *p, *prev;
ASSERT(prec >= 0 && prec < pq->num_prec);
@@ -167,11 +171,11 @@ void *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
if (p == NULL)
return NULL;
- for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ for (prev = NULL; p != q->tail; p = p->prev)
prev = p;
if (prev)
- PKTSETLINK(prev, NULL);
+ prev->prev = NULL;
else
q->head = NULL;
@@ -184,17 +188,17 @@ void *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
}
#ifdef BRCM_FULLMAC
-void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec, bool dir)
{
struct pktq_prec *q;
- void *p;
+ struct sk_buff *p;
q = &pq->q[prec];
p = q->head;
while (p) {
- q->head = PKTLINK(p);
- PKTSETLINK(p, NULL);
- PKTFREE(osh, p, dir);
+ q->head = p->prev;
+ p->prev = NULL;
+ pkt_buf_free_skb(osh, p, dir);
q->len--;
pq->len--;
p = q->head;
@@ -203,7 +207,7 @@ void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
q->tail = NULL;
}
-void pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir)
{
int prec;
for (prec = 0; prec < pq->num_prec; prec++)
@@ -212,11 +216,11 @@ void pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
}
#else /* !BRCM_FULLMAC */
void
-pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn,
- int arg)
+pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec, bool dir,
+ ifpkt_cb_t fn, int arg)
{
struct pktq_prec *q;
- void *p, *prev = NULL;
+ struct sk_buff *p, *prev = NULL;
q = &pq->q[prec];
p = q->head;
@@ -224,17 +228,17 @@ pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn,
if (fn == NULL || (*fn) (p, arg)) {
bool head = (p == q->head);
if (head)
- q->head = PKTLINK(p);
+ q->head = p->prev;
else
- PKTSETLINK(prev, PKTLINK(p));
- PKTSETLINK(p, NULL);
- PKTFREE(osh, p, dir);
+ prev->prev = p->prev;
+ p->prev = NULL;
+ pkt_buf_free_skb(osh, p, dir);
q->len--;
pq->len--;
- p = (head ? q->head : PKTLINK(prev));
+ p = (head ? q->head : prev->prev);
} else {
prev = p;
- p = PKTLINK(p);
+ p = p->prev;
}
}
@@ -244,7 +248,8 @@ pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn,
}
}
-void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir,
+ ifpkt_cb_t fn, int arg)
{
int prec;
for (prec = 0; prec < pq->num_prec; prec++)
@@ -261,7 +266,7 @@ void pktq_init(struct pktq *pq, int num_prec, int max_len)
ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
/* pq is variable size; only zero out what's requested */
- bzero(pq,
+ memset(pq, 0,
offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
pq->num_prec = (u16) num_prec;
@@ -272,7 +277,7 @@ void pktq_init(struct pktq *pq, int num_prec, int max_len)
pq->q[prec].max = pq->max;
}
-void *pktq_peek_tail(struct pktq *pq, int *prec_out)
+struct sk_buff *pktq_peek_tail(struct pktq *pq, int *prec_out)
{
int prec;
@@ -303,10 +308,11 @@ int pktq_mlen(struct pktq *pq, uint prec_bmp)
return len;
}
/* Priority dequeue from a specific set of precedences */
-void *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+struct sk_buff *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp,
+ int *prec_out)
{
struct pktq_prec *q;
- void *p;
+ struct sk_buff *p;
int prec;
if (pq->len == 0)
@@ -325,7 +331,7 @@ void *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
if (p == NULL)
return NULL;
- q->head = PKTLINK(p);
+ q->head = p->prev;
if (q->head == NULL)
q->tail = NULL;
@@ -336,7 +342,7 @@ void *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
pq->len--;
- PKTSETLINK(p, NULL);
+ p->prev = NULL;
return p;
}
@@ -373,7 +379,7 @@ char *getvar(char *vars, const char *name)
/* first look in vars[] */
for (s = vars; s && *s;) {
- if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+ if ((memcmp(s, name, len) == 0) && (s[len] == '='))
return &s[len + 1];
while (*s++)
@@ -404,15 +410,15 @@ int getintvar(char *vars, const char *name)
#if defined(BCMDBG)
/* pretty hex print a pkt buffer chain */
-void prpkt(const char *msg, osl_t *osh, void *p0)
+void prpkt(const char *msg, struct osl_info *osh, struct sk_buff *p0)
{
- void *p;
+ struct sk_buff *p;
if (msg && (msg[0] != '\0'))
printf("%s:\n", msg);
- for (p = p0; p; p = PKTNEXT(p))
- prhex(NULL, PKTDATA(p), PKTLEN(p));
+ for (p = p0; p; p = p->next)
+ prhex(NULL, p->data, p->len);
}
#endif /* defined(BCMDBG) */
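
The bcmutils.c hunks above drop the PKTNEXT/PKTLEN/PKTLINK abstraction and operate on struct sk_buff directly: fragments chain through skb->next and each fragment's length is skb->len. A short kernel-side sketch mirroring the rewritten pkttotlen(); the helper name is illustrative.

#include <linux/skbuff.h>

/* total length of a buffer chain linked through skb->next */
static unsigned int chain_total_len(struct sk_buff *p)
{
	unsigned int total = 0;

	for (; p; p = p->next)
		total += p->len;
	return total;
}
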
diff --git a/drivers/staging/brcm80211/util/bcmwifi.c b/drivers/staging/brcm80211/util/bcmwifi.c
index 1bb6c78eece7..81e54bd7a554 100644
--- a/drivers/staging/brcm80211/util/bcmwifi.c
+++ b/drivers/staging/brcm80211/util/bcmwifi.c
@@ -15,6 +15,10 @@
*/
#include <linux/ctype.h>
#include <linux/kernel.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
+#include <osl.h>
#include <bcmdefs.h>
#include <bcmutils.h>
#include <bcmwifi.h>
diff --git a/drivers/staging/brcm80211/util/hnddma.c b/drivers/staging/brcm80211/util/hnddma.c
index fe503e7de563..d08869239d5b 100644
--- a/drivers/staging/brcm80211/util/hnddma.c
+++ b/drivers/staging/brcm80211/util/hnddma.c
@@ -16,7 +16,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linuxver.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <bcmdefs.h>
#include <bcmdevs.h>
#include <osl.h>
@@ -28,6 +29,10 @@
#include <sbhnddma.h>
#include <hnddma.h>
+#if defined(__mips__)
+#include <asm/addrspace.h>
+#endif
+
/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) \
@@ -68,11 +73,12 @@ static uint dma_msg_level;
#define DI_INFO(dmah) ((dma_info_t *)dmah)
+#define R_SM(r) (*(r))
+#define W_SM(r, v) (*(r) = (v))
+
/* dma engine software state */
typedef struct dma_info {
- struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
- * which could be const
- */
+ struct hnddma_pub hnddma; /* exported structure */
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
@@ -222,7 +228,7 @@ static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
-static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
+static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa, osldma_t **dmah);
@@ -231,7 +237,7 @@ static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
-static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
+static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
@@ -246,14 +252,14 @@ static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
-static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
+static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
-static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
+static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
@@ -270,7 +276,7 @@ static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
-static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
+static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);
static inline u32 parity32(u32 data);
@@ -368,10 +374,10 @@ static const di_fcn_t dma32proc = {
39
};
-hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
- void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
- int rxextheadroom, uint nrxpost, uint rxoffset,
- uint *msg_level)
+struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
+ void *dmaregstx, void *dmaregsrx, uint ntxd,
+ uint nrxd, uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
{
dma_info_t *di;
uint size;
@@ -570,7 +576,7 @@ hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
}
}
- return (hnddma_t *) di;
+ return (struct hnddma_pub *) di;
fail:
_dma_detach(di);
@@ -663,7 +669,7 @@ dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
}
}
-static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
+static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)
{
u32 w;
@@ -902,7 +908,7 @@ static void _dma_rxinit(dma_info_t *di)
/* clear rx descriptor ring */
if (DMA64_ENAB(di) && DMA64_MODE(di)) {
- BZERO_SM((void *)di->rxd64,
+ memset((void *)di->rxd64, '\0',
(di->nrxd * sizeof(dma64dd_t)));
/* DMA engine with out alignment requirement requires table to be inited
@@ -916,7 +922,7 @@ static void _dma_rxinit(dma_info_t *di)
if (di->aligndesc_4k)
_dma_ddtable_init(di, DMA_RX, di->rxdpa);
} else if (DMA32_ENAB(di)) {
- BZERO_SM((void *)di->rxd32,
+ memset((void *)di->rxd32, '\0',
(di->nrxd * sizeof(dma32dd_t)));
_dma_rxenable(di);
_dma_ddtable_init(di, DMA_RX, di->rxdpa);
@@ -978,7 +984,7 @@ _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
*/
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
- void *p, *head, *tail;
+ struct sk_buff *p, *head, *tail;
uint len;
uint pkt_len;
int resid = 0;
@@ -988,30 +994,31 @@ static void *BCMFASTPATH _dma_rx(dma_info_t *di)
if (head == NULL)
return NULL;
- len = ltoh16(*(u16 *) (PKTDATA(head)));
+ len = ltoh16(*(u16 *) (head->data));
DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
#if defined(__mips__)
+#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va)))
if (!len) {
- while (!(len = *(u16 *) OSL_UNCACHED(PKTDATA(head))))
+ while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
udelay(1);
- *(u16 *) PKTDATA(head) = htol16((u16) len);
+ *(u16 *) (head->data) = htol16((u16) len);
}
#endif /* defined(__mips__) */
/* set actual length */
pkt_len = min((di->rxoffset + len), di->rxbufsize);
- PKTSETLEN(head, pkt_len);
+ __skb_trim(head, pkt_len);
resid = len - (di->rxbufsize - di->rxoffset);
/* check for single or multi-buffer rx */
if (resid > 0) {
tail = head;
while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
- PKTSETNEXT(tail, p);
+ tail->next = p;
pkt_len = min(resid, (int)di->rxbufsize);
- PKTSETLEN(p, pkt_len);
+ __skb_trim(p, pkt_len);
tail = p;
resid -= di->rxbufsize;
@@ -1037,7 +1044,7 @@ static void *BCMFASTPATH _dma_rx(dma_info_t *di)
if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
di->name, len));
- PKTFREE(di->osh, head, false);
+ pkt_buf_free_skb(di->osh, head, false);
di->hnddma.rxgiants++;
goto next_frame;
}
@@ -1053,7 +1060,7 @@ static void *BCMFASTPATH _dma_rx(dma_info_t *di)
*/
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
- void *p;
+ struct sk_buff *p;
u16 rxin, rxout;
u32 flags = 0;
uint n;
@@ -1085,7 +1092,7 @@ static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
size to be allocated
*/
- p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
+ p = pkt_buf_get_skb(di->osh, di->rxbufsize + extra_offset);
if (p == NULL) {
DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
@@ -1109,17 +1116,18 @@ static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
}
/* reserve an extra headroom, if applicable */
if (extra_offset)
- PKTPULL(p, extra_offset);
+ skb_pull(p, extra_offset);
/* Do a cached write instead of uncached write since DMA_MAP
* will flush the cache.
*/
- *(u32 *) (PKTDATA(p)) = 0;
+ *(u32 *) (p->data) = 0;
if (DMASGLIST_ENAB)
- bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
+ memset(&di->rxp_dmah[rxout], 0,
+ sizeof(hnddma_seg_map_t));
- pa = DMA_MAP(di->osh, PKTDATA(p),
+ pa = DMA_MAP(di->osh, p->data,
di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));
@@ -1220,15 +1228,10 @@ static void _dma_rxreclaim(dma_info_t *di)
{
void *p;
- /* "unused local" warning suppression for OSLs that
- * define PKTFREE() without using the di->osh arg
- */
- di = di;
-
DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
while ((p = _dma_getnextrxp(di, true)))
- PKTFREE(di->osh, p, false);
+ pkt_buf_free_skb(di->osh, p, false);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
@@ -1372,7 +1375,7 @@ static unsigned long _dma_getvar(dma_info_t *di, const char *name)
return 0;
}
-void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
+void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs)
{
OR_REG(osh, &regs->control, XC_LE);
}
@@ -1395,7 +1398,7 @@ u8 dma_align_sizetobits(uint size)
* descriptor ring size aligned location. This will ensure that the ring will
* not cross page boundary
*/
-static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
+static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa, osldma_t **dmah)
{
@@ -1434,7 +1437,7 @@ static void dma32_txinit(dma_info_t *di)
di->hnddma.txavail = di->ntxd - 1;
/* clear tx descriptor ring */
- BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
+ memset((void *)di->txd32, '\0', (di->ntxd * sizeof(dma32dd_t)));
if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= XC_PD;
@@ -1491,7 +1494,7 @@ static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
return;
while ((p = dma32_getnexttxp(di, range)))
- PKTFREE(di->osh, p, true);
+ pkt_buf_free_skb(di->osh, p, true);
}
static bool dma32_txstopped(dma_info_t *di)
@@ -1651,9 +1654,9 @@ static bool dma32_txsuspendedidle(dma_info_t *di)
* WARNING: call must check the return value for error.
* the error(toss frames) could be fatal and cause many subsequent hard to debug problems
*/
-static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
+static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit)
{
- void *p, *next;
+ struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
@@ -1672,12 +1675,12 @@ static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
uint nsegs, j;
hnddma_seg_map_t *map;
- data = PKTDATA(p);
- len = PKTLEN(p);
+ data = p->data;
+ len = p->len;
#ifdef BCM_DMAPAD
len += PKTDMAPAD(di->osh, p);
#endif
- next = PKTNEXT(p);
+ next = p->next;
/* return nonzero if out of tx descriptors */
if (NEXTTXD(txout) == di->txin)
@@ -1687,7 +1690,8 @@ static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
continue;
if (DMASGLIST_ENAB)
- bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+ memset(&di->txp_dmah[txout], 0,
+ sizeof(hnddma_seg_map_t));
/* get physical address of buffer start */
pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
@@ -1761,7 +1765,7 @@ static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
outoftxd:
DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
- PKTFREE(di->osh, p0, true);
+ pkt_buf_free_skb(di->osh, p0, true);
di->hnddma.txavail = 0;
di->hnddma.txnobuf++;
return -1;
@@ -1959,7 +1963,7 @@ static void dma32_txrotate(dma_info_t *di)
if (DMASGLIST_ENAB) {
bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
sizeof(hnddma_seg_map_t));
- bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+ memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
}
di->txp[old] = NULL;
@@ -1989,7 +1993,7 @@ static void dma64_txinit(dma_info_t *di)
di->hnddma.txavail = di->ntxd - 1;
/* clear tx descriptor ring */
- BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
+ memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
/* DMA engine with out alignment requirement requires table to be inited
* before enabling the engine
@@ -2060,7 +2064,7 @@ static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
while ((p = dma64_getnexttxp(di, range))) {
/* For unframed data, we don't have any packets to free */
if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
- PKTFREE(di->osh, p, true);
+ pkt_buf_free_skb(di->osh, p, true);
}
}
@@ -2300,9 +2304,10 @@ static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
* WARNING: call must check the return value for error.
* the error(toss frames) could be fatal and cause many subsequent hard to debug problems
*/
-static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
+static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
+ bool commit)
{
- void *p, *next;
+ struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
@@ -2321,12 +2326,12 @@ static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
uint nsegs, j;
hnddma_seg_map_t *map;
- data = PKTDATA(p);
- len = PKTLEN(p);
+ data = p->data;
+ len = p->len;
#ifdef BCM_DMAPAD
len += PKTDMAPAD(di->osh, p);
#endif /* BCM_DMAPAD */
- next = PKTNEXT(p);
+ next = p->next;
/* return nonzero if out of tx descriptors */
if (NEXTTXD(txout) == di->txin)
@@ -2337,7 +2342,8 @@ static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
/* get physical address of buffer start */
if (DMASGLIST_ENAB)
- bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+ memset(&di->txp_dmah[txout], 0,
+ sizeof(hnddma_seg_map_t));
pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
&di->txp_dmah[txout]);
@@ -2409,7 +2415,7 @@ static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
outoftxd:
DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- PKTFREE(di->osh, p0, true);
+ pkt_buf_free_skb(di->osh, p0, true);
di->hnddma.txavail = 0;
di->hnddma.txnobuf++;
return -1;
@@ -2563,7 +2569,7 @@ static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
return rxp;
}
-static bool _dma64_addrext(osl_t *osh, dma64regs_t * dma64regs)
+static bool _dma64_addrext(struct osl_info *osh, dma64regs_t * dma64regs)
{
u32 w;
OR_REG(osh, &dma64regs->control, D64_XC_AE);
@@ -2635,7 +2641,7 @@ static void dma64_txrotate(dma_info_t *di)
if (DMASGLIST_ENAB) {
bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
sizeof(hnddma_seg_map_t));
- bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+ memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
}
di->txp[old] = NULL;
@@ -2654,7 +2660,7 @@ static void dma64_txrotate(dma_info_t *di)
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
dma32regs_t *dma32regs;
- osl_t *osh;
+ struct osl_info *osh;
osh = si_osh(sih);
@@ -2664,8 +2670,8 @@ uint dma_addrwidth(si_t *sih, void *dmaregs)
/* backplane are 64-bit capable */
if (si_backplane64(sih))
/* If bus is System Backplane or PCIE then we can access 64-bits */
- if ((BUSTYPE(sih->bustype) == SI_BUS) ||
- ((BUSTYPE(sih->bustype) == PCI_BUS) &&
+ if ((sih->bustype == SI_BUS) ||
+ ((sih->bustype == PCI_BUS) &&
(sih->buscoretype == PCIE_CORE_ID)))
return DMADDRWIDTH_64;
@@ -2679,8 +2685,8 @@ uint dma_addrwidth(si_t *sih, void *dmaregs)
dma32regs = (dma32regs_t *) dmaregs;
/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
- if ((BUSTYPE(sih->bustype) == SI_BUS) ||
- ((BUSTYPE(sih->bustype) == PCI_BUS)
+ if ((sih->bustype == SI_BUS) ||
+ ((sih->bustype == PCI_BUS)
&& sih->buscoretype == PCIE_CORE_ID)
|| (_dma32_addrext(osh, dma32regs)))
return DMADDRWIDTH_32;
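
A minimal sketch of the packet-buffer conversion the hnddma.c hunks above apply: the OSL PKT* macros (PKTDATA, PKTSETLEN, PKTSETNEXT) are replaced by direct struct sk_buff accesses. The function name and lengths here are hypothetical and only illustrate the pattern; this is not code from the patch.

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Trim each buffer in a multi-skb rx chain, as _dma_rx() now does with
 * __skb_trim() and skb->next instead of PKTSETLEN()/PKTSETNEXT(). */
static void demo_trim_rx_chain(struct sk_buff *head, unsigned int bufsize,
			       unsigned int total_len)
{
	struct sk_buff *p;
	int resid = total_len;

	for (p = head; p != NULL && resid > 0; p = p->next) {
		/* was: PKTSETLEN(p, pkt_len) */
		__skb_trim(p, min_t(unsigned int, resid, bufsize));
		resid -= bufsize;
	}
}
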
diff --git a/drivers/staging/brcm80211/util/hndpmu.c b/drivers/staging/brcm80211/util/hndpmu.c
index a8f3306c1d2b..6cc59a895868 100644
--- a/drivers/staging/brcm80211/util/hndpmu.c
+++ b/drivers/staging/brcm80211/util/hndpmu.c
@@ -13,9 +13,14 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
@@ -40,23 +45,23 @@
#define PMU_NONE(args)
/* PLL controls/clocks */
-static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static void si_pmu1_pllinit0(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
u32 xtal);
-static u32 si_pmu1_cpuclk0(si_t *sih, osl_t *osh, chipcregs_t *cc);
-static u32 si_pmu1_alpclk0(si_t *sih, osl_t *osh, chipcregs_t *cc);
+static u32 si_pmu1_cpuclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc);
+static u32 si_pmu1_alpclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc);
/* PMU resources */
static bool si_pmu_res_depfltr_bb(si_t *sih);
static bool si_pmu_res_depfltr_ncb(si_t *sih);
static bool si_pmu_res_depfltr_paldo(si_t *sih);
static bool si_pmu_res_depfltr_npaldo(si_t *sih);
-static u32 si_pmu_res_deps(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static u32 si_pmu_res_deps(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
u32 rsrcs, bool all);
-static uint si_pmu_res_uptime(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static uint si_pmu_res_uptime(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
u8 rsrc);
static void si_pmu_res_masks(si_t *sih, u32 * pmin, u32 * pmax);
static void si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc,
- osl_t *osh, u8 spuravoid);
+ struct osl_info *osh, u8 spuravoid);
static void si_pmu_set_4330_plldivs(si_t *sih);
@@ -101,7 +106,7 @@ void si_pmu_pllupd(si_t *sih)
}
/* Setup switcher voltage */
-void si_pmu_set_switcher_voltage(si_t *sih, osl_t *osh, u8 bb_voltage,
+void si_pmu_set_switcher_voltage(si_t *sih, struct osl_info *osh, u8 bb_voltage,
u8 rf_voltage)
{
chipcregs_t *cc;
@@ -124,14 +129,14 @@ void si_pmu_set_switcher_voltage(si_t *sih, osl_t *osh, u8 bb_voltage,
si_setcoreidx(sih, origidx);
}
-void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, u8 ldo, u8 voltage)
+void si_pmu_set_ldo_voltage(si_t *sih, struct osl_info *osh, u8 ldo, u8 voltage)
{
u8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0;
u8 addr = 0;
ASSERT(sih->cccaps & CC_CAP_PMU);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4336_CHIP_ID:
switch (ldo) {
case SET_LDO_VOLTAGE_CLDO_PWM:
@@ -182,7 +187,7 @@ void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, u8 ldo, u8 voltage)
/* d11 slow to fast clock transition time in slow clock cycles */
#define D11SCC_SLOW2FAST_TRANSITION 2
-u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh)
+u16 si_pmu_fast_pwrup_delay(si_t *sih, struct osl_info *osh)
{
uint delay = PMU_MAX_TRANSITION_DLY;
chipcregs_t *cc;
@@ -199,7 +204,7 @@ u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM43421_CHIP_ID:
@@ -259,7 +264,7 @@ u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh)
return (u16) delay;
}
-u32 si_pmu_force_ilp(si_t *sih, osl_t *osh, bool force)
+u32 si_pmu_force_ilp(si_t *sih, struct osl_info *osh, bool force)
{
chipcregs_t *cc;
uint origidx;
@@ -599,7 +604,7 @@ static void si_pmu_res_masks(si_t *sih, u32 * pmin, u32 * pmax)
rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM43421_CHIP_ID:
@@ -677,7 +682,7 @@ static void si_pmu_res_masks(si_t *sih, u32 * pmin, u32 * pmax)
}
/* initialize PMU resources */
-void si_pmu_res_init(si_t *sih, osl_t *osh)
+void si_pmu_res_init(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -696,7 +701,7 @@ void si_pmu_res_init(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
/* Optimize resources up/down timers */
if (ISSIM_ENAB(sih)) {
@@ -1095,7 +1100,7 @@ static const pmu1_xtaltab0_t *si_pmu1_xtaltab0(si_t *sih)
#ifdef BCMDBG
char chn[8];
#endif
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
return pmu1_xtaltab0_880_4329;
case BCM4319_CHIP_ID:
@@ -1123,7 +1128,7 @@ static const pmu1_xtaltab0_t *si_pmu1_xtaldef0(si_t *sih)
char chn[8];
#endif
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
/* Default to 38400Khz */
return &pmu1_xtaltab0_880_4329[PMU1_XTALTAB0_880_38400K];
@@ -1155,7 +1160,7 @@ static u32 si_pmu1_pllfvco0(si_t *sih)
char chn[8];
#endif
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
return FVCO_880;
case BCM4319_CHIP_ID:
@@ -1178,7 +1183,7 @@ static u32 si_pmu1_pllfvco0(si_t *sih)
/* query alp/xtal clock frequency */
static u32
-si_pmu1_alpclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
+si_pmu1_alpclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc)
{
const pmu1_xtaltab0_t *xt;
u32 xf;
@@ -1203,7 +1208,8 @@ si_pmu1_alpclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
* case the xtal frequency is unknown to the s/w so we need to call
* si_pmu1_xtaldef0() wherever it is needed to return a default value.
*/
-static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
+static void si_pmu1_pllinit0(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
+ u32 xtal)
{
const pmu1_xtaltab0_t *xt;
u32 tmp;
@@ -1233,8 +1239,8 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
*/
if ((((R_REG(osh, &cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
PCTL_XTALFREQ_SHIFT) == xt->xf) &&
- !((CHIPID(sih->chip) == BCM4319_CHIP_ID)
- || (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+ !((sih->chip == BCM4319_CHIP_ID)
+ || (sih->chip == BCM4330_CHIP_ID))) {
PMU_MSG(("PLL already programmed for %d.%d MHz\n",
xt->fref / 1000, xt->fref % 1000));
return;
@@ -1244,7 +1250,7 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
PMU_MSG(("Programming PLL for %d.%d MHz\n", xt->fref / 1000,
xt->fref % 1000));
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
/* Change the BBPLL drive strength to 8 for all channels */
buf_strength = 0x888888;
@@ -1351,11 +1357,11 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) & PMU1_PLL0_PC0_P2DIV_MASK);
W_REG(osh, &cc->pllcontrol_data, tmp);
- if ((CHIPID(sih->chip) == BCM4330_CHIP_ID))
+ if ((sih->chip == BCM4330_CHIP_ID))
si_pmu_set_4330_plldivs(sih);
- if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
- && (CHIPREV(sih->chiprev) == 0)) {
+ if ((sih->chip == BCM4329_CHIP_ID)
+ && (sih->chiprev == 0)) {
W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
tmp = R_REG(osh, &cc->pllcontrol_data);
@@ -1363,9 +1369,9 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
tmp = tmp | DOT11MAC_880MHZ_CLK_DIVISOR_VAL;
W_REG(osh, &cc->pllcontrol_data, tmp);
}
- if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4330_CHIP_ID))
+ if ((sih->chip == BCM4319_CHIP_ID) ||
+ (sih->chip == BCM4336_CHIP_ID) ||
+ (sih->chip == BCM4330_CHIP_ID))
ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MFB;
else
ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
@@ -1407,7 +1413,7 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
/* to operate the 4319 usb in 24MHz/48MHz; chipcontrol[2][84:83] needs
* to be updated.
*/
- if ((CHIPID(sih->chip) == BCM4319_CHIP_ID)
+ if ((sih->chip == BCM4319_CHIP_ID)
&& (xt->fref != XTAL_FREQ_30000MHZ)) {
W_REG(osh, &cc->chipcontrol_addr, PMU1_PLL0_CHIPCTL2);
tmp =
@@ -1436,8 +1442,8 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
PCTL_ILP_DIV_MASK) |
((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
- if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
- && CHIPREV(sih->chiprev) == 0) {
+ if ((sih->chip == BCM4329_CHIP_ID)
+ && sih->chiprev == 0) {
/* clear the htstretch before clearing HTReqEn */
AND_REG(osh, &cc->clkstretch, ~CSTRETCH_HT);
tmp &= ~PCTL_HT_REQ_EN;
@@ -1448,7 +1454,7 @@ static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
/* query the CPU clock frequency */
static u32
-si_pmu1_cpuclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
+si_pmu1_cpuclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc)
{
u32 tmp, m1div;
#ifdef BCMDBG
@@ -1502,7 +1508,7 @@ si_pmu1_cpuclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
}
/* initialize PLL */
-void si_pmu_pll_init(si_t *sih, osl_t *osh, uint xtalfreq)
+void si_pmu_pll_init(si_t *sih, struct osl_info *osh, uint xtalfreq)
{
chipcregs_t *cc;
uint origidx;
@@ -1517,7 +1523,7 @@ void si_pmu_pll_init(si_t *sih, osl_t *osh, uint xtalfreq)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
if (xtalfreq == 0)
xtalfreq = 38400;
@@ -1555,7 +1561,7 @@ void si_pmu_pll_init(si_t *sih, osl_t *osh, uint xtalfreq)
}
/* query alp/xtal clock frequency */
-u32 si_pmu_alp_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_alp_clock(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -1571,7 +1577,7 @@ u32 si_pmu_alp_clock(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM43421_CHIP_ID:
@@ -1616,7 +1622,7 @@ u32 si_pmu_alp_clock(si_t *sih, osl_t *osh)
* pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
*/
static u32
-si_pmu5_clock(si_t *sih, osl_t *osh, chipcregs_t *cc, uint pll0,
+si_pmu5_clock(si_t *sih, struct osl_info *osh, chipcregs_t *cc, uint pll0,
uint m) {
u32 tmp, div, ndiv, p1, p2, fc;
@@ -1631,7 +1637,7 @@ si_pmu5_clock(si_t *sih, osl_t *osh, chipcregs_t *cc, uint pll0,
return 0;
}
- if (CHIPID(sih->chip) == BCM5357_CHIP_ID) {
+ if (sih->chip == BCM5357_CHIP_ID) {
/* Detect failure in clock setting */
if ((R_REG(osh, &cc->chipstatus) & 0x40000) != 0) {
return 133 * 1000000;
@@ -1669,7 +1675,7 @@ si_pmu5_clock(si_t *sih, osl_t *osh, chipcregs_t *cc, uint pll0,
/* For designs that feed the same clock to both backplane
* and CPU just return the CPU clock speed.
*/
-u32 si_pmu_si_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_si_clock(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -1685,7 +1691,7 @@ u32 si_pmu_si_clock(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM43421_CHIP_ID:
@@ -1702,7 +1708,7 @@ u32 si_pmu_si_clock(si_t *sih, osl_t *osh)
PMU5_MAINPLL_SI);
break;
case BCM4329_CHIP_ID:
- if (CHIPREV(sih->chiprev) == 0)
+ if (sih->chiprev == 0)
clock = 38400 * 1000;
else
clock = si_pmu1_cpuclk0(sih, osh, cc);
@@ -1748,7 +1754,7 @@ u32 si_pmu_si_clock(si_t *sih, osl_t *osh)
}
/* query CPU clock frequency */
-u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_cpu_clock(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -1757,14 +1763,14 @@ u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh)
ASSERT(sih->cccaps & CC_CAP_PMU);
if ((sih->pmurev >= 5) &&
- !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM43236_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+ !((sih->chip == BCM4329_CHIP_ID) ||
+ (sih->chip == BCM4319_CHIP_ID) ||
+ (sih->chip == BCM43236_CHIP_ID) ||
+ (sih->chip == BCM4336_CHIP_ID) ||
+ (sih->chip == BCM4330_CHIP_ID))) {
uint pll;
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM5356_CHIP_ID:
pll = PMU5356_MAINPLL_PLL0;
break;
@@ -1792,7 +1798,7 @@ u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh)
}
/* query memory clock frequency */
-u32 si_pmu_mem_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_mem_clock(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -1801,14 +1807,14 @@ u32 si_pmu_mem_clock(si_t *sih, osl_t *osh)
ASSERT(sih->cccaps & CC_CAP_PMU);
if ((sih->pmurev >= 5) &&
- !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4330_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM43236_CHIP_ID))) {
+ !((sih->chip == BCM4329_CHIP_ID) ||
+ (sih->chip == BCM4319_CHIP_ID) ||
+ (sih->chip == BCM4330_CHIP_ID) ||
+ (sih->chip == BCM4336_CHIP_ID) ||
+ (sih->chip == BCM43236_CHIP_ID))) {
uint pll;
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM5356_CHIP_ID:
pll = PMU5356_MAINPLL_PLL0;
break;
@@ -1841,7 +1847,7 @@ u32 si_pmu_mem_clock(si_t *sih, osl_t *osh)
static u32 ilpcycles_per_sec;
-u32 si_pmu_ilp_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_ilp_clock(si_t *sih, struct osl_info *osh)
{
if (ISSIM_ENAB(sih))
return ILP_CLOCK;
@@ -1905,7 +1911,7 @@ static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
void
-si_sdiod_drive_strength_init(si_t *sih, osl_t *osh,
+si_sdiod_drive_strength_init(si_t *sih, struct osl_info *osh,
u32 drivestrength) {
chipcregs_t *cc;
uint origidx, intr_val = 0;
@@ -1976,7 +1982,7 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh,
}
/* initialize PMU */
-void si_pmu_init(si_t *sih, osl_t *osh)
+void si_pmu_init(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -1993,7 +1999,7 @@ void si_pmu_init(si_t *sih, osl_t *osh)
else if (sih->pmurev >= 2)
OR_REG(osh, &cc->pmucontrol, PCTL_NOILP_ON_WAIT);
- if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
+ if ((sih->chip == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
/* Fix for 4329b0 bad LPOM state. */
W_REG(osh, &cc->regcontrol_addr, 2);
OR_REG(osh, &cc->regcontrol_data, 0x100);
@@ -2008,7 +2014,7 @@ void si_pmu_init(si_t *sih, osl_t *osh)
/* Return up time in ILP cycles for the given resource. */
static uint
-si_pmu_res_uptime(si_t *sih, osl_t *osh, chipcregs_t *cc,
+si_pmu_res_uptime(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
u8 rsrc) {
u32 deps;
uint up, i, dup, dmax;
@@ -2045,7 +2051,7 @@ si_pmu_res_uptime(si_t *sih, osl_t *osh, chipcregs_t *cc,
/* Return dependancies (direct or all/indirect) for the given resources */
static u32
-si_pmu_res_deps(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 rsrcs,
+si_pmu_res_deps(si_t *sih, struct osl_info *osh, chipcregs_t *cc, u32 rsrcs,
bool all)
{
u32 deps = 0;
@@ -2065,7 +2071,7 @@ si_pmu_res_deps(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 rsrcs,
}
/* power up/down OTP through PMU resources */
-void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on)
+void si_pmu_otp_power(si_t *sih, struct osl_info *osh, bool on)
{
chipcregs_t *cc;
uint origidx;
@@ -2084,7 +2090,7 @@ void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
rsrcs = PMURES_BIT(RES4329_OTP_PU);
break;
@@ -2135,7 +2141,7 @@ void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on)
si_setcoreidx(sih, origidx);
}
-void si_pmu_rcal(si_t *sih, osl_t *osh)
+void si_pmu_rcal(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
@@ -2147,7 +2153,7 @@ void si_pmu_rcal(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:{
u8 rcal_code;
u32 val;
@@ -2218,7 +2224,7 @@ void si_pmu_rcal(si_t *sih, osl_t *osh)
si_setcoreidx(sih, origidx);
}
-void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid)
+void si_pmu_spuravoid(si_t *sih, struct osl_info *osh, u8 spuravoid)
{
chipcregs_t *cc;
uint origidx, intr_val;
@@ -2230,7 +2236,7 @@ void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid)
ASSERT(cc != NULL);
/* force the HT off */
- if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+ if (sih->chip == BCM4336_CHIP_ID) {
tmp = R_REG(osh, &cc->max_res_mask);
tmp &= ~RES4336_HT_AVAIL;
W_REG(osh, &cc->max_res_mask, tmp);
@@ -2244,7 +2250,7 @@ void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid)
si_pmu_spuravoid_pllupdate(sih, cc, osh, spuravoid);
/* enable HT back on */
- if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+ if (sih->chip == BCM4336_CHIP_ID) {
tmp = R_REG(osh, &cc->max_res_mask);
tmp |= RES4336_HT_AVAIL;
W_REG(osh, &cc->max_res_mask, tmp);
@@ -2255,7 +2261,7 @@ void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid)
}
static void
-si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, osl_t *osh,
+si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, struct osl_info *osh,
u8 spuravoid)
{
u32 tmp = 0;
@@ -2263,14 +2269,14 @@ si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, osl_t *osh,
u8 bcm5357_bcm43236_p1div[] = { 0x1, 0x5, 0x5 };
u8 bcm5357_bcm43236_ndiv[] = { 0x30, 0xf6, 0xfc };
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM5357_CHIP_ID:
case BCM43235_CHIP_ID:
case BCM43236_CHIP_ID:
case BCM43238_CHIP_ID:
/* BCM5357 needs to touch PLL1_PLLCTL[02], so offset PLL0_PLLCTL[02] by 6 */
- phypll_offset = (CHIPID(sih->chip) == BCM5357_CHIP_ID) ? 6 : 0;
+ phypll_offset = (sih->chip == BCM5357_CHIP_ID) ? 6 : 0;
/* RMW only the P1 divider */
W_REG(osh, &cc->pllcontrol_addr,
@@ -2451,7 +2457,7 @@ si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, osl_t *osh,
W_REG(osh, &cc->pmucontrol, tmp);
}
-bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
+bool si_pmu_is_otp_powered(si_t *sih, struct osl_info *osh)
{
uint idx;
chipcregs_t *cc;
@@ -2462,7 +2468,7 @@ bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT(cc != NULL);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4329_OTP_PU))
!= 0;
@@ -2503,9 +2509,9 @@ bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
void
#if defined(BCMDBG)
-si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable)
+si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable)
#else
-si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable)
+si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable)
#endif
{
chipcregs_t *cc;
@@ -2521,7 +2527,7 @@ si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable)
}
/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(si_t *sih, osl_t *osh)
+void si_pmu_chip_init(si_t *sih, struct osl_info *osh)
{
uint origidx;
@@ -2543,11 +2549,11 @@ void si_pmu_chip_init(si_t *sih, osl_t *osh)
}
/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(si_t *sih, osl_t *osh)
+void si_pmu_swreg_init(si_t *sih, struct osl_info *osh)
{
ASSERT(sih->cccaps & CC_CAP_PMU);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4336_CHIP_ID:
/* Reduce CLDO PWM output voltage to 1.2V */
si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_CLDO_PWM, 0xe);
@@ -2556,7 +2562,7 @@ void si_pmu_swreg_init(si_t *sih, osl_t *osh)
0xe);
/* Reduce LNLDO1 output voltage to 1.2V */
si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_LNLDO1, 0xe);
- if (CHIPREV(sih->chiprev) == 0)
+ if (sih->chiprev == 0)
si_pmu_regcontrol(sih, 2, 0x400000, 0x400000);
break;
@@ -2573,7 +2579,7 @@ void si_pmu_radio_enable(si_t *sih, bool enable)
{
ASSERT(sih->cccaps & CC_CAP_PMU);
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4319_CHIP_ID:
if (enable)
si_write_wrapperreg(sih, AI_OOBSELOUTB74,
@@ -2587,7 +2593,7 @@ void si_pmu_radio_enable(si_t *sih, bool enable)
/* Wait for a particular clock level to be on the backplane */
u32
-si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, u32 clk,
+si_pmu_waitforclk_on_backplane(si_t *sih, struct osl_info *osh, u32 clk,
u32 delay)
{
chipcregs_t *cc;
@@ -2616,7 +2622,7 @@ si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, u32 clk,
#define EXT_ILP_HZ 32768
-u32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh)
+u32 si_pmu_measure_alpclk(si_t *sih, struct osl_info *osh)
{
chipcregs_t *cc;
uint origidx;
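
The hndpmu.c hunks above drop the CHIPID()/CHIPREV() wrapper macros in favour of comparing the si_t fields directly. A sketch of the resulting style, assuming siutils.h and bcmdevs.h (the driver-local headers) provide si_t and BCM4329_CHIP_ID; the function name is hypothetical and is not part of the patch.

#include <linux/types.h>
#include <siutils.h>	/* si_t, from the driver tree (assumed) */
#include <bcmdevs.h>	/* BCM4329_CHIP_ID (assumed) */

static bool demo_is_4329_a0(si_t *sih)
{
	/* was: (CHIPID(sih->chip) == BCM4329_CHIP_ID) &&
	 *      (CHIPREV(sih->chiprev) == 0) */
	return (sih->chip == BCM4329_CHIP_ID) && (sih->chiprev == 0);
}
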
diff --git a/drivers/staging/brcm80211/util/linux_osl.c b/drivers/staging/brcm80211/util/linux_osl.c
index 2bb5b8722df6..e6716e823baa 100644
--- a/drivers/staging/brcm80211/util/linux_osl.c
+++ b/drivers/staging/brcm80211/util/linux_osl.c
@@ -20,145 +20,57 @@
#include <asm/paccess.h>
#endif /* mips */
#include <bcmendian.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <pcicfg.h>
-#define PCI_CFG_RETRY 10
-
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognise osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
-struct osl_info {
- osl_pubinfo_t pub;
- uint magic;
- void *pdev;
- uint failed;
- uint bustype;
-};
-
/* Global ASSERT type flag */
u32 g_assert_type;
-#ifdef BRCM_FULLMAC
-static s16 linuxbcmerrormap[] = { 0, /* 0 */
- -EINVAL, /* BCME_ERROR */
- -EINVAL, /* BCME_BADARG */
- -EINVAL, /* BCME_BADOPTION */
- -EINVAL, /* BCME_NOTUP */
- -EINVAL, /* BCME_NOTDOWN */
- -EINVAL, /* BCME_NOTAP */
- -EINVAL, /* BCME_NOTSTA */
- -EINVAL, /* BCME_BADKEYIDX */
- -EINVAL, /* BCME_RADIOOFF */
- -EINVAL, /* BCME_NOTBANDLOCKED */
- -EINVAL, /* BCME_NOCLK */
- -EINVAL, /* BCME_BADRATESET */
- -EINVAL, /* BCME_BADBAND */
- -E2BIG, /* BCME_BUFTOOSHORT */
- -E2BIG, /* BCME_BUFTOOLONG */
- -EBUSY, /* BCME_BUSY */
- -EINVAL, /* BCME_NOTASSOCIATED */
- -EINVAL, /* BCME_BADSSIDLEN */
- -EINVAL, /* BCME_OUTOFRANGECHAN */
- -EINVAL, /* BCME_BADCHAN */
- -EFAULT, /* BCME_BADADDR */
- -ENOMEM, /* BCME_NORESOURCE */
- -EOPNOTSUPP, /* BCME_UNSUPPORTED */
- -EMSGSIZE, /* BCME_BADLENGTH */
- -EINVAL, /* BCME_NOTREADY */
- -EPERM, /* BCME_NOTPERMITTED */
- -ENOMEM, /* BCME_NOMEM */
- -EINVAL, /* BCME_ASSOCIATED */
- -ERANGE, /* BCME_RANGE */
- -EINVAL, /* BCME_NOTFOUND */
- -EINVAL, /* BCME_WME_NOT_ENABLED */
- -EINVAL, /* BCME_TSPEC_NOTFOUND */
- -EINVAL, /* BCME_ACM_NOTSUPPORTED */
- -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
- -EIO, /* BCME_SDIO_ERROR */
- -ENODEV, /* BCME_DONGLE_DOWN */
- -EINVAL, /* BCME_VERSION */
- -EIO, /* BCME_TXFAIL */
- -EIO, /* BCME_RXFAIL */
- -EINVAL, /* BCME_NODEVICE */
- -EINVAL, /* BCME_NMODE_DISABLED */
- -ENODATA, /* BCME_NONRESIDENT */
-
-/* When an new error code is added to bcmutils.h, add os
- * spcecific error translation here as well
- */
-/* check if BCME_LAST changed since the last time this function was updated */
-#if BCME_LAST != -42
-#error "You need to add a OS error translation in the linuxbcmerrormap \
- for new error code defined in bcmutils.h"
-#endif
-};
-
-/* translate bcmerrors into linux errors */
-int osl_error(int bcmerror)
-{
- if (bcmerror > 0)
- bcmerror = 0;
- else if (bcmerror < BCME_LAST)
- bcmerror = BCME_ERROR;
-
- /* Array bounds covered by ASSERT in osl_attach */
- return linuxbcmerrormap[-bcmerror];
-}
-#endif /* BRCM_FULLMAC */
-
-osl_t *osl_attach(void *pdev, uint bustype, bool pkttag)
+struct osl_info *osl_attach(void *pdev, uint bustype)
{
- osl_t *osh;
+ struct osl_info *osh;
- osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+ osh = kmalloc(sizeof(struct osl_info), GFP_ATOMIC);
ASSERT(osh);
- bzero(osh, sizeof(osl_t));
-
-#ifdef BRCM_FULLMAC
- /* Check that error map has the right number of entries in it */
- ASSERT(ABS(BCME_LAST) == (ARRAY_SIZE(linuxbcmerrormap) - 1));
-#endif /* BRCM_FULLMAC */
+ memset(osh, 0, sizeof(struct osl_info));
osh->magic = OS_HANDLE_MAGIC;
- osh->failed = 0;
osh->pdev = pdev;
- osh->pub.pkttag = pkttag;
osh->bustype = bustype;
switch (bustype) {
case PCI_BUS:
case SI_BUS:
case PCMCIA_BUS:
- osh->pub.mmbus = true;
+ osh->mmbus = true;
break;
case JTAG_BUS:
case SDIO_BUS:
case USB_BUS:
case SPI_BUS:
case RPC_BUS:
- osh->pub.mmbus = false;
+ osh->mmbus = false;
break;
default:
ASSERT(false);
break;
}
-#if defined(BCMDBG) && !defined(BRCM_FULLMAC)
- if (pkttag) {
- struct sk_buff *skb;
- ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
- }
-#endif
return osh;
}
-void osl_detach(osl_t *osh)
+void osl_detach(struct osl_info *osh)
{
if (osh == NULL)
return;
@@ -167,8 +79,7 @@ void osl_detach(osl_t *osh)
kfree(osh);
}
-/* Return a new packet. zero out pkttag */
-void *BCMFASTPATH osl_pktget(osl_t *osh, uint len)
+struct sk_buff *BCMFASTPATH pkt_buf_get_skb(struct osl_info *osh, uint len)
{
struct sk_buff *skb;
@@ -177,24 +88,20 @@ void *BCMFASTPATH osl_pktget(osl_t *osh, uint len)
skb_put(skb, len);
skb->priority = 0;
- osh->pub.pktalloced++;
+ osh->pktalloced++;
}
- return (void *)skb;
+ return skb;
}
/* Free the driver packet. Free the tag if present */
-void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
+void BCMFASTPATH pkt_buf_free_skb(struct osl_info *osh, struct sk_buff *skb, bool send)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *nskb;
int nest = 0;
- skb = (struct sk_buff *)p;
ASSERT(skb);
- if (send && osh->pub.tx_fn)
- osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
-
/* perversion: we use skb->next to chain multi-skb packets */
while (skb) {
nskb = skb->next;
@@ -211,63 +118,14 @@ void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
*/
dev_kfree_skb(skb);
- osh->pub.pktalloced--;
+ osh->pktalloced--;
nest++;
skb = nskb;
}
}
-u32 osl_pci_read_config(osl_t *osh, uint offset, uint size)
-{
- uint val = 0;
- uint retry = PCI_CFG_RETRY;
-
- ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
- /* only 4byte access supported */
- ASSERT(size == 4);
-
- do {
- pci_read_config_dword(osh->pdev, offset, &val);
- if (val != 0xffffffff)
- break;
- } while (retry--);
-
-#ifdef BCMDBG
- if (retry < PCI_CFG_RETRY)
- printk("PCI CONFIG READ access to %d required %d retries\n",
- offset, (PCI_CFG_RETRY - retry));
-#endif /* BCMDBG */
-
- return val;
-}
-
-void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
-{
- uint retry = PCI_CFG_RETRY;
-
- ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
- /* only 4byte access supported */
- ASSERT(size == 4);
-
- do {
- pci_write_config_dword(osh->pdev, offset, val);
- if (offset != PCI_BAR0_WIN)
- break;
- if (osl_pci_read_config(osh, offset, size) == val)
- break;
- } while (retry--);
-
-#if defined(BCMDBG) && !defined(BRCM_FULLMAC)
- if (retry < PCI_CFG_RETRY)
- printk("PCI CONFIG WRITE access to %d required %d retries\n",
- offset, (PCI_CFG_RETRY - retry));
-#endif /* BCMDBG */
-}
-
/* return bus # for the pci device pointed by osh->pdev */
-uint osl_pci_bus(osl_t *osh)
+uint osl_pci_bus(struct osl_info *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
@@ -275,40 +133,37 @@ uint osl_pci_bus(osl_t *osh)
}
/* return slot # for the pci device pointed by osh->pdev */
-uint osl_pci_slot(osl_t *osh)
+uint osl_pci_slot(struct osl_info *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
}
-uint osl_dma_consistent_align(void)
-{
- return PAGE_SIZE;
-}
-
-void *osl_dma_alloc_consistent(osl_t *osh, uint size, u16 align_bits,
+void *osl_dma_alloc_consistent(struct osl_info *osh, uint size, u16 align_bits,
uint *alloced, unsigned long *pap)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
if (align_bits) {
u16 align = (1 << align_bits);
- if (!IS_ALIGNED(DMA_CONSISTENT_ALIGN, align))
+ if (!IS_ALIGNED(PAGE_SIZE, align))
size += align;
*alloced = size;
}
return pci_alloc_consistent(osh->pdev, size, (dma_addr_t *) pap);
}
-void osl_dma_free_consistent(osl_t *osh, void *va, uint size, unsigned long pa)
+void osl_dma_free_consistent(struct osl_info *osh, void *va, uint size,
+ unsigned long pa)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
pci_free_consistent(osh->pdev, size, va, (dma_addr_t) pa);
}
-uint BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+uint BCMFASTPATH osl_dma_map(struct osl_info *osh, void *va, uint size,
+ int direction)
{
int dir;
@@ -317,7 +172,8 @@ uint BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction)
return pci_map_single(osh->pdev, va, size, dir);
}
-void BCMFASTPATH osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+void BCMFASTPATH osl_dma_unmap(struct osl_info *osh, uint pa, uint size,
+ int direction)
{
int dir;
@@ -373,52 +229,3 @@ void osl_assert(char *exp, char *file, int line)
}
#endif /* defined(BCMDBG_ASSERT) */
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-u8 osl_readb(osl_t *osh, volatile u8 *r)
-{
- osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- return (u8) ((rreg) (ctx, (void *)r, sizeof(u8)));
-}
-
-u16 osl_readw(osl_t *osh, volatile u16 *r)
-{
- osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- return (u16) ((rreg) (ctx, (void *)r, sizeof(u16)));
-}
-
-u32 osl_readl(osl_t *osh, volatile u32 *r)
-{
- osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- return (u32) ((rreg) (ctx, (void *)r, sizeof(u32)));
-}
-
-void osl_writeb(osl_t *osh, volatile u8 *r, u8 v)
-{
- osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- ((wreg) (ctx, (void *)r, v, sizeof(u8)));
-}
-
-void osl_writew(osl_t *osh, volatile u16 *r, u16 v)
-{
- osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- ((wreg) (ctx, (void *)r, v, sizeof(u16)));
-}
-
-void osl_writel(osl_t *osh, volatile u32 *r, u32 v)
-{
- osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
- void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
- ((wreg) (ctx, (void *)r, v, sizeof(u32)));
-}
-#endif /* BCMSDIO */
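
A usage sketch of the linux_osl.c interface after this change: osl_attach() loses its pkttag argument and returns struct osl_info *, while osl_pktget()/osl_pktfree() become pkt_buf_get_skb()/pkt_buf_free_skb() operating on struct sk_buff directly. The wrapper function and the 2048-byte length are illustrative assumptions, not code from the patch; osl.h is the driver-local header.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <osl.h>	/* driver-local prototypes (assumed) */

static int demo_osl_roundtrip(struct pci_dev *pdev)
{
	struct osl_info *osh;
	struct sk_buff *skb;

	osh = osl_attach(pdev, PCI_BUS);	/* pkttag parameter removed */
	if (osh == NULL)
		return -ENOMEM;

	skb = pkt_buf_get_skb(osh, 2048);	/* was osl_pktget() */
	if (skb != NULL)
		pkt_buf_free_skb(osh, skb, false);	/* was osl_pktfree() */

	osl_detach(osh);
	return 0;
}
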
diff --git a/drivers/staging/brcm80211/util/nicpci.c b/drivers/staging/brcm80211/util/nicpci.c
index 23f86dd7b159..56e658c429a8 100644
--- a/drivers/staging/brcm80211/util/nicpci.c
+++ b/drivers/staging/brcm80211/util/nicpci.c
@@ -14,8 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/string.h>
-#include <linuxver.h>
+#include <linux/pci.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
@@ -35,7 +36,7 @@ typedef struct {
} regs; /* Memory mapped register to the core */
si_t *sih; /* System interconnect handle */
- osl_t *osh; /* OSL handle */
+ struct osl_info *osh; /* OSL handle */
u8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
bool pcie_pr42767;
u8 pcie_polarity;
@@ -47,7 +48,8 @@ typedef struct {
/* debug/trace */
#define PCI_ERROR(args)
-#define PCIE_PUB(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
+#define PCIE_PUB(sih) \
+ (((sih)->bustype == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
/* routines to access mdio slave device registers */
static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
@@ -71,35 +73,6 @@ static bool pcicore_pmecap(pcicore_info_t *pi);
#define PCIE_ASPM(sih) ((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
-#define DWORD_ALIGN(x) (x & ~(0x03))
-#define BYTE_POS(x) (x & 0x3)
-#define WORD_POS(x) (x & 0x1)
-
-#define BYTE_SHIFT(x) (8 * BYTE_POS(x))
-#define WORD_SHIFT(x) (16 * WORD_POS(x))
-
-#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
-#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
-
-#define read_pci_cfg_byte(a) \
- (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
-
-#define read_pci_cfg_word(a) \
- (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
-
-#define write_pci_cfg_byte(a, val) do { \
- u32 tmpval; \
- tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \
- val << BYTE_POS(a); \
- OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
- } while (0)
-
-#define write_pci_cfg_word(a, val) do { \
- u32 tmpval; \
- tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \
- val << WORD_POS(a); \
- OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
- } while (0)
/* delay needed between the mdio control/ mdiodata register data access */
#define PR28829_DELAY() udelay(10)
@@ -107,7 +80,7 @@ static bool pcicore_pmecap(pcicore_info_t *pi);
/* Initialize the PCI core. It's caller's responsibility to make sure that this is done
* only once
*/
-void *pcicore_init(si_t *sih, osl_t *osh, void *regs)
+void *pcicore_init(si_t *sih, struct osl_info *osh, void *regs)
{
pcicore_info_t *pi;
@@ -149,8 +122,8 @@ void pcicore_deinit(void *pch)
/* return cap_offset if requested capability exists in the PCI config space */
/* Note that it's caller's responsibility to make sure it's a pci bus */
u8
-pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id, unsigned char *buf,
- u32 *buflen)
+pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
+ unsigned char *buf, u32 *buflen)
{
u8 cap_id;
u8 cap_ptr = 0;
@@ -158,29 +131,29 @@ pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id, unsigned char *buf,
u8 byte_val;
/* check for Header type 0 */
- byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+ pci_read_config_byte(osh->pdev, PCI_CFG_HDR, &byte_val);
if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
goto end;
/* check if the capability pointer field exists */
- byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+ pci_read_config_byte(osh->pdev, PCI_CFG_STAT, &byte_val);
if (!(byte_val & PCI_CAPPTR_PRESENT))
goto end;
- cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+ pci_read_config_byte(osh->pdev, PCI_CFG_CAPPTR, &cap_ptr);
/* check if the capability pointer is 0x00 */
if (cap_ptr == 0x00)
goto end;
/* loop thr'u the capability list and see if the pcie capabilty exists */
- cap_id = read_pci_cfg_byte(cap_ptr);
+ pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
while (cap_id != req_cap_id) {
- cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+ pci_read_config_byte(osh->pdev, cap_ptr + 1, &cap_ptr);
if (cap_ptr == 0x00)
break;
- cap_id = read_pci_cfg_byte(cap_ptr);
+ pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
}
if (cap_id != req_cap_id) {
goto end;
@@ -199,7 +172,7 @@ pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id, unsigned char *buf,
bufsize = SZPCR - cap_data;
*buflen = bufsize;
while (bufsize--) {
- *buf = read_pci_cfg_byte(cap_data);
+ pci_read_config_byte(osh->pdev, cap_data, buf);
cap_data++;
buf++;
}
@@ -210,7 +183,8 @@ pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id, unsigned char *buf,
/* ***** Register Access API */
uint
-pcie_readreg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct osl_info *osh, sbpcieregs_t *pcieregs, uint addrtype,
+ uint offset)
{
uint retval = 0xFFFFFFFF;
@@ -236,8 +210,8 @@ pcie_readreg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
}
uint
-pcie_writereg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset,
- uint val)
+pcie_writereg(struct osl_info *osh, sbpcieregs_t *pcieregs, uint addrtype,
+ uint offset, uint val)
{
ASSERT(pcieregs != NULL);
@@ -373,15 +347,15 @@ u8 pcie_clkreq(void *pch, u32 mask, u32 val)
if (!offset)
return 0;
- reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
/* set operation */
if (mask) {
if (val)
reg_val |= PCIE_CLKREQ_ENAB;
else
reg_val &= ~PCIE_CLKREQ_ENAB;
- OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(u32), reg_val);
- reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+ pci_write_config_dword(pi->osh->pdev, offset, reg_val);
+ pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
}
if (reg_val & PCIE_CLKREQ_ENAB)
return 1;
@@ -393,7 +367,7 @@ static void pcie_extendL1timer(pcicore_info_t *pi, bool extend)
{
u32 w;
si_t *sih = pi->sih;
- osl_t *osh = pi->osh;
+ struct osl_info *osh = pi->osh;
sbpcieregs_t *pcieregs = pi->regs.pcieregs;
if (!PCIE_PUB(sih) || sih->buscorerev < 7)
@@ -502,12 +476,12 @@ static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
W_REG(pi->osh, reg16, val16);
- w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset,
+ &w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
- OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
- sizeof(u32), w);
+ pci_write_config_dword(pi->osh->pdev,
+ pi->pciecap_lcreg_offset, w);
}
reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
@@ -577,7 +551,7 @@ static void pcie_war_noplldown(pcicore_info_t *pi)
static void pcie_war_pci_setup(pcicore_info_t *pi)
{
si_t *sih = pi->sih;
- osl_t *osh = pi->osh;
+ struct osl_info *osh = pi->osh;
sbpcieregs_t *pcieregs = pi->regs.pcieregs;
u32 w;
@@ -694,11 +668,9 @@ void pcicore_sleep(void *pch)
if (!pi || !PCIE_ASPM(pi->sih))
return;
- w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_CAP_LCREG_ASPML1;
- OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(u32),
- w);
+ pci_write_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, w);
pi->pcie_pr42767 = false;
}
@@ -718,7 +690,7 @@ void pcicore_down(void *pch, int state)
/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
/* Just uses PCI config accesses to find out, when needed before sb_attach is done */
-bool pcicore_pmecap_fast(osl_t *osh)
+bool pcicore_pmecap_fast(struct osl_info *osh)
{
u8 cap_ptr;
u32 pmecap;
@@ -730,7 +702,7 @@ bool pcicore_pmecap_fast(osl_t *osh)
if (!cap_ptr)
return false;
- pmecap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(u32));
+ pci_read_config_dword(osh->pdev, cap_ptr, &pmecap);
return (pmecap & PME_CAP_PM_STATES) != 0;
}
@@ -753,9 +725,8 @@ static bool pcicore_pmecap(pcicore_info_t *pi)
pi->pmecap_offset = cap_ptr;
- pmecap =
- OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset,
+ &pmecap);
/* At least one state can generate PME */
pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
@@ -774,11 +745,11 @@ void pcicore_pmeen(void *pch)
if (!pcicore_pmecap(pi))
return;
- w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ &w);
w |= (PME_CSR_PME_EN);
- OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
- sizeof(u32), w);
+ pci_write_config_dword(pi->osh->pdev,
+ pi->pmecap_offset + PME_CSR_OFFSET, w);
}
/*
@@ -792,8 +763,8 @@ bool pcicore_pmestat(void *pch)
if (!pcicore_pmecap(pi))
return false;
- w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ &w);
return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
}
@@ -808,22 +779,23 @@ void pcicore_pmeclr(void *pch)
if (!pcicore_pmecap(pi))
return;
- w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
- sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ &w);
PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
/* PMESTAT is cleared by writing 1 to it */
w &= ~(PME_CSR_PME_EN);
- OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
- sizeof(u32), w);
+ pci_write_config_dword(pi->osh->pdev,
+ pi->pmecap_offset + PME_CSR_OFFSET, w);
}
u32 pcie_lcreg(void *pch, u32 mask, u32 val)
{
pcicore_info_t *pi = (pcicore_info_t *) pch;
u8 offset;
+ u32 tmpval;
offset = pi->pciecap_lcreg_offset;
if (!offset)
@@ -831,9 +803,10 @@ u32 pcie_lcreg(void *pch, u32 mask, u32 val)
/* set operation */
if (mask)
- OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(u32), val);
+ pci_write_config_dword(pi->osh->pdev, offset, val);
- return OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+ pci_read_config_dword(pi->osh->pdev, offset, &tmpval);
+ return tmpval;
}
u32
@@ -842,7 +815,7 @@ pcicore_pciereg(void *pch, u32 offset, u32 mask, u32 val, uint type)
u32 reg_val = 0;
pcicore_info_t *pi = (pcicore_info_t *) pch;
sbpcieregs_t *pcieregs = pi->regs.pcieregs;
- osl_t *osh = pi->osh;
+ struct osl_info *osh = pi->osh;
if (mask) {
PCI_ERROR(("PCIEREG: 0x%x writeval 0x%x\n", offset, val));
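
The nicpci.c hunks above replace the local read_pci_cfg_byte()/OSL_PCI_* macros with the kernel's pci_read_config_*() helpers. A sketch of the resulting capability walk, using the standard PCI_CAPABILITY_LIST register; the function name is hypothetical, and the header-type/status checks done by pcicore_find_pci_capability() are omitted for brevity.

#include <linux/pci.h>

static u8 demo_find_cap(struct pci_dev *pdev, u8 want)
{
	u8 cap_ptr, cap_id;

	/* was: read_pci_cfg_byte(PCI_CFG_CAPPTR) */
	pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &cap_ptr);
	while (cap_ptr) {
		pci_read_config_byte(pdev, cap_ptr, &cap_id);
		if (cap_id == want)
			return cap_ptr;
		/* next pointer lives one byte past the capability ID */
		pci_read_config_byte(pdev, cap_ptr + 1, &cap_ptr);
	}
	return 0;
}
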
diff --git a/drivers/staging/brcm80211/util/nvram/nvram_ro.c b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
index f80375cd6801..e4d41ee78e2a 100644
--- a/drivers/staging/brcm80211/util/nvram/nvram_ro.c
+++ b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
@@ -49,7 +49,7 @@ static char *findvar(char *vars, char *lim, const char *name);
/* copy flash to ram */
static void get_flash_nvram(si_t *sih, struct nvram_header *nvh)
{
- osl_t *osh;
+ struct osl_info *osh;
uint nvs, bufsz;
vars_t *new;
@@ -133,7 +133,7 @@ static char *findvar(char *vars, char *lim, const char *name)
len = strlen(name);
for (s = vars; (s < lim) && *s;) {
- if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+ if ((memcmp(s, name, len) == 0) && (s[len] == '='))
return &s[len + 1];
while (*s++)
diff --git a/drivers/staging/brcm80211/util/sbutils.c b/drivers/staging/brcm80211/util/sbutils.c
index e4c0baba553d..63c3ab1866a4 100644
--- a/drivers/staging/brcm80211/util/sbutils.c
+++ b/drivers/staging/brcm80211/util/sbutils.c
@@ -16,6 +16,9 @@
#include <linux/types.h>
#include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
@@ -87,7 +90,7 @@ static u32 _sb_coresba(si_info_t *sii)
{
u32 sbaddr = 0;
- switch (BUSTYPE(sii->pub.bustype)) {
+ switch (sii->pub.bustype) {
case SPI_BUS:
case SDIO_BUS:
sbaddr = (u32)(unsigned long)sii->curmap;
@@ -248,7 +251,7 @@ static uint _sb_scan(si_info_t *sii, u32 sba, void *regs, uint bus, u32 sbba,
else {
/* Older chips */
SI_ERROR(("sb_chip2numcores: unsupported chip "
- "0x%x\n", CHIPID(sii->pub.chip)));
+ "0x%x\n", sii->pub.chip));
ASSERT(0);
numcores = 1;
}
@@ -344,7 +347,7 @@ static void *_sb_setcoreidx(si_info_t *sii, uint coreidx)
u32 sbaddr = sii->coresba[coreidx];
void *regs;
- switch (BUSTYPE(sii->pub.bustype)) {
+ switch (sii->pub.bustype) {
#ifdef BCMSDIO
case SPI_BUS:
case SDIO_BUS:
@@ -409,8 +412,8 @@ bool sb_taclear(si_t *sih, bool details)
sii = SI_INFO(sih);
- if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
- (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {
+ if ((sii->pub.bustype == SDIO_BUS) ||
+ (sii->pub.bustype == SPI_BUS)) {
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
diff --git a/drivers/staging/brcm80211/util/siutils.c b/drivers/staging/brcm80211/util/siutils.c
index f3ea7e1a7aef..b66de9b35a5a 100644
--- a/drivers/staging/brcm80211/util/siutils.c
+++ b/drivers/staging/brcm80211/util/siutils.c
@@ -14,11 +14,16 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
#include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
@@ -53,7 +58,7 @@
#endif
/* local prototypes */
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
void *regs, uint bustype, void *sdh, char **vars,
uint *varsz);
static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid,
@@ -81,8 +86,8 @@ static u32 si_gpioreservation;
* vars - pointer to a pointer area for "environment" variables
* varsz - pointer to int to return the size of the vars
*/
-si_t *si_attach(uint devid, osl_t *osh, void *regs, uint bustype, void *sdh,
- char **vars, uint *varsz)
+si_t *si_attach(uint devid, struct osl_info *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz)
{
si_info_t *sii;
@@ -113,12 +118,12 @@ static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid,
#ifndef BRCM_FULLMAC
/* kludge to enable the clock on the 4306 which lacks a slowclock */
- if (BUSTYPE(bustype) == PCI_BUS && !si_ispcie(sii))
+ if (bustype == PCI_BUS && !si_ispcie(sii))
si_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
#endif
#if defined(BCMSDIO)
- if (BUSTYPE(bustype) == SDIO_BUS) {
+ if (bustype == SDIO_BUS) {
int err;
u8 clkset;
@@ -220,7 +225,7 @@ static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
i, cid, crev, sii->coresba[i], sii->regs[i]));
- if (BUSTYPE(bustype) == PCI_BUS) {
+ if (bustype == PCI_BUS) {
if (cid == PCI_CORE_ID) {
pciidx = i;
pcirev = crev;
@@ -232,8 +237,8 @@ static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
}
}
#ifdef BCMSDIO
- else if (((BUSTYPE(bustype) == SDIO_BUS) ||
- (BUSTYPE(bustype) == SPI_BUS)) &&
+ else if (((bustype == SDIO_BUS) ||
+ (bustype == SPI_BUS)) &&
((cid == PCMCIA_CORE_ID) || (cid == SDIOD_CORE_ID))) {
sii->pub.buscorerev = crev;
sii->pub.buscoretype = cid;
@@ -255,7 +260,7 @@ static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
* or downloaded code was
* already running.
*/
- if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
si_core_disable(&sii->pub, 0);
@@ -281,7 +286,7 @@ static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
sii->pub.buscoretype, sii->pub.buscorerev));
/* fixup necessary chip/core configurations */
- if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (sii->pub.bustype == PCI_BUS) {
if (SI_FAST(sii)) {
if (!sii->pch) {
sii->pch = (void *)pcicore_init(
@@ -308,10 +313,10 @@ static __used void si_nvram_process(si_info_t *sii, char *pvars)
uint w = 0;
/* get boardtype and boardrev */
- switch (BUSTYPE(sii->pub.bustype)) {
+ switch (sii->pub.bustype) {
case PCI_BUS:
/* do a pci config read to get subsystem id and subvendor id */
- w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(u32));
+ pci_read_config_dword(sii->osh->pdev, PCI_CFG_SVID, &w);
/* Let nvram variables override subsystem Vend/ID */
sii->pub.boardvendor = (u16)si_getdevpathintvar(&sii->pub,
"boardvendor");
@@ -364,7 +369,7 @@ static __used void si_nvram_process(si_info_t *sii, char *pvars)
/* this will make Sonics calls directly, since Sonics is no longer supported in the Si abstraction */
/* this has been customized for the bcm 4329 ONLY */
#ifdef BCMSDIO
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
void *regs, uint bustype, void *sdh,
char **vars, uint *varsz)
{
@@ -376,7 +381,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
ASSERT(GOODREGS(regs));
- bzero((unsigned char *) sii, sizeof(si_info_t));
+ memset((unsigned char *) sii, 0, sizeof(si_info_t));
savewin = 0;
@@ -390,11 +395,6 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
cc = (chipcregs_t *) sii->curmap;
sih->bustype = bustype;
- if (bustype != BUSTYPE(bustype)) {
- SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
- return NULL;
- }
-
/* bus/core/clk setup for register access */
if (!si_buscore_prep(sii, bustype, devid, sdh)) {
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
@@ -414,7 +414,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
- if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) &&
+ if ((sih->chip == BCM4329_CHIP_ID) &&
(sih->chippkg != BCM4329_289PIN_PKG_ID))
sih->chippkg = BCM4329_182PIN_PKG_ID;
@@ -444,7 +444,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
/* Init nvram from sprom/otp if they exist */
if (srom_var_init
- (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+ (&sii->pub, bustype, regs, sii->osh, vars, varsz)) {
SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
goto exit;
}
@@ -499,7 +499,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
}
#else /* BCMSDIO */
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
void *regs, uint bustype, void *sdh,
char **vars, uint *varsz)
{
@@ -511,7 +511,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
ASSERT(GOODREGS(regs));
- bzero((unsigned char *) sii, sizeof(si_info_t));
+ memset((unsigned char *) sii, 0, sizeof(si_info_t));
savewin = 0;
@@ -522,30 +522,29 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
sii->osh = osh;
/* check to see if we are a si core mimic'ing a pci core */
- if ((bustype == PCI_BUS) &&
- (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(u32)) ==
- 0xffffffff)) {
- SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " "devid:0x%x\n", __func__, devid));
- bustype = SI_BUS;
+ if (bustype == PCI_BUS) {
+ pci_read_config_dword(sii->osh->pdev, PCI_SPROM_CONTROL, &w);
+ if (w == 0xffffffff) {
+ SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
+ " switching to SI devid:0x%x\n",
+ __func__, devid));
+ bustype = SI_BUS;
+ }
}
/* find Chipcommon address */
if (bustype == PCI_BUS) {
- savewin =
- OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(u32));
+ pci_read_config_dword(sii->osh->pdev, PCI_BAR0_WIN, &savewin);
if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
savewin = SI_ENUM_BASE;
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN,
+ SI_ENUM_BASE);
cc = (chipcregs_t *) regs;
} else {
cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
}
sih->bustype = bustype;
- if (bustype != BUSTYPE(bustype)) {
- SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
- return NULL;
- }
/* bus/core/clk setup for register access */
if (!si_buscore_prep(sii, bustype, devid, sdh)) {
@@ -569,7 +568,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
sih->issim = IS_SIM(sih->chippkg);
/* scan for cores */
- if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ if (sii->pub.socitype == SOCI_AI) {
SI_MSG(("Found chip type AI (0x%08x)\n", w));
/* pass chipc address instead of original core base */
ai_scan(&sii->pub, (void *)cc, devid);
@@ -592,10 +591,10 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
/* assume current core is CC */
if ((sii->pub.ccrev == 0x25)
&&
- ((CHIPID(sih->chip) == BCM43236_CHIP_ID
- || CHIPID(sih->chip) == BCM43235_CHIP_ID
- || CHIPID(sih->chip) == BCM43238_CHIP_ID)
- && (CHIPREV(sii->pub.chiprev) <= 2))) {
+ ((sih->chip == BCM43236_CHIP_ID
+ || sih->chip == BCM43235_CHIP_ID
+ || sih->chip == BCM43238_CHIP_ID)
+ && (sii->pub.chiprev <= 2))) {
if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
uint clkdiv;
@@ -613,7 +612,7 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
/* Init nvram from sprom/otp if they exist */
if (srom_var_init
- (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+ (&sii->pub, bustype, regs, sii->osh, vars, varsz)) {
SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
goto exit;
}
@@ -651,10 +650,10 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
pcicore_attach(sii->pch, pvars, SI_DOATTACH);
}
- if ((CHIPID(sih->chip) == BCM43224_CHIP_ID) ||
- (CHIPID(sih->chip) == BCM43421_CHIP_ID)) {
+ if ((sih->chip == BCM43224_CHIP_ID) ||
+ (sih->chip == BCM43421_CHIP_ID)) {
/* enable 12 mA drive strength for 43224 and set chipControl register bit 15 */
- if (CHIPREV(sih->chiprev) == 0) {
+ if (sih->chiprev == 0) {
SI_MSG(("Applying 43224A0 WARs\n"));
si_corereg(sih, SI_CC_IDX,
offsetof(chipcregs_t, chipcontrol),
@@ -663,28 +662,28 @@ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
- if (CHIPREV(sih->chiprev) >= 1) {
+ if (sih->chiprev >= 1) {
SI_MSG(("Applying 43224B0+ WARs\n"));
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
- if (CHIPID(sih->chip) == BCM4313_CHIP_ID) {
+ if (sih->chip == BCM4313_CHIP_ID) {
/* enable 12 mA drive strength for 4313 and set chipControl register bit 1 */
SI_MSG(("Applying 4313 WARs\n"));
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}
- if (CHIPID(sih->chip) == BCM4331_CHIP_ID) {
+ if (sih->chip == BCM4331_CHIP_ID) {
/* Enable Ext PA lines depending on chip package option */
si_chipcontrl_epa4331(sih, true);
}
return sii;
exit:
- if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sih->bustype == PCI_BUS) {
if (sii->pch)
pcicore_deinit(sii->pch);
sii->pch = NULL;
@@ -708,17 +707,17 @@ void si_detach(si_t *sih)
if (sii == NULL)
return;
- if (BUSTYPE(sih->bustype) == SI_BUS)
+ if (sih->bustype == SI_BUS)
for (idx = 0; idx < SI_MAXCORES; idx++)
if (sii->regs[idx]) {
- REG_UNMAP(sii->regs[idx]);
+ iounmap(sii->regs[idx]);
sii->regs[idx] = NULL;
}
#ifndef BRCM_FULLMAC
nvram_exit((void *)si_local); /* free up nvram buffers */
- if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sih->bustype == PCI_BUS) {
if (sii->pch)
pcicore_deinit(sii->pch);
sii->pch = NULL;
@@ -730,7 +729,7 @@ void si_detach(si_t *sih)
kfree(sii);
}
-void *si_osh(si_t *sih)
+struct osl_info *si_osh(si_t *sih)
{
si_info_t *sii;
@@ -766,7 +765,7 @@ void si_deregister_intr_callback(si_t *sih)
uint si_flag(si_t *sih)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_flag(sih);
else {
ASSERT(0);
@@ -776,7 +775,7 @@ uint si_flag(si_t *sih)
void si_setint(si_t *sih, int siflag)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
ai_setint(sih, siflag);
else
ASSERT(0);
@@ -808,7 +807,7 @@ bool si_backplane64(si_t *sih)
#ifndef BCMSDIO
uint si_corerev(si_t *sih)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_corerev(sih);
else {
ASSERT(0);
@@ -851,7 +850,7 @@ void *si_setcore(si_t *sih, uint coreid, uint coreunit)
if (!GOODIDX(idx))
return NULL;
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_setcoreidx(sih, idx);
else {
#ifdef BCMSDIO
@@ -866,7 +865,7 @@ void *si_setcore(si_t *sih, uint coreid, uint coreunit)
#ifndef BCMSDIO
void *si_setcoreidx(si_t *sih, uint coreidx)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_setcoreidx(sih, coreidx);
else {
ASSERT(0);
@@ -918,7 +917,7 @@ void si_restore_core(si_t *sih, uint coreid, uint intr_val)
u32 si_core_cflags(si_t *sih, u32 mask, u32 val)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_core_cflags(sih, mask, val);
else {
ASSERT(0);
@@ -928,7 +927,7 @@ u32 si_core_cflags(si_t *sih, u32 mask, u32 val)
u32 si_core_sflags(si_t *sih, u32 mask, u32 val)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_core_sflags(sih, mask, val);
else {
ASSERT(0);
@@ -938,7 +937,7 @@ u32 si_core_sflags(si_t *sih, u32 mask, u32 val)
bool si_iscoreup(si_t *sih)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_iscoreup(sih);
else {
#ifdef BCMSDIO
@@ -953,7 +952,7 @@ bool si_iscoreup(si_t *sih)
void si_write_wrapperreg(si_t *sih, u32 offset, u32 val)
{
/* only for 4319, no requirement for SOCI_SB */
- if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ if (sih->socitype == SOCI_AI) {
ai_write_wrap_reg(sih, offset, val);
}
}
@@ -961,7 +960,7 @@ void si_write_wrapperreg(si_t *sih, u32 offset, u32 val)
uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
return ai_corereg(sih, coreidx, regoff, mask, val);
else {
#ifdef BCMSDIO
@@ -976,7 +975,7 @@ uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
void si_core_disable(si_t *sih, u32 bits)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
ai_core_disable(sih, bits);
#ifdef BCMSDIO
else
@@ -986,7 +985,7 @@ void si_core_disable(si_t *sih, u32 bits)
void si_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
- if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ if (sih->socitype == SOCI_AI)
ai_core_reset(sih, bits, resetbits);
#ifdef BCMSDIO
else
@@ -1043,8 +1042,8 @@ void si_watchdog(si_t *sih, uint ticks)
if (PMUCTL_ENAB(sih)) {
- if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
- (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+ if ((sih->chip == BCM4319_CHIP_ID) &&
+ (sih->chiprev == 0) && (ticks != 0)) {
si_corereg(sih, SI_CC_IDX,
offsetof(chipcregs_t, clk_ctl_st), ~0, 0x2);
si_setcore(sih, USB20D_CORE_ID, 0);
@@ -1085,16 +1084,18 @@ void si_watchdog(si_t *sih, uint ticks)
static uint si_slowclk_src(si_info_t *sii)
{
chipcregs_t *cc;
+ u32 val;
ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
if (sii->pub.ccrev < 6) {
- if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
- (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(u32))
- & PCI_CFG_GPIO_SCS))
- return SCC_SS_PCI;
- else
- return SCC_SS_XTAL;
+ if (sii->pub.bustype == PCI_BUS) {
+ pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT,
+ &val);
+ if (val & PCI_CFG_GPIO_SCS)
+ return SCC_SS_PCI;
+ }
+ return SCC_SS_XTAL;
} else if (sii->pub.ccrev < 10) {
cc = (chipcregs_t *) si_setcoreidx(&sii->pub, sii->curidx);
return R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK;
@@ -1264,7 +1265,7 @@ int si_clkctl_xtal(si_t *sih, uint what, bool on)
sii = SI_INFO(sih);
- switch (BUSTYPE(sih->bustype)) {
+ switch (sih->bustype) {
#ifdef BCMSDIO
case SDIO_BUS:
@@ -1276,12 +1277,9 @@ int si_clkctl_xtal(si_t *sih, uint what, bool on)
if (PCIE(sii))
return -1;
- in = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_IN, sizeof(u32));
- out =
- OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(u32));
- outen =
- OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN,
- sizeof(u32));
+ pci_read_config_dword(sii->osh->pdev, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
@@ -1302,18 +1300,18 @@ int si_clkctl_xtal(si_t *sih, uint what, bool on)
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
- sizeof(u32), out);
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
- sizeof(u32), outen);
+ pci_write_config_dword(sii->osh->pdev,
+ PCI_GPIO_OUT, out);
+ pci_write_config_dword(sii->osh->pdev,
+ PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
- sizeof(u32), out);
+ pci_write_config_dword(sii->osh->pdev,
+ PCI_GPIO_OUT, out);
mdelay(2);
}
} else {
@@ -1321,10 +1319,10 @@ int si_clkctl_xtal(si_t *sih, uint what, bool on)
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
- sizeof(u32), out);
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
- sizeof(u32), outen);
+ pci_write_config_dword(sii->osh->pdev,
+ PCI_GPIO_OUT, out);
+ pci_write_config_dword(sii->osh->pdev,
+ PCI_GPIO_OUTEN, outen);
}
default:
@@ -1378,7 +1376,7 @@ static bool _si_clkctl_cc(si_info_t *sii, uint mode)
INTR_OFF(sii, intr_val);
origidx = sii->curidx;
- if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
+ if ((sii->pub.bustype == SI_BUS) &&
si_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
(si_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
goto done;
@@ -1460,7 +1458,7 @@ int si_devpath(si_t *sih, char *path, int size)
if (!path || size <= 0)
return -1;
- switch (BUSTYPE(sih->bustype)) {
+ switch (sih->bustype) {
case SI_BUS:
case JTAG_BUS:
slen = snprintf(path, (size_t) size, "sb/%u/", si_coreidx(sih));
@@ -1550,7 +1548,7 @@ static __used bool si_ispcie(si_info_t *sii)
{
u8 cap_ptr;
- if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ if (sii->pub.bustype != PCI_BUS)
return false;
cap_ptr =
@@ -1617,7 +1615,7 @@ void si_pci_up(si_t *sih)
sii = SI_INFO(sih);
/* if not pci bus, we're done */
- if (BUSTYPE(sih->bustype) != PCI_BUS)
+ if (sih->bustype != PCI_BUS)
return;
if (PCI_FORCEHT(sii))
@@ -1646,7 +1644,7 @@ void si_pci_down(si_t *sih)
sii = SI_INFO(sih);
/* if not pci bus, we're done */
- if (BUSTYPE(sih->bustype) != PCI_BUS)
+ if (sih->bustype != PCI_BUS)
return;
/* release FORCEHT since chip is going to "down" state */
@@ -1669,7 +1667,7 @@ void si_pci_setup(si_t *sih, uint coremask)
sii = SI_INFO(sih);
- if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ if (sii->pub.bustype != PCI_BUS)
return;
ASSERT(PCI(sii) || PCIE(sii));
@@ -1692,9 +1690,9 @@ void si_pci_setup(si_t *sih, uint coremask)
*/
if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- w = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_MASK, sizeof(u32));
+ pci_read_config_dword(sii->osh->pdev, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_MASK, sizeof(u32), w);
+ pci_write_config_dword(sii->osh->pdev, PCI_INT_MASK, w);
} else {
/* set sbintvec bit for our flag number */
si_setint(sih, siflag);
@@ -1731,7 +1729,7 @@ int si_pci_fixcfg(si_t *sih)
si_info_t *sii = SI_INFO(sih);
- ASSERT(BUSTYPE(sii->pub.bustype) == PCI_BUS);
+ ASSERT(sii->pub.bustype == PCI_BUS);
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
/* save the current index */
@@ -1777,7 +1775,7 @@ u32 si_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
* ignore reservation if it's high priority (e.g., test apps)
*/
if ((priority != GPIO_HI_PRIORITY) &&
- (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ (sih->bustype == SI_BUS) && (val || mask)) {
mask = priority ? (si_gpioreservation & mask) :
((si_gpioreservation | mask) & ~(si_gpioreservation));
val &= mask;
@@ -1929,10 +1927,10 @@ bool si_deviceremoved(si_t *sih)
sii = SI_INFO(sih);
- switch (BUSTYPE(sih->bustype)) {
+ switch (sih->bustype) {
case PCI_BUS:
ASSERT(sii->osh != NULL);
- w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(u32));
+ pci_read_config_dword(sii->osh->pdev, PCI_CFG_VID, &w);
if ((w & 0xFFFF) != VENDOR_BROADCOM)
return true;
break;
@@ -1959,7 +1957,7 @@ bool si_is_sprom_available(si_t *sih)
return sromctrl & SRC_PRESENT;
}
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
return (sih->chipst & CST4329_SPROM_SEL) != 0;
case BCM4319_CHIP_ID:
@@ -1979,7 +1977,7 @@ bool si_is_sprom_available(si_t *sih)
bool si_is_otp_disabled(si_t *sih)
{
- switch (CHIPID(sih->chip)) {
+ switch (sih->chip) {
case BCM4329_CHIP_ID:
return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
CST4329_OTP_PWRDN;
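Most of the siutils.c churn above is a mechanical conversion: OSL_PCI_READ_CONFIG()/OSL_PCI_WRITE_CONFIG() calls through the osl handle become pci_read_config_dword()/pci_write_config_dword() on sii->osh->pdev, so the value now arrives through an out-parameter and the return becomes a status code. A small userspace sketch of that shape change follows; the helper names and the fake config space are made up for illustration and are not the real OSL or PCI core API.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_config_space[64] = { [0] = 0x4329u };  /* arbitrary value */

/* Old-style wrapper: the config word comes back as the return value. */
static uint32_t osl_pci_read_config(unsigned int offset, unsigned int size)
{
        (void)size;             /* these call sites always read 4 bytes */
        return fake_config_space[offset / 4];
}

/* New-style helper mirroring pci_read_config_dword(): status as the return,
 * value written through the pointer. */
static int pci_read_config_dword_like(unsigned int offset, uint32_t *val)
{
        *val = fake_config_space[offset / 4];
        return 0;
}

int main(void)
{
        uint32_t w;

        w = osl_pci_read_config(0, sizeof(uint32_t));
        printf("old style: 0x%08x\n", (unsigned int)w);

        if (pci_read_config_dword_like(0, &w) == 0)
                printf("new style: 0x%08x\n", (unsigned int)w);

        return 0;
}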
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad47326d6dc..1502d80f6f78 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO
config COMEDI_NI_ATMIO
tristate "NI AT-MIO E series ISA-PNP card support"
depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+ select COMEDI_8255
default N
---help---
Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+ select COMEDI_8255
+ select COMEDI_FC
default N
---help---
Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS
config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+ select COMEDI_8255
select COMEDI_FC
default N
---help---
@@ -1268,7 +1272,6 @@ config COMEDI_MITE
config COMEDI_NI_TIO
tristate "NI general purpose counter support"
depends on COMEDI_MITE
- select COMEDI_8255
default N
---help---
Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 4a29ed737e3f..dca861ee0466 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -117,8 +117,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module)) {
- printk
- (KERN_INFO "comedi: failed to increment module count, skipping\n");
+ printk(KERN_INFO "comedi: failed to increment module count, skipping\n");
continue;
}
if (driv->num_names) {
@@ -205,9 +204,8 @@ int comedi_driver_unregister(struct comedi_driver *driver)
mutex_lock(&dev->mutex);
if (dev->attached && dev->driver == driver) {
if (dev->use_count)
- printk
- (KERN_WARNING "BUG! detaching device with use_count=%d\n",
- dev->use_count);
+ printk(KERN_WARNING "BUG! detaching device with use_count=%d\n",
+ dev->use_count);
comedi_device_detach(dev);
}
mutex_unlock(&dev->mutex);
@@ -442,7 +440,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned i;
for (i = 0; i < async->n_buf_pages; ++i) {
if (async->buf_page_list[i].virt_addr) {
- clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
+ clear_bit(PG_reserved,
+ &(virt_to_page(async->buf_page_list[i].
+ virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
@@ -470,10 +470,8 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
struct page **pages = NULL;
async->buf_page_list =
- vmalloc(sizeof(struct comedi_buf_page) * n_pages);
+ vzalloc(sizeof(struct comedi_buf_page) * n_pages);
if (async->buf_page_list) {
- memset(async->buf_page_list, 0,
- sizeof(struct comedi_buf_page) * n_pages);
pages = vmalloc(sizeof(struct page *) * n_pages);
}
if (pages) {
@@ -496,8 +494,10 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
break;
set_bit(PG_reserved,
- &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
- pages[i] = virt_to_page(async->buf_page_list[i].virt_addr);
+ &(virt_to_page(async->buf_page_list[i].
+ virt_addr)->flags));
+ pages[i] = virt_to_page(async->buf_page_list[i].
+ virt_addr);
}
}
if (i == n_pages) {
@@ -514,7 +514,10 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
NULL) {
break;
}
- clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
+ clear_bit(PG_reserved,
+ &(virt_to_page(async->
+ buf_page_list[i].
+ virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
@@ -646,8 +649,7 @@ unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
{
if ((int)(async->buf_write_count + nbytes -
async->buf_write_alloc_count) > 0) {
- printk
- (KERN_INFO "comedi: attempted to write-free more bytes than have been write-allocated.\n");
+ printk(KERN_INFO "comedi: attempted to write-free more bytes than have been write-allocated.\n");
nbytes = async->buf_write_alloc_count - async->buf_write_count;
}
async->buf_write_count += nbytes;
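One non-cosmetic change in comedi's drivers.c above: the vmalloc()-then-memset() pair for buf_page_list becomes a single vzalloc(), which returns already-zeroed memory. The same shape in plain userspace C, with calloc() standing in for vzalloc() purely as an illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_page {
        void *virt_addr;
        unsigned long dma_addr;
};

int main(void)
{
        size_t n_pages = 8;

        /* Old pattern: allocate, then clear by hand. */
        struct buf_page *old_list = malloc(sizeof(*old_list) * n_pages);
        if (old_list)
                memset(old_list, 0, sizeof(*old_list) * n_pages);

        /* New pattern: one call that hands back zeroed memory. */
        struct buf_page *new_list = calloc(n_pages, sizeof(*new_list));

        printf("old %p, new %p\n", (void *)old_list, (void *)new_list);
        free(old_list);
        free(new_list);
        return 0;
}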
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
index 7361d508bf37..0e6affd95962 100644
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
+++ b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
@@ -1008,7 +1008,7 @@ int i_APCI1710_InsnWriteEnableDisableTorCounter(struct comedi_device *dev,
b_ExternGate = (unsigned char) data[3];
b_CycleMode = (unsigned char) data[4];
b_InterruptEnable = (unsigned char) data[5];
- i_ReturnValue = insn->n;;
+ i_ReturnValue = insn->n;
devpriv->tsk_Current = current; /* Save the current process task structure */
/**************************/
/* Test the module number */
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 93d7c056741d..76f2483871a7 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -2710,10 +2710,10 @@ static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it)
} else {
outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
}
- /* Enable the interrupt for the controler */
+ /* Enable the interrupt for the controller */
dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
- printk("\nEnable the interrupt for the controler");
+ printk("\nEnable the interrupt for the controller");
}
printk("\nRead Eeprom");
i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip,
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
index 912bc0fc54bf..a76ed2553fb4 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
@@ -225,7 +225,7 @@ int i_APCI1710_Reset(struct comedi_device *dev)
devpriv->s_BoardInfos.b_BoardVersion = 1;
- /* Enable the interrupt for the controler */
+ /* Enable the interrupt for the controller */
dw_Dummy = inl(devpriv->s_BoardInfos.ui_Address + 0x38);
outl(dw_Dummy | 0x2000, devpriv->s_BoardInfos.ui_Address + 0x38);
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 2a8a6c730920..62f421a06f05 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -2850,7 +2850,7 @@ static int i_APCI1500_Reset(struct comedi_device *dev)
i_Logic = 0;
i_CounterLogic = 0;
i_InterruptMask = 0;
- i_InputChannel = 0;;
+ i_InputChannel = 0;
i_TimerCounter1Enabled = 0;
i_TimerCounter2Enabled = 0;
i_WatchdogCounter3Enabled = 0;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
index 12fcc35ecc52..8a584a014b0b 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
@@ -335,7 +335,7 @@ int i_APCI1516_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subde
return -EINVAL;
} /* if else data[3]==1) */
} /* if else data[3]==0) */
- return (insn->n);;
+ return (insn->n);
}
/*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index b943a06e70dc..a93e2349ad3a 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3011,7 +3011,7 @@ int i_APCI3200_Reset(struct comedi_device *dev)
outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
- /* Enable the interrupt for the controler */
+ /* Enable the interrupt for the controller */
dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
outl(0, devpriv->i_IobaseAddon); /* Resets the output */
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 356a1891e2e7..acaceb01629a 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -339,7 +339,7 @@ int i_APCI3501_ConfigAnalogOutput(struct comedi_device *dev, struct comedi_subde
int i_APCI3501_WriteAnalogOutput(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- unsigned int ul_Command1 = 0, ul_Channel_no, ul_Polarity, ul_DAC_Ready = 0;;
+ unsigned int ul_Command1 = 0, ul_Channel_no, ul_Polarity, ul_DAC_Ready = 0;
ul_Channel_no = CR_CHAN(insn->chanspec);
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 5d064577b2f1..7edeb1103dc8 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -971,7 +971,7 @@ static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (thisboard->ao_chans > 0) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
- s->n_chan = thisboard->ao_chans;;
+ s->n_chan = thisboard->ao_chans;
s->maxdata = (1 << thisboard->ao_bits) - 1;
s->range_table = &pci230_ao_range;
s->insn_write = &pci230_ao_winsn;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 0345b4caba73..bb93685d8b93 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -169,7 +169,7 @@ static int das16cs_attach(struct comedi_device *dev,
if (!link)
return -EIO;
- dev->iobase = link->resource[0]->start;;
+ dev->iobase = link->resource[0]->start;
printk("I/O base=0x%04lx ", dev->iobase);
printk("fingerprint:\n");
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index cfcbd9b8f393..d8aefb23d6b9 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -370,7 +370,7 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS];
memset(devs_opened, 0, sizeof(devs_opened));
- devpriv->name[0] = 0;;
+ devpriv->name[0] = 0;
/* Loop through all comedi devices specified on the command-line,
building our device list */
for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 39a6a850d63c..e4711ef54719 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -19,7 +19,7 @@
* - 16 bit
*
* only ONE PCI-20341 module possible
- * only ONE PCI-20006 module possible
+ * only ONE PCI-20006 module possible
* no extern trigger implemented
*
* NOT WORKING (but soon) only 4 on-board differential channels supported
@@ -83,11 +83,11 @@ options for PCI-20341M:
#include "../comedidev.h"
#define PCI20000_ID 0x1d
-#define PCI20341_ID 0x77
-#define PCI20006_ID 0xe3
+#define PCI20341_ID 0x77
+#define PCI20006_ID 0xe3
#define PCI20xxx_EMPTY_ID 0xff
-#define PCI20000_OFFSET 0x100
+#define PCI20000_OFFSET 0x100
#define PCI20000_MODULES 3
#define PCI20000_DIO_0 0x80
@@ -246,7 +246,7 @@ static int pci20xxx_attach(struct comedi_device *dev,
pci20006_init(dev, s, it->options[2 * i + 2],
it->options[2 * i + 3]);
printk(KERN_INFO "comedi%d: "
- "ii_pci20kc PCI-20006 module in slot %d \n",
+ "ii_pci20kc PCI-20006 module in slot %d\n",
dev->minor, i + 1);
break;
case PCI20341_ID:
@@ -255,7 +255,7 @@ static int pci20xxx_attach(struct comedi_device *dev,
pci20341_init(dev, s, it->options[2 * i + 2],
it->options[2 * i + 3]);
printk(KERN_INFO "comedi%d: "
- "ii_pci20kc PCI-20341 module in slot %d \n",
+ "ii_pci20kc PCI-20341 module in slot %d\n",
dev->minor, i + 1);
break;
default:
@@ -376,9 +376,20 @@ static int pci20341_insn_read(struct comedi_device *dev,
static const int pci20341_timebase[] = { 0x00, 0x00, 0x00, 0x04 };
static const int pci20341_settling_time[] = { 0x58, 0x58, 0x93, 0x99 };
-static const struct comedi_lrange range_bipolar0_5 = { 1, {BIP_RANGE(0.5)} };
-static const struct comedi_lrange range_bipolar0_05 = { 1, {BIP_RANGE(0.05)} };
-static const struct comedi_lrange range_bipolar0_025 = { 1, {BIP_RANGE(0.025)} };
+static const struct comedi_lrange range_bipolar0_5 = {
+ 1,
+ {BIP_RANGE(0.5)}
+};
+
+static const struct comedi_lrange range_bipolar0_05 = {
+ 1,
+ {BIP_RANGE(0.05)}
+};
+
+static const struct comedi_lrange range_bipolar0_025 = {
+ 1,
+ {BIP_RANGE(0.025)}
+};
static const struct comedi_lrange *const pci20341_ranges[] = {
&range_bipolar5,
@@ -408,12 +419,18 @@ static int pci20341_init(struct comedi_device *dev, struct comedi_subdevice *s,
s->maxdata = 0xffff;
s->range_table = pci20341_ranges[opt0];
- option = sdp->pci20341.timebase | PCI20341_REPMODE; /* depends on gain, trigger, repetition mode */
-
- writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG); /* initialize Module */
- writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS); /* set Pacer */
- writeb(option, sdp->iobase + PCI20341_OPT_REG); /* option register */
- writeb(sdp->pci20341.settling_time, sdp->iobase + PCI20341_SET_TIME_REG); /* settling time counter */
+ /* depends on gain, trigger, repetition mode */
+ option = sdp->pci20341.timebase | PCI20341_REPMODE;
+
+ /* initialize Module */
+ writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG);
+ /* set Pacer */
+ writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS);
+ /* option register */
+ writeb(option, sdp->iobase + PCI20341_OPT_REG);
+ /* settling time counter */
+ writeb(sdp->pci20341.settling_time,
+ sdp->iobase + PCI20341_SET_TIME_REG);
/* trigger not implemented */
return 0;
}
@@ -429,11 +446,15 @@ static int pci20341_insn_read(struct comedi_device *dev,
unsigned int clb; /* channel list byte */
unsigned int boarddata;
- writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG); /* write number of input channels */
+ /* write number of input channels */
+ writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG);
clb = PCI20341_DAISY_CHAIN | PCI20341_MUX | (sdp->pci20341.ai_gain << 3)
| CR_CHAN(insn->chanspec);
writeb(clb, sdp->iobase + PCI20341_CHAN_LIST);
- writeb(0x00, sdp->iobase + PCI20341_CC_RESET); /* reset settling time counter and trigger delay counter */
+
+ /* reset settling time counter and trigger delay counter */
+ writeb(0x00, sdp->iobase + PCI20341_CC_RESET);
+
writeb(0x00, sdp->iobase + PCI20341_CHAN_RESET);
/* generate Pacer */
@@ -444,9 +465,12 @@ static int pci20341_insn_read(struct comedi_device *dev,
* the whole interrupt stuff
*/
j = 0;
- readb(sdp->iobase + PCI20341_SOFT_PACER); /* generate Pacer */
+ /* generate Pacer */
+ readb(sdp->iobase + PCI20341_SOFT_PACER);
+
eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
- while ((eoc < 0x80) && j < 100) { /* poll Interrupt Flag */
+ /* poll Interrupt Flag */
+ while ((eoc < 0x80) && j < 100) {
j++;
eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
}
@@ -460,7 +484,9 @@ static int pci20341_insn_read(struct comedi_device *dev,
lo = readb(sdp->iobase + PCI20341_LDATA);
hi = readb(sdp->iobase + PCI20341_LDATA + 1);
boarddata = lo + 0x100 * hi;
- data[i] = (short)((boarddata + 0x8000) & 0xffff); /* board-data -> comedi-data */
+
+ /* board-data -> comedi-data */
+ data[i] = (short)((boarddata + 0x8000) & 0xffff);
}
return i;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 8b383ee959b2..5c6c72744167 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -54,6 +54,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
#define PCI_VENDOR_ID_JR3 0x1762
#define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
+#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
#define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
#define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
#define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
@@ -73,6 +74,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
{
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+ PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
@@ -807,6 +810,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
devpriv->n_channels = 1;
}
break;
+ case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
+ devpriv->n_channels = 1;
+ }
+ break;
case PCI_DEVICE_ID_JR3_2_CHANNEL:{
devpriv->n_channels = 2;
}
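The jr3_pci change above teaches the driver about an additional one-channel board by adding a new device ID (0x1111) to both the PCI ID table and the attach-time switch. Roughly what such a table entry looks like, as a kernel-style sketch assuming a kernel build tree; the table name is invented and only the new ID is shown.

#include <linux/module.h>
#include <linux/pci.h>

#define PCI_VENDOR_ID_JR3                0x1762
#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW  0x1111

/* The PCI core matches probe() calls against this table; the zeroed entry
 * terminates it. */
static const struct pci_device_id demo_jr3_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, demo_jr3_pci_table);

MODULE_LICENSE("GPL");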
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b241cc1f..fd274e9c7b78 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
#define PCI_DAQ_SIZE 4096
#define PCI_DAQ_SIZE_660X 8192
-MODULE_LICENSE("GPL");
-
struct mite_struct *mite_devices;
EXPORT_SYMBOL(mite_devices);
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e99a5c..54741c9e1af5 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
module_init(driver_ni6527_init_module);
module_exit(driver_ni6527_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aaf77fb..403fc0997d37 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
module_init(driver_ni_65xx_init_module);
module_exit(driver_ni_65xx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b085c4ef..ca2aeaa9449c 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
};
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034efdc6f..d8d91f90060e 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
mite_list_devices();
return -EIO;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 4d1868d04bac..0728c3c0cb0e 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -575,7 +575,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
/* grab our IRQ */
if (irq) {
isr_flags = 0;
- if (thisboard->bustype == pci_bustype)
+ if (thisboard->bustype == pci_bustype
+ || thisboard->bustype == pcmcia_bustype)
isr_flags |= IRQF_SHARED;
if (request_irq(irq, labpc_interrupt, isr_flags,
driver_labpc.driver_name, dev)) {
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c34e484..005d2fe86ee4 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
module_init(driver_pcidio_init_module);
module_exit(driver_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a381247285..9148abdad074 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
return 0;
}
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
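The mite.c hunk earlier drops its MODULE_LICENSE line, and the NI low-level drivers above (ni_6527, ni_65xx, ni_660x, ni_670x, ni_pcidio, ni_pcimio) each gain their own MODULE_AUTHOR/MODULE_DESCRIPTION/MODULE_LICENSE tags. A minimal skeleton showing where that metadata sits in a module, assuming a kernel build tree; the init/exit bodies are placeholders.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        pr_info("demo: loaded\n");
        return 0;
}

static void __exit demo_exit(void)
{
        pr_info("demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");

A module that lacks MODULE_LICENSE is treated as proprietary by the loader, taints the kernel and cannot link against GPL-only exports, so every standalone .ko needs its own tag.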
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index b0d44b547a69..a9611587460a 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -62,11 +62,10 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
unsigned input_select_bits = 0;
if (enable) {
- if (read_not_write) {
+ if (read_not_write)
input_select_bits |= Gi_Read_Acknowledges_Irq;
- } else {
+ else
input_select_bits |= Gi_Write_Acknowledges_Irq;
- }
}
ni_tio_set_bits(counter,
NITIO_Gi_Input_Select_Reg(counter->counter_index),
@@ -84,9 +83,8 @@ static void ni_tio_configure_dma(struct ni_gpct *counter, short enable,
gi_dma_config_bits |= Gi_DMA_Enable_Bit;
gi_dma_config_bits |= Gi_DMA_Int_Bit;
}
- if (read_not_write == 0) {
+ if (read_not_write == 0)
gi_dma_config_bits |= Gi_DMA_Write_Bit;
- }
ni_tio_set_bits(counter,
NITIO_Gi_DMA_Config_Reg(counter->
counter_index),
@@ -174,7 +172,7 @@ static int ni_tio_input_cmd(struct ni_gpct *counter, struct comedi_async *async)
static int ni_tio_output_cmd(struct ni_gpct *counter,
struct comedi_async *async)
{
- printk("ni_tio: output commands not yet implemented.\n");
+ printk(KERN_ERR "ni_tio: output commands not yet implemented.\n");
return -ENOTSUPP;
counter->mite_chan->dir = COMEDI_OUTPUT;
@@ -198,9 +196,8 @@ static int ni_tio_cmd_setup(struct ni_gpct *counter, struct comedi_async *async)
set_gate_source = 1;
gate_source = cmd->convert_arg;
}
- if (set_gate_source) {
+ if (set_gate_source)
retval = ni_tio_set_gate_src(counter, 0, gate_source);
- }
if (cmd->flags & TRIG_WAKE_EOS) {
ni_tio_set_bits(counter,
NITIO_Gi_Interrupt_Enable_Reg(counter->
@@ -221,22 +218,21 @@ int ni_tio_cmd(struct ni_gpct *counter, struct comedi_async *async)
spin_lock_irqsave(&counter->lock, flags);
if (counter->mite_chan == NULL) {
- printk
- ("ni_tio: commands only supported with DMA. Interrupt-driven commands not yet implemented.\n");
+ printk(KERN_ERR "ni_tio: commands only supported with DMA. Interrupt-driven commands not yet implemented.\n");
retval = -EIO;
} else {
retval = ni_tio_cmd_setup(counter, async);
if (retval == 0) {
- if (cmd->flags & CMDF_WRITE) {
+ if (cmd->flags & CMDF_WRITE)
retval = ni_tio_output_cmd(counter, async);
- } else {
+ else
retval = ni_tio_input_cmd(counter, async);
- }
}
}
spin_unlock_irqrestore(&counter->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(ni_tio_cmd);
int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd)
{
@@ -342,6 +338,7 @@ int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd)
return 0;
}
+EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
@@ -349,9 +346,8 @@ int ni_tio_cancel(struct ni_gpct *counter)
ni_tio_arm(counter, 0, 0);
spin_lock_irqsave(&counter->lock, flags);
- if (counter->mite_chan) {
+ if (counter->mite_chan)
mite_dma_disarm(counter->mite_chan);
- }
spin_unlock_irqrestore(&counter->lock, flags);
ni_tio_configure_dma(counter, 0, 0);
@@ -361,10 +357,11 @@ int ni_tio_cancel(struct ni_gpct *counter)
0x0);
return 0;
}
+EXPORT_SYMBOL_GPL(ni_tio_cancel);
- /* During buffered input counter operation for e-series, the gate interrupt is acked
- automatically by the dma controller, due to the Gi_Read/Write_Acknowledges_IRQ bits
- in the input select register. */
+ /* During buffered input counter operation for e-series, the gate
+ interrupt is acked automatically by the dma controller, due to the
+ Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */
static int should_ack_gate(struct ni_gpct *counter)
{
unsigned long flags;
@@ -372,7 +369,10 @@ static int should_ack_gate(struct ni_gpct *counter)
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x: /* not sure if 660x really supports gate interrupts (the bits are not listed in register-level manual) */
+ /* not sure if 660x really supports gate
+ interrupts (the bits are not listed
+ in register-level manual) */
+ case ni_gpct_variant_660x:
return 1;
break;
case ni_gpct_variant_e_series:
@@ -416,7 +416,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
if (gate_error) {
- /*660x don't support automatic acknowledgement of gate interrupt via dma read/write
+ /*660x don't support automatic acknowledgement
+ of gate interrupt via dma read/write
and report bogus gate errors */
if (counter->counter_dev->variant !=
ni_gpct_variant_660x) {
@@ -429,9 +430,8 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
if (tc_error)
*tc_error = 1;
}
- if (gi_status & Gi_TC_Bit) {
+ if (gi_status & Gi_TC_Bit)
ack |= Gi_TC_Interrupt_Ack_Bit;
- }
if (gi_status & Gi_Gate_Interrupt_Bit) {
if (should_ack_gate(counter))
ack |= Gi_Gate_Interrupt_Ack_Bit;
@@ -452,13 +452,14 @@ void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, int *gate_error,
NITIO_Gxx_Joint_Status2_Reg
(counter->counter_index)) &
Gi_Permanent_Stale_Bit(counter->counter_index)) {
- printk("%s: Gi_Permanent_Stale_Data detected.\n",
- __FUNCTION__);
+ printk(KERN_INFO "%s: Gi_Permanent_Stale_Data detected.\n",
+ __func__);
if (perm_stale_data)
*perm_stale_data = 1;
}
}
}
+EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
@@ -472,20 +473,19 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
&perm_stale_data, NULL);
if (gate_error) {
- printk("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
+ printk(KERN_NOTICE "%s: Gi_Gate_Error detected.\n", __func__);
s->async->events |= COMEDI_CB_OVERFLOW;
}
- if (perm_stale_data) {
+ if (perm_stale_data)
s->async->events |= COMEDI_CB_ERROR;
- }
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
if (read_register(counter,
- NITIO_Gi_DMA_Status_Reg
- (counter->counter_index)) & Gi_DRQ_Error_Bit)
- {
- printk("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
+ NITIO_Gi_DMA_Status_Reg
+ (counter->counter_index)) & Gi_DRQ_Error_Bit) {
+ printk(KERN_NOTICE "%s: Gi_DRQ_Error detected.\n",
+ __func__);
s->async->events |= COMEDI_CB_OVERFLOW;
}
break;
@@ -506,6 +506,7 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
mite_sync_input_dma(counter->mite_chan, s->async);
spin_unlock_irqrestore(&counter->lock, flags);
}
+EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
void ni_tio_set_mite_channel(struct ni_gpct *counter,
struct mite_channel *mite_chan)
@@ -516,6 +517,7 @@ void ni_tio_set_mite_channel(struct ni_gpct *counter,
counter->mite_chan = mite_chan;
spin_unlock_irqrestore(&counter->lock, flags);
}
+EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
static int __init ni_tiocmd_init_module(void)
{
@@ -529,10 +531,3 @@ static void __exit ni_tiocmd_cleanup_module(void)
}
module_exit(ni_tiocmd_cleanup_module);
-
-EXPORT_SYMBOL_GPL(ni_tio_cmd);
-EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
-EXPORT_SYMBOL_GPL(ni_tio_cancel);
-EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
-EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
-EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
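Besides the printk cleanups, the ni_tiocmd.c diff moves each EXPORT_SYMBOL_GPL() annotation to sit directly after the function it exports instead of collecting them at the end of the file, matching the usual kernel convention. A tiny illustrative sketch, again assuming a kernel build; the symbol name is made up.

#include <linux/module.h>

/* The export marker follows the definition it applies to, so readers see at
 * the function itself that it is part of the module's GPL-only interface. */
int demo_do_work(int arg)
{
        return arg * 2;
}
EXPORT_SYMBOL_GPL(demo_do_work);

MODULE_LICENSE("GPL");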
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 60ebfc3c75fd..aa8aeeee043f 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -753,7 +753,7 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_subdevice *s;
struct pci_dev *pcidev;
int ret;
- resource_size_t physLas0; /* configuation */
+ resource_size_t physLas0; /* configuration */
resource_size_t physLas1; /* data area */
resource_size_t physLcfg; /* PLX9080 */
#ifdef USE_DMA
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 3607aaee4af6..2b34daedc3d7 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -114,7 +114,7 @@ static const int s526_ports[] = {
};
struct counter_mode_register_t {
-#if defined (__LITTLE_ENDIAN_BITFIELD)
+#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned short coutSource:1;
unsigned short coutPolarity:1;
unsigned short autoLoadResetRcap:3;
@@ -207,7 +207,9 @@ static const struct s526_board s526_boards[] = {
/* this structure is for data unique to this hardware driver. If
several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
+ feel free to suggest moving the variable to the struct comedi_device
+ struct.
+*/
struct s526_private {
int data;
@@ -304,7 +306,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* int subdev_channel = 0; */
union cmReg cmReg;
- printk("comedi%d: s526: ", dev->minor);
+ printk(KERN_INFO "comedi%d: s526: ", dev->minor);
iobase = it->options[0];
if (!iobase || !request_region(iobase, S526_IOSIZE, thisboard->name)) {
@@ -317,7 +319,8 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/*** make it a little quieter, exw, 8/29/06
for (i = 0; i < S526_NUM_PORTS; i++) {
- printk("0x%02x: 0x%04x\n", ADDR_REG(s526_ports[i]), inw(ADDR_REG(s526_ports[i])));
+ printk("0x%02x: 0x%04x\n", ADDR_REG(s526_ports[i]),
+ inw(ADDR_REG(s526_ports[i])));
}
***/
@@ -402,7 +405,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->type = COMEDI_SUBD_UNUSED;
}
- printk("attached\n");
+ printk(KERN_INFO "attached\n");
return 1;
@@ -411,7 +414,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* One-shot (software trigger) */
cmReg.reg.coutSource = 0; /* out RCAP */
cmReg.reg.coutPolarity = 1; /* Polarity inverted */
- cmReg.reg.autoLoadResetRcap = 1; /* Auto load 0:disabled, 1:enabled */
+ cmReg.reg.autoLoadResetRcap = 1;/* Auto load 0:disabled, 1:enabled */
cmReg.reg.hwCtEnableSource = 3; /* NOT RCAP */
cmReg.reg.ctEnableCtrl = 2; /* Hardware */
cmReg.reg.clockSource = 2; /* Internal */
@@ -426,10 +429,12 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outw(0x0001, ADDR_CHAN_REG(REG_C0H, subdev_channel));
outw(0x3C68, ADDR_CHAN_REG(REG_C0L, subdev_channel));
- outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
- outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Load the counter from PR0 */
-
- outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset RCAP (fires one-shot) */
+ /* Reset the counter */
+ outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ /* Load the counter from PR0 */
+ outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ /* Reset RCAP (fires one-shot) */
+ outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));
#else
@@ -447,11 +452,12 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
cmReg.reg.reserved = 0;
n = 0;
- printk("Mode reg=0x%04x, 0x%04lx\n", cmReg.value, ADDR_CHAN_REG(REG_C0M,
- n));
+ printk(KERN_INFO "Mode reg=0x%04x, 0x%04lx\n",
+ cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
udelay(1000);
- printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n)));
+ printk(KERN_INFO "Read back mode reg=0x%04x\n",
+ inw(ADDR_CHAN_REG(REG_C0M, n)));
/* Load the pre-load register high word */
/* value = (short) (0x55); */
@@ -466,20 +472,23 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Reset the counter if it is software preload */
if (cmReg.reg.autoLoadResetRcap == 0) {
- outw(0x8000, ADDR_CHAN_REG(REG_C0C, n)); /* Reset the counter */
- outw(0x4000, ADDR_CHAN_REG(REG_C0C, n)); /* Load the counter from PR0 */
+ /* Reset the counter */
+ outw(0x8000, ADDR_CHAN_REG(REG_C0C, n));
+ /* Load the counter from PR0 */
+ outw(0x4000, ADDR_CHAN_REG(REG_C0C, n));
}
outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
udelay(1000);
- printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n)));
+ printk(KERN_INFO "Read back mode reg=0x%04x\n",
+ inw(ADDR_CHAN_REG(REG_C0M, n)));
#endif
- printk("Current registres:\n");
+ printk(KERN_INFO "Current registres:\n");
for (i = 0; i < S526_NUM_PORTS; i++) {
- printk("0x%02lx: 0x%04x\n", ADDR_REG(s526_ports[i]),
- inw(ADDR_REG(s526_ports[i])));
+ printk(KERN_INFO "0x%02lx: 0x%04x\n",
+ ADDR_REG(s526_ports[i]), inw(ADDR_REG(s526_ports[i])));
}
return 1;
}
@@ -494,7 +503,7 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
static int s526_detach(struct comedi_device *dev)
{
- printk("comedi%d: s526: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: s526: remove\n", dev->minor);
if (dev->iobase > 0)
release_region(dev->iobase, S526_IOSIZE);
@@ -513,7 +522,7 @@ static int s526_gpct_rinsn(struct comedi_device *dev,
/* Check if (n > 0) */
if (insn->n <= 0) {
- printk("s526: INSN_READ: n should be > 0\n");
+ printk(KERN_ERR "s526: INSN_READ: n should be > 0\n");
return -EINVAL;
}
/* Read the low word first */
@@ -522,7 +531,8 @@ static int s526_gpct_rinsn(struct comedi_device *dev,
datahigh = inw(ADDR_CHAN_REG(REG_C0H, counter_channel));
data[i] = (int)(datahigh & 0x00FF);
data[i] = (data[i] << 16) | (datalow & 0xFFFF);
-/* printk("s526 GPCT[%d]: %x(0x%04x, 0x%04x)\n", counter_channel, data[i], datahigh, datalow); */
+ /* printk("s526 GPCT[%d]: %x(0x%04x, 0x%04x)\n",
+ counter_channel, data[i], datahigh, datalow); */
}
return i;
}
@@ -536,7 +546,8 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
short value;
union cmReg cmReg;
-/* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n", subdev_channel); */
+ /* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n",
+ subdev_channel); */
for (i = 0; i < MAX_GPCT_CONFIG_DATA; i++) {
devpriv->s526_gpct_config[subdev_channel].data[i] =
@@ -554,7 +565,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
data[2]: Pre-load Register Value
data[3]: Counter Control Register
*/
- printk("s526: GPCT_INSN_CONFIG: Configuring Encoder\n");
+ printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring Encoder\n");
devpriv->s526_gpct_config[subdev_channel].app =
PositionMeasurement;
@@ -563,7 +574,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
/* One-shot (software trigger) */
cmReg.reg.coutSource = 0; /* out RCAP */
cmReg.reg.coutPolarity = 1; /* Polarity inverted */
- cmReg.reg.autoLoadResetRcap = 0; /* Auto load disabled */
+ cmReg.reg.autoLoadResetRcap = 0;/* Auto load disabled */
cmReg.reg.hwCtEnableSource = 3; /* NOT RCAP */
cmReg.reg.ctEnableCtrl = 2; /* Hardware */
cmReg.reg.clockSource = 2; /* Internal */
@@ -578,10 +589,13 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
outw(0x0001, ADDR_CHAN_REG(REG_C0H, subdev_channel));
outw(0x3C68, ADDR_CHAN_REG(REG_C0L, subdev_channel));
- outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
- outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Load the counter from PR0 */
+ /* Reset the counter */
+ outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ /* Load the counter from PR0 */
+ outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
- outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset RCAP (fires one-shot) */
+ /* Reset RCAP (fires one-shot) */
+ outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));
#endif
@@ -594,30 +608,34 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
/* Reset the counter if it is software preload */
if (cmReg.reg.autoLoadResetRcap == 0) {
- outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
-/* outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); Load the counter from PR0 */
+ /* Reset the counter */
+ outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ /* Load the counter from PR0
+ * outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ */
}
#else
- cmReg.reg.countDirCtrl = 0; /* 0 quadrature, 1 software control */
+ /* 0 quadrature, 1 software control */
+ cmReg.reg.countDirCtrl = 0;
/* data[1] contains GPCT_X1, GPCT_X2 or GPCT_X4 */
- if (insn->data[1] == GPCT_X2) {
+ if (insn->data[1] == GPCT_X2)
cmReg.reg.clockSource = 1;
- } else if (insn->data[1] == GPCT_X4) {
+ else if (insn->data[1] == GPCT_X4)
cmReg.reg.clockSource = 2;
- } else {
+ else
cmReg.reg.clockSource = 0;
- }
/* When to take into account the indexpulse: */
- if (insn->data[2] == GPCT_IndexPhaseLowLow) {
+ /*if (insn->data[2] == GPCT_IndexPhaseLowLow) {
} else if (insn->data[2] == GPCT_IndexPhaseLowHigh) {
} else if (insn->data[2] == GPCT_IndexPhaseHighLow) {
} else if (insn->data[2] == GPCT_IndexPhaseHighHigh) {
- }
+ }*/
/* Take into account the index pulse? */
if (insn->data[3] == GPCT_RESET_COUNTER_ON_INDEX)
- cmReg.reg.autoLoadResetRcap = 4; /* Auto load with INDEX^ */
+ /* Auto load with INDEX^ */
+ cmReg.reg.autoLoadResetRcap = 4;
/* Set Counter Mode Register */
cmReg.value = (short)(insn->data[1] & 0xFFFF);
@@ -638,8 +656,10 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
}
/* Reset the counter if it is software preload */
if (cmReg.reg.autoLoadResetRcap == 0) {
- outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
- outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Load the counter from PR0 */
+ /* Reset the counter */
+ outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+ /* Load the counter from PR0 */
+ outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
}
#endif
break;
@@ -652,7 +672,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
data[3]: Pre-load Register 1 Value
data[4]: Counter Control Register
*/
- printk("s526: GPCT_INSN_CONFIG: Configuring SPG\n");
+ printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring SPG\n");
devpriv->s526_gpct_config[subdev_channel].app =
SinglePulseGeneration;
@@ -697,7 +717,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
data[3]: Pre-load Register 1 Value
data[4]: Counter Control Register
*/
- printk("s526: GPCT_INSN_CONFIG: Configuring PTG\n");
+ printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring PTG\n");
devpriv->s526_gpct_config[subdev_channel].app =
PulseTrainGeneration;
@@ -735,7 +755,7 @@ static int s526_gpct_insn_config(struct comedi_device *dev,
break;
default:
- printk("s526: unsupported GPCT_insn_config\n");
+ printk(KERN_ERR "s526: unsupported GPCT_insn_config\n");
return -EINVAL;
break;
}
@@ -751,20 +771,21 @@ static int s526_gpct_winsn(struct comedi_device *dev,
short value;
union cmReg cmReg;
- printk("s526: GPCT_INSN_WRITE on channel %d\n", subdev_channel);
+ printk(KERN_INFO "s526: GPCT_INSN_WRITE on channel %d\n",
+ subdev_channel);
cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel));
- printk("s526: Counter Mode Register: %x\n", cmReg.value);
+ printk(KERN_INFO "s526: Counter Mode Register: %x\n", cmReg.value);
/* Check what Application of Counter this channel is configured for */
switch (devpriv->s526_gpct_config[subdev_channel].app) {
case PositionMeasurement:
- printk("S526: INSN_WRITE: PM\n");
+ printk(KERN_INFO "S526: INSN_WRITE: PM\n");
outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
subdev_channel));
outw(0xFFFF & (*data), ADDR_CHAN_REG(REG_C0L, subdev_channel));
break;
case SinglePulseGeneration:
- printk("S526: INSN_WRITE: SPG\n");
+ printk(KERN_INFO "S526: INSN_WRITE: SPG\n");
outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
subdev_channel));
outw(0xFFFF & (*data), ADDR_CHAN_REG(REG_C0L, subdev_channel));
@@ -777,14 +798,14 @@ static int s526_gpct_winsn(struct comedi_device *dev,
The above periods must be expressed as a multiple of the
pulse frequency on the selected source
*/
- printk("S526: INSN_WRITE: PTG\n");
+ printk(KERN_INFO "S526: INSN_WRITE: PTG\n");
if ((insn->data[1] > insn->data[0]) && (insn->data[0] > 0)) {
(devpriv->s526_gpct_config[subdev_channel]).data[0] =
insn->data[0];
(devpriv->s526_gpct_config[subdev_channel]).data[1] =
insn->data[1];
} else {
- printk("s526: INSN_WRITE: PTG: Problem with Pulse params -> %d %d\n",
+ printk(KERN_ERR "s526: INSN_WRITE: PTG: Problem with Pulse params -> %d %d\n",
insn->data[0], insn->data[1]);
return -EINVAL;
}
@@ -873,7 +894,7 @@ static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
if (i == TIMEOUT) {
/* printk() should be used instead of printk()
* whenever the code can be called from real-time. */
- printk("s526: ADC(0x%04x) timeout\n",
+ printk(KERN_ERR "s526: ADC(0x%04x) timeout\n",
inw(ADDR_REG(REG_ISR)));
return -ETIMEDOUT;
}
@@ -906,11 +927,14 @@ static int s526_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
* very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; i++) {
/* a typical programming sequence */
-/* outw(data[i], dev->iobase + REG_ADD); write the data to preload register */
- outw(data[i], ADDR_REG(REG_ADD)); /* write the data to preload register */
+ /* write the data to preload register
+ * outw(data[i], dev->iobase + REG_ADD);
+ */
+ /* write the data to preload register */
+ outw(data[i], ADDR_REG(REG_ADD));
devpriv->ao_readback[chan] = data[i];
/* outw(val + 1, dev->iobase + REG_DAC); starts the D/A conversion. */
- outw(val + 1, ADDR_REG(REG_DAC)); /* starts the D/A conversion. */
+ outw(val + 1, ADDR_REG(REG_DAC)); /*starts the D/A conversion.*/
}
/* return the number of samples read/written */
@@ -954,7 +978,7 @@ static int s526_dio_insn_bits(struct comedi_device *dev,
/* on return, data[1] contains the value of the digital
* input and output lines. */
- data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
+ data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
/* or we could just return the software copy of the output values if
* it was a purely digital output subdevice */
/* data[1]=s->state & 0xFF; */
@@ -969,7 +993,7 @@ static int s526_dio_insn_config(struct comedi_device *dev,
int chan = CR_CHAN(insn->chanspec);
int group, mask;
- printk("S526 DIO insn_config\n");
+ printk(KERN_INFO "S526 DIO insn_config\n");
/* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
@@ -980,11 +1004,12 @@ static int s526_dio_insn_config(struct comedi_device *dev,
mask = 0xF << (group << 2);
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- s->state |= 1 << (group + 10); // bit 10/11 set the group 1/2's mode
+ /* bit 10/11 set the group 1/2's mode */
+ s->state |= 1 << (group + 10);
s->io_bits |= mask;
break;
case INSN_CONFIG_DIO_INPUT:
- s->state &= ~(1 << (group + 10));// 1 is output, 0 is input.
+ s->state &= ~(1 << (group + 10)); /* 1 is output, 0 is input. */
s->io_bits &= ~mask;
break;
case INSN_CONFIG_DIO_QUERY:
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index de784ff08caa..696ee045e25f 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -2398,7 +2398,7 @@ static int usbduxsub_probe(struct usb_interface *uinterf,
usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
if (!usbduxsub[index].dux_commands) {
dev_err(dev, "comedi_: usbdux: "
- "error alloc space for dac commands\n");
+ "error alloc space for dux commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
diff --git a/drivers/staging/cptm1217/Kconfig b/drivers/staging/cptm1217/Kconfig
new file mode 100644
index 000000000000..43b1cc0a50a5
--- /dev/null
+++ b/drivers/staging/cptm1217/Kconfig
@@ -0,0 +1,12 @@
+config TOUCHSCREEN_CLEARPAD_TM1217
+ tristate "Synaptics Clearpad TM1217"
+ depends on I2C
+ depends on GPIOLIB
+ depends on INPUT
+ help
+ Say Y here if you have a Synaptics Clearpad TM1217 Controller
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called clearpad_tm1217.
diff --git a/drivers/staging/cptm1217/Makefile b/drivers/staging/cptm1217/Makefile
new file mode 100644
index 000000000000..8961fafa80e7
--- /dev/null
+++ b/drivers/staging/cptm1217/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += clearpad_tm1217.o
+
diff --git a/drivers/staging/cptm1217/TODO b/drivers/staging/cptm1217/TODO
new file mode 100644
index 000000000000..303922465e4d
--- /dev/null
+++ b/drivers/staging/cptm1217/TODO
@@ -0,0 +1,5 @@
+- Wait for the official upstream general clearpad drivers as promised over
+ the past few months
+- Merge any device support needed from this driver into it
+- Delete this driver
+
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
new file mode 100644
index 000000000000..76e4b782d2fb
--- /dev/null
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -0,0 +1,675 @@
+/*
+ * clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad
+ * TM1217 controller
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/timer.h>
+#include <linux/gpio.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "cp_tm1217.h"
+
+#define CPTM1217_DEVICE_NAME "cptm1217"
+#define CPTM1217_DRIVER_NAME CPTM1217_DEVICE_NAME
+
+#define MAX_TOUCH_SUPPORTED 2
+#define TOUCH_SUPPORTED 1
+#define SAMPLING_FREQ 80 /* Frequency in HZ */
+#define DELAY_BTWIN_SAMPLE (1000 / SAMPLING_FREQ)
+#define WAIT_FOR_RESPONSE 5 /* 5msec just works */
+#define MAX_RETRIES 5 /* receive retries before giving up */
+#define INCREMENTAL_DELAY 5 /* msec slept between retries */
+
+/* Register Definitions */
+#define TMA1217_DEV_STATUS 0x13 /* Device Status */
+#define TMA1217_INT_STATUS 0x14 /* Interrupt Status */
+
+/* Controller can detect up to 2 possible finger touches.
+ * Each finger touch provides 12-bit X/Y coordinates; the values are split
+ * across 2 registers, plus an 8-bit Z value */
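+/* Example (matching process_touch() below): with hi_x, hi_y and lo_xy
+ * denoting the raw values read from the *_HIGHER8 and *_XY_LOWER4 registers,
+ * the coordinates are assembled as
+ *	x = (hi_x << 4) | (lo_xy & 0x0F);
+ *	y = (hi_y << 4) | ((lo_xy & 0xF0) >> 4);
+ */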
+#define TMA1217_FINGER_STATE 0x18 /* Finger State */
+#define TMA1217_FINGER1_X_HIGHER8 0x19 /* Higher 8 bit of X coordinate */
+#define TMA1217_FINGER1_Y_HIGHER8 0x1A /* Higher 8 bit of Y coordinate */
+#define TMA1217_FINGER1_XY_LOWER4 0x1B /* Lower 4 bits of X and Y */
+#define TMA1217_FINGER1_Z_VALUE 0x1D /* 8 bit Z value for finger 1 */
+#define TMA1217_FINGER2_X_HIGHER8 0x1E /* Higher 8 bit of X coordinate */
+#define TMA1217_FINGER2_Y_HIGHER8 0x1F /* Higher 8 bit of Y coordinate */
+#define TMA1217_FINGER2_XY_LOWER4 0x20 /* Lower 4 bits of X and Y */
+#define TMA1217_FINGER2_Z_VALUE 0x22 /* 8 bit Z value for finger 2 */
+#define TMA1217_DEVICE_CTRL 0x23 /* Device Control */
+#define TMA1217_INTERRUPT_ENABLE 0x24 /* Interrupt Enable */
+#define TMA1217_REPORT_MODE 0x2B /* Reporting Mode */
+#define TMA1217_MAX_X_LOWER8 0x31 /* Bit 0-7 for Max X */
+#define TMA1217_MAX_X_HIGHER4 0x32 /* Bit 8-11 for Max X */
+#define TMA1217_MAX_Y_LOWER8 0x33 /* Bit 0-7 for Max Y */
+#define TMA1217_MAX_Y_HIGHER4 0x34 /* Bit 8-11 for Max Y */
+#define TMA1217_DEVICE_CMD_RESET 0x67 /* Device CMD reg for reset */
+#define TMA1217_DEVICE_CMD_REZERO 0x69 /* Device CMD reg for rezero */
+
+#define TMA1217_MANUFACTURER_ID 0x73 /* Manufacturer Id */
+#define TMA1217_PRODUCT_FAMILY 0x75 /* Product Family */
+#define TMA1217_FIRMWARE_REVISION 0x76 /* Firmware Revision */
+#define TMA1217_SERIAL_NO_HIGH 0x7C /* Bit 8-15 of device serial no. */
+#define TMA1217_SERIAL_NO_LOW 0x7D /* Bit 0-7 of device serial no. */
+#define TMA1217_PRODUCT_ID_START 0x7E /* Start address for 10 byte ID */
+#define TMA1217_DEVICE_CAPABILITY 0x8B /* Reporting capability */
+
+
+/*
+ * The touch position structure.
+ */
+struct touch_state {
+ int x;
+ int y;
+ bool button;
+};
+
+/* Device Specific info given by the controller */
+struct cp_dev_info {
+ u16 maxX;
+ u16 maxY;
+};
+
+/* Vendor related info given by the controller */
+struct cp_vendor_info {
+ u8 vendor_id;
+ u8 product_family;
+ u8 firmware_rev;
+ u16 serial_no;
+};
+
+/*
+ * Private structure to store the device details
+ */
+struct cp_tm1217_device {
+ struct i2c_client *client;
+ struct device *dev;
+ struct cp_vendor_info vinfo;
+ struct cp_dev_info dinfo;
+ struct input_dev_info {
+ char phys[32];
+ char name[128];
+ struct input_dev *input;
+ struct touch_state touch;
+ } cp_input_info[MAX_TOUCH_SUPPORTED];
+
+ int thread_running;
+ struct mutex thread_mutex;
+
+ int gpio;
+};
+
+
+/* The following functions are used to read/write registers on the device
+ * as per the RMI protocol. Technically, a page select should be written
+ * before doing read/write but since the register offsets are below 0xFF
+ * we can use the default value of page which is 0x00
+ */
+static int cp_tm1217_read(struct cp_tm1217_device *ts,
+ u8 *req, int size)
+{
+ int i, retval;
+
+ /* Send the address */
+ retval = i2c_master_send(ts->client, &req[0], 1);
+ if (retval != 1) {
+ dev_err(ts->dev, "cp_tm1217: I2C send failed\n");
+ return retval;
+ }
+ msleep(WAIT_FOR_RESPONSE);
+ for (i = 0; i < MAX_RETRIES; i++) {
+ retval = i2c_master_recv(ts->client, &req[1], size);
+ if (retval == size) {
+ break;
+ } else {
+ msleep(INCREMENTAL_DELAY);
+ dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i);
+ }
+ }
+ if (retval != size)
+ dev_err(ts->dev, "cp_tm1217: Read from device failed\n");
+
+ return retval;
+}
+
+static int cp_tm1217_write(struct cp_tm1217_device *ts,
+ u8 *req, int size)
+{
+ int retval;
+
+ /* Send the address and the data to be written */
+ retval = i2c_master_send(ts->client, &req[0], size + 1);
+ if (retval != size + 1) {
+ dev_err(ts->dev, "cp_tm1217: I2C write failed: %d\n", retval);
+ return retval;
+ }
+ /* Wait for the write to complete. TBD why this is required */
+ msleep(WAIT_FOR_RESPONSE);
+
+ return size;
+}
+
+static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts)
+{
+ u8 req[2];
+ int retval;
+
+ req[0] = TMA1217_INTERRUPT_ENABLE;
+ req[1] = 0x0;
+ retval = cp_tm1217_write(ts, req, 1);
+ if (retval != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts)
+{
+ u8 req[2];
+ int retval;
+
+ req[0] = TMA1217_INTERRUPT_ENABLE;
+ req[1] = 0xa;
+ retval = cp_tm1217_write(ts, req, 1);
+ if (retval != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static void process_touch(struct cp_tm1217_device *ts, int index)
+{
+ int retval;
+ struct input_dev_info *input_info =
+ (struct input_dev_info *)&ts->cp_input_info[index];
+ u8 xy_data[6];
+
+ if (index == 0)
+ xy_data[0] = TMA1217_FINGER1_X_HIGHER8;
+ else
+ xy_data[0] = TMA1217_FINGER2_X_HIGHER8;
+
+ retval = cp_tm1217_read(ts, xy_data, 5);
+ if (retval < 5) {
+ dev_err(ts->dev, "cp_tm1217: XY read from device failed\n");
+ return;
+ }
+
+ /* Note: Currently not using the Z values but may be required in
+ the future. */
+ input_info->touch.x = (xy_data[1] << 4)
+ | (xy_data[3] & 0x0F);
+ input_info->touch.y = (xy_data[2] << 4)
+ | ((xy_data[3] & 0xF0) >> 4);
+ input_report_abs(input_info->input, ABS_X, input_info->touch.x);
+ input_report_abs(input_info->input, ABS_Y, input_info->touch.y);
+ input_sync(input_info->input);
+}
+
+static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
+{
+ u8 req[2];
+ int retval, i, finger_touched = 0;
+
+ do {
+ req[0] = TMA1217_FINGER_STATE;
+ retval = cp_tm1217_read(ts, req, 1);
+ if (retval != 1) {
+ dev_err(ts->dev,
+ "cp_tm1217: Read from device failed\n");
+ continue;
+ }
+ finger_touched = 0;
+ /* Start sampling until the pressure is below
+ threshold */
+ for (i = 0; i < TOUCH_SUPPORTED; i++) {
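+ /* Two finger-state bits per finger; a non-zero pair means this
+ finger is down. req[1] is shifted right by two below to move
+ on to the next finger's bits. */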
+ if (req[1] & 0x3) {
+ finger_touched++;
+ if (ts->cp_input_info[i].touch.button == 0) {
+ /* send the button touch event */
+ input_report_key(
+ ts->cp_input_info[i].input,
+ BTN_TOUCH, 1);
+ ts->cp_input_info[i].touch.button = 1;
+ }
+ process_touch(ts, i);
+ } else {
+ if (ts->cp_input_info[i].touch.button == 1) {
+ /* send the button release event */
+ input_report_key(
+ ts->cp_input_info[i].input,
+ BTN_TOUCH, 0);
+ input_sync(ts->cp_input_info[i].input);
+ ts->cp_input_info[i].touch.button = 0;
+ }
+ }
+ req[1] = req[1] >> 2;
+ }
+ msleep(DELAY_BTWIN_SAMPLE);
+ } while (finger_touched > 0);
+}
+
+static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
+{
+ struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
+ u8 req[2];
+ int retval;
+
+ /* Check if another thread is already running */
+ mutex_lock(&ts->thread_mutex);
+ if (ts->thread_running == 1) {
+ mutex_unlock(&ts->thread_mutex);
+ return IRQ_HANDLED;
+ } else {
+ ts->thread_running = 1;
+ mutex_unlock(&ts->thread_mutex);
+ }
+
+ /* Mask the interrupts */
+ retval = cp_tm1217_mask_interrupt(ts);
+
+ /* Read the Interrupt Status register to find the cause of the
+ Interrupt */
+ req[0] = TMA1217_INT_STATUS;
+ retval = cp_tm1217_read(ts, req, 1);
+ if (retval != 1)
+ goto exit_thread;
+
+ if (!(req[1] & 0x8))
+ goto exit_thread;
+
+ cp_tm1217_get_data(ts);
+
+exit_thread:
+ /* Unmask the interrupts before going to sleep */
+ retval = cp_tm1217_unmask_interrupt(ts);
+
+ mutex_lock(&ts->thread_mutex);
+ ts->thread_running = 0;
+ mutex_unlock(&ts->thread_mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int cp_tm1217_init_data(struct cp_tm1217_device *ts)
+{
+ int retval;
+ u8 req[2];
+
+ /* Read the vendor id / fw revision etc. Ignoring the return values
+ as this is non-critical info */
+ req[0] = TMA1217_MANUFACTURER_ID;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->vinfo.vendor_id = req[1];
+
+ req[0] = TMA1217_PRODUCT_FAMILY;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->vinfo.product_family = req[1];
+
+ req[0] = TMA1217_FIRMWARE_REVISION;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->vinfo.firmware_rev = req[1];
+
+ req[0] = TMA1217_SERIAL_NO_HIGH;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->vinfo.serial_no = (req[1] << 8);
+
+ req[0] = TMA1217_SERIAL_NO_LOW;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->vinfo.serial_no = ts->vinfo.serial_no | req[1];
+
+ req[0] = TMA1217_MAX_X_HIGHER4;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->dinfo.maxX = (req[1] & 0xF) << 8;
+
+ req[0] = TMA1217_MAX_X_LOWER8;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->dinfo.maxX = ts->dinfo.maxX | req[1];
+
+ req[0] = TMA1217_MAX_Y_HIGHER4;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->dinfo.maxY = (req[1] & 0xF) << 8;
+
+ req[0] = TMA1217_MAX_Y_LOWER8;
+ retval = cp_tm1217_read(ts, req, 1);
+ ts->dinfo.maxY = ts->dinfo.maxY | req[1];
+
+ return 0;
+
+}
+
+/*
+ * Set up a GPIO for use as the interrupt. We can't simply do this at
+ * boot time because the GPIO drivers themselves may not be around at
+ * boot/firmware set up time to do the work. Instead defer it to driver
+ * detection.
+ */
+
+static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts)
+{
+ int retval;
+
+ /* Hook up the irq handler */
+ retval = gpio_request(ts->gpio, "cp_tm1217_touch");
+ if (retval < 0) {
+ dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
+ retval);
+ return retval;
+ }
+
+ retval = gpio_direction_input(ts->gpio);
+ if (retval < 0) {
+ dev_err(ts->dev,
+ "cp_tm1217: GPIO direction configuration failed, error %d\n",
+ retval);
+ gpio_free(ts->gpio);
+ return retval;
+ }
+
+ retval = gpio_to_irq(ts->gpio);
+ if (retval < 0) {
+ dev_err(ts->dev, "cp_tm1217: GPIO to IRQ failedi,"
+ " error %d\n", retval);
+ gpio_free(ts->gpio);
+ }
+ dev_dbg(ts->dev,
+ "cp_tm1217: Got IRQ number is %d for GPIO %d\n",
+ retval, ts->gpio);
+ return retval;
+}
+
+static int cp_tm1217_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cp_tm1217_device *ts;
+ struct input_dev *input_dev;
+ struct input_dev_info *input_info;
+ struct cp_tm1217_platform_data *pdata;
+ u8 req[2];
+ int i, retval;
+
+ /* No pdata is fine - we then use "normal" IRQ mode */
+
+ pdata = client->dev.platform_data;
+
+ ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
+ if (!ts) {
+ dev_err(&client->dev,
+ "cp_tm1217: Private Device Struct alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ts->client = client;
+ ts->dev = &client->dev;
+ i2c_set_clientdata(client, ts);
+
+ ts->thread_running = 0;
+ mutex_init(&ts->thread_mutex);
+
+ /* Reset the Controller */
+ req[0] = TMA1217_DEVICE_CMD_RESET;
+ req[1] = 0x1;
+ retval = cp_tm1217_write(ts, req, 1);
+ if (retval != 1) {
+ dev_err(ts->dev, "cp_tm1217: Controller reset failed\n");
+ kfree(ts);
+ return -EIO;
+ }
+
+ /* Clear up the interrupt status from reset. */
+ req[0] = TMA1217_INT_STATUS;
+ retval = cp_tm1217_read(ts, req, 1);
+
+ /* Mask all the interrupts */
+ retval = cp_tm1217_mask_interrupt(ts);
+
+ /* Read the controller information */
+ cp_tm1217_init_data(ts);
+
+ /* The following code will register multiple event devices when
+ multi-pointer is enabled; the code has not been tested
+ with MPX */
+ for (i = 0; i < TOUCH_SUPPORTED; i++) {
+ input_dev = input_allocate_device();
+ if (input_dev == NULL) {
+ dev_err(ts->dev,
+ "cp_tm1217:Input Device Struct alloc failed\n");
+ kfree(ts);
+ return -ENOMEM;
+ }
+ input_info = &ts->cp_input_info[i];
+ snprintf(input_info->name, sizeof(input_info->name),
+ "cp_tm1217_touchscreen_%d", i);
+ input_dev->name = input_info->name;
+ snprintf(input_info->phys, sizeof(input_info->phys),
+ "%s/input%d", dev_name(&client->dev), i);
+
+ input_dev->phys = input_info->phys;
+ input_dev->id.bustype = BUS_I2C;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0);
+
+ retval = input_register_device(input_dev);
+ if (retval) {
+ dev_err(ts->dev,
+ "Input dev registration failed for %s\n",
+ input_dev->name);
+ goto fail;
+ }
+ input_info->input = input_dev;
+ }
+
+ /* Set up the reporting mode to send an interrupt only when a
+ finger arrives or departs. */
+ req[0] = TMA1217_REPORT_MODE;
+ req[1] = 0x02;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ /* Set the device to no-sleep mode for now and mark it configured */
+ req[0] = TMA1217_DEVICE_CTRL;
+ req[1] = 0x84;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ /* Check for the status of the device */
+ req[0] = TMA1217_DEV_STATUS;
+ retval = cp_tm1217_read(ts, req, 1);
+ if (req[1] != 0) {
+ dev_err(ts->dev,
+ "cp_tm1217: Device Status 0x%x != 0: config failed\n",
+ req[1]);
+
+ retval = -EIO;
+ goto fail;
+ }
+
+ if (pdata && pdata->gpio) {
+ ts->gpio = pdata->gpio;
+ retval = cp_tm1217_setup_gpio_irq(ts);
+ } else
+ retval = client->irq;
+
+ if (retval < 0) {
+ dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
+ retval);
+ goto fail;
+ }
+
+ client->irq = retval;
+
+
+ retval = request_threaded_irq(client->irq,
+ NULL, cp_tm1217_sample_thread,
+ IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts);
+ if (retval < 0) {
+ dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval);
+ goto fail_gpio;
+ }
+
+ /* Unmask the interrupts */
+ retval = cp_tm1217_unmask_interrupt(ts);
+ if (retval == 0)
+ return 0;
+
+ free_irq(client->irq, ts);
+fail_gpio:
+ if (ts->gpio)
+ gpio_free(ts->gpio);
+fail:
+ /* Clean up before returning failure */
+ for (i = 0; i < TOUCH_SUPPORTED; i++) {
+ if (ts->cp_input_info[i].input) {
+ input_unregister_device(ts->cp_input_info[i].input);
+ input_free_device(ts->cp_input_info[i].input);
+ }
+ }
+ kfree(ts);
+ return retval;
+
+}
+
+/*
+ * cp_tm1217 suspend
+ *
+ */
+static int cp_tm1217_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+ u8 req[2];
+ int retval;
+
+ /* Put the controller to sleep */
+ req[0] = TMA1217_DEVICE_CTRL;
+ retval = cp_tm1217_read(ts, req, 1);
+ req[1] = (req[1] & 0xF8) | 0x1;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+/*
+ * cp_tm1217_resume
+ *
+ */
+static int cp_tm1217_resume(struct i2c_client *client)
+{
+ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+ u8 req[2];
+ int retval;
+
+ /* Take the controller out of sleep */
+ req[0] = TMA1217_DEVICE_CTRL;
+ retval = cp_tm1217_read(ts, req, 1);
+ req[1] = (req[1] & 0xF8) | 0x4;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ /* Restore the register settings since the power to the
+ controller could have been cut off */
+
+ /* Set up the reporting mode to send an interrupt only when a
+ finger arrives or departs. */
+ req[0] = TMA1217_REPORT_MODE;
+ req[1] = 0x02;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ /* Set the device to no-sleep mode for now and mark it configured */
+ req[0] = TMA1217_DEVICE_CTRL;
+ req[1] = 0x84;
+ retval = cp_tm1217_write(ts, req, 1);
+
+ /* Setup the interrupt mask */
+ retval = cp_tm1217_unmask_interrupt(ts);
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+
+/*
+ * cp_tm1217_remove
+ *
+ */
+static int cp_tm1217_remove(struct i2c_client *client)
+{
+ struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+ int i;
+
+ free_irq(client->irq, ts);
+ if (ts->gpio)
+ gpio_free(ts->gpio);
+ for (i = 0; i < TOUCH_SUPPORTED; i++)
+ input_unregister_device(ts->cp_input_info[i].input);
+ kfree(ts);
+ return 0;
+}
+
+static struct i2c_device_id cp_tm1217_idtable[] = {
+ { CPTM1217_DEVICE_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable);
+
+static struct i2c_driver cp_tm1217_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = CPTM1217_DRIVER_NAME,
+ },
+ .id_table = cp_tm1217_idtable,
+ .probe = cp_tm1217_probe,
+ .remove = cp_tm1217_remove,
+ .suspend = cp_tm1217_suspend,
+ .resume = cp_tm1217_resume,
+};
+
+static int __init clearpad_tm1217_init(void)
+{
+ return i2c_add_driver(&cp_tm1217_driver);
+}
+
+static void __exit clearpad_tm1217_exit(void)
+{
+ i2c_del_driver(&cp_tm1217_driver);
+}
+
+module_init(clearpad_tm1217_init);
+module_exit(clearpad_tm1217_exit);
+
+MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>");
+MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/cptm1217/cp_tm1217.h b/drivers/staging/cptm1217/cp_tm1217.h
new file mode 100644
index 000000000000..a0ce31db53f8
--- /dev/null
+++ b/drivers/staging/cptm1217/cp_tm1217.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_I2C_CP_TM1217_H
+#define __LINUX_I2C_CP_TM1217_H
+
+struct cp_tm1217_platform_data {
+ int gpio; /* If not set uses the IRQ resource 0 */
+};
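+
+/*
+ * Example (hypothetical board support code; the gpio number is purely
+ * illustrative). Leaving gpio at 0 makes the driver fall back to the
+ * I2C client's irq:
+ *
+ *	static struct cp_tm1217_platform_data cp_tm1217_pdata = {
+ *		.gpio = 135,
+ *	};
+ */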
+
+#endif
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index f63185790c48..153ddbf4247d 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -1711,7 +1711,7 @@ enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, ui
}
BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
- return BC_STS_SUCCESS;;
+ return BC_STS_SUCCESS;
}
enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 28c6b8ced424..719e70bc871e 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -516,7 +516,7 @@ static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
BCMLOG_ENTER;
- pinfo = (struct crystalhd_adp *) pci_get_drvdata(pdev);
+ pinfo = pci_get_drvdata(pdev);
if (!pinfo) {
BCMLOG_ERR("could not get adp\n");
return;
@@ -626,7 +626,7 @@ int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
struct crystalhd_ioctl_data *temp;
enum BC_STATUS sts = BC_STS_SUCCESS;
- adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
+ adp = pci_get_drvdata(pdev);
if (!adp) {
BCMLOG_ERR("could not get adp\n");
return -ENODEV;
@@ -660,7 +660,7 @@ int chd_dec_pci_resume(struct pci_dev *pdev)
enum BC_STATUS sts = BC_STS_SUCCESS;
int rc;
- adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
+ adp = pci_get_drvdata(pdev);
if (!adp) {
BCMLOG_ERR("could not get adp\n");
return -ENODEV;
diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig
new file mode 100644
index 000000000000..a1b3a8d2b866
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Kconfig
@@ -0,0 +1,11 @@
+config CS5535_GPIO
+ tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
+ depends on X86_32
+ help
+ Note: this driver is DEPRECATED. Please use the cs5535-gpio module
+ in the GPIO section instead (CONFIG_GPIO_CS5535).
+
+ Give userspace access to the GPIO pins on the AMD CS5535 and
+ CS5536 Geode companion devices.
+
+ If compiled as a module, it will be called cs5535_gpio.
diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile
new file mode 100644
index 000000000000..d67c4b85f191
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO
new file mode 100644
index 000000000000..98d1cd1e2363
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/TODO
@@ -0,0 +1,6 @@
+This is an obsolete driver for some of the CS5535 and CS5536 southbridge GPIOs.
+It has been replaced by a driver that makes use of the Linux GPIO subsystem.
+Please switch to that driver, and let dilinger@queued.net know if there's
+anything missing from the new driver.
+
+This driver is scheduled for removal in 2.6.40.
diff --git a/drivers/char/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c
index 0cf1e5fad9ab..0cf1e5fad9ab 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/staging/cs5535_gpio/cs5535_gpio.c
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index 9a205a342c55..160f6693aa33 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -630,7 +630,7 @@ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static const struct pci_device_id cx25821_audio_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index dcbe6b628455..52224cdc967d 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -29,7 +29,7 @@
#endif
-extern int log_level;
+extern int cxt1e1_log_level;
#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
@@ -292,12 +292,12 @@ init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
* i.e.FPMODE=0 (@0x20) */
if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
{
- if (log_level >= LOG_SBEBUG12)
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
pr_info(">> %s: clockmaster internal clock\n", __func__);
pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* internal oscillator */
} else /* external clock source */
{
- if (log_level >= LOG_SBEBUG12)
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
pr_info(">> %s: clockmaster external clock\n", __func__);
pci_write_32 ((u_int32_t *) &comet->tx_time, 0x09); /* loop timing
* (external) */
@@ -312,7 +312,7 @@ init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x21); /* Slave Mode (CMODE=1) */
pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x20); /* Slave Mode i.e.
* FPMODE=1 (@0x20) */
- if (log_level >= LOG_SBEBUG12)
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
pr_info(">> %s: clockslave internal clock\n", __func__);
pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* oscillator timing */
}
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index ab399c2f7488..d9a9aa3571d9 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -54,7 +54,7 @@ static int dummy = 0;
#endif
-extern int log_level;
+extern int cxt1e1_log_level;
extern int drvr_state;
@@ -67,7 +67,7 @@ pci_read_32 (u_int32_t *p)
FLUSH_PCI_READ ();
v = le32_to_cpu (*p);
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
return v;
#else
@@ -80,7 +80,7 @@ void
pci_write_32 (u_int32_t *p, u_int32_t v)
{
#ifdef FLOW_DEBUG
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
#endif
*p = cpu_to_le32 (v);
@@ -118,7 +118,7 @@ watchdog_func (unsigned long arg)
if (drvr_state != SBE_DRVR_AVAILABLE)
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
return;
}
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 89200e7af26c..c517cc22f391 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -37,7 +37,7 @@
#define STATIC static
#endif
-extern int log_level;
+extern int cxt1e1_log_level;
extern int error_flag;
extern int drvr_state;
@@ -143,7 +143,7 @@ hdw_sn_get (hdw_info_t * hi, int brdno)
if ((hi->promfmt = pmc_verify_cksum (&hi->mfg_info.data)) == PROM_FORMAT_Unk)
{
/* bad crc, data is suspect */
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: EEPROM cksum error\n", hi->devname);
hi->mfg_info_sts = EEPROM_CRCERR;
} else
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index c7930287e3db..0f78f8962751 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -108,9 +108,9 @@ extern int unregister_hdlc_device_v7 (hdlc_device *);
#endif
int error_flag; /* module load error reporting */
-int log_level = LOG_ERROR;
+int cxt1e1_log_level = LOG_ERROR;
int log_level_default = LOG_ERROR;
-module_param(log_level, int, 0444);
+module_param(cxt1e1_log_level, int, 0444);
int cxt1e1_max_mru = MUSYCC_MRU;
int max_mru_default = MUSYCC_MRU;
@@ -497,7 +497,7 @@ create_chan (struct net_device * ndev, ci_t * ci,
rtnl_lock (); /* needed due to Ioctl calling sequence */
if (ret)
{
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: create_chan[%d] registration error = %d.\n",
ci->devname, cp->channum, ret);
free_netdev (dev); /* cleanup */
@@ -722,11 +722,11 @@ do_get_chan_stats (struct net_device * ndev, void *data)
STATIC status_t
do_set_loglevel (struct net_device * ndev, void *data)
{
- unsigned int log_level;
+ unsigned int cxt1e1_log_level;
- if (copy_from_user (&log_level, data, sizeof (int)))
+ if (copy_from_user (&cxt1e1_log_level, data, sizeof (int)))
return -EFAULT;
- sbecom_set_loglevel (log_level);
+ sbecom_set_loglevel (cxt1e1_log_level);
return 0;
}
@@ -1115,9 +1115,9 @@ c4_mod_init (void)
return -rtn; /* installation failure - see system log */
/* housekeeping notifications */
- if (log_level != log_level_default)
- pr_info("NOTE: driver parameter <log_level> changed from default %d to %d.\n",
- log_level_default, log_level);
+ if (cxt1e1_log_level != log_level_default)
+ pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
+ log_level_default, cxt1e1_log_level);
if (cxt1e1_max_mru != max_mru_default)
pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
max_mru_default, cxt1e1_max_mru);
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index fc15610f6974..f274c77fb3fc 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -97,7 +97,7 @@ char SBEid_pmcc4_musyccc[] =
/* global driver variables */
extern ci_t *c4_list;
extern int drvr_state;
-extern int log_level;
+extern int cxt1e1_log_level;
extern int cxt1e1_max_mru;
extern int cxt1e1_max_mtu;
@@ -627,7 +627,7 @@ rewrite:
if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT))
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
pi->up->devname, rcnt, req, pi->sr_last, r,
(pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
@@ -951,7 +951,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
ch = pi->chan[gchan];
if (ch == 0 || ch->state != UP)
{
- if (log_level >= LOG_ERROR)
+ if (cxt1e1_log_level >= LOG_ERROR)
pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
pi->up->devname, gchan);
}
@@ -1002,7 +1002,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
}
if (status & MUSYCC_TX_OWNED)
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
{
pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
pi->up->devname, pi->portnum, ch->channum,
@@ -1016,7 +1016,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
break; /* Not our mdesc, done */
} else
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
}
@@ -1054,7 +1054,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
}
md->status = 0;
#ifdef RLD_TXFULL_DEBUG
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
pr_info("~~ tx_eom: tx_full %x txd_free %d -> %d\n",
ch->tx_full, ch->txd_free, ch->txd_free + 1);
#endif
@@ -1063,7 +1063,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE))
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
pi->up->devname, ch->p.chan_mode, status);
if ((status & EOMIRQ_ENABLE) == 0)
@@ -1094,7 +1094,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
{
#ifdef RLD_TXFULL_DEBUG
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
ch->channum,
ch->txd_free, ch->txd_num / 2);
@@ -1108,7 +1108,7 @@ musycc_bh_tx_eom (mpi_t * pi, int gchan)
#ifdef RLD_TXFULL_DEBUG
else if (ch->tx_full)
{
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
ch->channum,
ch->txd_free, ch->txd_num / 2);
@@ -1138,7 +1138,7 @@ musycc_bh_rx_eom (mpi_t * pi, int gchan)
ch = pi->chan[gchan];
if (ch == 0 || ch->state != UP)
{
- if (log_level > LOG_ERROR)
+ if (cxt1e1_log_level > LOG_ERROR)
pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
pi->up->devname, gchan);
return;
@@ -1269,7 +1269,7 @@ musycc_intr_th_handler (void *devp)
if (nextInt != INTRPTS_NEXTINT (ci->intlog.this_status_new))
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
{
pr_info("%s: note - updated ISD from %08x to %08x\n",
ci->devname, status,
@@ -1337,11 +1337,11 @@ musycc_intr_th_handler (void *devp)
ci->intlog.last_status_new = ci->intlog.this_status_new;
ci->intlog.this_status_new = currInt;
- if ((log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
+ if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
{
pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
}
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
ci->devname, &ci->reg->isd,
status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));
@@ -1448,7 +1448,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
if ((currInt == badInt) || (currInt == badInt2)) /* catch failure of Bug
* Fix checking */
{
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n",
ci->devname, &ci->iqd_p[headx], headx);
@@ -1483,7 +1483,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
FLUSH_MEM_WRITE ();
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
{
if (err != 0)
pr_info(" %08x -> err: %2d,", currInt, err);
@@ -1497,7 +1497,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
switch (event)
{
case EVE_SACK: /* Service Request Acknowledge */
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
{
volatile u_int32_t r;
@@ -1534,7 +1534,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
}
break;
default:
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
event, headx, currInt, group);
break;
@@ -1573,9 +1573,9 @@ musycc_intr_bh_tasklet (ci_t * ci)
{
#ifdef RLD_TRANS_DEBUG
- if (1 || log_level >= LOG_MONITOR)
+ if (1 || cxt1e1_log_level >= LOG_MONITOR)
#else
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
#endif
{
pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
@@ -1605,7 +1605,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
ch->s.rx_over_errors++;
ch->ch_start_rx = CH_START_RX_ONR;
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
{
pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
ci->devname, ch->channum, ch->p.chan_mode);
@@ -1623,7 +1623,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
* Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors],
* this BUFF error requires Transmit channel reactivation.
*/
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
ci->devname, ch->channum, ch->p.chan_mode);
} else /* RX buffer overrun */
@@ -1636,7 +1636,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
* space for this channel. Receive channel reactivation is
* not required, but data has been lost.
*/
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
ci->devname, ch->channum, ch->p.chan_mode);
/*
@@ -1658,7 +1658,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
} /* switch on err */
/* Check for interrupt lost condition */
- if ((currInt & INTRPT_ILOST_M) && (log_level >= LOG_ERROR))
+ if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR))
{
pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
ci->devname);
@@ -1667,7 +1667,7 @@ musycc_intr_bh_tasklet (ci_t * ci)
FLUSH_MEM_WRITE ();
FLUSH_MEM_READ ();
} /* while */
- if ((log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
+ if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
{
int bh;
@@ -1821,9 +1821,9 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
return EROFS; /* how else to flag unwritable state ? */
#ifdef RLD_TRANS_DEBUGx
- if (1 || log_level >= LOG_MONITOR2)
+ if (1 || cxt1e1_log_level >= LOG_MONITOR2)
#else
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
#endif
{
pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
@@ -1846,7 +1846,7 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
if (txd_need_cnt == 0)
{
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
OS_mem_token_free (mem_token);
return 0; /* no data to send */
@@ -1857,7 +1857,7 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
if (txd_need_cnt > ch->txd_num) /* never enough descriptors for this
* large a buffer */
{
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
{
pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
ch->txd_num, txd_need_cnt + 1);
@@ -1874,7 +1874,7 @@ musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
/************************************************************/
if (txd_need_cnt > ch->txd_free)
{
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
{
pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
channum, ch->txd_free, ch->txd_num, txd_need_cnt);
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 5c8a3eb0cfc3..341e7a92f099 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -135,7 +135,7 @@ void musycc_serv_req (mpi_t *, u_int32_t);
void musycc_update_timeslots (mpi_t *);
extern void musycc_update_tx_thp (mch_t *);
-extern int log_level;
+extern int cxt1e1_log_level;
extern int cxt1e1_max_mru;
extern int cxt1e1_max_mtu;
extern int max_rxdesc_used, max_rxdesc_default;
@@ -168,12 +168,12 @@ sbecom_set_loglevel (int d)
* for card 0 only */
} else
{
- if (log_level != d)
+ if (cxt1e1_log_level != d)
{
- pr_info("log level changed from %d to %d\n", log_level, d);
- log_level = d; /* set new */
+ pr_info("log level changed from %d to %d\n", cxt1e1_log_level, d);
+ cxt1e1_log_level = d; /* set new */
} else
- pr_info("log level is %d\n", log_level);
+ pr_info("log level is %d\n", cxt1e1_log_level);
}
}
@@ -513,7 +513,7 @@ checkPorts (ci_t * ci)
if ((value == 0x1c) || (value == 0x19) || (value == 0x12))
c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF); /* take port out of any
* loopbk mode */
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
if (value != 0x3f)
pr_warning("%s: BOC value = %x on Port %d\n",
ci->devname, value, portnum);
@@ -533,7 +533,7 @@ c4_watchdog (ci_t * ci)
{
if (drvr_state != SBE_DRVR_AVAILABLE)
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("drvr not available (%x)\n", drvr_state);
return;
}
@@ -794,19 +794,19 @@ c4_loop_port (ci_t * ci, int portnum, u_int8_t cmd)
}
pci_write_32 ((u_int32_t *) &comet->mdiag, cmd);
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: loopback mode changed to %2x from %2x on Port %d\n",
ci->devname, cmd, loopValue, portnum);
loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK;
if (loopValue != cmd)
{
- if (log_level >= LOG_ERROR)
+ if (cxt1e1_log_level >= LOG_ERROR)
pr_info("%s: write to loop register failed, unknown state for Port %d\n",
ci->devname, portnum);
}
} else
{
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: loopback already in that mode (%2x)\n",
ci->devname, loopValue);
}
@@ -997,7 +997,7 @@ c4_set_port (ci_t * ci, int portnum)
pi = &ci->port[portnum];
pp = &ci->port[portnum].p;
e1mode = IS_FRAME_ANY_E1 (pp->port_mode);
- if (log_level >= LOG_MONITOR2)
+ if (cxt1e1_log_level >= LOG_MONITOR2)
{
pr_info("%s: c4_set_port[%d]: entered, e1mode = %x, openchans %d.\n",
ci->devname,
@@ -1278,12 +1278,12 @@ c4_fifo_alloc (mpi_t * pi, int chan, int *len)
}
if (max != *len)
{
- if (log_level >= LOG_WARN)
+ if (cxt1e1_log_level >= LOG_WARN)
pr_info("%s: wanted to allocate %d fifo space, but got only %d\n",
pi->up->devname, *len, max);
*len = max;
}
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("%s: allocated %d fifo at %d for channel %d/%d\n",
pi->up->devname, max, start, chan, pi->p.portnum);
for (i = maxstart; i < (maxstart + max); i++)
@@ -1296,7 +1296,7 @@ c4_fifo_free (mpi_t * pi, int chan)
{
int i;
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("%s: deallocated fifo for channel %d/%d\n",
pi->up->devname, chan, pi->p.portnum);
for (i = 0; i < 32; i++)
@@ -1321,7 +1321,7 @@ c4_chan_up (ci_t * ci, int channum)
return ENOENT;
if (ch->state == UP)
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: channel already UP, graceful early exit\n",
ci->devname);
return 0;
@@ -1334,7 +1334,7 @@ c4_chan_up (ci_t * ci, int channum)
{
if (ch->p.bitmask[i] & pi->tsm[i])
{
- if (1 || log_level >= LOG_WARN)
+ if (1 || cxt1e1_log_level >= LOG_WARN)
{
pr_info("%s: c4_chan_up[%d] EINVAL (attempt to cfg in-use or unavailable TimeSlot[%d])\n",
ci->devname, channum, i);
@@ -1351,7 +1351,7 @@ c4_chan_up (ci_t * ci, int channum)
nbuf = nts / 8 ? nts / 8 : 1;
if (!nbuf)
{
- /* if( log_level >= LOG_WARN) */
+ /* if( cxt1e1_log_level >= LOG_WARN) */
pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n",
ci->devname, channum);
return ENOBUFS; /* this should not happen */
@@ -1420,7 +1420,7 @@ c4_chan_up (ci_t * ci, int channum)
#if 0
/* DEBUG INFO */
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: mode %x rxnum %d (rxused %d def %d) txnum %d (txused %d def %d)\n",
ci->devname, ch->p.chan_mode,
rxnum, max_rxdesc_used, max_rxdesc_default,
@@ -1451,7 +1451,7 @@ c4_chan_up (ci_t * ci, int channum)
if (!(m = OS_mem_token_alloc (cxt1e1_max_mru)))
{
- if (log_level >= LOG_MONITOR)
+ if (cxt1e1_log_level >= LOG_MONITOR)
pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
ci->devname, channum, cxt1e1_max_mru);
goto errfree;
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 5a72cb5cff42..501a331ded5b 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -86,7 +86,7 @@ pci_read_32 (u_int32_t *p)
FLUSH_PCI_READ ();
v = le32_to_cpu (*p);
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
return v;
#else
@@ -99,7 +99,7 @@ static inline void
pci_write_32 (u_int32_t *p, u_int32_t v)
{
#ifdef FLOW_DEBUG
- if (log_level >= LOG_DEBUG)
+ if (cxt1e1_log_level >= LOG_DEBUG)
pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
#endif
*p = cpu_to_le32 (v);
diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/easycap/Kconfig
index 9d5fe4ddc30a..bd96f39f2735 100644
--- a/drivers/staging/easycap/Kconfig
+++ b/drivers/staging/easycap/Kconfig
@@ -1,7 +1,6 @@
config EASYCAP
tristate "EasyCAP USB ID 05e1:0408 support"
depends on USB && VIDEO_DEV
- depends on BKL # please fix
---help---
This is an integrated audio/video driver for EasyCAP cards with
diff --git a/drivers/staging/easycap/Makefile b/drivers/staging/easycap/Makefile
index 8a3d911aee5d..f1f2fbebf8f6 100644
--- a/drivers/staging/easycap/Makefile
+++ b/drivers/staging/easycap/Makefile
@@ -10,4 +10,5 @@ ccflags-y := -Wall
ccflags-y += -DEASYCAP_IS_VIDEODEV_CLIENT
ccflags-y += -DEASYCAP_NEEDS_V4L2_DEVICE_H
ccflags-y += -DEASYCAP_NEEDS_V4L2_FOPS
+ccflags-y += -DEASYCAP_NEEDS_UNLOCKED_IOCTL
diff --git a/drivers/staging/easycap/README b/drivers/staging/easycap/README
index 3775481f05e8..6b5ac0d34bd9 100644
--- a/drivers/staging/easycap/README
+++ b/drivers/staging/easycap/README
@@ -24,6 +24,9 @@ Two kinds of EasyCAP have this USB ID, namely:
BUILD OPTIONS AND DEPENDENCIES
------------------------------
+Unless EASYCAP_DEBUG is defined during compilation it will not be possible
+to select a debug level at the time of module installation.
+
If the parameter EASYCAP_IS_VIDEODEV_CLIENT is undefined during compilation
the built module is entirely independent of the videodev module, and when
the EasyCAP is physically plugged into a USB port the special files
@@ -33,41 +36,54 @@ respectively.
If the parameter EASYCAP_IS_VIDEODEV_CLIENT is defined during compilation
the built easycap module is configured to register with the videodev module,
in which case the special files created when the EasyCAP is plugged in are
-/dev/video0 and /dev/easysnd0. Use of the easycap module as a client of
-the videodev module has received very little testing as of June 2010.
+/dev/video0 and /dev/easysnd0.
+
+During in-tree builds the following should be defined whenever the
+parameter EASYCAP_IS_VIDEODEV_CLIENT is defined:
+
+EASYCAP_NEEDS_V4L2_DEVICE_H
+EASYCAP_NEEDS_V4L2_FOPS
+EASYCAP_NEEDS_UNLOCKED_IOCTL
+
+If the build is performed out-of-tree against older kernels the parameters
+to be defined depend on the kernel version in a way which will not be
+discussed here.
-KNOWN BUILD PROBLEMS
+KNOWN RUNTIME ISSUES
--------------------
-(1) Recent gcc versions may generate the message:
+(1) Intentionally, this driver will not stream material which is unambiguously
+identified by the hardware as copy-protected. Normal video output will be
+present for about a minute but will then freeze when this situation arises.
- warning: the frame size of .... bytes is larger than 1024 bytes
+(2) The controls for luminance, contrast, saturation, hue and volume may not
+always work properly.
-This warning can be suppressed by specifying in the Makefile:
+(3) Reduced-resolution S-Video seems to suffer from moire artefacts.
- EXTRA_CFLAGS += -Wframe-larger-than=8192
-but it would be preferable to remove the cause of the warning.
+INPUT NUMBERING
+---------------
+For the EasyCAP with S-VIDEO input cable the driver regards a request for
+inputs numbered 0 or 1 as referring to CVBS and a request for input
+numbered 5 as referring to S-VIDEO.
-KNOWN RUNTIME ISSUES
---------------------
+For the EasyCAP with four CVBS inputs the driver expects to be asked for
+any one of inputs numbered 1,2,3,4. If input 0 is asked for, it is
+interpreted as input 1.
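+
+An input can then be selected with any V4L2-capable application; for example,
+with the v4l2-ctl utility (if installed) S-VIDEO would be requested by:
+
+v4l2-ctl --device=/dev/video0 --set-input=5
+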
-(1) Randomly (maybe 5 to 10% of occasions) the driver fails to produce any
-output at start-up. Closing mplayer (or whatever the user program is) and
-restarting it restores normal performance without any other remedial action
-being necessary. The reason for this is not known.
-(2) Intentionally, this driver will not stream material which is unambiguously
-identified by the hardware as copy-protected. The video output will freeze
-within about a minute when this situation arises.
+MODULE PARAMETERS
+-----------------
-(3) The controls for luminance, contrast, saturation, hue and volume may not
-always work properly.
+Three module parameters are defined:
-(4) Reduced-resolution S-Video seems to suffer from moire artefacts. No
-attempt has yet been made to rememdy this.
+debug the easycap module is configured at diagnostic level n (0 to 9)
+gain audio gain level n (0 to 31, default is 16)
+bars 0 => no testcard bars when incoming video signal is lost
+ 1 => testcard bars when incoming video signal is lost (default)
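+
+For example, a one-off installation at diagnostic level 3 (effective only if
+EASYCAP_DEBUG was defined at build time) with a slightly raised audio gain
+might be:
+
+modprobe easycap debug=3 gain=24 bars=1
+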
SUPPORTED TV STANDARDS AND RESOLUTIONS
@@ -82,18 +98,29 @@ usable as (for example) the "norm=" parameter in the mplayer command:
PAL_60, NTSC_443,
PAL_M.
+In addition, the driver offers "custom" pseudo-standards with a framerate
+which is 20% of the usual framerate. These pseudo-standards are named:
+
+ PAL_BGHIN_SLOW, NTSC_N_443_SLOW,
+ PAL_Nc_SLOW, NTSC_N_SLOW,
+ SECAM_SLOW, NTSC_M_SLOW, NTSC_M_JP_SLOW,
+ PAL_60_SLOW, NTSC_443_SLOW,
+ PAL_M_SLOW.
+
+
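+As an illustration (exact option syntax may differ between mplayer versions),
+one of these names can be passed as the "norm=" suboption:
+
+	mplayer tv:// -tv driver=v4l2:device=/dev/video0:norm=PAL_BGHIN_SLOW
+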
The available picture sizes are:
at 25 frames per second: 720x576, 704x576, 640x480, 360x288, 320x240;
- at 30 frames per second: 720x480, 640x480, 360x240, 320x240;
+ at 30 frames per second: 720x480, 640x480, 360x240, 320x240.
WHAT'S TESTED AND WHAT'S NOT
----------------------------
-This driver is known to work with mplayer, mencoder, tvtime and sufficiently
-recent versions of vlc. An interface to ffmpeg is implemented, but serious
-audio-video synchronization problems remain.
+This driver is known to work with mplayer, mencoder, tvtime, zoneminder,
+xawtv, gstreamer and sufficiently recent versions of vlc. An interface
+to ffmpeg is implemented, but serious audio-video synchronization problems
+remain.
The driver is designed to support all the TV standards accepted by the
hardware, but as yet it has actually been tested on only a few of these.
@@ -101,10 +128,7 @@ hardware, but as yet it has actually been tested on only a few of these.
I have been unable to test and calibrate the S-video input myself because I
do not possess any equipment with S-video output.
-This driver does not understand the V4L1 IOCTL commands, so programs such
-as camorama are not compatible. There are reports that the driver does
-work with sufficiently recent (V4L2) versions of zoneminder, but I have not
-attempted to confirm this myself.
+This driver does not understand the V4L1 IOCTL commands.
UDEV RULES
@@ -120,6 +144,17 @@ ATTRS{idVendor}=="05e1", ATTRS{idProduct}=="0408", \
LABEL="easycap_rules_end"
+MODPROBE CONFIGURATION
+----------------------
+
+The easycap module is in competition with the module snd-usb-audio for the
+EasyCAP's audio channel, and its installation can be aided by providing a
+file in directory /etc/modprobe.d with content:
+
+options easycap gain=16 bars=1
+install easycap /sbin/rmmod snd-usb-audio; /sbin/modprobe --ignore-install easycap
+
+
ACKNOWLEGEMENTS AND REFERENCES
------------------------------
This driver makes use of information contained in the Syntek Semicon DC-1125
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
index 884263b2775d..8ebf96f8a242 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/easycap/easycap.h
@@ -33,6 +33,7 @@
* EASYCAP_NEEDS_USBVIDEO_H
* EASYCAP_NEEDS_V4L2_DEVICE_H
* EASYCAP_NEEDS_V4L2_FOPS
+ * EASYCAP_NEEDS_UNLOCKED_IOCTL
*
* IF REQUIRED THEY MUST BE EXTERNALLY DEFINED, FOR EXAMPLE AS COMPILER
* OPTIONS.
@@ -42,35 +43,24 @@
#if (!defined(EASYCAP_H))
#define EASYCAP_H
-#if defined(EASYCAP_DEBUG)
-#if (9 < EASYCAP_DEBUG)
-#error Debug levels 0 to 9 are okay.\
- To achieve higher levels, remove this trap manually from easycap.h
-#endif
-#endif /*EASYCAP_DEBUG*/
+/*---------------------------------------------------------------------------*/
+/*
+ * THESE ARE NORMALLY DEFINED
+ */
+/*---------------------------------------------------------------------------*/
+#define PATIENCE 500
+#undef PREFER_NTSC
+#define PERSEVERE
/*---------------------------------------------------------------------------*/
/*
* THESE ARE FOR MAINTENANCE ONLY - NORMALLY UNDEFINED:
*/
/*---------------------------------------------------------------------------*/
-#undef PREFER_NTSC
#undef EASYCAP_TESTCARD
#undef EASYCAP_TESTTONE
-#undef LOCKFRAME
#undef NOREADBACK
#undef AUDIOTIME
/*---------------------------------------------------------------------------*/
-/*
- *
- * DEFINE BRIDGER TO ACTIVATE THE ROUTINE FOR BRIDGING VIDEOTAPE DROPOUTS.
- *
- * *** UNDER DEVELOPMENT/TESTING - NOT READY YET!***
- *
- */
-/*---------------------------------------------------------------------------*/
-#undef BRIDGER
-/*---------------------------------------------------------------------------*/
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -92,25 +82,14 @@
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
-#if (!defined(__OLD_VIDIOC_))
-#define __OLD_VIDIOC_
-#endif /* !defined(__OLD_VIDIOC_) */
-
#include <media/v4l2-dev.h>
-
#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
#include <media/v4l2-device.h>
#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
-#if (!defined(__OLD_VIDIOC_))
-#define __OLD_VIDIOC_
-#endif /* !defined(__OLD_VIDIOC_) */
#include <linux/videodev2.h>
-
#include <linux/soundcard.h>
-
#if defined(EASYCAP_NEEDS_USBVIDEO_H)
#include <config/video/usbvideo.h>
#endif /*EASYCAP_NEEDS_USBVIDEO_H*/
@@ -121,7 +100,6 @@
#define STRINGIZE_AGAIN(x) #x
#define STRINGIZE(x) STRINGIZE_AGAIN(x)
-
/*---------------------------------------------------------------------------*/
/* VENDOR, PRODUCT: Syntek Semiconductor Co., Ltd
*
@@ -135,12 +113,12 @@
#define USB_EASYCAP_VENDOR_ID 0x05e1
#define USB_EASYCAP_PRODUCT_ID 0x0408
-#define EASYCAP_DRIVER_VERSION "0.8.21"
+#define EASYCAP_DRIVER_VERSION "0.8.41"
#define EASYCAP_DRIVER_DESCRIPTION "easycapdc60"
#define USB_SKEL_MINOR_BASE 192
-#define VIDEO_DEVICE_MANY 8
-
+#define DONGLE_MANY 8
+#define INPUT_MANY 6
/*---------------------------------------------------------------------------*/
/*
* DEFAULT LUMINANCE, CONTRAST, SATURATION AND HUE
@@ -164,6 +142,8 @@
#if (USB_2_0_MAXPACKETSIZE > PAGE_SIZE)
#error video_isoc_buffer[.] will not be big enough
#endif
+#define VIDEO_JUNK_TOLERATE VIDEO_ISOC_BUFFER_MANY
+#define VIDEO_LOST_TOLERATE 50
/*---------------------------------------------------------------------------*/
/*
* VIDEO BUFFERS
@@ -210,7 +190,17 @@
#define NTSC_M_JP 5
#define PAL_60 7
#define PAL_M 9
-#define STANDARD_MANY 10
+#define PAL_BGHIN_SLOW 10
+#define PAL_Nc_SLOW 12
+#define SECAM_SLOW 14
+#define NTSC_N_SLOW 16
+#define NTSC_N_443_SLOW 18
+#define NTSC_M_SLOW 11
+#define NTSC_443_SLOW 13
+#define NTSC_M_JP_SLOW 15
+#define PAL_60_SLOW 17
+#define PAL_M_SLOW 19
+#define STANDARD_MANY 20
/*---------------------------------------------------------------------------*/
/*
* ENUMS
@@ -238,7 +228,6 @@ PIXELFORMAT_MANY
enum {
FIELD_NONE,
FIELD_INTERLACED,
-FIELD_ALTERNATE,
INTERLACE_MANY
};
#define SETTINGS_MANY (STANDARD_MANY * \
@@ -251,11 +240,18 @@ INTERLACE_MANY
* STRUCTURE DEFINITIONS
*/
/*---------------------------------------------------------------------------*/
+struct easycap_dongle {
+struct easycap *peasycap;
+struct mutex mutex_video;
+struct mutex mutex_audio;
+};
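
Editor's note: struct easycap_dongle gives each attached device its own pair of
mutexes; easycap_debug.h (changed later in this patch) exports a table
easycap_dongle[] of DONGLE_MANY slots, and easycap_ioctl() below serialises on
the mutex_video of the slot reported by isdongle().  A minimal sketch of how
such a table could be prepared follows; the array definition and the init
helper are hypothetical (the real ones live outside this excerpt, presumably
in easycap_main.c):

#include <linux/mutex.h>

struct easycap_dongle easycap_dongle[DONGLE_MANY];

/* Hypothetical: initialise every slot once, e.g. from the module's init
 * routine, before any dongle can be probed. */
static void easycap_dongle_init(void)
{
	int k;

	for (k = 0; k < DONGLE_MANY; k++) {
		easycap_dongle[k].peasycap = NULL;
		mutex_init(&easycap_dongle[k].mutex_video);
		mutex_init(&easycap_dongle[k].mutex_audio);
	}
}
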
+/*---------------------------------------------------------------------------*/
struct data_buffer {
struct list_head list_head;
void *pgo;
void *pto;
__u16 kount;
+__u16 input;
};
/*---------------------------------------------------------------------------*/
struct data_urb {
@@ -274,6 +270,22 @@ __u16 mask;
char name[128];
struct v4l2_format v4l2_format;
};
+struct inputset {
+int input;
+int input_ok;
+int standard_offset;
+int standard_offset_ok;
+int format_offset;
+int format_offset_ok;
+int brightness;
+int brightness_ok;
+int contrast;
+int contrast_ok;
+int saturation;
+int saturation_ok;
+int hue;
+int hue_ok;
+};
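
Editor's note: struct easycap gains an array inputset[INPUT_MANY] of these
per-input caches, and the header now declares newinput(); the adjust_*
routines later in easycap_ioctl.c keep the cache up to date.  Purely as an
illustration of how the cached values might be replayed when the user selects
another input, a hypothetical helper is sketched below (the real work is done
by newinput(), whose body is not in this hunk):

/* Hypothetical sketch only: re-apply whatever picture settings have been
 * pinned for an input, using the adjust_* routines declared further down. */
static void apply_inputset(struct easycap *peasycap, int input)
{
	struct inputset *ps = &peasycap->inputset[input];

	if (ps->brightness_ok)
		adjust_brightness(peasycap, ps->brightness);
	if (ps->contrast_ok)
		adjust_contrast(peasycap, ps->contrast);
	if (ps->saturation_ok)
		adjust_saturation(peasycap, ps->saturation);
	if (ps->hue_ok)
		adjust_hue(peasycap, ps->hue);
}
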
/*---------------------------------------------------------------------------*/
/*
* easycap.ilk == 0 => CVBS+S-VIDEO HARDWARE, AUDIO wMaxPacketSize=256
@@ -282,6 +294,19 @@ struct v4l2_format v4l2_format;
*/
/*---------------------------------------------------------------------------*/
struct easycap {
+#define TELLTALE "expectedstring"
+char telltale[16];
+int isdongle;
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+struct video_device video_device;
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device v4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+int status;
unsigned int audio_pages_per_fragment;
unsigned int audio_bytes_per_fragment;
unsigned int audio_buffer_page_many;
@@ -291,26 +316,14 @@ unsigned int audio_buffer_page_many;
__s16 oldaudio;
#endif /*UPSAMPLE*/
-struct easycap_format easycap_format[1 + SETTINGS_MANY];
-
int ilk;
bool microphone;
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
-#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
-struct video_device *pvideo_device;
-#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
struct usb_device *pusb_device;
struct usb_interface *pusb_interface;
struct kref kref;
-struct mutex mutex_mmap_video[FRAME_BUFFER_MANY];
-struct mutex mutex_timeval0;
-struct mutex mutex_timeval1;
-
int queued[FRAME_BUFFER_MANY];
int done[FRAME_BUFFER_MANY];
@@ -321,16 +334,24 @@ int input;
int polled;
int standard_offset;
int format_offset;
+struct inputset inputset[INPUT_MANY];
+bool ntsc;
int fps;
int usec;
int tolerate;
+int skip;
+int skipped;
+int lost[INPUT_MANY];
int merit[180];
struct timeval timeval0;
struct timeval timeval1;
struct timeval timeval2;
+struct timeval timeval3;
+struct timeval timeval6;
struct timeval timeval7;
+struct timeval timeval8;
long long int dnbydt;
int video_interface;
@@ -347,8 +368,6 @@ int video_idle;
int video_eof;
int video_junk;
-int fudge;
-
struct data_buffer video_isoc_buffer[VIDEO_ISOC_BUFFER_MANY];
struct data_buffer \
field_buffer[FIELD_BUFFER_MANY][(FIELD_BUFFER_SIZE/PAGE_SIZE)];
@@ -358,6 +377,13 @@ struct data_buffer \
struct list_head urb_video_head;
struct list_head *purb_video_head;
+__u8 cache[8];
+__u8 *pcache;
+int video_mt;
+int audio_mt;
+long long audio_bytes;
+__u32 isequence;
+
int vma_many;
/*---------------------------------------------------------------------------*/
@@ -383,7 +409,6 @@ int frame_lock; /* Flag set to 1 by DQBUF and cleared by QBUF */
*/
/*---------------------------------------------------------------------------*/
__u32 pixelformat;
-__u32 field;
int width;
int height;
int bytesperpixel;
@@ -463,8 +488,10 @@ struct data_buffer audio_buffer[];
void easycap_complete(struct urb *);
int easycap_open(struct inode *, struct file *);
int easycap_release(struct inode *, struct file *);
-long easycap_ioctl(struct file *, unsigned int, unsigned long);
-
+long easycap_ioctl_noinode(struct file *, unsigned int, \
+ unsigned long);
+int easycap_ioctl(struct inode *, struct file *, unsigned int, \
+ unsigned long);
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
int easycap_open_noinode(struct file *);
@@ -489,12 +516,10 @@ int kill_video_urbs(struct easycap *);
int field2frame(struct easycap *);
int redaub(struct easycap *, void *, void *, \
int, int, __u8, __u8, bool);
-void debrief(struct easycap *);
-void sayreadonly(struct easycap *);
void easycap_testcard(struct easycap *, int);
-int explain_ioctl(__u32);
-int explain_cid(__u32);
int fillin_formats(void);
+int reset(struct easycap *);
+int newinput(struct easycap *, int);
int adjust_standard(struct easycap *, v4l2_std_id);
int adjust_format(struct easycap *, __u32, __u32, __u32, \
int, bool);
@@ -512,7 +537,10 @@ void easysnd_complete(struct urb *);
ssize_t easysnd_read(struct file *, char __user *, size_t, loff_t *);
int easysnd_open(struct inode *, struct file *);
int easysnd_release(struct inode *, struct file *);
-long easysnd_ioctl(struct file *, unsigned int, unsigned long);
+long easysnd_ioctl_noinode(struct file *, unsigned int, \
+ unsigned long);
+int easysnd_ioctl(struct inode *, struct file *, unsigned int, \
+ unsigned long);
unsigned int easysnd_poll(struct file *, poll_table *);
void easysnd_delete(struct kref *);
int submit_audio_urbs(struct easycap *);
@@ -532,11 +560,11 @@ int wakeup_device(struct usb_device *);
int confirm_resolution(struct usb_device *);
int confirm_stream(struct usb_device *);
-int setup_stk(struct usb_device *);
-int setup_saa(struct usb_device *);
+int setup_stk(struct usb_device *, bool);
+int setup_saa(struct usb_device *, bool);
int setup_vt(struct usb_device *);
-int check_stk(struct usb_device *);
-int check_saa(struct usb_device *);
+int check_stk(struct usb_device *, bool);
+int check_saa(struct usb_device *, bool);
int ready_saa(struct usb_device *);
int merit_saa(struct usb_device *);
int check_vt(struct usb_device *);
@@ -554,12 +582,9 @@ int stop_100(struct usb_device *);
int write_300(struct usb_device *);
int read_vt(struct usb_device *, __u16);
int write_vt(struct usb_device *, __u16, __u16);
-
-int set2to78(struct usb_device *);
-int set2to93(struct usb_device *);
-
int regset(struct usb_device *, __u16, __u16);
int regget(struct usb_device *, __u16, void *);
+int isdongle(struct easycap *);
/*---------------------------------------------------------------------------*/
struct signed_div_result {
long long int quotient;
@@ -587,24 +612,41 @@ unsigned long long int remainder;
} \
} while (0)
/*---------------------------------------------------------------------------*/
-
+/*
+ * MACROS SAM(...) AND JOM(...) ALLOW DIAGNOSTIC OUTPUT TO BE TAGGED WITH
+ * THE IDENTITY OF THE DONGLE TO WHICH IT APPLIES, BUT IF INVOKED WHEN THE
+ * POINTER peasycap IS INVALID AN Oops IS LIKELY, AND ITS CAUSE MAY NOT BE
+ * IMMEDIATELY OBVIOUS FROM A CASUAL READING OF THE SOURCE CODE. BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
#define SAY(format, args...) do { \
- printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+ printk(KERN_DEBUG "easycap:: %s: " \
+ format, __func__, ##args); \
+} while (0)
+#define SAM(format, args...) do { \
+ printk(KERN_DEBUG "easycap::%i%s: " \
+ format, peasycap->isdongle, __func__, ##args);\
} while (0)
-
#if defined(EASYCAP_DEBUG)
#define JOT(n, format, args...) do { \
if (n <= easycap_debug) { \
- printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+ printk(KERN_DEBUG "easycap:: %s: " \
+ format, __func__, ##args);\
} \
} while (0)
+#define JOM(n, format, args...) do { \
+ if (n <= easycap_debug) { \
+ printk(KERN_DEBUG "easycap::%i%s: " \
+ format, peasycap->isdongle, __func__, ##args);\
+ } \
+} while (0)
+
#else
#define JOT(n, format, args...) do {} while (0)
+#define JOM(n, format, args...) do {} while (0)
#endif /*EASYCAP_DEBUG*/
-#define POUT JOT(8, ":-(in file %s line %4i\n", __FILE__, __LINE__)
-
#define MICROSECONDS(X, Y) \
((1000000*((long long int)(X.tv_sec - Y.tv_sec))) + \
(long long int)(X.tv_usec - Y.tv_usec))
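
Editor's note: because SAM() and JOM() dereference a local pointer named
peasycap, they can only be used where such a pointer is in scope and already
validated; SAY() and JOT() remain available before that point.  A small usage
sketch follows (the function itself is hypothetical, but the calls mirror the
driver's own idiom):

static int example_write(struct easycap *peasycap, __u16 reg, __u16 set)
{
	if (NULL == peasycap) {
		SAY("ERROR: peasycap is NULL\n");  /* no dongle tag available */
		return -EFAULT;
	}
	JOM(8, "writing 0x%02X to SAA register 0x%02X\n", set, reg);
	if (0 != write_saa(peasycap->pusb_device, reg, set)) {
		SAM("ERROR: failed to write SAA register 0x%02X\n", reg);
		return -ENOENT;
	}
	return 0;
}
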
diff --git a/drivers/staging/easycap/easycap_debug.h b/drivers/staging/easycap/easycap_debug.h
index 1d10d7ea7d68..b6b571843125 100644
--- a/drivers/staging/easycap/easycap_debug.h
+++ b/drivers/staging/easycap/easycap_debug.h
@@ -25,3 +25,5 @@
*/
/*****************************************************************************/
extern int easycap_debug;
+extern int easycap_gain;
+extern struct easycap_dongle easycap_dongle[];
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/easycap/easycap_ioctl.c
index 9a42ae02cd5d..447953a4e80c 100644
--- a/drivers/staging/easycap/easycap_ioctl.c
+++ b/drivers/staging/easycap/easycap_ioctl.c
@@ -36,53 +36,101 @@
* UNLESS THERE IS A PREMATURE ERROR RETURN THIS ROUTINE UPDATES THE
* FOLLOWING:
* peasycap->standard_offset
+ * peasycap->inputset[peasycap->input].standard_offset
* peasycap->fps
* peasycap->usec
* peasycap->tolerate
+ * peasycap->skip
*/
/*---------------------------------------------------------------------------*/
int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id)
{
struct easycap_standard const *peasycap_standard;
__u16 reg, set;
-int ir, rc, need;
+int ir, rc, need, k;
unsigned int itwas, isnow;
+bool resubmit;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
peasycap_standard = &easycap_standard[0];
while (0xFFFF != peasycap_standard->mask) {
- if (std_id & peasycap_standard->v4l2_standard.id)
+ if (std_id == peasycap_standard->v4l2_standard.id)
break;
peasycap_standard++;
}
if (0xFFFF == peasycap_standard->mask) {
- SAY("ERROR: 0x%08X=std_id: standard not found\n", \
+ peasycap_standard = &easycap_standard[0];
+ while (0xFFFF != peasycap_standard->mask) {
+ if (std_id & peasycap_standard->v4l2_standard.id)
+ break;
+ peasycap_standard++;
+ }
+}
+if (0xFFFF == peasycap_standard->mask) {
+ SAM("ERROR: 0x%08X=std_id: standard not found\n", \
(unsigned int)std_id);
return -EINVAL;
}
-SAY("user requests standard: %s\n", \
+SAM("selected standard: %s\n", \
&(peasycap_standard->v4l2_standard.name[0]));
if (peasycap->standard_offset == \
(int)(peasycap_standard - &easycap_standard[0])) {
- SAY("requested standard already in effect\n");
+ SAM("requested standard already in effect\n");
return 0;
}
peasycap->standard_offset = (int)(peasycap_standard - &easycap_standard[0]);
+for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].standard_offset_ok) {
+ peasycap->inputset[k].standard_offset = \
+ peasycap->standard_offset;
+ }
+}
+if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].standard_offset = \
+ peasycap->standard_offset;
+ peasycap->inputset[peasycap->input].standard_offset_ok = 1;
+} else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
peasycap->fps = peasycap_standard->v4l2_standard.frameperiod.denominator / \
peasycap_standard->v4l2_standard.frameperiod.numerator;
-if (!peasycap->fps) {
- SAY("MISTAKE: frames-per-second is zero\n");
- return -EFAULT;
+switch (peasycap->fps) {
+case 6:
+case 30: {
+ peasycap->ntsc = true;
+ break;
}
-JOT(8, "%i frames-per-second\n", peasycap->fps);
-peasycap->usec = 1000000 / (2 * peasycap->fps);
-peasycap->tolerate = 1000 * (25 / peasycap->fps);
-
-kill_video_urbs(peasycap);
-
+case 5:
+case 25: {
+ peasycap->ntsc = false;
+ break;
+}
+default: {
+ SAM("MISTAKE: %i=frames-per-second\n", peasycap->fps);
+ return -ENOENT;
+}
+}
+JOM(8, "%i frames-per-second\n", peasycap->fps);
+if (0x8000 & peasycap_standard->mask) {
+ peasycap->skip = 5;
+ peasycap->usec = 1000000 / (2 * (5 * peasycap->fps));
+ peasycap->tolerate = 1000 * (25 / (5 * peasycap->fps));
+} else {
+ peasycap->skip = 0;
+ peasycap->usec = 1000000 / (2 * peasycap->fps);
+ peasycap->tolerate = 1000 * (25 / peasycap->fps);
+}
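
Editor's note: worked through with the integer arithmetic C actually performs,
the two branches above yield the values below; note that 25 / fps truncates,
so tolerate evaluates to 0 whenever the effective frame rate is 30:

/* fps = 25 (PAL), normal standard:
 *   usec     = 1000000 / (2 * 25) = 20000
 *   tolerate = 1000 * (25 / 25)   = 1000
 * fps = 30 (NTSC), normal standard:
 *   usec     = 1000000 / (2 * 30) = 16666
 *   tolerate = 1000 * (25 / 30)   = 0
 * fps = 5 ("slow" PAL variant, skip = 5):
 *   usec     = 1000000 / (2 * 5 * 5) = 20000
 *   tolerate = 1000 * (25 / 25)      = 1000
 * fps = 6 ("slow" NTSC variant, skip = 5):
 *   usec     = 1000000 / (2 * 5 * 6) = 16666
 *   tolerate = 1000 * (25 / 30)      = 0
 */
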
+if (peasycap->video_isoc_streaming) {
+ resubmit = true;
+ kill_video_urbs(peasycap);
+} else
+ resubmit = false;
/*--------------------------------------------------------------------------*/
/*
* SAA7113H DATASHEET PAGE 44, TABLE 42
@@ -94,55 +142,41 @@ case NTSC_M_JP: {
reg = 0x0A; set = 0x95;
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+ SAM("ERROR: cannot read SAA register 0x%02X\n", reg);
else
itwas = (unsigned int)ir;
-
-
- set2to78(peasycap->pusb_device);
-
-
rc = write_saa(peasycap->pusb_device, reg, set);
if (0 != rc)
- SAY("ERROR: failed to set SAA register " \
+ SAM("ERROR: failed to set SAA register " \
"0x%02X to 0x%02X for JP standard\n", reg, set);
else {
isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"to 0x%02X\n", reg, isnow);
else
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
-
- set2to78(peasycap->pusb_device);
-
}
reg = 0x0B; set = 0x48;
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+ SAM("ERROR: cannot read SAA register 0x%02X\n", reg);
else
itwas = (unsigned int)ir;
-
- set2to78(peasycap->pusb_device);
-
rc = write_saa(peasycap->pusb_device, reg, set);
if (0 != rc)
- SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X " \
+ SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X " \
"for JP standard\n", reg, set);
else {
isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"to 0x%02X\n", reg, isnow);
else
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
-
- set2to78(peasycap->pusb_device);
-
}
/*--------------------------------------------------------------------------*/
/*
@@ -176,23 +210,20 @@ default:
if (need) {
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: failed to read SAA register 0x%02X\n", reg);
+ SAM("ERROR: failed to read SAA register 0x%02X\n", reg);
else
itwas = (unsigned int)ir;
-
- set2to78(peasycap->pusb_device);
-
rc = write_saa(peasycap->pusb_device, reg, set);
if (0 != write_saa(peasycap->pusb_device, reg, set)) {
- SAY("ERROR: failed to set SAA register " \
+ SAM("ERROR: failed to set SAA register " \
"0x%02X to 0x%02X for table 42\n", reg, set);
} else {
isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"to 0x%02X\n", reg, isnow);
else
- JOT(8, "SAA register 0x%02X changed " \
+ JOM(8, "SAA register 0x%02X changed " \
"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
}
}
@@ -204,7 +235,7 @@ if (need) {
reg = 0x08;
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: failed to read SAA register 0x%02X " \
+ SAM("ERROR: failed to read SAA register 0x%02X " \
"so cannot reset\n", reg);
else {
itwas = (unsigned int)ir;
@@ -212,19 +243,18 @@ else {
set = itwas | 0x40 ;
else
set = itwas & ~0x40 ;
-
-set2to78(peasycap->pusb_device);
-
-rc = write_saa(peasycap->pusb_device, reg, set);
-if (0 != rc)
- SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
-else {
- isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
- if (0 > ir)
- JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
- else
- JOT(8, "SAA register 0x%02X changed " \
- "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ rc = write_saa(peasycap->pusb_device, reg, set);
+ if (0 != rc)
+ SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+ reg, set);
+ else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOM(8, "SAA register 0x%02X changed to 0x%02X\n", \
+ reg, isnow);
+ else
+ JOM(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
}
}
/*--------------------------------------------------------------------------*/
@@ -235,7 +265,7 @@ else {
reg = 0x40;
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: failed to read SAA register 0x%02X " \
+ SAM("ERROR: failed to read SAA register 0x%02X " \
"so cannot reset\n", reg);
else {
itwas = (unsigned int)ir;
@@ -243,19 +273,18 @@ else {
set = itwas | 0x80 ;
else
set = itwas & ~0x80 ;
-
-set2to78(peasycap->pusb_device);
-
-rc = write_saa(peasycap->pusb_device, reg, set);
-if (0 != rc)
- SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
-else {
- isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
- if (0 > ir)
- JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
- else
- JOT(8, "SAA register 0x%02X changed " \
- "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+ rc = write_saa(peasycap->pusb_device, reg, set);
+ if (0 != rc)
+ SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+ reg, set);
+ else {
+ isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+ if (0 > ir)
+ JOM(8, "SAA register 0x%02X changed to 0x%02X\n", \
+ reg, isnow);
+ else
+ JOM(8, "SAA register 0x%02X changed " \
+ "from 0x%02X to 0x%02X\n", reg, itwas, isnow);
}
}
/*--------------------------------------------------------------------------*/
@@ -266,41 +295,39 @@ else {
reg = 0x5A;
ir = read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- SAY("ERROR: failed to read SAA register 0x%02X but continuing\n", reg);
+ SAM("ERROR: failed to read SAA register 0x%02X but continuing\n", reg);
itwas = (unsigned int)ir;
if (peasycap_standard->mask & 0x0001)
set = 0x0A ;
else
set = 0x07 ;
-
- set2to78(peasycap->pusb_device);
-
if (0 != write_saa(peasycap->pusb_device, reg, set))
- SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+ SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
reg, set);
else {
isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
if (0 > ir)
- JOT(8, "SAA register 0x%02X changed "
+ JOM(8, "SAA register 0x%02X changed "
"to 0x%02X\n", reg, isnow);
else
- JOT(8, "SAA register 0x%02X changed "
+ JOM(8, "SAA register 0x%02X changed "
"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
}
- if (0 != check_saa(peasycap->pusb_device))
- SAY("ERROR: check_saa() failed\n");
+if (true == resubmit)
+ submit_video_urbs(peasycap);
return 0;
}
/*****************************************************************************/
/*--------------------------------------------------------------------------*/
/*
- * THE ALGORITHM FOR RESPONDING TO THE VIDIO_S_FMT IOCTL DEPENDS ON THE
- * CURRENT VALUE OF peasycap->standard_offset.
+ * THE ALGORITHM FOR RESPONDING TO THE VIDIOC_S_FMT IOCTL REQUIRES
+ * A VALID VALUE OF peasycap->standard_offset, OTHERWISE -EBUSY IS RETURNED.
+ *
* PROVIDED THE ARGUMENT try IS false AND THERE IS NO PREMATURE ERROR RETURN
* THIS ROUTINE UPDATES THE FOLLOWING:
* peasycap->format_offset
+ * peasycap->inputset[peasycap->input].format_offset
* peasycap->pixelformat
- * peasycap->field
* peasycap->height
* peasycap->width
* peasycap->bytesperpixel
@@ -321,39 +348,93 @@ int adjust_format(struct easycap *peasycap, \
struct easycap_format *peasycap_format, *peasycap_best_format;
__u16 mask;
struct usb_device *p;
-int miss, multiplier, best;
-char bf[5], *pc;
+int miss, multiplier, best, k;
+char bf[5], fo[32], *pc;
__u32 uc;
+bool resubmit;
-if ((struct easycap *)NULL == peasycap) {
+if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
+if (0 > peasycap->standard_offset) {
+ JOM(8, "%i=peasycap->standard_offset\n", peasycap->standard_offset);
+ return -EBUSY;
+}
p = peasycap->pusb_device;
if ((struct usb_device *)NULL == p) {
- SAY("ERROR: peaycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
pc = &bf[0];
-uc = pixelformat; memcpy((void *)pc, (void *)(&uc), 4); bf[4] = 0;
-mask = easycap_standard[peasycap->standard_offset].mask;
-SAY("sought: %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", \
+uc = pixelformat;
+memcpy((void *)pc, (void *)(&uc), 4);
+bf[4] = 0;
+mask = 0xFF & easycap_standard[peasycap->standard_offset].mask;
+SAM("sought: %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", \
width, height, pc, pixelformat, field, mask);
+switch (field) {
+case V4L2_FIELD_ANY: {
+ strcpy(&fo[0], "V4L2_FIELD_ANY ");
+ break;
+}
+case V4L2_FIELD_NONE: {
+ strcpy(&fo[0], "V4L2_FIELD_NONE");
+ break;
+}
+case V4L2_FIELD_TOP: {
+ strcpy(&fo[0], "V4L2_FIELD_TOP");
+ break;
+}
+case V4L2_FIELD_BOTTOM: {
+ strcpy(&fo[0], "V4L2_FIELD_BOTTOM");
+ break;
+}
+case V4L2_FIELD_INTERLACED: {
+ strcpy(&fo[0], "V4L2_FIELD_INTERLACED");
+ break;
+}
+case V4L2_FIELD_SEQ_TB: {
+ strcpy(&fo[0], "V4L2_FIELD_SEQ_TB");
+ break;
+}
+case V4L2_FIELD_SEQ_BT: {
+ strcpy(&fo[0], "V4L2_FIELD_SEQ_BT");
+ break;
+}
+case V4L2_FIELD_ALTERNATE: {
+ strcpy(&fo[0], "V4L2_FIELD_ALTERNATE");
+ break;
+}
+case V4L2_FIELD_INTERLACED_TB: {
+ strcpy(&fo[0], "V4L2_FIELD_INTERLACED_TB");
+ break;
+}
+case V4L2_FIELD_INTERLACED_BT: {
+ strcpy(&fo[0], "V4L2_FIELD_INTERLACED_BT");
+ break;
+}
+default: {
+ strcpy(&fo[0], "V4L2_FIELD_... UNKNOWN ");
+ break;
+}
+}
+SAM("sought: %s\n", &fo[0]);
if (V4L2_FIELD_ANY == field) {
- field = V4L2_FIELD_INTERLACED;
- SAY("prefer: V4L2_FIELD_INTERLACED=field, was V4L2_FIELD_ANY\n");
+ field = V4L2_FIELD_NONE;
+ SAM("prefer: V4L2_FIELD_NONE=field, was V4L2_FIELD_ANY\n");
}
peasycap_best_format = (struct easycap_format *)NULL;
peasycap_format = &easycap_format[0];
while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
- JOT(16, ".> %i %i 0x%08X %ix%i\n", \
+ JOM(16, ".> %i %i 0x%08X %ix%i\n", \
peasycap_format->mask & 0x01,
peasycap_format->v4l2_format.fmt.pix.field,
peasycap_format->v4l2_format.fmt.pix.pixelformat,
peasycap_format->v4l2_format.fmt.pix.width,
peasycap_format->v4l2_format.fmt.pix.height);
- if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+ if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && \
(peasycap_format->v4l2_format.fmt.pix.field == field) && \
(peasycap_format->v4l2_format.fmt.pix.pixelformat == \
pixelformat) && \
@@ -365,11 +446,11 @@ while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
peasycap_format++;
}
if (0 == peasycap_format->v4l2_format.fmt.pix.width) {
- SAY("cannot do: %ix%i with standard mask 0x%02X\n", \
+ SAM("cannot do: %ix%i with standard mask 0x%02X\n", \
width, height, mask);
peasycap_format = &easycap_format[0]; best = -1;
while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
- if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+ if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && \
(peasycap_format->v4l2_format.fmt.pix\
.field == field) && \
(peasycap_format->v4l2_format.fmt.pix\
@@ -386,16 +467,16 @@ if (0 == peasycap_format->v4l2_format.fmt.pix.width) {
peasycap_format++;
}
if (-1 == best) {
- SAY("cannot do %ix... with standard mask 0x%02X\n", \
+ SAM("cannot do %ix... with standard mask 0x%02X\n", \
width, mask);
- SAY("cannot do ...x%i with standard mask 0x%02X\n", \
+ SAM("cannot do ...x%i with standard mask 0x%02X\n", \
height, mask);
- SAY(" %ix%i unmatched\n", width, height);
+ SAM(" %ix%i unmatched\n", width, height);
return peasycap->format_offset;
}
}
if ((struct easycap_format *)NULL == peasycap_best_format) {
- SAY("MISTAKE: peasycap_best_format is NULL");
+ SAM("MISTAKE: peasycap_best_format is NULL");
return -EINVAL;
}
peasycap_format = peasycap_best_format;
@@ -406,23 +487,43 @@ if (true == try)
/*...........................................................................*/
if (false != try) {
- SAY("MISTAKE: true==try where is should be false\n");
+	SAM("MISTAKE: true==try where it should be false\n");
return -EINVAL;
}
-SAY("actioning: %ix%i %s\n", \
+SAM("actioning: %ix%i %s\n", \
peasycap_format->v4l2_format.fmt.pix.width, \
peasycap_format->v4l2_format.fmt.pix.height,
&peasycap_format->name[0]);
peasycap->height = peasycap_format->v4l2_format.fmt.pix.height;
peasycap->width = peasycap_format->v4l2_format.fmt.pix.width;
peasycap->pixelformat = peasycap_format->v4l2_format.fmt.pix.pixelformat;
-peasycap->field = peasycap_format->v4l2_format.fmt.pix.field;
peasycap->format_offset = (int)(peasycap_format - &easycap_format[0]);
-peasycap->bytesperpixel = (0x00F0 & peasycap_format->mask) >> 4 ;
+
+
+for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].format_offset_ok) {
+ peasycap->inputset[k].format_offset = \
+ peasycap->format_offset;
+ }
+}
+if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].format_offset = \
+ peasycap->format_offset;
+ peasycap->inputset[peasycap->input].format_offset_ok = 1;
+} else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
+
+
+
+peasycap->bytesperpixel = (0x00E0 & peasycap_format->mask) >> 5 ;
if (0x0100 & peasycap_format->mask)
peasycap->byteswaporder = true;
else
peasycap->byteswaporder = false;
+if (0x0200 & peasycap_format->mask)
+ peasycap->skip = 5;
+else
+ peasycap->skip = 0;
if (0x0800 & peasycap_format->mask)
peasycap->decimatepixel = true;
else
@@ -439,27 +540,11 @@ peasycap->videofieldamount = multiplier * peasycap->width * \
multiplier * peasycap->height;
peasycap->frame_buffer_used = peasycap->bytesperpixel * \
peasycap->width * peasycap->height;
-
-if (true == peasycap->offerfields) {
- SAY("WARNING: %i=peasycap->field is untested: " \
- "please report problems\n", peasycap->field);
-
-
-/*
- * FIXME ---- THIS IS UNTESTED, MAY BE (AND PROBABLY IS) INCORRECT:
- *
- * peasycap->frame_buffer_used = peasycap->frame_buffer_used / 2;
- *
- * SO DO NOT RISK IT YET.
- *
- */
-
-
-
-}
-
-kill_video_urbs(peasycap);
-
+if (peasycap->video_isoc_streaming) {
+ resubmit = true;
+ kill_video_urbs(peasycap);
+} else
+ resubmit = false;
/*---------------------------------------------------------------------------*/
/*
* PAL
@@ -474,13 +559,13 @@ if (0 == (0x01 & peasycap_format->mask)) {
(288 == \
peasycap_format->v4l2_format.fmt.pix.height))) {
if (0 != set_resolution(p, 0x0000, 0x0001, 0x05A0, 0x0121)) {
- SAY("ERROR: set_resolution() failed\n");
+ SAM("ERROR: set_resolution() failed\n");
return -EINVAL;
}
} else if ((704 == peasycap_format->v4l2_format.fmt.pix.width) && \
(576 == peasycap_format->v4l2_format.fmt.pix.height)) {
if (0 != set_resolution(p, 0x0004, 0x0001, 0x0584, 0x0121)) {
- SAY("ERROR: set_resolution() failed\n");
+ SAM("ERROR: set_resolution() failed\n");
return -EINVAL;
}
} else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
@@ -491,11 +576,11 @@ if (0 == (0x01 & peasycap_format->mask)) {
(240 == \
peasycap_format->v4l2_format.fmt.pix.height))) {
if (0 != set_resolution(p, 0x0014, 0x0020, 0x0514, 0x0110)) {
- SAY("ERROR: set_resolution() failed\n");
+ SAM("ERROR: set_resolution() failed\n");
return -EINVAL;
}
} else {
- SAY("MISTAKE: bad format, cannot set resolution\n");
+ SAM("MISTAKE: bad format, cannot set resolution\n");
return -EINVAL;
}
/*---------------------------------------------------------------------------*/
@@ -512,7 +597,7 @@ if (0 == (0x01 & peasycap_format->mask)) {
(240 == \
peasycap_format->v4l2_format.fmt.pix.height))) {
if (0 != set_resolution(p, 0x0000, 0x0003, 0x05A0, 0x00F3)) {
- SAY("ERROR: set_resolution() failed\n");
+ SAM("ERROR: set_resolution() failed\n");
return -EINVAL;
}
} else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
@@ -523,28 +608,31 @@ if (0 == (0x01 & peasycap_format->mask)) {
(240 == \
peasycap_format->v4l2_format.fmt.pix.height))) {
if (0 != set_resolution(p, 0x0014, 0x0003, 0x0514, 0x00F3)) {
- SAY("ERROR: set_resolution() failed\n");
+ SAM("ERROR: set_resolution() failed\n");
return -EINVAL;
}
} else {
- SAY("MISTAKE: bad format, cannot set resolution\n");
+ SAM("MISTAKE: bad format, cannot set resolution\n");
return -EINVAL;
}
}
/*---------------------------------------------------------------------------*/
-
-check_stk(peasycap->pusb_device);
-
+if (true == resubmit)
+ submit_video_urbs(peasycap);
return (int)(peasycap_best_format - &easycap_format[0]);
}
/*****************************************************************************/
int adjust_brightness(struct easycap *peasycap, int value)
{
unsigned int mood;
-int i1;
+int i1, k;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
@@ -553,37 +641,56 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
if ((easycap_control[i1].minimum > value) || \
(easycap_control[i1].maximum < value))
value = easycap_control[i1].default_value;
+
+ if ((easycap_control[i1].minimum <= peasycap->brightness) && \
+ (easycap_control[i1].maximum >= \
+ peasycap->brightness)) {
+ if (peasycap->brightness == value) {
+ SAM("unchanged brightness at 0x%02X\n", \
+ value);
+ return 0;
+ }
+ }
peasycap->brightness = value;
+ for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].brightness_ok)
+ peasycap->inputset[k].brightness = \
+ peasycap->brightness;
+ }
+ if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].brightness = \
+ peasycap->brightness;
+ peasycap->inputset[peasycap->input].brightness_ok = 1;
+ } else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
mood = 0x00FF & (unsigned int)peasycap->brightness;
-
- set2to78(peasycap->pusb_device);
-
if (!write_saa(peasycap->pusb_device, 0x0A, mood)) {
- SAY("adjusting brightness to 0x%02X\n", mood);
+ SAM("adjusting brightness to 0x%02X\n", mood);
return 0;
} else {
- SAY("WARNING: failed to adjust brightness " \
+ SAM("WARNING: failed to adjust brightness " \
"to 0x%02X\n", mood);
return -ENOENT;
}
-
- set2to78(peasycap->pusb_device);
-
break;
}
i1++;
}
-SAY("WARNING: failed to adjust brightness: control not found\n");
+SAM("WARNING: failed to adjust brightness: control not found\n");
return -ENOENT;
}
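
Editor's note: the propagate-then-pin pattern above recurs in
adjust_contrast(), adjust_saturation() and adjust_hue() below.  Purely as an
illustration (this helper is hypothetical and not part of the patch), the
brightness case could be factored out like this:

/* Hypothetical helper: record a freshly applied brightness both as the
 * default for inputs that have never been set explicitly and as the pinned
 * value for the current input. */
static void cache_brightness(struct easycap *peasycap, int value)
{
	int k;

	for (k = 0; k < INPUT_MANY; k++) {
		if (!peasycap->inputset[k].brightness_ok)
			peasycap->inputset[k].brightness = value;
	}
	if (0 <= peasycap->input && INPUT_MANY > peasycap->input) {
		peasycap->inputset[peasycap->input].brightness = value;
		peasycap->inputset[peasycap->input].brightness_ok = 1;
	}
}
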
/*****************************************************************************/
int adjust_contrast(struct easycap *peasycap, int value)
{
unsigned int mood;
-int i1;
+int i1, k;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
@@ -592,37 +699,58 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
if ((easycap_control[i1].minimum > value) || \
(easycap_control[i1].maximum < value))
value = easycap_control[i1].default_value;
- peasycap->contrast = value;
- mood = 0x00FF & (unsigned int) (peasycap->contrast - 128);
- set2to78(peasycap->pusb_device);
+
+ if ((easycap_control[i1].minimum <= peasycap->contrast) && \
+ (easycap_control[i1].maximum >= \
+ peasycap->contrast)) {
+ if (peasycap->contrast == value) {
+ SAM("unchanged contrast at 0x%02X\n", value);
+ return 0;
+ }
+ }
+ peasycap->contrast = value;
+ for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].contrast_ok) {
+ peasycap->inputset[k].contrast = \
+ peasycap->contrast;
+ }
+ }
+ if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].contrast = \
+ peasycap->contrast;
+ peasycap->inputset[peasycap->input].contrast_ok = 1;
+ } else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
+ mood = 0x00FF & (unsigned int) (peasycap->contrast - 128);
if (!write_saa(peasycap->pusb_device, 0x0B, mood)) {
- SAY("adjusting contrast to 0x%02X\n", mood);
+ SAM("adjusting contrast to 0x%02X\n", mood);
return 0;
} else {
- SAY("WARNING: failed to adjust contrast to " \
+ SAM("WARNING: failed to adjust contrast to " \
"0x%02X\n", mood);
return -ENOENT;
}
-
- set2to78(peasycap->pusb_device);
-
break;
}
i1++;
}
-SAY("WARNING: failed to adjust contrast: control not found\n");
+SAM("WARNING: failed to adjust contrast: control not found\n");
return -ENOENT;
}
/*****************************************************************************/
int adjust_saturation(struct easycap *peasycap, int value)
{
unsigned int mood;
-int i1;
+int i1, k;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
@@ -631,37 +759,58 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
if ((easycap_control[i1].minimum > value) || \
(easycap_control[i1].maximum < value))
value = easycap_control[i1].default_value;
- peasycap->saturation = value;
- mood = 0x00FF & (unsigned int) (peasycap->saturation - 128);
- set2to78(peasycap->pusb_device);
+ if ((easycap_control[i1].minimum <= peasycap->saturation) && \
+ (easycap_control[i1].maximum >= \
+ peasycap->saturation)) {
+ if (peasycap->saturation == value) {
+ SAM("unchanged saturation at 0x%02X\n", \
+ value);
+ return 0;
+ }
+ }
+ peasycap->saturation = value;
+ for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].saturation_ok) {
+ peasycap->inputset[k].saturation = \
+ peasycap->saturation;
+ }
+ }
+ if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].saturation = \
+ peasycap->saturation;
+ peasycap->inputset[peasycap->input].saturation_ok = 1;
+ } else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
+ mood = 0x00FF & (unsigned int) (peasycap->saturation - 128);
if (!write_saa(peasycap->pusb_device, 0x0C, mood)) {
- SAY("adjusting saturation to 0x%02X\n", mood);
+ SAM("adjusting saturation to 0x%02X\n", mood);
return 0;
} else {
- SAY("WARNING: failed to adjust saturation to " \
+ SAM("WARNING: failed to adjust saturation to " \
"0x%02X\n", mood);
return -ENOENT;
}
break;
-
- set2to78(peasycap->pusb_device);
-
}
i1++;
}
-SAY("WARNING: failed to adjust saturation: control not found\n");
+SAM("WARNING: failed to adjust saturation: control not found\n");
return -ENOENT;
}
/*****************************************************************************/
int adjust_hue(struct easycap *peasycap, int value)
{
unsigned int mood;
-int i1, i2;
+int i1, i2, k;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
@@ -670,27 +819,40 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
if ((easycap_control[i1].minimum > value) || \
(easycap_control[i1].maximum < value))
value = easycap_control[i1].default_value;
+
+ if ((easycap_control[i1].minimum <= peasycap->hue) && \
+ (easycap_control[i1].maximum >= \
+ peasycap->hue)) {
+ if (peasycap->hue == value) {
+ SAM("unchanged hue at 0x%02X\n", value);
+ return 0;
+ }
+ }
peasycap->hue = value;
+ for (k = 0; k < INPUT_MANY; k++) {
+ if (!peasycap->inputset[k].hue_ok)
+ peasycap->inputset[k].hue = peasycap->hue;
+ }
+ if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+ peasycap->inputset[peasycap->input].hue = \
+ peasycap->hue;
+ peasycap->inputset[peasycap->input].hue_ok = 1;
+ } else
+ JOM(8, "%i=peasycap->input\n", peasycap->input);
i2 = peasycap->hue - 128;
mood = 0x00FF & ((int) i2);
-
- set2to78(peasycap->pusb_device);
-
if (!write_saa(peasycap->pusb_device, 0x0D, mood)) {
- SAY("adjusting hue to 0x%02X\n", mood);
+ SAM("adjusting hue to 0x%02X\n", mood);
return 0;
} else {
- SAY("WARNING: failed to adjust hue to 0x%02X\n", mood);
+ SAM("WARNING: failed to adjust hue to 0x%02X\n", mood);
return -ENOENT;
}
-
- set2to78(peasycap->pusb_device);
-
break;
}
i1++;
}
-SAY("WARNING: failed to adjust hue: control not found\n");
+SAM("WARNING: failed to adjust hue: control not found\n");
return -ENOENT;
}
/*****************************************************************************/
@@ -699,33 +861,45 @@ int adjust_volume(struct easycap *peasycap, int value)
__s8 mood;
int i1;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
while (0xFFFFFFFF != easycap_control[i1].id) {
if (V4L2_CID_AUDIO_VOLUME == easycap_control[i1].id) {
if ((easycap_control[i1].minimum > value) || \
- (easycap_control[i1].maximum < value))
+ (easycap_control[i1].maximum < value))
value = easycap_control[i1].default_value;
+ if ((easycap_control[i1].minimum <= peasycap->volume) && \
+ (easycap_control[i1].maximum >= \
+ peasycap->volume)) {
+ if (peasycap->volume == value) {
+ SAM("unchanged volume at 0x%02X\n", value);
+ return 0;
+ }
+ }
peasycap->volume = value;
mood = (16 > peasycap->volume) ? 16 : \
((31 < peasycap->volume) ? 31 : \
(__s8) peasycap->volume);
if (!audio_gainset(peasycap->pusb_device, mood)) {
- SAY("adjusting volume to 0x%01X\n", mood);
+ SAM("adjusting volume to 0x%02X\n", mood);
return 0;
} else {
- SAY("WARNING: failed to adjust volume to " \
- "0x%1X\n", mood);
+ SAM("WARNING: failed to adjust volume to " \
+					"0x%02X\n", mood);
return -ENOENT;
}
break;
}
i1++;
}
-SAY("WARNING: failed to adjust volume: control not found\n");
+SAM("WARNING: failed to adjust volume: control not found\n");
return -ENOENT;
}
/*****************************************************************************/
@@ -744,8 +918,12 @@ int adjust_mute(struct easycap *peasycap, int value)
{
int i1;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
i1 = 0;
@@ -756,13 +934,13 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
case 1: {
peasycap->audio_idle = 1;
peasycap->timeval0.tv_sec = 0;
- SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+ SAM("adjusting mute: %i=peasycap->audio_idle\n", \
peasycap->audio_idle);
return 0;
}
default: {
peasycap->audio_idle = 0;
- SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+ SAM("adjusting mute: %i=peasycap->audio_idle\n", \
peasycap->audio_idle);
return 0;
}
@@ -771,47 +949,107 @@ while (0xFFFFFFFF != easycap_control[i1].id) {
}
i1++;
}
-SAY("WARNING: failed to adjust mute: control not found\n");
+SAM("WARNING: failed to adjust mute: control not found\n");
return -ENOENT;
}
-
-/*--------------------------------------------------------------------------*/
-static int easycap_ioctl_bkl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+/*****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if ((defined(EASYCAP_IS_VIDEODEV_CLIENT)) || \
+ (defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)))
+long
+easycap_ioctl_noinode(struct file *file, unsigned int cmd, unsigned long arg) {
+ return (long)easycap_ioctl((struct inode *)NULL, file, cmd, arg);
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT||EASYCAP_NEEDS_UNLOCKED_IOCTL*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
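
Editor's note: the wrapper exists because the classic file_operations.ioctl
hook passes an inode and runs under the big kernel lock, whereas
unlocked_ioctl passes only the file pointer; easycap_ioctl_noinode() simply
drops the inode argument.  The registration itself happens in easycap_main.c,
which this excerpt does not show, so the initializer below is only a sketch of
how the two hooks might be selected, assuming a kernel old enough to still
offer the locked .ioctl member when EASYCAP_NEEDS_UNLOCKED_IOCTL is undefined,
and ignoring the EASYCAP_IS_VIDEODEV_CLIENT variant that registers through the
V4L2 framework instead:

#include <linux/fs.h>

static const struct file_operations easycap_fops_sketch = {
	.open    = easycap_open,
	.release = easycap_release,
#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
	.unlocked_ioctl = easycap_ioctl_noinode,  /* newer kernels: no inode, no BKL */
#else
	.ioctl          = easycap_ioctl,          /* older kernels: locked ioctl hook */
#endif
};
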
+/*---------------------------------------------------------------------------*/
+int
+easycap_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
-static struct easycap *peasycap;
-static struct usb_device *p;
-static __u32 isequence;
+struct easycap *peasycap;
+struct usb_device *p;
+int kd;
+if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ return -ERESTARTSYS;
+}
peasycap = file->private_data;
if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return -1;
}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap\n");
+ return -EFAULT;
+}
p = peasycap->pusb_device;
-if ((struct usb_device *)NULL == p) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+if (NULL == p) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+ SAY("ERROR: cannot lock easycap_dongle[%i].mutex_video\n", kd);
+ return -ERESTARTSYS;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
/*---------------------------------------------------------------------------*/
/*
- * MOST OF THE VARIABLES DECLARED static IN THE case{} BLOCKS BELOW ARE SO
- * DECLARED SIMPLY TO AVOID A COMPILER WARNING OF THE KIND:
- * easycap_ioctl.c: warning:
- * the frame size of ... bytes is larger than 1024 bytes
- */
+ * MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ * IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ if (kd != isdongle(peasycap))
+ return -ERESTARTSYS;
+ if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+ peasycap = file->private_data;
+ if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+ if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
+ p = peasycap->pusb_device;
+ if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ * IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ * ATTEMPT TO ACQUIRE THE MUTEX, isdongle() WILL HAVE FAILED. BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ return -ERESTARTSYS;
+}
/*---------------------------------------------------------------------------*/
switch (cmd) {
case VIDIOC_QUERYCAP: {
- static struct v4l2_capability v4l2_capability;
- static char version[16], *p1, *p2;
- static int i, rc, k[3];
- static long lng;
+ struct v4l2_capability v4l2_capability;
+ char version[16], *p1, *p2;
+ int i, rc, k[3];
+ long lng;
- JOT(8, "VIDIOC_QUERYCAP\n");
+ JOM(8, "VIDIOC_QUERYCAP\n");
if (16 <= strlen(EASYCAP_DRIVER_VERSION)) {
- SAY("ERROR: bad driver version string\n"); return -EINVAL;
+ SAM("ERROR: bad driver version string\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EINVAL;
}
strcpy(&version[0], EASYCAP_DRIVER_VERSION);
for (i = 0; i < 3; i++)
@@ -826,8 +1064,9 @@ case VIDIOC_QUERYCAP: {
if (3 > i) {
rc = (int) strict_strtol(p1, 10, &lng);
if (0 != rc) {
- SAY("ERROR: %i=strict_strtol(%s,.,,)\n", \
+ SAM("ERROR: %i=strict_strtol(%s,.,,)\n", \
rc, p1);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
k[i] = (int)lng;
@@ -844,7 +1083,7 @@ case VIDIOC_QUERYCAP: {
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE;
v4l2_capability.version = KERNEL_VERSION(k[0], k[1], k[2]);
- JOT(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]);
+ JOM(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]);
strlcpy(&v4l2_capability.card[0], "EasyCAP DC60", \
sizeof(v4l2_capability.card));
@@ -853,26 +1092,26 @@ case VIDIOC_QUERYCAP: {
sizeof(v4l2_capability.bus_info)) < 0) {
strlcpy(&v4l2_capability.bus_info[0], "EasyCAP bus_info", \
sizeof(v4l2_capability.bus_info));
- JOT(8, "%s=v4l2_capability.bus_info\n", \
+ JOM(8, "%s=v4l2_capability.bus_info\n", \
&v4l2_capability.bus_info[0]);
}
if (0 != copy_to_user((void __user *)arg, &v4l2_capability, \
sizeof(struct v4l2_capability))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUMINPUT: {
- static struct v4l2_input v4l2_input;
- static __u32 index;
+ struct v4l2_input v4l2_input;
+ __u32 index;
- JOT(8, "VIDIOC_ENUMINPUT\n");
+ JOM(8, "VIDIOC_ENUMINPUT\n");
if (0 != copy_from_user(&v4l2_input, (void __user *)arg, \
sizeof(struct v4l2_input))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
@@ -889,7 +1128,7 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
case 1: {
@@ -901,7 +1140,7 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
case 2: {
@@ -913,7 +1152,7 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
case 3: {
@@ -925,7 +1164,7 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
case 4: {
@@ -937,7 +1176,7 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
case 5: {
@@ -949,31 +1188,32 @@ case VIDIOC_ENUMINPUT: {
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
V4L2_STD_NTSC ;
v4l2_input.status = 0;
- JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
break;
}
default: {
- JOT(8, "%i=index: exhausts inputs\n", index);
+ JOM(8, "%i=index: exhausts inputs\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
}
if (0 != copy_to_user((void __user *)arg, &v4l2_input, \
sizeof(struct v4l2_input))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_INPUT: {
- static __u32 index;
+ __u32 index;
- JOT(8, "VIDIOC_G_INPUT\n");
+ JOM(8, "VIDIOC_G_INPUT\n");
index = (__u32)peasycap->input;
- JOT(8, "user is told: %i\n", index);
+ JOM(8, "user is told: %i\n", index);
if (0 != copy_to_user((void __user *)arg, &index, sizeof(__u32))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
@@ -981,79 +1221,89 @@ case VIDIOC_G_INPUT: {
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_INPUT:
{
- static __u32 index;
+ __u32 index;
+ int rc;
- JOT(8, "VIDIOC_S_INPUT\n");
+ JOM(8, "VIDIOC_S_INPUT\n");
if (0 != copy_from_user(&index, (void __user *)arg, sizeof(__u32))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- JOT(8, "user requests input %i\n", index);
+ JOM(8, "user requests input %i\n", index);
if ((int)index == peasycap->input) {
- SAY("requested input already in effect\n");
+ SAM("requested input already in effect\n");
break;
}
- if ((0 > index) || (5 < index)) {
- JOT(8, "ERROR: bad requested input: %i\n", index);
+ if ((0 > index) || (INPUT_MANY <= index)) {
+ JOM(8, "ERROR: bad requested input: %i\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
- peasycap->input = (int)index;
-
- select_input(peasycap->pusb_device, peasycap->input, 9);
+ rc = newinput(peasycap, (int)index);
+ if (0 == rc) {
+ JOM(8, "newinput(.,%i) OK\n", (int)index);
+ } else {
+ SAM("ERROR: newinput(.,%i) returned %i\n", (int)index, rc);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUMAUDIO: {
- JOT(8, "VIDIOC_ENUMAUDIO\n");
+ JOM(8, "VIDIOC_ENUMAUDIO\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUMAUDOUT: {
- static struct v4l2_audioout v4l2_audioout;
+ struct v4l2_audioout v4l2_audioout;
- JOT(8, "VIDIOC_ENUMAUDOUT\n");
+ JOM(8, "VIDIOC_ENUMAUDOUT\n");
if (0 != copy_from_user(&v4l2_audioout, (void __user *)arg, \
sizeof(struct v4l2_audioout))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (0 != v4l2_audioout.index)
+ if (0 != v4l2_audioout.index) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
memset(&v4l2_audioout, 0, sizeof(struct v4l2_audioout));
v4l2_audioout.index = 0;
strcpy(&v4l2_audioout.name[0], "Soundtrack");
if (0 != copy_to_user((void __user *)arg, &v4l2_audioout, \
sizeof(struct v4l2_audioout))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_QUERYCTRL: {
- static int i1;
- static struct v4l2_queryctrl v4l2_queryctrl;
+ int i1;
+ struct v4l2_queryctrl v4l2_queryctrl;
- JOT(8, "VIDIOC_QUERYCTRL\n");
+ JOM(8, "VIDIOC_QUERYCTRL\n");
if (0 != copy_from_user(&v4l2_queryctrl, (void __user *)arg, \
sizeof(struct v4l2_queryctrl))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
i1 = 0;
while (0xFFFFFFFF != easycap_control[i1].id) {
if (easycap_control[i1].id == v4l2_queryctrl.id) {
- JOT(8, "VIDIOC_QUERYCTRL %s=easycap_control[%i]" \
+ JOM(8, "VIDIOC_QUERYCTRL %s=easycap_control[%i]" \
".name\n", &easycap_control[i1].name[0], i1);
memcpy(&v4l2_queryctrl, &easycap_control[i1], \
sizeof(struct v4l2_queryctrl));
@@ -1062,127 +1312,137 @@ case VIDIOC_QUERYCTRL: {
i1++;
}
if (0xFFFFFFFF == easycap_control[i1].id) {
- JOT(8, "%i=index: exhausts controls\n", i1);
+ JOM(8, "%i=index: exhausts controls\n", i1);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
if (0 != copy_to_user((void __user *)arg, &v4l2_queryctrl, \
sizeof(struct v4l2_queryctrl))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_QUERYMENU: {
- JOT(8, "VIDIOC_QUERYMENU unsupported\n");
+ JOM(8, "VIDIOC_QUERYMENU unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
- break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_CTRL: {
- static struct v4l2_control v4l2_control;
+ struct v4l2_control *pv4l2_control;
- JOT(8, "VIDIOC_G_CTRL\n");
-
- if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
+ JOM(8, "VIDIOC_G_CTRL\n");
+ pv4l2_control = kzalloc(sizeof(struct v4l2_control), GFP_KERNEL);
+ if (!pv4l2_control) {
+ SAM("ERROR: out of memory\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ENOMEM;
+ }
+ if (0 != copy_from_user(pv4l2_control, (void __user *)arg, \
sizeof(struct v4l2_control))) {
- POUT;
+ kfree(pv4l2_control);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- switch (v4l2_control.id) {
+ switch (pv4l2_control->id) {
case V4L2_CID_BRIGHTNESS: {
- v4l2_control.value = peasycap->brightness;
- JOT(8, "user enquires brightness: %i\n", v4l2_control.value);
+ pv4l2_control->value = peasycap->brightness;
+ JOM(8, "user enquires brightness: %i\n", pv4l2_control->value);
break;
}
case V4L2_CID_CONTRAST: {
- v4l2_control.value = peasycap->contrast;
- JOT(8, "user enquires contrast: %i\n", v4l2_control.value);
+ pv4l2_control->value = peasycap->contrast;
+ JOM(8, "user enquires contrast: %i\n", pv4l2_control->value);
break;
}
case V4L2_CID_SATURATION: {
- v4l2_control.value = peasycap->saturation;
- JOT(8, "user enquires saturation: %i\n", v4l2_control.value);
+ pv4l2_control->value = peasycap->saturation;
+ JOM(8, "user enquires saturation: %i\n", pv4l2_control->value);
break;
}
case V4L2_CID_HUE: {
- v4l2_control.value = peasycap->hue;
- JOT(8, "user enquires hue: %i\n", v4l2_control.value);
+ pv4l2_control->value = peasycap->hue;
+ JOM(8, "user enquires hue: %i\n", pv4l2_control->value);
break;
}
case V4L2_CID_AUDIO_VOLUME: {
- v4l2_control.value = peasycap->volume;
- JOT(8, "user enquires volume: %i\n", v4l2_control.value);
+ pv4l2_control->value = peasycap->volume;
+ JOM(8, "user enquires volume: %i\n", pv4l2_control->value);
break;
}
case V4L2_CID_AUDIO_MUTE: {
if (1 == peasycap->mute)
- v4l2_control.value = true;
+ pv4l2_control->value = true;
else
- v4l2_control.value = false;
- JOT(8, "user enquires mute: %i\n", v4l2_control.value);
+ pv4l2_control->value = false;
+ JOM(8, "user enquires mute: %i\n", pv4l2_control->value);
break;
}
default: {
- SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
- v4l2_control.id);
- explain_cid(v4l2_control.id);
+ SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+ pv4l2_control->id);
+ kfree(pv4l2_control);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
}
- if (0 != copy_to_user((void __user *)arg, &v4l2_control, \
+ if (0 != copy_to_user((void __user *)arg, pv4l2_control, \
sizeof(struct v4l2_control))) {
- POUT;
+ kfree(pv4l2_control);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
+ kfree(pv4l2_control);
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
#if defined(VIDIOC_S_CTRL_OLD)
case VIDIOC_S_CTRL_OLD: {
- JOT(8, "VIDIOC_S_CTRL_OLD required at least for xawtv\n");
+ JOM(8, "VIDIOC_S_CTRL_OLD required at least for xawtv\n");
}
#endif /*VIDIOC_S_CTRL_OLD*/
case VIDIOC_S_CTRL:
{
- static struct v4l2_control v4l2_control;
+ struct v4l2_control v4l2_control;
- JOT(8, "VIDIOC_S_CTRL\n");
+ JOM(8, "VIDIOC_S_CTRL\n");
if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
sizeof(struct v4l2_control))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
switch (v4l2_control.id) {
case V4L2_CID_BRIGHTNESS: {
- JOT(8, "user requests brightness %i\n", v4l2_control.value);
+ JOM(8, "user requests brightness %i\n", v4l2_control.value);
if (0 != adjust_brightness(peasycap, v4l2_control.value))
;
break;
}
case V4L2_CID_CONTRAST: {
- JOT(8, "user requests contrast %i\n", v4l2_control.value);
+ JOM(8, "user requests contrast %i\n", v4l2_control.value);
if (0 != adjust_contrast(peasycap, v4l2_control.value))
;
break;
}
case V4L2_CID_SATURATION: {
- JOT(8, "user requests saturation %i\n", v4l2_control.value);
+ JOM(8, "user requests saturation %i\n", v4l2_control.value);
if (0 != adjust_saturation(peasycap, v4l2_control.value))
;
break;
}
case V4L2_CID_HUE: {
- JOT(8, "user requests hue %i\n", v4l2_control.value);
+ JOM(8, "user requests hue %i\n", v4l2_control.value);
if (0 != adjust_hue(peasycap, v4l2_control.value))
;
break;
}
case V4L2_CID_AUDIO_VOLUME: {
- JOT(8, "user requests volume %i\n", v4l2_control.value);
+ JOM(8, "user requests volume %i\n", v4l2_control.value);
if (0 != adjust_volume(peasycap, v4l2_control.value))
;
break;
@@ -1190,40 +1450,41 @@ case VIDIOC_S_CTRL:
case V4L2_CID_AUDIO_MUTE: {
int mute;
- JOT(8, "user requests mute %i\n", v4l2_control.value);
+ JOM(8, "user requests mute %i\n", v4l2_control.value);
if (true == v4l2_control.value)
mute = 1;
else
mute = 0;
if (0 != adjust_mute(peasycap, mute))
- SAY("WARNING: failed to adjust mute to %i\n", mute);
+ SAM("WARNING: failed to adjust mute to %i\n", mute);
break;
}
default: {
- SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+ SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", \
v4l2_control.id);
- explain_cid(v4l2_control.id);
- return -EINVAL;
- }
- }
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EINVAL;
+ }
+ }
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_EXT_CTRLS: {
- JOT(8, "VIDIOC_S_EXT_CTRLS unsupported\n");
+ JOM(8, "VIDIOC_S_EXT_CTRLS unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUM_FMT: {
- static __u32 index;
- static struct v4l2_fmtdesc v4l2_fmtdesc;
+ __u32 index;
+ struct v4l2_fmtdesc v4l2_fmtdesc;
- JOT(8, "VIDIOC_ENUM_FMT\n");
+ JOM(8, "VIDIOC_ENUM_FMT\n");
if (0 != copy_from_user(&v4l2_fmtdesc, (void __user *)arg, \
sizeof(struct v4l2_fmtdesc))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
@@ -1238,117 +1499,327 @@ case VIDIOC_ENUM_FMT: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "uyvy");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_UYVY;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
case 1: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "yuy2");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_YUYV;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
case 2: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "rgb24");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB24;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
case 3: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "rgb32");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB32;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
case 4: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "bgr24");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR24;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
case 5: {
v4l2_fmtdesc.flags = 0;
strcpy(&v4l2_fmtdesc.description[0], "bgr32");
v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR32;
- JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+ JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
break;
}
default: {
- JOT(8, "%i=index: exhausts formats\n", index);
+ JOM(8, "%i=index: exhausts formats\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
}
if (0 != copy_to_user((void __user *)arg, &v4l2_fmtdesc, \
sizeof(struct v4l2_fmtdesc))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ * THE RESPONSE TO VIDIOC_ENUM_FRAMESIZES MUST BE CONDITIONED ON
+ * THE CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS. BEWARE.
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUM_FRAMESIZES: {
- JOT(8, "VIDIOC_ENUM_FRAMESIZES unsupported\n");
- return -EINVAL;
+ __u32 index;
+ struct v4l2_frmsizeenum v4l2_frmsizeenum;
+
+ JOM(8, "VIDIOC_ENUM_FRAMESIZES\n");
+
+ if (0 != copy_from_user(&v4l2_frmsizeenum, (void __user *)arg, \
+ sizeof(struct v4l2_frmsizeenum))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
+
+ index = v4l2_frmsizeenum.index;
+
+ v4l2_frmsizeenum.type = (__u32) V4L2_FRMSIZE_TYPE_DISCRETE;
+
+ if (true == peasycap->ntsc) {
+ switch (index) {
+ case 0: {
+ v4l2_frmsizeenum.discrete.width = 640;
+ v4l2_frmsizeenum.discrete.height = 480;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 1: {
+ v4l2_frmsizeenum.discrete.width = 320;
+ v4l2_frmsizeenum.discrete.height = 240;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 2: {
+ v4l2_frmsizeenum.discrete.width = 720;
+ v4l2_frmsizeenum.discrete.height = 480;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 3: {
+ v4l2_frmsizeenum.discrete.width = 360;
+ v4l2_frmsizeenum.discrete.height = 240;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ default: {
+ JOM(8, "%i=index: exhausts framesizes\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EINVAL;
+ }
+ }
+ } else {
+ switch (index) {
+ case 0: {
+ v4l2_frmsizeenum.discrete.width = 640;
+ v4l2_frmsizeenum.discrete.height = 480;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 1: {
+ v4l2_frmsizeenum.discrete.width = 320;
+ v4l2_frmsizeenum.discrete.height = 240;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 2: {
+ v4l2_frmsizeenum.discrete.width = 704;
+ v4l2_frmsizeenum.discrete.height = 576;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 3: {
+ v4l2_frmsizeenum.discrete.width = 720;
+ v4l2_frmsizeenum.discrete.height = 576;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ case 4: {
+ v4l2_frmsizeenum.discrete.width = 360;
+ v4l2_frmsizeenum.discrete.height = 288;
+ JOM(8, "%i=index: %ix%i\n", index, \
+ (int)(v4l2_frmsizeenum.\
+ discrete.width), \
+ (int)(v4l2_frmsizeenum.\
+ discrete.height));
+ break;
+ }
+ default: {
+ JOM(8, "%i=index: exhausts framesizes\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EINVAL;
+ }
+ }
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_frmsizeenum, \
+ sizeof(struct v4l2_frmsizeenum))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
+ break;
}
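For orientation, this is how a capture client typically walks the discrete frame sizes the handler above now reports. A minimal userspace sketch, assuming a device node such as /dev/video0 and picking V4L2_PIX_FMT_UYVY as the example pixel format; note the handler keys its list on the current NTSC/PAL standard, as the comment before the case warns.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum frm;
	int fd = open("/dev/video0", O_RDWR);	/* illustrative device node */

	if (fd < 0)
		return 1;
	memset(&frm, 0, sizeof(frm));
	frm.pixel_format = V4L2_PIX_FMT_UYVY;	/* one of the formats enumerated above */
	/* the handler returns -EINVAL once the index exhausts the list */
	while (0 == ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frm)) {
		if (V4L2_FRMSIZE_TYPE_DISCRETE == frm.type)
			printf("%u: %ux%u\n", frm.index,
				frm.discrete.width, frm.discrete.height);
		frm.index++;
	}
	close(fd);
	return 0;
}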
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ * THE RESPONSE TO VIDIOC_ENUM_FRAMEINTERVALS MUST BE CONDITIONED ON
+ * THE CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS. BEWARE.
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_ENUM_FRAMEINTERVALS: {
- JOT(8, "VIDIOC_ENUM_FRAME_INTERVALS unsupported\n");
- return -EINVAL;
+ __u32 index;
+ int denominator;
+ struct v4l2_frmivalenum v4l2_frmivalenum;
+
+ JOM(8, "VIDIOC_ENUM_FRAMEINTERVALS\n");
+
+ if (peasycap->fps)
+ denominator = peasycap->fps;
+ else {
+ if (true == peasycap->ntsc)
+ denominator = 30;
+ else
+ denominator = 25;
+ }
+
+ if (0 != copy_from_user(&v4l2_frmivalenum, (void __user *)arg, \
+ sizeof(struct v4l2_frmivalenum))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
+
+ index = v4l2_frmivalenum.index;
+
+ v4l2_frmivalenum.type = (__u32) V4L2_FRMIVAL_TYPE_DISCRETE;
+
+ switch (index) {
+ case 0: {
+ v4l2_frmivalenum.discrete.numerator = 1;
+ v4l2_frmivalenum.discrete.denominator = denominator;
+ JOM(8, "%i=index: %i/%i\n", index, \
+ (int)(v4l2_frmivalenum.discrete.numerator), \
+ (int)(v4l2_frmivalenum.discrete.denominator));
+ break;
+ }
+ case 1: {
+ v4l2_frmivalenum.discrete.numerator = 1;
+ v4l2_frmivalenum.discrete.denominator = denominator/5;
+ JOM(8, "%i=index: %i/%i\n", index, \
+ (int)(v4l2_frmivalenum.discrete.numerator), \
+ (int)(v4l2_frmivalenum.discrete.denominator));
+ break;
+ }
+ default: {
+ JOM(8, "%i=index: exhausts frameintervals\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EINVAL;
+ }
+ }
+ if (0 != copy_to_user((void __user *)arg, &v4l2_frmivalenum, \
+ sizeof(struct v4l2_frmivalenum))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EFAULT;
+ }
+ break;
}
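The same enumeration pattern applies to the frame intervals just added. A short continuation of the sketch above, reusing its fd, with 640x480 chosen purely as an example size; the denominator the user sees is peasycap->fps when set, otherwise 30 for NTSC or 25 for PAL, exactly as computed in the handler.

	struct v4l2_frmivalenum ival;

	memset(&ival, 0, sizeof(ival));
	ival.pixel_format = V4L2_PIX_FMT_UYVY;
	ival.width = 640;			/* example size from the list above */
	ival.height = 480;
	while (0 == ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &ival)) {
		if (V4L2_FRMIVAL_TYPE_DISCRETE == ival.type)
			printf("%u: %u/%u s per frame\n", ival.index,
				ival.discrete.numerator,
				ival.discrete.denominator);
		ival.index++;
	}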
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_FMT: {
- static struct v4l2_format v4l2_format;
- static struct v4l2_pix_format v4l2_pix_format;
-
- JOT(8, "VIDIOC_G_FMT\n");
-
- if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
+ struct v4l2_format *pv4l2_format;
+ struct v4l2_pix_format *pv4l2_pix_format;
+
+ JOM(8, "VIDIOC_G_FMT\n");
+ pv4l2_format = kzalloc(sizeof(struct v4l2_format), GFP_KERNEL);
+ if (!pv4l2_format) {
+ SAM("ERROR: out of memory\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ENOMEM;
+ }
+ pv4l2_pix_format = kzalloc(sizeof(struct v4l2_pix_format), GFP_KERNEL);
+ if (!pv4l2_pix_format) {
+ SAM("ERROR: out of memory\n");
+ kfree(pv4l2_format);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ENOMEM;
+ }
+ if (0 != copy_from_user(pv4l2_format, (void __user *)arg, \
sizeof(struct v4l2_format))) {
- POUT;
+ kfree(pv4l2_format);
+ kfree(pv4l2_pix_format);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- POUT;
+ if (pv4l2_format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ kfree(pv4l2_format);
+ kfree(pv4l2_pix_format);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
- memset(&v4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
- v4l2_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- memcpy(&(v4l2_format.fmt.pix), \
- &(easycap_format[peasycap->format_offset]\
- .v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
- JOT(8, "user is told: %s\n", \
+ memset(pv4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
+ pv4l2_format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ memcpy(&pv4l2_format->fmt.pix, \
+ &easycap_format[peasycap->format_offset]\
+ .v4l2_format.fmt.pix, sizeof(struct v4l2_pix_format));
+ JOM(8, "user is told: %s\n", \
&easycap_format[peasycap->format_offset].name[0]);
- if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
+ if (0 != copy_to_user((void __user *)arg, pv4l2_format, \
sizeof(struct v4l2_format))) {
- POUT;
+ kfree(pv4l2_format);
+ kfree(pv4l2_pix_format);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
+ kfree(pv4l2_format);
+ kfree(pv4l2_pix_format);
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_TRY_FMT:
case VIDIOC_S_FMT: {
- static struct v4l2_format v4l2_format;
- static struct v4l2_pix_format v4l2_pix_format;
- static bool try;
- static int best_format;
+ struct v4l2_format v4l2_format;
+ struct v4l2_pix_format v4l2_pix_format;
+ bool try;
+ int best_format;
if (VIDIOC_TRY_FMT == cmd) {
- JOT(8, "VIDIOC_TRY_FMT\n");
+ JOM(8, "VIDIOC_TRY_FMT\n");
try = true;
} else {
- JOT(8, "VIDIOC_S_FMT\n");
+ JOM(8, "VIDIOC_S_FMT\n");
try = false;
}
if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
sizeof(struct v4l2_format))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
@@ -1359,7 +1830,12 @@ case VIDIOC_S_FMT: {
v4l2_format.fmt.pix.field, \
try);
if (0 > best_format) {
- JOT(8, "WARNING: adjust_format() returned %i\n", best_format);
+ if (-EBUSY == best_format) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EBUSY;
+ }
+ JOM(8, "WARNING: adjust_format() returned %i\n", best_format);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -ENOENT;
}
/*...........................................................................*/
@@ -1368,29 +1844,29 @@ case VIDIOC_S_FMT: {
memcpy(&(v4l2_format.fmt.pix), &(easycap_format[best_format]\
.v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
- JOT(8, "user is told: %s\n", &easycap_format[best_format].name[0]);
+ JOM(8, "user is told: %s\n", &easycap_format[best_format].name[0]);
if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
sizeof(struct v4l2_format))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_CROPCAP: {
- static struct v4l2_cropcap v4l2_cropcap;
+ struct v4l2_cropcap v4l2_cropcap;
- JOT(8, "VIDIOC_CROPCAP\n");
+ JOM(8, "VIDIOC_CROPCAP\n");
if (0 != copy_from_user(&v4l2_cropcap, (void __user *)arg, \
sizeof(struct v4l2_cropcap))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
if (v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- JOT(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+ JOM(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
memset(&v4l2_cropcap, 0, sizeof(struct v4l2_cropcap));
v4l2_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1405,11 +1881,11 @@ case VIDIOC_CROPCAP: {
v4l2_cropcap.pixelaspect.numerator = 1;
v4l2_cropcap.pixelaspect.denominator = 1;
- JOT(8, "user is told: %ix%i\n", peasycap->width, peasycap->height);
+ JOM(8, "user is told: %ix%i\n", peasycap->width, peasycap->height);
if (0 != copy_to_user((void __user *)arg, &v4l2_cropcap, \
sizeof(struct v4l2_cropcap))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
@@ -1417,13 +1893,15 @@ case VIDIOC_CROPCAP: {
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_CROP:
case VIDIOC_S_CROP: {
- JOT(8, "VIDIOC_G_CROP|VIDIOC_S_CROP unsupported\n");
+ JOM(8, "VIDIOC_G_CROP|VIDIOC_S_CROP unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_QUERYSTD: {
- JOT(8, "VIDIOC_QUERYSTD: " \
+ JOM(8, "VIDIOC_QUERYSTD: " \
"EasyCAP is incapable of detecting standard\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
break;
}
@@ -1436,16 +1914,16 @@ case VIDIOC_QUERYSTD: {
*/
/*---------------------------------------------------------------------------*/
case VIDIOC_ENUMSTD: {
- static int last0 = -1, last1 = -1, last2 = -1, last3 = -1;
- static struct v4l2_standard v4l2_standard;
- static __u32 index;
- static struct easycap_standard const *peasycap_standard;
+ int last0 = -1, last1 = -1, last2 = -1, last3 = -1;
+ struct v4l2_standard v4l2_standard;
+ __u32 index;
+ struct easycap_standard const *peasycap_standard;
- JOT(8, "VIDIOC_ENUMSTD\n");
+ JOM(8, "VIDIOC_ENUMSTD\n");
if (0 != copy_from_user(&v4l2_standard, (void __user *)arg, \
sizeof(struct v4l2_standard))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
index = v4l2_standard.index;
@@ -1466,10 +1944,11 @@ case VIDIOC_ENUMSTD: {
peasycap_standard++;
}
if (0xFFFF == peasycap_standard->mask) {
- JOT(8, "%i=index: exhausts standards\n", index);
+ JOM(8, "%i=index: exhausts standards\n", index);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
- JOT(8, "%i=index: %s\n", index, \
+ JOM(8, "%i=index: %s\n", index, \
&(peasycap_standard->v4l2_standard.name[0]));
memcpy(&v4l2_standard, &(peasycap_standard->v4l2_standard), \
sizeof(struct v4l2_standard));
@@ -1478,87 +1957,101 @@ case VIDIOC_ENUMSTD: {
if (0 != copy_to_user((void __user *)arg, &v4l2_standard, \
sizeof(struct v4l2_standard))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_STD: {
- static v4l2_std_id std_id;
- static struct easycap_standard const *peasycap_standard;
+ v4l2_std_id std_id;
+ struct easycap_standard const *peasycap_standard;
+
+ JOM(8, "VIDIOC_G_STD\n");
- JOT(8, "VIDIOC_G_STD\n");
+ if (0 > peasycap->standard_offset) {
+ JOM(8, "%i=peasycap->standard_offset\n", \
+ peasycap->standard_offset);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EBUSY;
+ }
if (0 != copy_from_user(&std_id, (void __user *)arg, \
sizeof(v4l2_std_id))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
peasycap_standard = &easycap_standard[peasycap->standard_offset];
std_id = peasycap_standard->v4l2_standard.id;
- JOT(8, "user is told: %s\n", \
+ JOM(8, "user is told: %s\n", \
&peasycap_standard->v4l2_standard.name[0]);
if (0 != copy_to_user((void __user *)arg, &std_id, \
sizeof(v4l2_std_id))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_STD: {
- static v4l2_std_id std_id;
- static int rc;
+ v4l2_std_id std_id;
+ int rc;
- JOT(8, "VIDIOC_S_STD\n");
+ JOM(8, "VIDIOC_S_STD\n");
if (0 != copy_from_user(&std_id, (void __user *)arg, \
sizeof(v4l2_std_id))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
+ JOM(8, "User requests standard: 0x%08X%08X\n", \
+ (int)((std_id & (((v4l2_std_id)0xFFFFFFFF) << 32)) >> 32), \
+ (int)(std_id & ((v4l2_std_id)0xFFFFFFFF)));
+
rc = adjust_standard(peasycap, std_id);
if (0 > rc) {
- JOT(8, "WARNING: adjust_standard() returned %i\n", rc);
+ JOM(8, "WARNING: adjust_standard() returned %i\n", rc);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -ENOENT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_REQBUFS: {
- static int nbuffers;
- static struct v4l2_requestbuffers v4l2_requestbuffers;
+ int nbuffers;
+ struct v4l2_requestbuffers v4l2_requestbuffers;
- JOT(8, "VIDIOC_REQBUFS\n");
+ JOM(8, "VIDIOC_REQBUFS\n");
if (0 != copy_from_user(&v4l2_requestbuffers, (void __user *)arg, \
sizeof(struct v4l2_requestbuffers))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
if (v4l2_requestbuffers.memory != V4L2_MEMORY_MMAP) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
nbuffers = v4l2_requestbuffers.count;
- JOT(8, " User requests %i buffers ...\n", nbuffers);
+ JOM(8, " User requests %i buffers ...\n", nbuffers);
if (nbuffers < 2)
nbuffers = 2;
if (nbuffers > FRAME_BUFFER_MANY)
nbuffers = FRAME_BUFFER_MANY;
if (v4l2_requestbuffers.count == nbuffers) {
- JOT(8, " ... agree to %i buffers\n", \
+ JOM(8, " ... agree to %i buffers\n", \
nbuffers);
} else {
- JOT(8, " ... insist on %i buffers\n", \
+ JOM(8, " ... insist on %i buffers\n", \
nbuffers);
v4l2_requestbuffers.count = nbuffers;
}
@@ -1566,32 +2059,35 @@ case VIDIOC_REQBUFS: {
if (0 != copy_to_user((void __user *)arg, &v4l2_requestbuffers, \
sizeof(struct v4l2_requestbuffers))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_QUERYBUF: {
- static __u32 index;
- static struct v4l2_buffer v4l2_buffer;
+ __u32 index;
+ struct v4l2_buffer v4l2_buffer;
- JOT(8, "VIDIOC_QUERYBUF\n");
+ JOM(8, "VIDIOC_QUERYBUF\n");
if (peasycap->video_eof) {
- JOT(8, "returning -1 because %i=video_eof\n", \
+ JOM(8, "returning -EIO because %i=video_eof\n", \
peasycap->video_eof);
- return -1;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EIO;
}
if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
index = v4l2_buffer.index;
-	if (index < 0 || index >= peasycap->frame_buffer_many)
+	if (index < 0 || index >= peasycap->frame_buffer_many) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
@@ -1602,49 +2098,55 @@ case VIDIOC_QUERYBUF: {
v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | \
peasycap->done[index] | \
peasycap->queued[index];
- v4l2_buffer.field = peasycap->field;
+ v4l2_buffer.field = V4L2_FIELD_NONE;
v4l2_buffer.memory = V4L2_MEMORY_MMAP;
v4l2_buffer.m.offset = index * FRAME_BUFFER_SIZE;
v4l2_buffer.length = FRAME_BUFFER_SIZE;
- JOT(16, " %10i=index\n", v4l2_buffer.index);
- JOT(16, " 0x%08X=type\n", v4l2_buffer.type);
- JOT(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
- JOT(16, " 0x%08X=flags\n", v4l2_buffer.flags);
- JOT(16, " %10i=field\n", v4l2_buffer.field);
- JOT(16, " %10li=timestamp.tv_usec\n", \
+ JOM(16, " %10i=index\n", v4l2_buffer.index);
+ JOM(16, " 0x%08X=type\n", v4l2_buffer.type);
+ JOM(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
+ JOM(16, " 0x%08X=flags\n", v4l2_buffer.flags);
+ JOM(16, " %10i=field\n", v4l2_buffer.field);
+ JOM(16, " %10li=timestamp.tv_usec\n", \
(long)v4l2_buffer.timestamp.tv_usec);
- JOT(16, " %10i=sequence\n", v4l2_buffer.sequence);
- JOT(16, " 0x%08X=memory\n", v4l2_buffer.memory);
- JOT(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
- JOT(16, " %10i=length\n", v4l2_buffer.length);
+ JOM(16, " %10i=sequence\n", v4l2_buffer.sequence);
+ JOM(16, " 0x%08X=memory\n", v4l2_buffer.memory);
+ JOM(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
+ JOM(16, " %10i=length\n", v4l2_buffer.length);
if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_QBUF: {
- static struct v4l2_buffer v4l2_buffer;
+ struct v4l2_buffer v4l2_buffer;
- JOT(8, "VIDIOC_QBUF\n");
+ JOM(8, "VIDIOC_QBUF\n");
if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
- if (v4l2_buffer.memory != V4L2_MEMORY_MMAP)
+ }
+ if (v4l2_buffer.memory != V4L2_MEMORY_MMAP) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
if (v4l2_buffer.index < 0 || \
- (v4l2_buffer.index >= peasycap->frame_buffer_many))
+ (v4l2_buffer.index >= peasycap->frame_buffer_many)) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED;
peasycap->done[v4l2_buffer.index] = 0;
@@ -1652,11 +2154,11 @@ case VIDIOC_QBUF: {
if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- JOT(8, "..... user queueing frame buffer %i\n", \
+ JOM(8, "..... user queueing frame buffer %i\n", \
(int)v4l2_buffer.index);
peasycap->frame_lock = 0;
@@ -1667,36 +2169,60 @@ case VIDIOC_QBUF: {
case VIDIOC_DQBUF:
{
#if defined(AUDIOTIME)
- static struct signed_div_result sdr;
- static long long int above, below, dnbydt, fudge, sll;
- static unsigned long long int ull;
- static struct timeval timeval0;
+ struct signed_div_result sdr;
+ long long int above, below, dnbydt, fudge, sll;
+ unsigned long long int ull;
+ struct timeval timeval8;
struct timeval timeval1;
#endif /*AUDIOTIME*/
- static struct timeval timeval, timeval2;
- static int i, j;
- static struct v4l2_buffer v4l2_buffer;
+ struct timeval timeval, timeval2;
+ int i, j;
+ struct v4l2_buffer v4l2_buffer;
+ int rcdq;
+ __u16 input;
- JOT(8, "VIDIOC_DQBUF\n");
+ JOM(8, "VIDIOC_DQBUF\n");
if ((peasycap->video_idle) || (peasycap->video_eof)) {
- JOT(8, "returning -EIO because " \
+ JOM(8, "returning -EIO because " \
"%i=video_idle %i=video_eof\n", \
peasycap->video_idle, peasycap->video_eof);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EIO;
}
if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
+ }
+
+ if (true == peasycap->offerfields) {
+ /*-----------------------------------------------------------*/
+ /*
+ * IN ITS 50 "fps" MODE tvtime SEEMS ALWAYS TO REQUEST
+ * V4L2_FIELD_BOTTOM
+ */
+ /*-----------------------------------------------------------*/
+ if (V4L2_FIELD_TOP == v4l2_buffer.field)
+ JOM(8, "user wants V4L2_FIELD_TOP\n");
+ else if (V4L2_FIELD_BOTTOM == v4l2_buffer.field)
+ JOM(8, "user wants V4L2_FIELD_BOTTOM\n");
+ else if (V4L2_FIELD_ANY == v4l2_buffer.field)
+ JOM(8, "user wants V4L2_FIELD_ANY\n");
+ else
+ JOM(8, "user wants V4L2_FIELD_...UNKNOWN: %i\n", \
+ v4l2_buffer.field);
+ }
if (!peasycap->video_isoc_streaming) {
- JOT(16, "returning -EIO because video urbs not streaming\n");
+ JOM(16, "returning -EIO because video urbs not streaming\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EIO;
}
/*---------------------------------------------------------------------------*/
@@ -1708,19 +2234,28 @@ case VIDIOC_DQBUF:
/*---------------------------------------------------------------------------*/
if (!peasycap->polled) {
- if (-EIO == easycap_dqbuf(peasycap, 0))
- return -EIO;
+ do {
+ rcdq = easycap_dqbuf(peasycap, 0);
+ if (-EIO == rcdq) {
+ JOM(8, "returning -EIO because " \
+ "dqbuf() returned -EIO\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -EIO;
+ }
+ } while (0 != rcdq);
} else {
- if (peasycap->video_eof)
+ if (peasycap->video_eof) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EIO;
+ }
}
if (V4L2_BUF_FLAG_DONE != peasycap->done[peasycap->frame_read]) {
- SAY("ERROR: V4L2_BUF_FLAG_DONE != 0x%08X\n", \
+ SAM("ERROR: V4L2_BUF_FLAG_DONE != 0x%08X\n", \
peasycap->done[peasycap->frame_read]);
}
peasycap->polled = 0;
- if (!(isequence % 10)) {
+ if (!(peasycap->isequence % 10)) {
for (i = 0; i < 179; i++)
peasycap->merit[i] = peasycap->merit[i+1];
peasycap->merit[179] = merit_saa(peasycap->pusb_device);
@@ -1728,7 +2263,7 @@ case VIDIOC_DQBUF:
for (i = 0; i < 180; i++)
j += peasycap->merit[i];
if (90 < j) {
- SAY("easycap driver shutting down " \
+ SAM("easycap driver shutting down " \
"on condition blue\n");
peasycap->video_eof = 1; peasycap->audio_eof = 1;
}
@@ -1738,31 +2273,23 @@ case VIDIOC_DQBUF:
v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buffer.bytesused = peasycap->frame_buffer_used;
v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
- v4l2_buffer.field = peasycap->field;
- if (V4L2_FIELD_ALTERNATE == v4l2_buffer.field)
- v4l2_buffer.field = \
- 0x000F & (peasycap->\
- frame_buffer[peasycap->frame_read][0].kount);
+ if (true == peasycap->offerfields)
+ v4l2_buffer.field = V4L2_FIELD_BOTTOM;
+ else
+ v4l2_buffer.field = V4L2_FIELD_NONE;
do_gettimeofday(&timeval);
timeval2 = timeval;
#if defined(AUDIOTIME)
if (!peasycap->timeval0.tv_sec) {
- timeval0 = timeval;
+ timeval8 = timeval;
timeval1 = timeval;
timeval2 = timeval;
dnbydt = 192000;
-
- if (mutex_lock_interruptible(&(peasycap->mutex_timeval0)))
- return -ERESTARTSYS;
- peasycap->timeval0 = timeval0;
- mutex_unlock(&(peasycap->mutex_timeval0));
+ peasycap->timeval0 = timeval8;
} else {
- if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
- return -ERESTARTSYS;
dnbydt = peasycap->dnbydt;
timeval1 = peasycap->timeval1;
- mutex_unlock(&(peasycap->mutex_timeval1));
above = dnbydt * MICROSECONDS(timeval, timeval1);
below = 192000;
sdr = signed_div(above, below);
@@ -1774,72 +2301,76 @@ case VIDIOC_DQBUF:
timeval2.tv_usec = sdr.remainder;
timeval2.tv_sec = timeval1.tv_sec + sdr.quotient;
}
- if (!(isequence % 500)) {
+ if (!(peasycap->isequence % 500)) {
fudge = ((long long int)(1000000)) * \
((long long int)(timeval.tv_sec - \
timeval2.tv_sec)) + \
(long long int)(timeval.tv_usec - \
- timeval2.tv_usec);
+ timeval2.tv_usec);
sdr = signed_div(fudge, 1000);
sll = sdr.quotient;
ull = sdr.remainder;
- SAY("%5lli.%-3lli=ms timestamp fudge\n", sll, ull);
+ SAM("%5lli.%-3lli=ms timestamp fudge\n", sll, ull);
}
#endif /*AUDIOTIME*/
v4l2_buffer.timestamp = timeval2;
- v4l2_buffer.sequence = isequence++;
+ v4l2_buffer.sequence = peasycap->isequence++;
v4l2_buffer.memory = V4L2_MEMORY_MMAP;
v4l2_buffer.m.offset = v4l2_buffer.index * FRAME_BUFFER_SIZE;
v4l2_buffer.length = FRAME_BUFFER_SIZE;
- JOT(16, " %10i=index\n", v4l2_buffer.index);
- JOT(16, " 0x%08X=type\n", v4l2_buffer.type);
- JOT(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
- JOT(16, " 0x%08X=flags\n", v4l2_buffer.flags);
- JOT(16, " %10i=field\n", v4l2_buffer.field);
- JOT(16, " %10li=timestamp.tv_usec\n", \
+ JOM(16, " %10i=index\n", v4l2_buffer.index);
+ JOM(16, " 0x%08X=type\n", v4l2_buffer.type);
+ JOM(16, " %10i=bytesused\n", v4l2_buffer.bytesused);
+ JOM(16, " 0x%08X=flags\n", v4l2_buffer.flags);
+ JOM(16, " %10i=field\n", v4l2_buffer.field);
+ JOM(16, " %10li=timestamp.tv_sec\n", \
+ (long)v4l2_buffer.timestamp.tv_sec);
+ JOM(16, " %10li=timestamp.tv_usec\n", \
(long)v4l2_buffer.timestamp.tv_usec);
- JOT(16, " %10i=sequence\n", v4l2_buffer.sequence);
- JOT(16, " 0x%08X=memory\n", v4l2_buffer.memory);
- JOT(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
- JOT(16, " %10i=length\n", v4l2_buffer.length);
+ JOM(16, " %10i=sequence\n", v4l2_buffer.sequence);
+ JOM(16, " 0x%08X=memory\n", v4l2_buffer.memory);
+ JOM(16, " %10i=m.offset\n", v4l2_buffer.m.offset);
+ JOM(16, " %10i=length\n", v4l2_buffer.length);
if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
sizeof(struct v4l2_buffer))) {
- POUT;
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- JOT(8, "..... user is offered frame buffer %i\n", \
+ input = peasycap->frame_buffer[peasycap->frame_read][0].input;
+ if (0x08 & input) {
+ JOM(8, "user is offered frame buffer %i, input %i\n", \
+ peasycap->frame_read, (0x07 & input));
+ } else {
+ JOM(8, "user is offered frame buffer %i\n", \
peasycap->frame_read);
+ }
peasycap->frame_lock = 1;
+ JOM(8, "%i=peasycap->frame_fill\n", peasycap->frame_fill);
if (peasycap->frame_read == peasycap->frame_fill) {
if (peasycap->frame_lock) {
- JOT(8, "ERROR: filling frame buffer " \
+ JOM(8, "WORRY: filling frame buffer " \
"while offered to user\n");
}
}
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
-/*---------------------------------------------------------------------------*/
-/*
- * AUDIO URBS HAVE ALREADY BEEN SUBMITTED WHEN THIS COMMAND IS RECEIVED;
- * VIDEO URBS HAVE NOT.
- */
-/*---------------------------------------------------------------------------*/
case VIDIOC_STREAMON: {
- static int i;
+ int i;
- JOT(8, "VIDIOC_STREAMON\n");
+ JOM(8, "VIDIOC_STREAMON\n");
- isequence = 0;
+ peasycap->isequence = 0;
for (i = 0; i < 180; i++)
peasycap->merit[i] = 0;
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
submit_video_urbs(peasycap);
@@ -1851,10 +2382,11 @@ case VIDIOC_STREAMON: {
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_STREAMOFF: {
- JOT(8, "VIDIOC_STREAMOFF\n");
+ JOM(8, "VIDIOC_STREAMOFF\n");
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
@@ -1866,7 +2398,7 @@ case VIDIOC_STREAMOFF: {
* THE USERSPACE PROGRAM, E.G. mplayer, MAY HANG ON EXIT. BEWARE.
*/
/*---------------------------------------------------------------------------*/
- JOT(8, "calling wake_up on wq_video and wq_audio\n");
+ JOM(8, "calling wake_up on wq_video and wq_audio\n");
wake_up_interruptible(&(peasycap->wq_video));
wake_up_interruptible(&(peasycap->wq_audio));
/*---------------------------------------------------------------------------*/
@@ -1874,111 +2406,200 @@ case VIDIOC_STREAMOFF: {
}
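The warning above about mplayer hanging comes down to the sleep/wake pairing on wq_video and wq_audio: a reader blocked waiting for a frame only returns once something wakes the queue, so VIDIOC_STREAMOFF has to issue the wake_up calls itself. A schematic of the waiting side, using struct easycap fields that appear in this file; the helper name is illustrative and is not a function in the driver.

static int wait_for_frame(struct easycap *peasycap)
{
	/* sleeps until a frame is marked done, or until idle/eof is flagged */
	if (wait_event_interruptible(peasycap->wq_video,
				peasycap->video_idle ||
				peasycap->video_eof ||
				peasycap->done[peasycap->frame_read]))
		return -ERESTARTSYS;	/* interrupted by a signal */
	if (peasycap->video_eof)
		return -EIO;
	return 0;
}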
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_PARM: {
- static struct v4l2_streamparm v4l2_streamparm;
+ struct v4l2_streamparm *pv4l2_streamparm;
- JOT(8, "VIDIOC_G_PARM\n");
-
- if (0 != copy_from_user(&v4l2_streamparm, (void __user *)arg, \
+ JOM(8, "VIDIOC_G_PARM\n");
+ pv4l2_streamparm = kzalloc(sizeof(struct v4l2_streamparm), GFP_KERNEL);
+ if (!pv4l2_streamparm) {
+ SAM("ERROR: out of memory\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ENOMEM;
+ }
+ if (0 != copy_from_user(pv4l2_streamparm, (void __user *)arg, \
sizeof(struct v4l2_streamparm))) {
- POUT;
+ kfree(pv4l2_streamparm);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
- if (v4l2_streamparm.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- POUT;
+ if (pv4l2_streamparm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ kfree(pv4l2_streamparm);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
- v4l2_streamparm.parm.capture.capability = 0;
- v4l2_streamparm.parm.capture.capturemode = 0;
- v4l2_streamparm.parm.capture.timeperframe.numerator = 1;
- v4l2_streamparm.parm.capture.timeperframe.denominator = 30;
- v4l2_streamparm.parm.capture.readbuffers = peasycap->frame_buffer_many;
- v4l2_streamparm.parm.capture.extendedmode = 0;
- if (0 != copy_to_user((void __user *)arg, &v4l2_streamparm, \
+ pv4l2_streamparm->parm.capture.capability = 0;
+ pv4l2_streamparm->parm.capture.capturemode = 0;
+ pv4l2_streamparm->parm.capture.timeperframe.numerator = 1;
+
+ if (peasycap->fps) {
+ pv4l2_streamparm->parm.capture.timeperframe.\
+ denominator = peasycap->fps;
+ } else {
+ if (true == peasycap->ntsc) {
+ pv4l2_streamparm->parm.capture.timeperframe.\
+ denominator = 30;
+ } else {
+ pv4l2_streamparm->parm.capture.timeperframe.\
+ denominator = 25;
+ }
+ }
+
+ pv4l2_streamparm->parm.capture.readbuffers = \
+ peasycap->frame_buffer_many;
+ pv4l2_streamparm->parm.capture.extendedmode = 0;
+ if (0 != copy_to_user((void __user *)arg, pv4l2_streamparm, \
sizeof(struct v4l2_streamparm))) {
- POUT;
+ kfree(pv4l2_streamparm);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EFAULT;
}
+ kfree(pv4l2_streamparm);
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_PARM: {
- JOT(8, "VIDIOC_S_PARM unsupported\n");
+ JOM(8, "VIDIOC_S_PARM unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_AUDIO: {
- JOT(8, "VIDIOC_G_AUDIO unsupported\n");
+ JOM(8, "VIDIOC_G_AUDIO unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_AUDIO: {
- JOT(8, "VIDIOC_S_AUDIO unsupported\n");
+ JOM(8, "VIDIOC_S_AUDIO unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_S_TUNER: {
- JOT(8, "VIDIOC_S_TUNER unsupported\n");
+ JOM(8, "VIDIOC_S_TUNER unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_FBUF:
case VIDIOC_S_FBUF:
case VIDIOC_OVERLAY: {
- JOT(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n");
+ JOM(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
case VIDIOC_G_TUNER: {
- JOT(8, "VIDIOC_G_TUNER unsupported\n");
+ JOM(8, "VIDIOC_G_TUNER unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
case VIDIOC_G_FREQUENCY:
case VIDIOC_S_FREQUENCY: {
- JOT(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n");
+ JOM(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -EINVAL;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
default: {
- JOT(8, "ERROR: unrecognized V4L2 IOCTL command: 0x%08X\n", cmd);
- explain_ioctl(cmd);
- POUT;
+ JOM(8, "ERROR: unrecognized V4L2 IOCTL command: 0x%08X\n", cmd);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
return -ENOIOCTLCMD;
}
}
+mutex_unlock(&easycap_dongle[kd].mutex_video);
+JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
return 0;
}
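Taken together, the REQBUFS/QUERYBUF/QBUF/STREAMON/DQBUF cases above serve the standard V4L2 mmap streaming cycle. A condensed userspace sketch of that cycle; error handling, the open() and the format negotiation are omitted, fd is an already-opened capture node, and the request of 4 buffers is arbitrary (the handler clamps it to between 2 and FRAME_BUFFER_MANY).

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void stream_frames(int fd)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *map[8];
	unsigned int i;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	for (i = 0; i < req.count && i < 8; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		ioctl(fd, VIDIOC_QUERYBUF, &buf);
		map[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, buf.m.offset);
		ioctl(fd, VIDIOC_QBUF, &buf);	/* hand the buffer to the driver */
	}
	ioctl(fd, VIDIOC_STREAMON, &type);
	for (i = 0; i < 100; i++) {		/* capture an arbitrary 100 frames */
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_DQBUF, &buf);	/* blocks until a frame is done */
		/* ... consume buf.bytesused bytes at map[buf.index] ... */
		ioctl(fd, VIDIOC_QBUF, &buf);	/* requeue for refilling */
	}
	ioctl(fd, VIDIOC_STREAMOFF, &type);
}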
-
-long easycap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file->f_dentry->d_inode;
- long ret;
-
- lock_kernel();
- ret = easycap_ioctl_bkl(inode, file, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
-
-/*--------------------------------------------------------------------------*/
-static int easysnd_ioctl_bkl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+/*****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if ((defined(EASYCAP_IS_VIDEODEV_CLIENT)) || \
+ (defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)))
+long
+easysnd_ioctl_noinode(struct file *file, unsigned int cmd, unsigned long arg) {
+ return (long)easysnd_ioctl((struct inode *)NULL, file, cmd, arg);
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT||EASYCAP_NEEDS_UNLOCKED_IOCTL*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+int
+easysnd_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct easycap *peasycap;
struct usb_device *p;
+int kd;
+if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ return -ERESTARTSYS;
+}
peasycap = file->private_data;
if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL.\n");
- return -1;
+ return -EFAULT;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap\n");
+ return -EFAULT;
}
p = peasycap->pusb_device;
+if (NULL == p) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio)) {
+ SAY("ERROR: cannot lock easycap_dongle[%i].mutex_audio\n", kd);
+ return -ERESTARTSYS;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+/*---------------------------------------------------------------------------*/
+/*
+ * MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ * IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ if (kd != isdongle(peasycap))
+ return -ERESTARTSYS;
+ if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+ peasycap = file->private_data;
+ if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+ if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -EFAULT;
+ }
+ p = peasycap->pusb_device;
+ if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ * IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ * ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL HAVE FAILED. BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ return -ERESTARTSYS;
+}
/*---------------------------------------------------------------------------*/
switch (cmd) {
case SNDCTL_DSP_GETCAPS: {
int caps;
- JOT(8, "SNDCTL_DSP_GETCAPS\n");
+ JOM(8, "SNDCTL_DSP_GETCAPS\n");
#if defined(UPSAMPLE)
if (true == peasycap->microphone)
@@ -1992,13 +2613,15 @@ case SNDCTL_DSP_GETCAPS: {
caps = 0x04400000;
#endif /*UPSAMPLE*/
- if (0 != copy_to_user((void __user *)arg, &caps, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &caps, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_GETFMTS: {
int incoming;
- JOT(8, "SNDCTL_DSP_GETFMTS\n");
+ JOM(8, "SNDCTL_DSP_GETFMTS\n");
#if defined(UPSAMPLE)
if (true == peasycap->microphone)
@@ -2012,16 +2635,20 @@ case SNDCTL_DSP_GETFMTS: {
incoming = AFMT_S16_LE;
#endif /*UPSAMPLE*/
- if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_SETFMT: {
int incoming, outgoing;
- JOT(8, "SNDCTL_DSP_SETFMT\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_SETFMT\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
#if defined(UPSAMPLE)
if (true == peasycap->microphone)
@@ -2036,22 +2663,27 @@ case SNDCTL_DSP_SETFMT: {
#endif /*UPSAMPLE*/
if (incoming != outgoing) {
- JOT(8, "........... %i=outgoing\n", outgoing);
- JOT(8, " cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
- JOT(8, " cf. %i=AFMT_U8\n", AFMT_U8);
+ JOM(8, "........... %i=outgoing\n", outgoing);
+ JOM(8, " cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
+ JOM(8, " cf. %i=AFMT_U8\n", AFMT_U8);
if (0 != copy_to_user((void __user *)arg, &outgoing, \
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EINVAL ;
}
break;
}
case SNDCTL_DSP_STEREO: {
int incoming;
- JOT(8, "SNDCTL_DSP_STEREO\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_STEREO\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
#if defined(UPSAMPLE)
if (true == peasycap->microphone)
@@ -2065,16 +2697,20 @@ case SNDCTL_DSP_STEREO: {
incoming = 1;
#endif /*UPSAMPLE*/
- if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_SPEED: {
int incoming;
- JOT(8, "SNDCTL_DSP_SPEED\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_SPEED\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
#if defined(UPSAMPLE)
if (true == peasycap->microphone)
@@ -2088,29 +2724,37 @@ case SNDCTL_DSP_SPEED: {
incoming = 48000;
#endif /*UPSAMPLE*/
- if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_GETTRIGGER: {
int incoming;
- JOT(8, "SNDCTL_DSP_GETTRIGGER\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_GETTRIGGER\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
incoming = PCM_ENABLE_INPUT;
- if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_SETTRIGGER: {
int incoming;
- JOT(8, "SNDCTL_DSP_SETTRIGGER\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_SETTRIGGER\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
- JOT(8, "........... cf 0x%x=PCM_ENABLE_INPUT " \
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
+ JOM(8, "........... cf 0x%x=PCM_ENABLE_INPUT " \
"0x%x=PCM_ENABLE_OUTPUT\n", \
PCM_ENABLE_INPUT, PCM_ENABLE_OUTPUT);
;
@@ -2121,19 +2765,23 @@ case SNDCTL_DSP_SETTRIGGER: {
}
case SNDCTL_DSP_GETBLKSIZE: {
int incoming;
- JOT(8, "SNDCTL_DSP_GETBLKSIZE\n");
- if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+ JOM(8, "SNDCTL_DSP_GETBLKSIZE\n");
+ if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
- JOT(8, "........... %i=incoming\n", incoming);
+ }
+ JOM(8, "........... %i=incoming\n", incoming);
incoming = peasycap->audio_bytes_per_fragment;
- if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+ if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
case SNDCTL_DSP_GETISPACE: {
struct audio_buf_info audio_buf_info;
- JOT(8, "SNDCTL_DSP_GETISPACE\n");
+ JOM(8, "SNDCTL_DSP_GETISPACE\n");
audio_buf_info.bytes = peasycap->audio_bytes_per_fragment;
audio_buf_info.fragments = 1;
@@ -2141,555 +2789,31 @@ case SNDCTL_DSP_GETISPACE: {
audio_buf_info.fragstotal = 0;
if (0 != copy_to_user((void __user *)arg, &audio_buf_info, \
- sizeof(int)))
+ sizeof(int))) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
+ }
break;
}
+case 0x00005401:
+case 0x00005402:
+case 0x00005403:
+case 0x00005404:
+case 0x00005405:
+case 0x00005406: {
+ JOM(8, "SNDCTL_TMR_...: 0x%08X unsupported\n", cmd);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ENOIOCTLCMD;
+}
default: {
- JOT(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
- POUT;
+ JOM(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -ENOIOCTLCMD;
}
}
+mutex_unlock(&easycap_dongle[kd].mutex_audio);
return 0;
}
+/*****************************************************************************/
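On the audio side, the switch above answers the classic OSS capture ioctls, insisting on AFMT_S16_LE at 48000 Hz stereo (or AFMT_U8 at 8000 Hz mono for the microphone/UPSAMPLE build). A minimal OSS-style userspace sketch; the device path is an assumption, since the node name depends on how easysnd registers its device, and error checks are elided.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/soundcard.h>

int main(void)
{
	int fmt = AFMT_S16_LE, speed = 48000, stereo = 1, blksize = 0;
	char frag[32768];
	int fd = open("/dev/easysnd0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	ioctl(fd, SNDCTL_DSP_SETFMT, &fmt);	/* driver reports AFMT_S16_LE (or AFMT_U8) */
	ioctl(fd, SNDCTL_DSP_SPEED, &speed);	/* driver reports 48000 (or 8000) */
	ioctl(fd, SNDCTL_DSP_STEREO, &stereo);	/* 1 = stereo, 0 for microphone */
	ioctl(fd, SNDCTL_DSP_GETBLKSIZE, &blksize);
	if (blksize > 0 && blksize <= (int)sizeof(frag))
		read(fd, frag, blksize);	/* one fragment of captured audio */
	close(fd);
	return 0;
}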
-long easysnd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct inode *inode = file->f_dentry->d_inode;
- long ret;
-
- lock_kernel();
- ret = easysnd_ioctl_bkl(inode, file, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
-/*****************************************************************************/
-int explain_ioctl(__u32 wot)
-{
-int k;
-/*---------------------------------------------------------------------------*/
-/*
- * THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- * SHELL SCRIPT:
- * #
- * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
- * grep "^#define VIDIOC_" - | grep -v "_OLD" - | \
- * sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
- * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
- * sed -e "s, ,,g;s, ,,g" >ioctl.tmp
- * echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
- * exit 0
- * #
- * AND THE EXCISED "_OLD" CASES WERE LATER REINSTATED MANUALLY.
- *
- * THE DATA FOR THE ARRAY mess1 BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- * SHELL SCRIPT:
- * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev.h | \
- * grep "^#define VIDIOC" - | grep -v "_OLD" - | \
- * sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
- * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
- * sed -e "s, ,,g;s, ,,g" >ioctl.tmp
- * echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
- * exit 0
- * #
- */
-/*---------------------------------------------------------------------------*/
-static struct mess {
- __u32 command;
- char name[64];
-} mess[] = {
-#if defined(VIDIOC_QUERYCAP)
-{VIDIOC_QUERYCAP, "VIDIOC_QUERYCAP"},
-#endif
-#if defined(VIDIOC_RESERVED)
-{VIDIOC_RESERVED, "VIDIOC_RESERVED"},
-#endif
-#if defined(VIDIOC_ENUM_FMT)
-{VIDIOC_ENUM_FMT, "VIDIOC_ENUM_FMT"},
-#endif
-#if defined(VIDIOC_G_FMT)
-{VIDIOC_G_FMT, "VIDIOC_G_FMT"},
-#endif
-#if defined(VIDIOC_S_FMT)
-{VIDIOC_S_FMT, "VIDIOC_S_FMT"},
-#endif
-#if defined(VIDIOC_REQBUFS)
-{VIDIOC_REQBUFS, "VIDIOC_REQBUFS"},
-#endif
-#if defined(VIDIOC_QUERYBUF)
-{VIDIOC_QUERYBUF, "VIDIOC_QUERYBUF"},
-#endif
-#if defined(VIDIOC_G_FBUF)
-{VIDIOC_G_FBUF, "VIDIOC_G_FBUF"},
-#endif
-#if defined(VIDIOC_S_FBUF)
-{VIDIOC_S_FBUF, "VIDIOC_S_FBUF"},
-#endif
-#if defined(VIDIOC_OVERLAY)
-{VIDIOC_OVERLAY, "VIDIOC_OVERLAY"},
-#endif
-#if defined(VIDIOC_QBUF)
-{VIDIOC_QBUF, "VIDIOC_QBUF"},
-#endif
-#if defined(VIDIOC_DQBUF)
-{VIDIOC_DQBUF, "VIDIOC_DQBUF"},
-#endif
-#if defined(VIDIOC_STREAMON)
-{VIDIOC_STREAMON, "VIDIOC_STREAMON"},
-#endif
-#if defined(VIDIOC_STREAMOFF)
-{VIDIOC_STREAMOFF, "VIDIOC_STREAMOFF"},
-#endif
-#if defined(VIDIOC_G_PARM)
-{VIDIOC_G_PARM, "VIDIOC_G_PARM"},
-#endif
-#if defined(VIDIOC_S_PARM)
-{VIDIOC_S_PARM, "VIDIOC_S_PARM"},
-#endif
-#if defined(VIDIOC_G_STD)
-{VIDIOC_G_STD, "VIDIOC_G_STD"},
-#endif
-#if defined(VIDIOC_S_STD)
-{VIDIOC_S_STD, "VIDIOC_S_STD"},
-#endif
-#if defined(VIDIOC_ENUMSTD)
-{VIDIOC_ENUMSTD, "VIDIOC_ENUMSTD"},
-#endif
-#if defined(VIDIOC_ENUMINPUT)
-{VIDIOC_ENUMINPUT, "VIDIOC_ENUMINPUT"},
-#endif
-#if defined(VIDIOC_G_CTRL)
-{VIDIOC_G_CTRL, "VIDIOC_G_CTRL"},
-#endif
-#if defined(VIDIOC_S_CTRL)
-{VIDIOC_S_CTRL, "VIDIOC_S_CTRL"},
-#endif
-#if defined(VIDIOC_G_TUNER)
-{VIDIOC_G_TUNER, "VIDIOC_G_TUNER"},
-#endif
-#if defined(VIDIOC_S_TUNER)
-{VIDIOC_S_TUNER, "VIDIOC_S_TUNER"},
-#endif
-#if defined(VIDIOC_G_AUDIO)
-{VIDIOC_G_AUDIO, "VIDIOC_G_AUDIO"},
-#endif
-#if defined(VIDIOC_S_AUDIO)
-{VIDIOC_S_AUDIO, "VIDIOC_S_AUDIO"},
-#endif
-#if defined(VIDIOC_QUERYCTRL)
-{VIDIOC_QUERYCTRL, "VIDIOC_QUERYCTRL"},
-#endif
-#if defined(VIDIOC_QUERYMENU)
-{VIDIOC_QUERYMENU, "VIDIOC_QUERYMENU"},
-#endif
-#if defined(VIDIOC_G_INPUT)
-{VIDIOC_G_INPUT, "VIDIOC_G_INPUT"},
-#endif
-#if defined(VIDIOC_S_INPUT)
-{VIDIOC_S_INPUT, "VIDIOC_S_INPUT"},
-#endif
-#if defined(VIDIOC_G_OUTPUT)
-{VIDIOC_G_OUTPUT, "VIDIOC_G_OUTPUT"},
-#endif
-#if defined(VIDIOC_S_OUTPUT)
-{VIDIOC_S_OUTPUT, "VIDIOC_S_OUTPUT"},
-#endif
-#if defined(VIDIOC_ENUMOUTPUT)
-{VIDIOC_ENUMOUTPUT, "VIDIOC_ENUMOUTPUT"},
-#endif
-#if defined(VIDIOC_G_AUDOUT)
-{VIDIOC_G_AUDOUT, "VIDIOC_G_AUDOUT"},
-#endif
-#if defined(VIDIOC_S_AUDOUT)
-{VIDIOC_S_AUDOUT, "VIDIOC_S_AUDOUT"},
-#endif
-#if defined(VIDIOC_G_MODULATOR)
-{VIDIOC_G_MODULATOR, "VIDIOC_G_MODULATOR"},
-#endif
-#if defined(VIDIOC_S_MODULATOR)
-{VIDIOC_S_MODULATOR, "VIDIOC_S_MODULATOR"},
-#endif
-#if defined(VIDIOC_G_FREQUENCY)
-{VIDIOC_G_FREQUENCY, "VIDIOC_G_FREQUENCY"},
-#endif
-#if defined(VIDIOC_S_FREQUENCY)
-{VIDIOC_S_FREQUENCY, "VIDIOC_S_FREQUENCY"},
-#endif
-#if defined(VIDIOC_CROPCAP)
-{VIDIOC_CROPCAP, "VIDIOC_CROPCAP"},
-#endif
-#if defined(VIDIOC_G_CROP)
-{VIDIOC_G_CROP, "VIDIOC_G_CROP"},
-#endif
-#if defined(VIDIOC_S_CROP)
-{VIDIOC_S_CROP, "VIDIOC_S_CROP"},
-#endif
-#if defined(VIDIOC_G_JPEGCOMP)
-{VIDIOC_G_JPEGCOMP, "VIDIOC_G_JPEGCOMP"},
-#endif
-#if defined(VIDIOC_S_JPEGCOMP)
-{VIDIOC_S_JPEGCOMP, "VIDIOC_S_JPEGCOMP"},
-#endif
-#if defined(VIDIOC_QUERYSTD)
-{VIDIOC_QUERYSTD, "VIDIOC_QUERYSTD"},
-#endif
-#if defined(VIDIOC_TRY_FMT)
-{VIDIOC_TRY_FMT, "VIDIOC_TRY_FMT"},
-#endif
-#if defined(VIDIOC_ENUMAUDIO)
-{VIDIOC_ENUMAUDIO, "VIDIOC_ENUMAUDIO"},
-#endif
-#if defined(VIDIOC_ENUMAUDOUT)
-{VIDIOC_ENUMAUDOUT, "VIDIOC_ENUMAUDOUT"},
-#endif
-#if defined(VIDIOC_G_PRIORITY)
-{VIDIOC_G_PRIORITY, "VIDIOC_G_PRIORITY"},
-#endif
-#if defined(VIDIOC_S_PRIORITY)
-{VIDIOC_S_PRIORITY, "VIDIOC_S_PRIORITY"},
-#endif
-#if defined(VIDIOC_G_SLICED_VBI_CAP)
-{VIDIOC_G_SLICED_VBI_CAP, "VIDIOC_G_SLICED_VBI_CAP"},
-#endif
-#if defined(VIDIOC_LOG_STATUS)
-{VIDIOC_LOG_STATUS, "VIDIOC_LOG_STATUS"},
-#endif
-#if defined(VIDIOC_G_EXT_CTRLS)
-{VIDIOC_G_EXT_CTRLS, "VIDIOC_G_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_S_EXT_CTRLS)
-{VIDIOC_S_EXT_CTRLS, "VIDIOC_S_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_TRY_EXT_CTRLS)
-{VIDIOC_TRY_EXT_CTRLS, "VIDIOC_TRY_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_ENUM_FRAMESIZES)
-{VIDIOC_ENUM_FRAMESIZES, "VIDIOC_ENUM_FRAMESIZES"},
-#endif
-#if defined(VIDIOC_ENUM_FRAMEINTERVALS)
-{VIDIOC_ENUM_FRAMEINTERVALS, "VIDIOC_ENUM_FRAMEINTERVALS"},
-#endif
-#if defined(VIDIOC_G_ENC_INDEX)
-{VIDIOC_G_ENC_INDEX, "VIDIOC_G_ENC_INDEX"},
-#endif
-#if defined(VIDIOC_ENCODER_CMD)
-{VIDIOC_ENCODER_CMD, "VIDIOC_ENCODER_CMD"},
-#endif
-#if defined(VIDIOC_TRY_ENCODER_CMD)
-{VIDIOC_TRY_ENCODER_CMD, "VIDIOC_TRY_ENCODER_CMD"},
-#endif
-#if defined(VIDIOC_G_CHIP_IDENT)
-{VIDIOC_G_CHIP_IDENT, "VIDIOC_G_CHIP_IDENT"},
-#endif
-
-#if defined(VIDIOC_OVERLAY_OLD)
-{VIDIOC_OVERLAY_OLD, "VIDIOC_OVERLAY_OLD"},
-#endif
-#if defined(VIDIOC_S_PARM_OLD)
-{VIDIOC_S_PARM_OLD, "VIDIOC_S_PARM_OLD"},
-#endif
-#if defined(VIDIOC_S_CTRL_OLD)
-{VIDIOC_S_CTRL_OLD, "VIDIOC_S_CTRL_OLD"},
-#endif
-#if defined(VIDIOC_G_AUDIO_OLD)
-{VIDIOC_G_AUDIO_OLD, "VIDIOC_G_AUDIO_OLD"},
-#endif
-#if defined(VIDIOC_G_AUDOUT_OLD)
-{VIDIOC_G_AUDOUT_OLD, "VIDIOC_G_AUDOUT_OLD"},
-#endif
-#if defined(VIDIOC_CROPCAP_OLD)
-{VIDIOC_CROPCAP_OLD, "VIDIOC_CROPCAP_OLD"},
-#endif
-{0xFFFFFFFF, ""}
-};
-
-static struct mess mess1[] = \
-{
-#if defined(VIDIOCGCAP)
-{VIDIOCGCAP, "VIDIOCGCAP"},
-#endif
-#if defined(VIDIOCGCHAN)
-{VIDIOCGCHAN, "VIDIOCGCHAN"},
-#endif
-#if defined(VIDIOCSCHAN)
-{VIDIOCSCHAN, "VIDIOCSCHAN"},
-#endif
-#if defined(VIDIOCGTUNER)
-{VIDIOCGTUNER, "VIDIOCGTUNER"},
-#endif
-#if defined(VIDIOCSTUNER)
-{VIDIOCSTUNER, "VIDIOCSTUNER"},
-#endif
-#if defined(VIDIOCGPICT)
-{VIDIOCGPICT, "VIDIOCGPICT"},
-#endif
-#if defined(VIDIOCSPICT)
-{VIDIOCSPICT, "VIDIOCSPICT"},
-#endif
-#if defined(VIDIOCCAPTURE)
-{VIDIOCCAPTURE, "VIDIOCCAPTURE"},
-#endif
-#if defined(VIDIOCGWIN)
-{VIDIOCGWIN, "VIDIOCGWIN"},
-#endif
-#if defined(VIDIOCSWIN)
-{VIDIOCSWIN, "VIDIOCSWIN"},
-#endif
-#if defined(VIDIOCGFBUF)
-{VIDIOCGFBUF, "VIDIOCGFBUF"},
-#endif
-#if defined(VIDIOCSFBUF)
-{VIDIOCSFBUF, "VIDIOCSFBUF"},
-#endif
-#if defined(VIDIOCKEY)
-{VIDIOCKEY, "VIDIOCKEY"},
-#endif
-#if defined(VIDIOCGFREQ)
-{VIDIOCGFREQ, "VIDIOCGFREQ"},
-#endif
-#if defined(VIDIOCSFREQ)
-{VIDIOCSFREQ, "VIDIOCSFREQ"},
-#endif
-#if defined(VIDIOCGAUDIO)
-{VIDIOCGAUDIO, "VIDIOCGAUDIO"},
-#endif
-#if defined(VIDIOCSAUDIO)
-{VIDIOCSAUDIO, "VIDIOCSAUDIO"},
-#endif
-#if defined(VIDIOCSYNC)
-{VIDIOCSYNC, "VIDIOCSYNC"},
-#endif
-#if defined(VIDIOCMCAPTURE)
-{VIDIOCMCAPTURE, "VIDIOCMCAPTURE"},
-#endif
-#if defined(VIDIOCGMBUF)
-{VIDIOCGMBUF, "VIDIOCGMBUF"},
-#endif
-#if defined(VIDIOCGUNIT)
-{VIDIOCGUNIT, "VIDIOCGUNIT"},
-#endif
-#if defined(VIDIOCGCAPTURE)
-{VIDIOCGCAPTURE, "VIDIOCGCAPTURE"},
-#endif
-#if defined(VIDIOCSCAPTURE)
-{VIDIOCSCAPTURE, "VIDIOCSCAPTURE"},
-#endif
-#if defined(VIDIOCSPLAYMODE)
-{VIDIOCSPLAYMODE, "VIDIOCSPLAYMODE"},
-#endif
-#if defined(VIDIOCSWRITEMODE)
-{VIDIOCSWRITEMODE, "VIDIOCSWRITEMODE"},
-#endif
-#if defined(VIDIOCGPLAYINFO)
-{VIDIOCGPLAYINFO, "VIDIOCGPLAYINFO"},
-#endif
-#if defined(VIDIOCSMICROCODE)
-{VIDIOCSMICROCODE, "VIDIOCSMICROCODE"},
-#endif
-{0xFFFFFFFF, ""}
-};
-
-k = 0;
-while (mess[k].name[0]) {
- if (wot == mess[k].command) {
- JOT(8, "ioctl 0x%08X is %s\n", \
- mess[k].command, &mess[k].name[0]);
- return 0;
- }
- k++;
-}
-JOT(8, "ioctl 0x%08X is not in videodev2.h\n", wot);
-
-k = 0;
-while (mess1[k].name[0]) {
- if (wot == mess1[k].command) {
- JOT(8, "ioctl 0x%08X is %s (V4L1)\n", \
- mess1[k].command, &mess1[k].name[0]);
- return 0;
- }
- k++;
-}
-JOT(8, "ioctl 0x%08X is not in videodev.h\n", wot);
-return -1;
-}
-/*****************************************************************************/
-int explain_cid(__u32 wot)
-{
-int k;
-/*---------------------------------------------------------------------------*/
-/*
- * THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- * SHELL SCRIPT:
- * #
- * cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
- * grep "^#define V4L2_CID_" | \
- * sed -e "s,(.*$,,;p" | sed -e "N;s,\n,, " | \
- * sed -e "s/^#define / {/;s/#define /, \"/;s/$/\"},/" | \
- * sed -e "s, ,,g;s, ,,g" | grep -v "_BASE" | grep -v "MPEG" >cid.tmp
- * echo "{0xFFFFFFFF,\"\"}" >>cid.tmp
- * exit 0
- * #
- */
-/*---------------------------------------------------------------------------*/
-static struct mess
-{
-__u32 command;
-char name[64];
-} mess[] = {
-#if defined(V4L2_CID_USER_CLASS)
-{V4L2_CID_USER_CLASS, "V4L2_CID_USER_CLASS"},
-#endif
-#if defined(V4L2_CID_BRIGHTNESS)
-{V4L2_CID_BRIGHTNESS, "V4L2_CID_BRIGHTNESS"},
-#endif
-#if defined(V4L2_CID_CONTRAST)
-{V4L2_CID_CONTRAST, "V4L2_CID_CONTRAST"},
-#endif
-#if defined(V4L2_CID_SATURATION)
-{V4L2_CID_SATURATION, "V4L2_CID_SATURATION"},
-#endif
-#if defined(V4L2_CID_HUE)
-{V4L2_CID_HUE, "V4L2_CID_HUE"},
-#endif
-#if defined(V4L2_CID_AUDIO_VOLUME)
-{V4L2_CID_AUDIO_VOLUME, "V4L2_CID_AUDIO_VOLUME"},
-#endif
-#if defined(V4L2_CID_AUDIO_BALANCE)
-{V4L2_CID_AUDIO_BALANCE, "V4L2_CID_AUDIO_BALANCE"},
-#endif
-#if defined(V4L2_CID_AUDIO_BASS)
-{V4L2_CID_AUDIO_BASS, "V4L2_CID_AUDIO_BASS"},
-#endif
-#if defined(V4L2_CID_AUDIO_TREBLE)
-{V4L2_CID_AUDIO_TREBLE, "V4L2_CID_AUDIO_TREBLE"},
-#endif
-#if defined(V4L2_CID_AUDIO_MUTE)
-{V4L2_CID_AUDIO_MUTE, "V4L2_CID_AUDIO_MUTE"},
-#endif
-#if defined(V4L2_CID_AUDIO_LOUDNESS)
-{V4L2_CID_AUDIO_LOUDNESS, "V4L2_CID_AUDIO_LOUDNESS"},
-#endif
-#if defined(V4L2_CID_BLACK_LEVEL)
-{V4L2_CID_BLACK_LEVEL, "V4L2_CID_BLACK_LEVEL"},
-#endif
-#if defined(V4L2_CID_AUTO_WHITE_BALANCE)
-{V4L2_CID_AUTO_WHITE_BALANCE, "V4L2_CID_AUTO_WHITE_BALANCE"},
-#endif
-#if defined(V4L2_CID_DO_WHITE_BALANCE)
-{V4L2_CID_DO_WHITE_BALANCE, "V4L2_CID_DO_WHITE_BALANCE"},
-#endif
-#if defined(V4L2_CID_RED_BALANCE)
-{V4L2_CID_RED_BALANCE, "V4L2_CID_RED_BALANCE"},
-#endif
-#if defined(V4L2_CID_BLUE_BALANCE)
-{V4L2_CID_BLUE_BALANCE, "V4L2_CID_BLUE_BALANCE"},
-#endif
-#if defined(V4L2_CID_GAMMA)
-{V4L2_CID_GAMMA, "V4L2_CID_GAMMA"},
-#endif
-#if defined(V4L2_CID_WHITENESS)
-{V4L2_CID_WHITENESS, "V4L2_CID_WHITENESS"},
-#endif
-#if defined(V4L2_CID_EXPOSURE)
-{V4L2_CID_EXPOSURE, "V4L2_CID_EXPOSURE"},
-#endif
-#if defined(V4L2_CID_AUTOGAIN)
-{V4L2_CID_AUTOGAIN, "V4L2_CID_AUTOGAIN"},
-#endif
-#if defined(V4L2_CID_GAIN)
-{V4L2_CID_GAIN, "V4L2_CID_GAIN"},
-#endif
-#if defined(V4L2_CID_HFLIP)
-{V4L2_CID_HFLIP, "V4L2_CID_HFLIP"},
-#endif
-#if defined(V4L2_CID_VFLIP)
-{V4L2_CID_VFLIP, "V4L2_CID_VFLIP"},
-#endif
-#if defined(V4L2_CID_HCENTER)
-{V4L2_CID_HCENTER, "V4L2_CID_HCENTER"},
-#endif
-#if defined(V4L2_CID_VCENTER)
-{V4L2_CID_VCENTER, "V4L2_CID_VCENTER"},
-#endif
-#if defined(V4L2_CID_POWER_LINE_FREQUENCY)
-{V4L2_CID_POWER_LINE_FREQUENCY, "V4L2_CID_POWER_LINE_FREQUENCY"},
-#endif
-#if defined(V4L2_CID_HUE_AUTO)
-{V4L2_CID_HUE_AUTO, "V4L2_CID_HUE_AUTO"},
-#endif
-#if defined(V4L2_CID_WHITE_BALANCE_TEMPERATURE)
-{V4L2_CID_WHITE_BALANCE_TEMPERATURE, "V4L2_CID_WHITE_BALANCE_TEMPERATURE"},
-#endif
-#if defined(V4L2_CID_SHARPNESS)
-{V4L2_CID_SHARPNESS, "V4L2_CID_SHARPNESS"},
-#endif
-#if defined(V4L2_CID_BACKLIGHT_COMPENSATION)
-{V4L2_CID_BACKLIGHT_COMPENSATION, "V4L2_CID_BACKLIGHT_COMPENSATION"},
-#endif
-#if defined(V4L2_CID_CHROMA_AGC)
-{V4L2_CID_CHROMA_AGC, "V4L2_CID_CHROMA_AGC"},
-#endif
-#if defined(V4L2_CID_COLOR_KILLER)
-{V4L2_CID_COLOR_KILLER, "V4L2_CID_COLOR_KILLER"},
-#endif
-#if defined(V4L2_CID_LASTP1)
-{V4L2_CID_LASTP1, "V4L2_CID_LASTP1"},
-#endif
-#if defined(V4L2_CID_CAMERA_CLASS)
-{V4L2_CID_CAMERA_CLASS, "V4L2_CID_CAMERA_CLASS"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_AUTO)
-{V4L2_CID_EXPOSURE_AUTO, "V4L2_CID_EXPOSURE_AUTO"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_ABSOLUTE)
-{V4L2_CID_EXPOSURE_ABSOLUTE, "V4L2_CID_EXPOSURE_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_AUTO_PRIORITY)
-{V4L2_CID_EXPOSURE_AUTO_PRIORITY, "V4L2_CID_EXPOSURE_AUTO_PRIORITY"},
-#endif
-#if defined(V4L2_CID_PAN_RELATIVE)
-{V4L2_CID_PAN_RELATIVE, "V4L2_CID_PAN_RELATIVE"},
-#endif
-#if defined(V4L2_CID_TILT_RELATIVE)
-{V4L2_CID_TILT_RELATIVE, "V4L2_CID_TILT_RELATIVE"},
-#endif
-#if defined(V4L2_CID_PAN_RESET)
-{V4L2_CID_PAN_RESET, "V4L2_CID_PAN_RESET"},
-#endif
-#if defined(V4L2_CID_TILT_RESET)
-{V4L2_CID_TILT_RESET, "V4L2_CID_TILT_RESET"},
-#endif
-#if defined(V4L2_CID_PAN_ABSOLUTE)
-{V4L2_CID_PAN_ABSOLUTE, "V4L2_CID_PAN_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_TILT_ABSOLUTE)
-{V4L2_CID_TILT_ABSOLUTE, "V4L2_CID_TILT_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_FOCUS_ABSOLUTE)
-{V4L2_CID_FOCUS_ABSOLUTE, "V4L2_CID_FOCUS_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_FOCUS_RELATIVE)
-{V4L2_CID_FOCUS_RELATIVE, "V4L2_CID_FOCUS_RELATIVE"},
-#endif
-#if defined(V4L2_CID_FOCUS_AUTO)
-{V4L2_CID_FOCUS_AUTO, "V4L2_CID_FOCUS_AUTO"},
-#endif
-{0xFFFFFFFF, ""}
-};
-
-k = 0;
-while (mess[k].name[0]) {
- if (wot == mess[k].command) {
- JOT(8, "ioctl 0x%08X is %s\n", \
- mess[k].command, &mess[k].name[0]);
- return 0;
- }
- k++;
-}
-JOT(8, "cid 0x%08X is not in videodev2.h\n", wot);
-return -1;
-}
-/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_low.c b/drivers/staging/easycap/easycap_low.c
index ad1fc4cc471a..28c4d1e3c02f 100644
--- a/drivers/staging/easycap/easycap_low.c
+++ b/drivers/staging/easycap/easycap_low.c
@@ -38,148 +38,209 @@
*/
/****************************************************************************/
-#include "easycap_debug.h"
#include "easycap.h"
+#include "easycap_debug.h"
/*--------------------------------------------------------------------------*/
-const struct stk1160config { int reg; int set; } stk1160config[256] = {
- {0x000, 0x0098},
- {0x002, 0x0093},
-
- {0x001, 0x0003},
- {0x003, 0x0080},
- {0x00D, 0x0000},
- {0x00F, 0x0002},
- {0x018, 0x0010},
- {0x019, 0x0000},
- {0x01A, 0x0014},
- {0x01B, 0x000E},
- {0x01C, 0x0046},
-
- {0x100, 0x0033},
- {0x103, 0x0000},
- {0x104, 0x0000},
- {0x105, 0x0000},
- {0x106, 0x0000},
-
-#if defined(PREFER_NTSC)
-
-#undef OLDMARGIN
-#if defined(OLDMARGIN)
- {0x110, 0x0008},
-#else
- {0x110, 0x0014},
-#endif /*OLDMARGIN*/
-
- {0x111, 0x0000},
- {0x112, 0x0003},
- {0x113, 0x0000},
-
-#if defined(OLDMARGIN)
- {0x114, 0x0508},
-#else
- {0x114, 0x0514},
-#endif /*OLDMARGIN*/
-
- {0x115, 0x0005},
- {0x116, 0x00F3},
- {0x117, 0x0000},
-
-#else /* ! PREFER_NTSC*/
-
-#if defined(OLDMARGIN)
- {0x110, 0x0008},
-#else
- {0x110, 0x0014},
-#endif /*OLDMARGIN*/
-
- {0x111, 0x0000},
- {0x112, 0x0020},
- {0x113, 0x0000},
-
-#if defined(OLDMARGIN)
- {0x114, 0x0508},
-#else
- {0x114, 0x0514},
-#endif /*OLDMARGIN*/
-
- {0x115, 0x0005},
- {0x116, 0x0110},
- {0x117, 0x0001},
-
-#endif /* ! PREFER_NTSC*/
-
- {0x202, 0x000F},
- {0x203, 0x004A},
- {0x2FF, 0x0000},
-/*---------------------------------------------------------------------------*/
- {0xFFF, 0xFFFF}
- };
+const struct stk1160config { int reg; int set; } stk1160configPAL[256] = {
+ {0x000, 0x0098},
+ {0x002, 0x0093},
+
+ {0x001, 0x0003},
+ {0x003, 0x0080},
+ {0x00D, 0x0000},
+ {0x00F, 0x0002},
+ {0x018, 0x0010},
+ {0x019, 0x0000},
+ {0x01A, 0x0014},
+ {0x01B, 0x000E},
+ {0x01C, 0x0046},
+
+ {0x100, 0x0033},
+ {0x103, 0x0000},
+ {0x104, 0x0000},
+ {0x105, 0x0000},
+ {0x106, 0x0000},
+
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ * RESOLUTION 640x480
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ {0x110, 0x0008},
+ {0x111, 0x0000},
+ {0x112, 0x0020},
+ {0x113, 0x0000},
+ {0x114, 0x0508},
+ {0x115, 0x0005},
+ {0x116, 0x0110},
+ {0x117, 0x0001},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+
+ {0x202, 0x000F},
+ {0x203, 0x004A},
+ {0x2FF, 0x0000},
+
+ {0xFFF, 0xFFFF}
+};
/*--------------------------------------------------------------------------*/
-const struct saa7113config { int reg; int set; } saa7113config[256] = {
- {0x01, 0x08},
- {0x02, 0x80},
- {0x03, 0x33},
- {0x04, 0x00},
- {0x05, 0x00},
- {0x06, 0xE9},
- {0x07, 0x0D},
-#if defined(PREFER_NTSC)
- {0x08, 0x78},
-#else
- {0x08, 0x38},
-#endif /* ! PREFER_NTSC*/
- {0x09, 0x00},
- {0x0A, SAA_0A_DEFAULT},
- {0x0B, SAA_0B_DEFAULT},
- {0x0C, SAA_0C_DEFAULT},
- {0x0D, SAA_0D_DEFAULT},
- {0x0E, 0x01},
- {0x0F, 0x36},
- {0x10, 0x00},
- {0x11, 0x0C},
- {0x12, 0xE7},
- {0x13, 0x00},
- {0x15, 0x00},
- {0x16, 0x00},
-#if defined(PREFER_NTSC)
- {0x40, 0x82},
+const struct stk1160config stk1160configNTSC[256] = {
+ {0x000, 0x0098},
+ {0x002, 0x0093},
+
+ {0x001, 0x0003},
+ {0x003, 0x0080},
+ {0x00D, 0x0000},
+ {0x00F, 0x0002},
+ {0x018, 0x0010},
+ {0x019, 0x0000},
+ {0x01A, 0x0014},
+ {0x01B, 0x000E},
+ {0x01C, 0x0046},
+
+ {0x100, 0x0033},
+ {0x103, 0x0000},
+ {0x104, 0x0000},
+ {0x105, 0x0000},
+ {0x106, 0x0000},
+
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ * RESOLUTION 640x480
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+ {0x110, 0x0008},
+ {0x111, 0x0000},
+ {0x112, 0x0003},
+ {0x113, 0x0000},
+ {0x114, 0x0508},
+ {0x115, 0x0005},
+ {0x116, 0x00F3},
+ {0x117, 0x0000},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+
+ {0x202, 0x000F},
+ {0x203, 0x004A},
+ {0x2FF, 0x0000},
+
+ {0xFFF, 0xFFFF}
+};
+/*--------------------------------------------------------------------------*/
+const struct saa7113config { int reg; int set; } saa7113configPAL[256] = {
+ {0x01, 0x08},
+#if defined(ANTIALIAS)
+ {0x02, 0xC0},
#else
- {0x40, 0x02},
-#endif /* ! PREFER_NTSC*/
- {0x41, 0xFF},
- {0x42, 0xFF},
- {0x43, 0xFF},
- {0x44, 0xFF},
- {0x45, 0xFF},
- {0x46, 0xFF},
- {0x47, 0xFF},
- {0x48, 0xFF},
- {0x49, 0xFF},
- {0x4A, 0xFF},
- {0x4B, 0xFF},
- {0x4C, 0xFF},
- {0x4D, 0xFF},
- {0x4E, 0xFF},
- {0x4F, 0xFF},
- {0x50, 0xFF},
- {0x51, 0xFF},
- {0x52, 0xFF},
- {0x53, 0xFF},
- {0x54, 0xFF},
- {0x55, 0xFF},
- {0x56, 0xFF},
- {0x57, 0xFF},
- {0x58, 0x40},
- {0x59, 0x54},
-#if defined(PREFER_NTSC)
- {0x5A, 0x0A},
+ {0x02, 0x80},
+#endif /*ANTIALIAS*/
+ {0x03, 0x33},
+ {0x04, 0x00},
+ {0x05, 0x00},
+ {0x06, 0xE9},
+ {0x07, 0x0D},
+ {0x08, 0x38},
+ {0x09, 0x00},
+ {0x0A, SAA_0A_DEFAULT},
+ {0x0B, SAA_0B_DEFAULT},
+ {0x0C, SAA_0C_DEFAULT},
+ {0x0D, SAA_0D_DEFAULT},
+ {0x0E, 0x01},
+ {0x0F, 0x36},
+ {0x10, 0x00},
+ {0x11, 0x0C},
+ {0x12, 0xE7},
+ {0x13, 0x00},
+ {0x15, 0x00},
+ {0x16, 0x00},
+ {0x40, 0x02},
+ {0x41, 0xFF},
+ {0x42, 0xFF},
+ {0x43, 0xFF},
+ {0x44, 0xFF},
+ {0x45, 0xFF},
+ {0x46, 0xFF},
+ {0x47, 0xFF},
+ {0x48, 0xFF},
+ {0x49, 0xFF},
+ {0x4A, 0xFF},
+ {0x4B, 0xFF},
+ {0x4C, 0xFF},
+ {0x4D, 0xFF},
+ {0x4E, 0xFF},
+ {0x4F, 0xFF},
+ {0x50, 0xFF},
+ {0x51, 0xFF},
+ {0x52, 0xFF},
+ {0x53, 0xFF},
+ {0x54, 0xFF},
+ {0x55, 0xFF},
+ {0x56, 0xFF},
+ {0x57, 0xFF},
+ {0x58, 0x40},
+ {0x59, 0x54},
+ {0x5A, 0x07},
+ {0x5B, 0x83},
+
+ {0xFF, 0xFF}
+};
+/*--------------------------------------------------------------------------*/
+const struct saa7113config saa7113configNTSC[256] = {
+ {0x01, 0x08},
+#if defined(ANTIALIAS)
+ {0x02, 0xC0},
#else
- {0x5A, 0x07},
-#endif /* ! PREFER_NTSC*/
- {0x5B, 0x83},
- {0xFF, 0xFF}
- };
+ {0x02, 0x80},
+#endif /*ANTIALIAS*/
+ {0x03, 0x33},
+ {0x04, 0x00},
+ {0x05, 0x00},
+ {0x06, 0xE9},
+ {0x07, 0x0D},
+ {0x08, 0x78},
+ {0x09, 0x00},
+ {0x0A, SAA_0A_DEFAULT},
+ {0x0B, SAA_0B_DEFAULT},
+ {0x0C, SAA_0C_DEFAULT},
+ {0x0D, SAA_0D_DEFAULT},
+ {0x0E, 0x01},
+ {0x0F, 0x36},
+ {0x10, 0x00},
+ {0x11, 0x0C},
+ {0x12, 0xE7},
+ {0x13, 0x00},
+ {0x15, 0x00},
+ {0x16, 0x00},
+ {0x40, 0x82},
+ {0x41, 0xFF},
+ {0x42, 0xFF},
+ {0x43, 0xFF},
+ {0x44, 0xFF},
+ {0x45, 0xFF},
+ {0x46, 0xFF},
+ {0x47, 0xFF},
+ {0x48, 0xFF},
+ {0x49, 0xFF},
+ {0x4A, 0xFF},
+ {0x4B, 0xFF},
+ {0x4C, 0xFF},
+ {0x4D, 0xFF},
+ {0x4E, 0xFF},
+ {0x4F, 0xFF},
+ {0x50, 0xFF},
+ {0x51, 0xFF},
+ {0x52, 0xFF},
+ {0x53, 0xFF},
+ {0x54, 0xFF},
+ {0x55, 0xFF},
+ {0x56, 0xFF},
+ {0x57, 0xFF},
+ {0x58, 0x40},
+ {0x59, 0x54},
+ {0x5A, 0x0A},
+ {0x5B, 0x83},
+
+ {0xFF, 0xFF}
+};
/*--------------------------------------------------------------------------*/
/****************************************************************************/
@@ -187,6 +248,9 @@ int
confirm_resolution(struct usb_device *p)
{
__u8 get0, get1, get2, get3, get4, get5, get6, get7;
+
+if (NULL == p)
+ return -ENODEV;
GET(p, 0x0110, &get0);
GET(p, 0x0111, &get1);
GET(p, 0x0112, &get2);
@@ -227,6 +291,8 @@ confirm_stream(struct usb_device *p)
__u16 get2;
__u8 igot;
+if (NULL == p)
+ return -ENODEV;
GET(p, 0x0100, &igot); get2 = 0x80 & igot;
if (0x80 == get2)
JOT(8, "confirm_stream: OK\n");
@@ -236,15 +302,24 @@ return 0;
}
/****************************************************************************/
int
-setup_stk(struct usb_device *p)
+setup_stk(struct usb_device *p, bool ntsc)
{
int i0;
+if (NULL == p)
+ return -ENODEV;
i0 = 0;
-while (0xFFF != stk1160config[i0].reg) {
- SET(p, stk1160config[i0].reg, stk1160config[i0].set);
- i0++;
+if (true == ntsc) {
+ while (0xFFF != stk1160configNTSC[i0].reg) {
+ SET(p, stk1160configNTSC[i0].reg, stk1160configNTSC[i0].set);
+ i0++;
+ }
+} else {
+ while (0xFFF != stk1160configPAL[i0].reg) {
+ SET(p, stk1160configPAL[i0].reg, stk1160configPAL[i0].set);
+ i0++;
}
+}
write_300(p);
@@ -252,19 +327,26 @@ return 0;
}
/****************************************************************************/
int
-setup_saa(struct usb_device *p)
+setup_saa(struct usb_device *p, bool ntsc)
{
int i0, ir;
-
-set2to78(p);
-
-
+if (NULL == p)
+ return -ENODEV;
i0 = 0;
-while (0xFF != saa7113config[i0].reg) {
- ir = write_saa(p, saa7113config[i0].reg, saa7113config[i0].set);
- i0++;
+if (true == ntsc) {
+ while (0xFF != saa7113configNTSC[i0].reg) {
+ ir = write_saa(p, saa7113configNTSC[i0].reg, \
+ saa7113configNTSC[i0].set);
+ i0++;
}
+} else {
+ while (0xFF != saa7113configPAL[i0].reg) {
+ ir = write_saa(p, saa7113configPAL[i0].reg, \
+ saa7113configPAL[i0].set);
+ i0++;
+ }
+}
return 0;
}
/****************************************************************************/
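[Editorial note, not part of the patch:] The PAL and NTSC tables above end with the same sentinels (register 0xFFF for the STK1160, 0xFF for the SAA7113), so the duplicated PAL/NTSC loops in setup_stk(), setup_saa() and check_stk() could in principle collapse to a single table-driven walk. A minimal sketch under that assumption, reusing only names visible in this patch (SET(), write_300(), the two STK tables):

static int setup_stk_sketch(struct usb_device *p, bool ntsc)
{
	const struct stk1160config *cfg = ntsc ? stk1160configNTSC
					       : stk1160configPAL;
	int i;

	if (NULL == p)
		return -ENODEV;
	for (i = 0; 0xFFF != cfg[i].reg; i++)
		SET(p, cfg[i].reg, cfg[i].set);	/* one write per table entry */
	return write_300(p);
}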
@@ -273,6 +355,8 @@ write_000(struct usb_device *p, __u16 set2, __u16 set0)
{
__u8 igot0, igot2;
+if (NULL == p)
+ return -ENODEV;
GET(p, 0x0002, &igot2);
GET(p, 0x0000, &igot0);
SET(p, 0x0002, set2);
@@ -283,6 +367,8 @@ return 0;
int
write_saa(struct usb_device *p, __u16 reg0, __u16 set0)
{
+if (NULL == p)
+ return -ENODEV;
SET(p, 0x200, 0x00);
SET(p, 0x204, reg0);
SET(p, 0x205, set0);
@@ -306,6 +392,8 @@ __u8 igot;
__u16 got502, got503;
__u16 set502, set503;
+if (NULL == p)
+ return -ENODEV;
SET(p, 0x0504, reg0);
SET(p, 0x0500, 0x008B);
@@ -341,6 +429,8 @@ read_vt(struct usb_device *p, __u16 reg0)
__u8 igot;
__u16 got502, got503;
+if (NULL == p)
+ return -ENODEV;
SET(p, 0x0504, reg0);
SET(p, 0x0500, 0x008B);
@@ -360,6 +450,8 @@ return (got503 << 8) | got502;
int
write_300(struct usb_device *p)
{
+if (NULL == p)
+ return -ENODEV;
SET(p, 0x300, 0x0012);
SET(p, 0x350, 0x002D);
SET(p, 0x351, 0x0001);
@@ -376,24 +468,48 @@ return 0;
*/
/*--------------------------------------------------------------------------*/
int
-check_saa(struct usb_device *p)
+check_saa(struct usb_device *p, bool ntsc)
{
int i0, ir, rc;
-i0 = 0;
+if (NULL == p)
+ return -ENODEV;
+i0 = 0;
rc = 0;
-while (0xFF != saa7113config[i0].reg) {
- if (0x0F == saa7113config[i0].reg) {
- i0++; continue;
+if (true == ntsc) {
+ while (0xFF != saa7113configNTSC[i0].reg) {
+ if (0x0F == saa7113configNTSC[i0].reg) {
+ i0++;
+ continue;
+ }
+
+ ir = read_saa(p, saa7113configNTSC[i0].reg);
+ if (ir != saa7113configNTSC[i0].set) {
+ SAY("SAA register 0x%02X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ saa7113configNTSC[i0].reg, \
+ ir, saa7113configNTSC[i0].set);
+ rc--;
+ }
+ i0++;
}
+} else {
+ while (0xFF != saa7113configPAL[i0].reg) {
+ if (0x0F == saa7113configPAL[i0].reg) {
+ i0++;
+ continue;
+ }
- ir = read_saa(p, saa7113config[i0].reg);
- if (ir != saa7113config[i0].set) {
- SAY("SAA register 0x%02X has 0x%02X, expected 0x%02X\n", \
- saa7113config[i0].reg, ir, saa7113config[i0].set);
- rc--;
+ ir = read_saa(p, saa7113configPAL[i0].reg);
+ if (ir != saa7113configPAL[i0].set) {
+ SAY("SAA register 0x%02X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ saa7113configPAL[i0].reg, \
+ ir, saa7113configPAL[i0].set);
+ rc--;
+ }
+ i0++;
}
- i0++;
}
if (-8 > rc)
return rc;
@@ -406,6 +522,8 @@ merit_saa(struct usb_device *p)
{
int rc;
+if (NULL == p)
+ return -ENODEV;
rc = read_saa(p, 0x1F);
if ((0 > rc) || (0x02 & rc))
return 1 ;
@@ -416,29 +534,46 @@ else
int
ready_saa(struct usb_device *p)
{
-int j, rc;
-static int max = 10;
-
+int j, rc, rate;
+const int max = 5, marktime = PATIENCE/5;
+/*--------------------------------------------------------------------------*/
+/*
+ * RETURNS 0 FOR INTERLACED 50 Hz
+ * 1 FOR NON-INTERLACED 50 Hz
+ * 2 FOR INTERLACED 60 Hz
+ * 3 FOR NON-INTERLACED 60 Hz
+*/
+/*--------------------------------------------------------------------------*/
+if (NULL == p)
+ return -ENODEV;
j = 0;
while (max > j) {
rc = read_saa(p, 0x1F);
if (0 <= rc) {
- if ((1 == (0x01 & rc))&&(0 == (0x40 & rc)))
+ if (0 == (0x40 & rc))
+ break;
+ if (1 == (0x01 & rc))
break;
}
- msleep(100); j++;
+ msleep(marktime);
+ j++;
}
if (max == j)
return -1;
else {
- if (0x20 & rc)
+ if (0x20 & rc) {
+ rate = 2;
JOT(8, "hardware detects 60 Hz\n");
- else
+ } else {
+ rate = 0;
JOT(8, "hardware detects 50 Hz\n");
+ }
if (0x80 & rc)
JOT(8, "hardware detects interlacing\n");
- else
+ else {
+ rate++;
JOT(8, "hardware detects no interlacing\n");
+ }
}
-return 0;
+return rate;
}
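[Editorial note, not part of the patch:] The rate code documented at the top of ready_saa() packs two facts: dividing by two distinguishes 60 Hz from 50 Hz, and the low bit flags a non-interlaced signal. Assuming the function returns rate as that comment describes, a caller can decode it like this; a sketch only, using the JOM macro exactly as the rest of the patch does:

rate = ready_saa(peasycap->pusb_device);
if (0 <= rate) {
	bool ntsc = (0 < rate / 2);		/* 2 or 3: 60 Hz field rate */
	bool progressive = (0 != (rate & 1));	/* 1 or 3: no interlacing   */
	JOM(8, "%i=rate, %i=ntsc, %i=progressive\n", rate, ntsc, progressive);
}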
@@ -447,45 +582,80 @@ return 0;
/*
* NOTE: THE FOLLOWING ARE NOT CHECKED:
* REGISTERS 0x000, 0x002: FUNCTIONALITY IS NOT KNOWN
- * REGISTER 0x100: ACCEPT ALSO (0x80 | stk1160config[.].set)
+ * REGISTER 0x100: ACCEPT ALSO (0x80 | stk1160config....[.].set)
*/
/*--------------------------------------------------------------------------*/
int
-check_stk(struct usb_device *p)
+check_stk(struct usb_device *p, bool ntsc)
{
int i0, ir;
-i0 = 0;
-while (0xFFF != stk1160config[i0].reg) {
- if (0x000 == stk1160config[i0].reg) {
- i0++; continue;
- }
- if (0x002 == stk1160config[i0].reg) {
- i0++; continue;
- }
-
- ir = read_stk(p, stk1160config[i0].reg);
- if (0x100 == stk1160config[i0].reg) {
- if ((ir != (0xFF & stk1160config[i0].set)) && \
- (ir != (0x80 | (0xFF & stk1160config[i0].set))) && \
- (0xFFFF != stk1160config[i0].set)) {
- SAY("STK register 0x%03X has 0x%02X, " \
- "expected 0x%02X\n", \
- stk1160config[i0].reg, ir, \
- stk1160config[i0].set);
+if (NULL == p)
+ return -ENODEV;
+i0 = 0;
+if (true == ntsc) {
+ while (0xFFF != stk1160configNTSC[i0].reg) {
+ if (0x000 == stk1160configNTSC[i0].reg) {
+ i0++; continue;
+ }
+ if (0x002 == stk1160configNTSC[i0].reg) {
+ i0++; continue;
+ }
+ ir = read_stk(p, stk1160configNTSC[i0].reg);
+ if (0x100 == stk1160configNTSC[i0].reg) {
+ if ((ir != (0xFF & stk1160configNTSC[i0].set)) && \
+ (ir != (0x80 | (0xFF & \
+ stk1160configNTSC[i0].set))) && \
+ (0xFFFF != \
+ stk1160configNTSC[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160configNTSC[i0].reg, \
+ ir, stk1160configNTSC[i0].set);
+ }
+ i0++; continue;
}
- i0++; continue;
+ if ((ir != (0xFF & stk1160configNTSC[i0].set)) && \
+ (0xFFFF != stk1160configNTSC[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160configNTSC[i0].reg, \
+ ir, stk1160configNTSC[i0].set);
}
-
- if ((ir != (0xFF & stk1160config[i0].set)) && \
- (0xFFFF != stk1160config[i0].set)) {
- SAY("STK register 0x%03X has 0x%02X, " \
- "expected 0x%02X\n", \
- stk1160config[i0].reg, ir, \
- stk1160config[i0].set);
+ i0++;
+ }
+} else {
+ while (0xFFF != stk1160configPAL[i0].reg) {
+ if (0x000 == stk1160configPAL[i0].reg) {
+ i0++; continue;
}
- i0++;
+ if (0x002 == stk1160configPAL[i0].reg) {
+ i0++; continue;
+ }
+ ir = read_stk(p, stk1160configPAL[i0].reg);
+ if (0x100 == stk1160configPAL[i0].reg) {
+ if ((ir != (0xFF & stk1160configPAL[i0].set)) && \
+ (ir != (0x80 | (0xFF & \
+ stk1160configPAL[i0].set))) && \
+ (0xFFFF != \
+ stk1160configPAL[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160configPAL[i0].reg, \
+ ir, stk1160configPAL[i0].set);
+ }
+ i0++; continue;
+ }
+ if ((ir != (0xFF & stk1160configPAL[i0].set)) && \
+ (0xFFFF != stk1160configPAL[i0].set)) {
+ SAY("STK register 0x%03X has 0x%02X, " \
+ "expected 0x%02X\n", \
+ stk1160configPAL[i0].reg, \
+ ir, stk1160configPAL[i0].set);
+ }
+ i0++;
}
+}
return 0;
}
/****************************************************************************/
@@ -494,6 +664,8 @@ read_saa(struct usb_device *p, __u16 reg0)
{
__u8 igot;
+if (NULL == p)
+ return -ENODEV;
SET(p, 0x208, reg0);
SET(p, 0x200, 0x20);
if (0 != wait_i2c(p))
@@ -508,12 +680,14 @@ read_stk(struct usb_device *p, __u32 reg0)
{
__u8 igot;
+if (NULL == p)
+ return -ENODEV;
igot = 0;
GET(p, reg0, &igot);
return igot;
}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
/*
* HARDWARE USERSPACE INPUT NUMBER PHYSICAL INPUT DRIVER input VALUE
*
@@ -534,81 +708,100 @@ return igot;
int
select_input(struct usb_device *p, int input, int mode)
{
+int ir;
+if (NULL == p)
+ return -ENODEV;
stop_100(p);
-
-msleep(20);
switch (input) {
case 0:
case 1: {
- SET(p, 0x0000, 0x0098); break;
+ if (0 != write_saa(p, 0x02, 0x80)) {
+ SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+ input);
+ }
+ SET(p, 0x0000, 0x0098);
+ SET(p, 0x0002, 0x0078);
+ break;
}
case 2: {
- SET(p, 0x0000, 0x0090); break;
+ if (0 != write_saa(p, 0x02, 0x80)) {
+ SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+ input);
+ }
+ SET(p, 0x0000, 0x0090);
+ SET(p, 0x0002, 0x0078);
+ break;
}
case 3: {
- SET(p, 0x0000, 0x0088); break;
+ if (0 != write_saa(p, 0x02, 0x80)) {
+ SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+ input);
+ }
+ SET(p, 0x0000, 0x0088);
+ SET(p, 0x0002, 0x0078);
+ break;
}
case 4: {
- SET(p, 0x0000, 0x0080); break;
+ if (0 != write_saa(p, 0x02, 0x80)) {
+ SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+ input);
+ }
+ SET(p, 0x0000, 0x0080);
+ SET(p, 0x0002, 0x0078);
+ break;
}
case 5: {
if (9 != mode)
mode = 7;
switch (mode) {
- case 7:
- {
+ case 7: {
if (0 != write_saa(p, 0x02, 0x87)) {
- SAY("ERROR: failed to set SAA " \
- "register 0x02 for input " \
- "%i\n", input);
+ SAY("ERROR: failed to set SAA register 0x02 " \
+ "for input %i\n", input);
}
if (0 != write_saa(p, 0x05, 0xFF)) {
- SAY("ERROR: failed to set SAA " \
- "register 0x05 for input " \
- "%i\n", input);
+ SAY("ERROR: failed to set SAA register 0x05 " \
+ "for input %i\n", input);
}
break;
}
- case 9:
- {
+ case 9: {
if (0 != write_saa(p, 0x02, 0x89)) {
- SAY("ERROR: failed to set SAA " \
- "register 0x02 for input " \
- "%i\n", input);
+ SAY("ERROR: failed to set SAA register 0x02 " \
+ "for input %i\n", input);
}
if (0 != write_saa(p, 0x05, 0x00)) {
- SAY("ERROR: failed to set SAA " \
- "register 0x05 for input " \
- "%i\n", input);
+ SAY("ERROR: failed to set SAA register 0x05 " \
+ "for input %i\n", input);
}
- break;
+ break;
}
- default:
- {
+ default: {
SAY("MISTAKE: bad mode: %i\n", mode);
return -1;
- }
+ }
}
if (0 != write_saa(p, 0x04, 0x00)) {
- SAY("ERROR: failed to set SAA register 0x04 " \
- "for input %i\n", input);
+ SAY("ERROR: failed to set SAA register 0x04 for input %i\n", \
+ input);
}
if (0 != write_saa(p, 0x09, 0x80)) {
- SAY("ERROR: failed to set SAA register 0x09 " \
- "for input %i\n", input);
+ SAY("ERROR: failed to set SAA register 0x09 for input %i\n", \
+ input);
}
+ SET(p, 0x0002, 0x0093);
break;
}
-default:
- {
+default: {
SAY("ERROR: bad input: %i\n", input);
return -1;
}
}
-msleep(20);
-SET(p, 0x0002, 0x0093);
-msleep(20);
+ir = read_stk(p, 0x00);
+JOT(8, "STK register 0x00 has 0x%02X\n", ir);
+ir = read_saa(p, 0x02);
+JOT(8, "SAA register 0x02 has 0x%02X\n", ir);
start_100(p);
@@ -621,6 +814,8 @@ set_resolution(struct usb_device *p, \
{
__u16 u0x0111, u0x0113, u0x0115, u0x0117;
+if (NULL == p)
+ return -ENODEV;
u0x0111 = ((0xFF00 & set0) >> 8);
u0x0113 = ((0xFF00 & set1) >> 8);
u0x0115 = ((0xFF00 & set2) >> 8);
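[Editorial note, not part of the patch:] set_resolution() splits each 16-bit argument into a high byte (u0x0111, u0x0113, u0x0115, u0x0117 above) and, by implication, a low byte for the even-numbered register of each pair, matching the way the configuration tables program 0x0110...0x0117 in byte pairs. A hypothetical call reproducing the PAL window from stk1160configPAL, assuming the four arguments are the 16-bit values seen in the body (the hardware meaning of the pairs is not documented in this patch):

set_resolution(p, 0x0008, 0x0020, 0x0508, 0x0110);
/* expected byte writes:
 *   0x0110 <- 0x08, 0x0111 <- 0x00,   0x0112 <- 0x20, 0x0113 <- 0x00,
 *   0x0114 <- 0x08, 0x0115 <- 0x05,   0x0116 <- 0x10, 0x0117 <- 0x01
 */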
@@ -641,13 +836,25 @@ return 0;
int
start_100(struct usb_device *p)
{
-__u16 get0;
-__u8 igot;
-
-GET(p, 0x0100, &igot); get0 = igot;
-msleep(0x1f4);
+__u16 get116, get117, get0;
+__u8 igot116, igot117, igot;
+
+if (NULL == p)
+ return -ENODEV;
+GET(p, 0x0116, &igot116);
+get116 = igot116;
+GET(p, 0x0117, &igot117);
+get117 = igot117;
+SET(p, 0x0116, 0x0000);
+SET(p, 0x0117, 0x0000);
+
+GET(p, 0x0100, &igot);
+get0 = igot;
SET(p, 0x0100, (0x80 | get0));
-msleep(0x1f4);
+
+SET(p, 0x0116, get116);
+SET(p, 0x0117, get117);
+
return 0;
}
/****************************************************************************/
@@ -657,10 +864,11 @@ stop_100(struct usb_device *p)
__u16 get0;
__u8 igot;
-GET(p, 0x0100, &igot); get0 = igot;
-msleep(0x1f4);
+if (NULL == p)
+ return -ENODEV;
+GET(p, 0x0100, &igot);
+get0 = igot;
SET(p, 0x0100, (0x7F & get0));
-msleep(0x1f4);
return 0;
}
/****************************************************************************/
@@ -674,9 +882,11 @@ wait_i2c(struct usb_device *p)
{
__u16 get0;
__u8 igot;
-const int max = 4;
+const int max = 2;
int k;
+if (NULL == p)
+ return -ENODEV;
for (k = 0; k < max; k++) {
GET(p, 0x0201, &igot); get0 = igot;
switch (get0) {
@@ -685,7 +895,7 @@ for (k = 0; k < max; k++) {
return 0;
}
case 0x00: {
- msleep(10);
+ msleep(20);
continue;
}
default: {
@@ -703,8 +913,7 @@ __u16 igot;
int rc0, rc1;
if (!pusb_device)
- return -EFAULT;
-
+ return -ENODEV;
rc1 = 0; igot = 0;
rc0 = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
(__u8)0x01, \
@@ -741,27 +950,14 @@ case 0x204:
case 0x205:
case 0x350:
case 0x351: {
- if (0 != igot) {
+ if (0 != (0xFF & igot)) {
JOT(8, "unexpected 0x%02X for STK register 0x%03X\n", \
igot, index);
}
break;
}
-case 0x114:
-case 0x116: {
- if ((0xFF & value) != igot) {
- JOT(8, "unexpected 0x%02X != 0x%02X " \
- "for STK register 0x%03X\n", \
- igot, value, index);
- }
-break;
-}
-case 0x200: {
- if (0 == igot)
- break;
-}
default: {
- if (value != igot) {
+ if ((0xFF & value) != (0xFF & igot)) {
JOT(8, "unexpected 0x%02X != 0x%02X " \
"for STK register 0x%03X\n", \
igot, value, index);
@@ -780,8 +976,7 @@ regget(struct usb_device *pusb_device, __u16 index, void *pvoid)
int ir;
if (!pusb_device)
- return -EFAULT;
-
+ return -ENODEV;
ir = usb_control_msg(pusb_device, usb_rcvctrlpipe(pusb_device, 0), \
(__u8)0x00, \
(__u8)(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE), \
@@ -796,6 +991,8 @@ return 0xFF & ir;
int
wakeup_device(struct usb_device *pusb_device)
{
+if (!pusb_device)
+ return -ENODEV;
return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
(__u8)USB_REQ_SET_FEATURE, \
(__u8)(USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE), \
@@ -806,6 +1003,12 @@ return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
(int)50000);
}
/*****************************************************************************/
+int
+audio_setup(struct easycap *peasycap)
+{
+struct usb_device *pusb_device;
+unsigned char buffer[1];
+int rc, id1, id2;
/*---------------------------------------------------------------------------*/
/*
* IMPORTANT:
@@ -814,29 +1017,21 @@ return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
* TO ENABLE AUDIO THE VALUE 0x0200 MUST BE SENT.
*/
/*---------------------------------------------------------------------------*/
-int
-audio_setup(struct easycap *peasycap)
-{
-struct usb_device *pusb_device;
-static __u8 request = 0x01;
-static __u8 requesttype = \
+const __u8 request = 0x01;
+const __u8 requesttype = \
(__u8)(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
-
-static __u16 value_unmute = 0x0200;
-static __u16 index = 0x0301;
-
-static unsigned char buffer[1];
-static __u16 length = 1;
-int rc, id1, id2;
+const __u16 value_unmute = 0x0200;
+const __u16 index = 0x0301;
+const __u16 length = 1;
if (NULL == peasycap)
return -EFAULT;
pusb_device = peasycap->pusb_device;
if (NULL == pusb_device)
- return -EFAULT;
+ return -ENODEV;
-JOT(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n", \
+JOM(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n", \
requesttype, request, \
(0x00FF & value_unmute), \
(0xFF00 & value_unmute) >> 8, \
@@ -875,41 +1070,25 @@ if (rc != (int)length)
* THE UPPER BYTE SEEMS TO HAVE NO EFFECT.
*/
/*--------------------------------------------------------------------------*/
-
SET(pusb_device, 0x0500, 0x0094);
-
SET(pusb_device, 0x0500, 0x008C);
-
SET(pusb_device, 0x0506, 0x0001);
SET(pusb_device, 0x0507, 0x0000);
-
id1 = read_vt(pusb_device, 0x007C);
id2 = read_vt(pusb_device, 0x007E);
-SAY("0x%04X:0x%04X is audio vendor id\n", id1, id2);
-
+SAM("0x%04X:0x%04X is audio vendor id\n", id1, id2);
/*---------------------------------------------------------------------------*/
/*
-* SELECT AUDIO SOURCE "LINE IN" AND SET DEFAULT GAIN TO 0 dB.
-*
-* THESE COMMANDS SEEM TO BE ACCEPTED (THOUGH POSSIBLY IGNORED) EVEN WHEN
-* THERE IS NO SEPARATE AUDIO CHIP PRESENT.
+ * SELECT AUDIO SOURCE "LINE IN" AND SET THE AUDIO GAIN.
*/
/*---------------------------------------------------------------------------*/
-
-write_vt(pusb_device, 0x0002, 0x8000);
-write_vt(pusb_device, 0x001C, 0x8000);
-
-write_vt(pusb_device, 0x000E, 0x0000);
-write_vt(pusb_device, 0x0010, 0x0000);
-write_vt(pusb_device, 0x0012, 0x8000);
-write_vt(pusb_device, 0x0016, 0x0000);
-
-write_vt(pusb_device, 0x001A, 0x0404);
-write_vt(pusb_device, 0x0002, 0x0000);
-write_vt(pusb_device, 0x001C, 0x0000);
-
+if (31 < easycap_gain)
+ easycap_gain = 31;
+if (0 > easycap_gain)
+ easycap_gain = 0;
+if (0 != audio_gainset(pusb_device, (__s8)easycap_gain))
+ SAY("ERROR: audio_gainset() failed\n");
check_vt(pusb_device);
-
return 0;
}
/*****************************************************************************/
@@ -918,6 +1097,8 @@ check_vt(struct usb_device *pusb_device)
{
int igot;
+if (!pusb_device)
+ return -ENODEV;
igot = read_vt(pusb_device, 0x0002);
if (0 > igot)
SAY("ERROR: failed to read VT1612A register 0x02\n");
@@ -942,17 +1123,23 @@ if (0 > igot)
if (0x8000 & igot)
SAY("register 0x%02X muted\n", 0x12);
+igot = read_vt(pusb_device, 0x0014);
+if (0 > igot)
+ SAY("ERROR: failed to read VT1612A register 0x14\n");
+if (0x8000 & igot)
+ SAY("register 0x%02X muted\n", 0x14);
+
igot = read_vt(pusb_device, 0x0016);
if (0 > igot)
SAY("ERROR: failed to read VT1612A register 0x16\n");
if (0x8000 & igot)
SAY("register 0x%02X muted\n", 0x16);
-igot = read_vt(pusb_device, 0x001A);
+igot = read_vt(pusb_device, 0x0018);
if (0 > igot)
- SAY("ERROR: failed to read VT1612A register 0x1A\n");
+ SAY("ERROR: failed to read VT1612A register 0x18\n");
if (0x8000 & igot)
- SAY("register 0x%02X muted\n", 0x1A);
+ SAY("register 0x%02X muted\n", 0x18);
igot = read_vt(pusb_device, 0x001C);
if (0 > igot)
@@ -964,14 +1151,18 @@ return 0;
}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
-/*
- * NOTE: THIS DOES INCREASE THE VOLUME DRAMATICALLY:
- * audio_gainset(pusb_device, 0x000F);
+/* NOTE: THIS DOES INCREASE THE VOLUME DRAMATICALLY:
+ * audio_gainset(pusb_device, 0x000F);
*
- * IF 16<loud<31 VT1621A REGISTER 0x1C IS SET FOR POSITIVE GAIN.
- * IF loud<=16 VT1621A REGISTER 0x1C IS SET FOR ZERO GAIN.
- * THERE IS NEVER ANY (ADDITIONAL) ATTENUATION.
- */
+ * loud dB register 0x10 dB register 0x1C dB total
+ * 0 -34.5 0 -34.5
+ * .. .... . ....
+ * 15 10.5 0 10.5
+ * 16 12.0 0 12.0
+ * 17 12.0 1.5 13.5
+ * .. .... .... ....
+ * 31 12.0 22.5 34.5
+*/
/*---------------------------------------------------------------------------*/
int
audio_gainset(struct usb_device *pusb_device, __s8 loud)
@@ -980,25 +1171,65 @@ int igot;
__u8 u8;
__u16 mute;
-if (16 > loud)
- loud = 16;
-u8 = 0x000F & (__u8)(loud - 16);
+if (NULL == pusb_device)
+ return -ENODEV;
+if (0 > loud)
+ loud = 0;
+if (31 < loud)
+ loud = 31;
write_vt(pusb_device, 0x0002, 0x8000);
+/*---------------------------------------------------------------------------*/
+igot = read_vt(pusb_device, 0x000E);
+if (0 > igot) {
+ SAY("ERROR: failed to read VT1612A register 0x0E\n");
+ mute = 0x0000;
+} else
+ mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
+
+if (16 > loud)
+ u8 = 0x01 | (0x001F & (((__u8)(15 - loud)) << 1));
+else
+ u8 = 0;
+JOT(8, "0x%04X=(mute|u8) for VT1612A register 0x0E\n", mute | u8);
+write_vt(pusb_device, 0x000E, (mute | u8));
+/*---------------------------------------------------------------------------*/
+igot = read_vt(pusb_device, 0x0010);
+if (0 > igot) {
+ SAY("ERROR: failed to read VT1612A register 0x10\n");
+ mute = 0x0000;
+} else
+ mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
+
+JOT(8, "0x%04X=(mute|u8|(u8<<8)) for VT1612A register 0x10,...0x18\n", \
+ mute | u8 | (u8 << 8));
+write_vt(pusb_device, 0x0010, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0012, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0014, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0016, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0018, (mute | u8 | (u8 << 8)));
+/*---------------------------------------------------------------------------*/
igot = read_vt(pusb_device, 0x001C);
if (0 > igot) {
SAY("ERROR: failed to read VT1612A register 0x1C\n");
mute = 0x0000;
} else
mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
-JOT(8, "0x%04X=(mute|u8|(u8<<8))\n", mute | u8 | (u8 << 8));
+if (16 <= loud)
+ u8 = 0x000F & (__u8)(loud - 16);
+else
+ u8 = 0;
-write_vt(pusb_device, 0x001C, 0x8000);
+JOT(8, "0x%04X=(mute|u8|(u8<<8)) for VT1612A register 0x1C\n", \
+ mute | u8 | (u8 << 8));
write_vt(pusb_device, 0x001C, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x001A, 0x0404);
write_vt(pusb_device, 0x0002, 0x0000);
-
return 0;
}
/*****************************************************************************/
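[Editorial note, not part of the patch:] Per the table preceding audio_gainset(), values of loud below 16 only attenuate via registers 0x0E and 0x10...0x18, while 16 and above add boost through register 0x1C. Two illustrative calls consistent with that table and with the module default gain=16 introduced later in this patch:

audio_gainset(pusb_device, 16);	/* default: 12.0 dB, registers 0x10..0x18 and 0x1C all zero */
audio_gainset(pusb_device, 31);	/* maximum: 34.5 dB, register 0x1C gets 0x0F (+22.5 dB)     */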
@@ -1007,35 +1238,11 @@ audio_gainget(struct usb_device *pusb_device)
{
int igot;
+if (NULL == pusb_device)
+ return -ENODEV;
igot = read_vt(pusb_device, 0x001C);
if (0 > igot)
SAY("ERROR: failed to read VT1612A register 0x1C\n");
return igot;
}
/*****************************************************************************/
-int
-set2to78(struct usb_device *p)
-{
-int ir;
-
-msleep(20);
-ir = regset(p, 0x0002, 0x0078);
-if (0 > ir)
- SAY("ERROR: failed to set register 0x0002 to 0x0078\n");
-msleep(20);
-return ir;
-}
-/*****************************************************************************/
-int
-set2to93(struct usb_device *p)
-{
-int ir;
-
-msleep(20);
-ir = regset(p, 0x0002, 0x0093);
-if (0 > ir)
- SAY("ERROR: failed to set register 0x0002 to 0x0078\n");
-msleep(20);
-return ir;
-}
-/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/easycap/easycap_main.c
index 5a4bbd9b453f..acc1f56e6f29 100644
--- a/drivers/staging/easycap/easycap_main.c
+++ b/drivers/staging/easycap/easycap_main.c
@@ -30,9 +30,29 @@
#include "easycap.h"
#include "easycap_standard.h"
+#include "easycap_ioctl.h"
-int easycap_debug;
-module_param(easycap_debug, int, S_IRUGO | S_IWUSR);
+static int easycap_debug;
+static int easycap_bars;
+int easycap_gain = 16;
+module_param_named(debug, easycap_debug, int, S_IRUGO | S_IWUSR);
+module_param_named(bars, easycap_bars, int, S_IRUGO | S_IWUSR);
+module_param_named(gain, easycap_gain, int, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+/*
+ * dongle_this IS INDISPENSABLY static BECAUSE FUNCTION easycap_usb_probe()
+ * IS CALLED SUCCESSIVELY FOR INTERFACES 0, 1, 2 AND THE POINTER peasycap
+ * ALLOCATED DURING THE PROBING OF INTERFACE 0 MUST BE REMEMBERED WHEN
+ * PROBING INTERFACES 1 AND 2.
+ *
+ * IOCTL LOCKING IS DONE AT MODULE LEVEL, NOT DEVICE LEVEL.
+*/
+/*---------------------------------------------------------------------------*/
+
+struct easycap_dongle easycap_dongle[DONGLE_MANY];
+static int dongle_this;
+static int dongle_done;
/*---------------------------------------------------------------------------*/
/*
@@ -63,22 +83,25 @@ const struct file_operations easycap_fops = {
.owner = THIS_MODULE,
.open = easycap_open,
.release = easycap_release,
- .unlocked_ioctl = easycap_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+ .unlocked_ioctl = easycap_ioctl_noinode,
+#else
+ .ioctl = easycap_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
.poll = easycap_poll,
.mmap = easycap_mmap,
.llseek = no_llseek,
};
struct vm_operations_struct easycap_vm_ops = {
-.open = easycap_vma_open,
-.close = easycap_vma_close,
-.fault = easycap_vma_fault,
+ .open = easycap_vma_open,
+ .close = easycap_vma_close,
+ .fault = easycap_vma_fault,
};
struct usb_class_driver easycap_class = {
-.name = "usb/easycap%d",
-.fops = &easycap_fops,
-.minor_base = USB_SKEL_MINOR_BASE,
+ .name = "usb/easycap%d",
+ .fops = &easycap_fops,
+ .minor_base = USB_SKEL_MINOR_BASE,
};
-
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
#if defined(EASYCAP_NEEDS_V4L2_FOPS)
@@ -86,16 +109,17 @@ const struct v4l2_file_operations v4l2_fops = {
.owner = THIS_MODULE,
.open = easycap_open_noinode,
.release = easycap_release_noinode,
- .unlocked_ioctl = easycap_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+ .unlocked_ioctl = easycap_ioctl_noinode,
+#else
+ .ioctl = easycap_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
.poll = easycap_poll,
.mmap = easycap_mmap,
};
#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
-int video_device_many /*=0*/;
-struct video_device *pvideo_array[VIDEO_DEVICE_MANY], *pvideo_device;
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
/*--------------------------------------------------------------------------*/
/*
* PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
@@ -105,7 +129,11 @@ const struct file_operations easysnd_fops = {
.owner = THIS_MODULE,
.open = easysnd_open,
.release = easysnd_release,
- .unlocked_ioctl = easysnd_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+ .unlocked_ioctl = easysnd_ioctl_noinode,
+#else
+ .ioctl = easysnd_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
.read = easysnd_read,
.llseek = no_llseek,
};
@@ -115,17 +143,26 @@ struct usb_class_driver easysnd_class = {
.minor_base = USB_SKEL_MINOR_BASE,
};
/****************************************************************************/
-/*--------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
/*
- * IT IS NOT APPROPRIATE FOR easycap_open() TO SUBMIT THE VIDEO URBS HERE,
- * BECAUSE THERE WILL ALWAYS BE SUBSEQUENT NEGOTIATION OF TV STANDARD AND
- * FORMAT BY IOCTL AND IT IS INADVISABLE TO HAVE THE URBS RUNNING WHILE
- * REGISTERS OF THE SA7113H ARE BEING MANIPULATED.
- *
- * THE SUBMISSION OF VIDEO URBS IS THEREFORE DELAYED UNTIL THE IOCTL COMMAND
- * STREAMON IS RECEIVED.
- */
-/*--------------------------------------------------------------------------*/
+ * THIS ROUTINE DOES NOT DETECT DUPLICATE OCCURRENCES OF POINTER peasycap
+*/
+/*---------------------------------------------------------------------------*/
+int
+isdongle(struct easycap *peasycap)
+{
+int k;
+if (NULL == peasycap)
+ return -2;
+for (k = 0; k < DONGLE_MANY; k++) {
+ if (easycap_dongle[k].peasycap == peasycap) {
+ peasycap->isdongle = k;
+ return k;
+ }
+}
+return -1;
+}
+/*****************************************************************************/
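[Editorial note, not part of the patch:] Since ioctl locking is done at module level (see the comment above easycap_dongle[]), a handler that only has peasycap can recover its dongle slot with isdongle(). A usage sketch; the lock fields hanging off struct easycap_dongle are not shown in this hunk, so only the slot lookup is illustrated:

k = isdongle(peasycap);
if (0 <= k && DONGLE_MANY > k) {
	/* peasycap occupies slot k of easycap_dongle[]; module-level
	 * serialization for this dongle is keyed on that slot. */
	JOM(8, "%i=k: dongle identified\n", k);
} else if (-1 == k) {
	SAY("ERROR: peasycap not found in easycap_dongle[]\n");
} else {
	SAY("ERROR: peasycap is NULL\n");	/* isdongle() returned -2 */
}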
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
int
@@ -140,15 +177,17 @@ easycap_open(struct inode *inode, struct file *file)
{
#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
struct usb_interface *pusb_interface;
+#else
+struct video_device *pvideo_device;
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-struct usb_device *p;
struct easycap *peasycap;
-int i, k, m, rc;
+int rc;
JOT(4, "\n");
SAY("==========OPEN=========\n");
peasycap = (struct easycap *)NULL;
+/*---------------------------------------------------------------------------*/
#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
if ((struct inode *)NULL == inode) {
SAY("ERROR: inode is NULL.\n");
@@ -162,161 +201,427 @@ if (!pusb_interface) {
peasycap = usb_get_intfdata(pusb_interface);
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#else
-for (i = 0; i < video_device_many; i++) {
- pvideo_device = pvideo_array[i];
- if ((struct video_device *)NULL != pvideo_device) {
- peasycap = (struct easycap *)video_get_drvdata(pvideo_device);
- break;
- }
+pvideo_device = video_devdata(file);
+if ((struct video_device *)NULL == pvideo_device) {
+ SAY("ERROR: pvideo_device is NULL.\n");
+ return -EFAULT;
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+peasycap = (struct easycap *)video_get_drvdata(pvideo_device);
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-if ((struct easycap *)NULL == peasycap) {
- SAY("MISTAKE: peasycap is NULL\n");
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
-file->private_data = peasycap;
-/*---------------------------------------------------------------------------*/
-/*
- * INITIALIZATION
- */
-/*---------------------------------------------------------------------------*/
-JOT(4, "starting initialization\n");
-
-for (k = 0; k < FRAME_BUFFER_MANY; k++) {
- for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++)
- memset(peasycap->frame_buffer[k][m].pgo, 0, PAGE_SIZE);
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
}
-p = peasycap->pusb_device;
-if ((struct usb_device *)NULL == p) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
} else {
- JOT(16, "0x%08lX=peasycap->pusb_device\n", \
+ JOM(16, "0x%08lX=peasycap->pusb_device\n", \
(long int)peasycap->pusb_device);
}
+file->private_data = peasycap;
rc = wakeup_device(peasycap->pusb_device);
if (0 == rc)
- JOT(8, "wakeup_device() OK\n");
+ JOM(8, "wakeup_device() OK\n");
else {
-	SAY("ERROR: wakeup_device() returned %i\n", rc);
+	if (-ENODEV == rc)
+		SAM("ERROR: wakeup_device() returned -ENODEV\n");
+	else
+		SAM("ERROR: wakeup_device() returned %i\n", rc);
+	return rc;
+}
+peasycap->input = 0;
+rc = reset(peasycap);
+if (0 != rc) {
+ SAM("ERROR: reset() returned %i\n", rc);
return -EFAULT;
}
-rc = setup_stk(p); peasycap->input = 0;
-if (0 == rc)
- JOT(8, "setup_stk() OK\n");
-else {
- SAY("ERROR: setup_stk() returned %i\n", rc);
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * RESET THE HARDWARE TO ITS REFERENCE STATE.
+ *
+ * THIS ROUTINE MAY BE CALLED REPEATEDLY IF easycap_complete() DETECTS
+ * A BAD VIDEO FRAME SIZE.
+*/
+/*---------------------------------------------------------------------------*/
+int
+reset(struct easycap *peasycap)
+{
+struct easycap_standard const *peasycap_standard;
+int i, rc, input, rate;
+bool ntsc, other;
+
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
-rc = setup_saa(p);
+input = peasycap->input;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * IF THE SAA7113H HAS ALREADY ACQUIRED SYNC, USE ITS HARDWARE-DETECTED
+ * FIELD FREQUENCY TO DISTINGUISH NTSC FROM PAL. THIS IS ESSENTIAL FOR
+ * gstreamer AND OTHER USERSPACE PROGRAMS WHICH MAY NOT ATTEMPT TO INITIATE
+ * A SWITCH BETWEEN PAL AND NTSC.
+ *
+ * FUNCTION ready_saa() MAY REQUIRE A SUBSTANTIAL FRACTION OF A SECOND TO
+ * COMPLETE, SO SHOULD NOT BE INVOKED WITHOUT GOOD REASON.
+*/
+/*---------------------------------------------------------------------------*/
+other = false;
+if (true == peasycap->ntsc)
+ JOM(8, "true=peasycap->ntsc\n");
+else
+ JOM(8, "false=peasycap->ntsc\n");
+rate = ready_saa(peasycap->pusb_device);
+if (0 > rate) {
+ JOM(8, "not ready to capture after %i ms ...\n", PATIENCE);
+ if (true == peasycap->ntsc) {
+ JOM(8, "... trying PAL ...\n"); ntsc = false;
+ } else {
+ JOM(8, "... trying NTSC ...\n"); ntsc = true;
+}
+rc = setup_stk(peasycap->pusb_device, ntsc);
if (0 == rc)
- JOT(8, "setup_saa() OK\n");
+ JOM(4, "setup_stk() OK\n");
else {
- SAY("ERROR: setup_saa() returned %i\n", rc);
+ SAM("ERROR: setup_stk() returned %i\n", rc);
return -EFAULT;
}
-rc = check_saa(p);
+rc = setup_saa(peasycap->pusb_device, ntsc);
if (0 == rc)
- JOT(8, "check_saa() OK\n");
-else if (-8 < rc)
- SAY("check_saa() returned %i\n", rc);
+ JOM(4, "setup_saa() OK\n");
else {
- SAY("ERROR: check_saa() returned %i\n", rc);
+ SAM("ERROR: setup_saa() returned %i\n", rc);
return -EFAULT;
}
-peasycap->standard_offset = -1;
+rate = ready_saa(peasycap->pusb_device);
+if (0 > rate) {
+ JOM(8, "not ready to capture after %i ms ...\n", PATIENCE);
+ JOM(8, "... saa register 0x1F has 0x%02X\n", \
+ read_saa(peasycap->pusb_device, 0x1F));
+ ntsc = peasycap->ntsc;
+ } else {
+ JOM(8, "... success at second try: %i=rate\n", rate);
+ ntsc = (0 < (rate/2)) ? true : false ;
+ other = true;
+ }
+} else {
+ JOM(8, "... success at first try: %i=rate\n", rate);
+ ntsc = (0 < rate/2) ? true : false ;
+}
+if (true == ntsc)
+ JOM(8, "true=ntsc\n");
+else
+ JOM(8, "false=ntsc\n");
/*---------------------------------------------------------------------------*/
-#if defined(PREFER_NTSC)
-rc = adjust_standard(peasycap, V4L2_STD_NTSC_M);
+rc = setup_stk(peasycap->pusb_device, ntsc);
if (0 == rc)
- JOT(8, "adjust_standard(.,NTSC_M) OK\n");
+ JOM(4, "setup_stk() OK\n");
else {
- SAY("ERROR: adjust_standard(.,NTSC_M) returned %i\n", rc);
+ SAM("ERROR: setup_stk() returned %i\n", rc);
return -EFAULT;
}
-rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
- false);
-if (0 <= rc)
- JOT(8, "adjust_format(.,640,480,UYVY) OK\n");
+rc = setup_saa(peasycap->pusb_device, ntsc);
+if (0 == rc)
+ JOM(4, "setup_saa() OK\n");
else {
- SAY("ERROR: adjust_format(.,640,480,UYVY) returned %i\n", rc);
+ SAM("ERROR: setup_saa() returned %i\n", rc);
return -EFAULT;
}
-#else
+for (i = 0; i < 180; i++)
+ peasycap->merit[i] = 0;
+peasycap->video_eof = 0;
+peasycap->audio_eof = 0;
+do_gettimeofday(&peasycap->timeval7);
+/*---------------------------------------------------------------------------*/
+/*
+ * RESTORE INPUT AND FORCE REFRESH OF STANDARD, FORMAT, ETC.
+ *
+ * WHILE THIS PROCEDURE IS IN PROGRESS, SOME IOCTL COMMANDS WILL RETURN -EBUSY.
+*/
+/*---------------------------------------------------------------------------*/
+peasycap->input = -8192;
+peasycap->standard_offset = -8192;
+if (true == other) {
+ peasycap_standard = &easycap_standard[0];
+ while (0xFFFF != peasycap_standard->mask) {
+ if (true == ntsc) {
+ if (NTSC_M == \
+ peasycap_standard->v4l2_standard.index) {
+ peasycap->inputset[input].standard_offset = \
+ peasycap_standard - \
+ &easycap_standard[0];
+ break;
+ }
+ } else {
+ if (PAL_BGHIN == \
+ peasycap_standard->v4l2_standard.index) {
+ peasycap->inputset[input].standard_offset = \
+ peasycap_standard -
+ &easycap_standard[0];
+ break;
+ }
+ }
+ peasycap_standard++;
+ }
+ if (0xFFFF == peasycap_standard->mask) {
+ SAM("ERROR: standard not found\n");
+ return -EINVAL;
+ }
+JOM(8, "%i=peasycap->inputset[%i].standard_offset\n", \
+ peasycap->inputset[input].standard_offset, input);
+}
+peasycap->format_offset = -8192;
+peasycap->brightness = -8192;
+peasycap->contrast = -8192;
+peasycap->saturation = -8192;
+peasycap->hue = -8192;
+
+rc = newinput(peasycap, input);
-rc = adjust_standard(peasycap, \
- (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
- V4L2_STD_PAL_I | V4L2_STD_PAL_N));
if (0 == rc)
- JOT(8, "adjust_standard(.,PAL_BGHIN) OK\n");
+ JOM(4, "restored input, standard and format\n");
else {
- SAY("ERROR: adjust_standard(.,PAL_BGHIN) returned %i\n", rc);
+	SAM("ERROR: newinput(.,%i) returned %i\n", input, rc);
return -EFAULT;
}
-rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
- false);
-if (0 <= rc)
- JOT(8, "adjust_format(.,640,480,uyvy,false) OK\n");
-else {
- SAY("ERROR: adjust_format(.,640,480,uyvy,false) returned %i\n", rc);
+if (true == peasycap->ntsc)
+ JOM(8, "true=peasycap->ntsc\n");
+else
+ JOM(8, "false=peasycap->ntsc\n");
+
+if (0 > peasycap->input) {
+ SAM("MISTAKE: %i=peasycap->input\n", peasycap->input);
+ return -ENOENT;
+}
+if (0 > peasycap->standard_offset) {
+ SAM("MISTAKE: %i=peasycap->standard_offset\n", \
+ peasycap->standard_offset);
+ return -ENOENT;
+}
+if (0 > peasycap->format_offset) {
+ SAM("MISTAKE: %i=peasycap->format_offset\n", \
+ peasycap->format_offset);
+ return -ENOENT;
+}
+if (0 > peasycap->brightness) {
+ SAM("MISTAKE: %i=peasycap->brightness\n", peasycap->brightness);
+ return -ENOENT;
+}
+if (0 > peasycap->contrast) {
+ SAM("MISTAKE: %i=peasycap->contrast\n", peasycap->contrast);
+ return -ENOENT;
+}
+if (0 > peasycap->saturation) {
+ SAM("MISTAKE: %i=peasycap->saturation\n", peasycap->saturation);
+ return -ENOENT;
+}
+if (0 > peasycap->hue) {
+ SAM("MISTAKE: %i=peasycap->hue\n", peasycap->hue);
+ return -ENOENT;
+}
+return 0;
+}
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ * IF THE REQUESTED INPUT IS THE SAME AS THE EXISTING INPUT, DO NOTHING.
+ * OTHERWISE:
+ * KILL URBS, CLEAR FIELD AND FRAME BUFFERS AND RESET THEIR
+ * _read AND _fill POINTERS.
+ * SELECT THE NEW INPUT.
+ * ADJUST THE STANDARD, FORMAT, BRIGHTNESS, CONTRAST, SATURATION AND HUE
+ * ON THE BASIS OF INFORMATION IN STRUCTURE easycap.inputset[input].
+ * RESUBMIT THE URBS IF STREAMING WAS ALREADY IN PROGRESS.
+ *
+ * NOTE:
+ * THIS ROUTINE MAY BE CALLED FREQUENTLY BY ZONEMINDER VIA IOCTL,
+ * SO IT SHOULD WRITE ONLY SPARINGLY TO THE LOGFILE.
+*/
+/*---------------------------------------------------------------------------*/
+int
+newinput(struct easycap *peasycap, int input)
+{
+int rc, k, m, mood, off;
+int inputnow, video_idlenow, audio_idlenow;
+bool resubmit;
+
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
+JOM(8, "%i=input sought\n", input);
-#endif /* !PREFER_NTSC*/
+if (0 > input || INPUT_MANY <= input)
+ return -ENOENT;
+inputnow = peasycap->input;
+if (input == inputnow)
+ return 0;
/*---------------------------------------------------------------------------*/
-rc = adjust_brightness(peasycap, -8192);
+/*
+ * IF STREAMING IS IN PROGRESS THE URBS ARE KILLED AT THIS
+ * STAGE AND WILL BE RESUBMITTED PRIOR TO EXIT FROM THE ROUTINE.
+ * IF NO STREAMING IS IN PROGRESS NO URBS WILL BE SUBMITTED BY THE
+ * ROUTINE.
+*/
+/*---------------------------------------------------------------------------*/
+video_idlenow = peasycap->video_idle;
+audio_idlenow = peasycap->audio_idle;
+
+peasycap->video_idle = 1;
+peasycap->audio_idle = 1;
+if (peasycap->video_isoc_streaming) {
+ resubmit = true;
+ kill_video_urbs(peasycap);
+} else
+ resubmit = false;
+/*---------------------------------------------------------------------------*/
+if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -ENODEV;
+}
+rc = usb_set_interface(peasycap->pusb_device,
+ peasycap->video_interface, \
+ peasycap->video_altsetting_off);
if (0 != rc) {
- SAY("ERROR: adjust_brightness(default) returned %i\n", rc);
+ SAM("ERROR: usb_set_interface() returned %i\n", rc);
return -EFAULT;
}
-rc = adjust_contrast(peasycap, -8192);
+rc = stop_100(peasycap->pusb_device);
if (0 != rc) {
- SAY("ERROR: adjust_contrast(default) returned %i\n", rc);
+ SAM("ERROR: stop_100() returned %i\n", rc);
return -EFAULT;
}
-rc = adjust_saturation(peasycap, -8192);
-if (0 != rc) {
- SAY("ERROR: adjust_saturation(default) returned %i\n", rc);
- return -EFAULT;
+for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+ for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++)
+ memset(peasycap->field_buffer[k][m].pgo, 0, PAGE_SIZE);
}
-rc = adjust_hue(peasycap, -8192);
-if (0 != rc) {
- SAY("ERROR: adjust_hue(default) returned %i\n", rc);
- return -EFAULT;
+for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++)
+ memset(peasycap->frame_buffer[k][m].pgo, 0, PAGE_SIZE);
+}
+peasycap->field_page = 0;
+peasycap->field_read = 0;
+peasycap->field_fill = 0;
+
+peasycap->frame_read = 0;
+peasycap->frame_fill = 0;
+for (k = 0; k < peasycap->input; k++) {
+ (peasycap->frame_fill)++;
+ if (peasycap->frame_buffer_many <= peasycap->frame_fill)
+ peasycap->frame_fill = 0;
}
+peasycap->input = input;
+select_input(peasycap->pusb_device, peasycap->input, 9);
/*---------------------------------------------------------------------------*/
-rc = usb_set_interface(peasycap->pusb_device, peasycap->video_interface, \
- peasycap->video_altsetting_on);
-if (0 == rc)
- JOT(8, "usb_set_interface(.,%i,%i) OK\n", peasycap->video_interface, \
- peasycap->video_altsetting_on);
-else {
- SAY("ERROR: usb_set_interface() returned %i\n", rc);
+if (input == peasycap->inputset[input].input) {
+ off = peasycap->inputset[input].standard_offset;
+ if (off != peasycap->standard_offset) {
+ rc = adjust_standard(peasycap, \
+ easycap_standard[off].v4l2_standard.id);
+ if (0 != rc) {
+ SAM("ERROR: adjust_standard() returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->standard_offset\n", \
+ peasycap->standard_offset);
+ } else {
+ JOM(8, "%i=peasycap->standard_offset unchanged\n", \
+ peasycap->standard_offset);
+ }
+ off = peasycap->inputset[input].format_offset;
+ if (off != peasycap->format_offset) {
+ rc = adjust_format(peasycap, \
+ easycap_format[off].v4l2_format.fmt.pix.width, \
+ easycap_format[off].v4l2_format.fmt.pix.height, \
+ easycap_format[off].v4l2_format.fmt.pix.pixelformat, \
+ easycap_format[off].v4l2_format.fmt.pix.field, false);
+ if (0 > rc) {
+ SAM("ERROR: adjust_format() returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->format_offset\n", peasycap->format_offset);
+ } else {
+ JOM(8, "%i=peasycap->format_offset unchanged\n", \
+ peasycap->format_offset);
+ }
+ mood = peasycap->inputset[input].brightness;
+ if (mood != peasycap->brightness) {
+ rc = adjust_brightness(peasycap, mood);
+ if (0 != rc) {
+ SAM("ERROR: adjust_brightness returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->brightness\n", peasycap->brightness);
+ }
+ mood = peasycap->inputset[input].contrast;
+ if (mood != peasycap->contrast) {
+ rc = adjust_contrast(peasycap, mood);
+ if (0 != rc) {
+ SAM("ERROR: adjust_contrast returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->contrast\n", peasycap->contrast);
+ }
+ mood = peasycap->inputset[input].saturation;
+ if (mood != peasycap->saturation) {
+ rc = adjust_saturation(peasycap, mood);
+ if (0 != rc) {
+ SAM("ERROR: adjust_saturation returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->saturation\n", peasycap->saturation);
+ }
+ mood = peasycap->inputset[input].hue;
+ if (mood != peasycap->hue) {
+ rc = adjust_hue(peasycap, mood);
+ if (0 != rc) {
+ SAM("ERROR: adjust_hue returned %i\n", rc);
+ return -EFAULT;
+ }
+ JOM(8, "%i=peasycap->hue\n", peasycap->hue);
+ }
+} else {
+ SAM("MISTAKE: easycap.inputset[%i] unpopulated\n", input);
+ return -ENOENT;
+}
+/*---------------------------------------------------------------------------*/
+if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -ENODEV;
+}
+rc = usb_set_interface(peasycap->pusb_device,
+ peasycap->video_interface, \
+ peasycap->video_altsetting_on);
+if (0 != rc) {
+ SAM("ERROR: usb_set_interface() returned %i\n", rc);
return -EFAULT;
}
-rc = start_100(p);
-if (0 == rc)
- JOT(8, "start_100() OK\n");
-else {
- SAY("ERROR: start_100() returned %i\n", rc);
+rc = start_100(peasycap->pusb_device);
+if (0 != rc) {
+ SAM("ERROR: start_100() returned %i\n", rc);
return -EFAULT;
}
+if (true == resubmit)
+ submit_video_urbs(peasycap);
+
peasycap->video_isoc_sequence = VIDEO_ISOC_BUFFER_MANY - 1;
-peasycap->video_idle = 0;
+peasycap->video_idle = video_idlenow;
+peasycap->audio_idle = audio_idlenow;
peasycap->video_junk = 0;
-for (i = 0; i < 180; i++)
- peasycap->merit[i] = 0;
-peasycap->video_eof = 0;
-peasycap->audio_eof = 0;
-do_gettimeofday(&peasycap->timeval7);
-
-peasycap->fudge = 0;
-
-JOT(4, "finished initialization\n");
return 0;
}
/*****************************************************************************/
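[Editorial note, not part of the patch:] newinput() relies on probe-time code having populated easycap.inputset[]; the guard input == peasycap->inputset[input].input is what rejects an unpopulated slot with -ENOENT. A sketch of how a hypothetical VIDIOC_S_INPUT handler might drive it (the real ioctl code lives in easycap_ioctl.c, outside this hunk):

static int easycap_s_input_sketch(struct easycap *peasycap, int input)
{
	int rc = newinput(peasycap, input);

	if (0 == rc)
		JOM(8, "%i=input selected\n", peasycap->input);
	else
		SAM("ERROR: newinput(.,%i) returned %i\n", input, rc);
	return rc;
}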
@@ -326,33 +631,25 @@ submit_video_urbs(struct easycap *peasycap)
struct data_urb *pdata_urb;
struct urb *purb;
struct list_head *plist_head;
-int j, isbad, m, rc;
+int j, isbad, nospc, m, rc;
int isbuf;
-if ((struct list_head *)NULL == peasycap->purb_video_head) {
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
+
+if (NULL == peasycap->purb_video_head) {
SAY("ERROR: peasycap->urb_video_head uninitialized\n");
return -EFAULT;
}
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
+if (NULL == peasycap->pusb_device) {
SAY("ERROR: peasycap->pusb_device is NULL\n");
- return -EFAULT;
+ return -ENODEV;
}
if (!peasycap->video_isoc_streaming) {
-
-
-
-
-
-
-
-
- JOT(4, "submission of all video urbs\n");
- if (0 != ready_saa(peasycap->pusb_device)) {
- SAY("ERROR: not ready to capture after waiting " \
- "one second\n");
- SAY("..... continuing anyway\n");
- }
- isbad = 0; m = 0;
+ JOM(4, "submission of all video urbs\n");
+ isbad = 0; nospc = 0; m = 0;
list_for_each(plist_head, (peasycap->purb_video_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
if (NULL != pdata_urb) {
@@ -389,44 +686,57 @@ if (!peasycap->video_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_KERNEL);
if (0 != rc) {
isbad++;
- SAY("ERROR: usb_submit_urb() failed " \
+ SAM("ERROR: usb_submit_urb() failed " \
"for urb with rc:\n");
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n");
+ SAM("ERROR: -ENOMEM=" \
+ "usb_submit_urb()\n");
break;
}
case -ENODEV: {
- SAY("ENODEV\n");
+ SAM("ERROR: -ENODEV=" \
+ "usb_submit_urb()\n");
break;
}
case -ENXIO: {
- SAY("ENXIO\n");
+ SAM("ERROR: -ENXIO=" \
+ "usb_submit_urb()\n");
break;
}
case -EINVAL: {
- SAY("EINVAL\n");
+ SAM("ERROR: -EINVAL=" \
+ "usb_submit_urb()\n");
break;
}
case -EAGAIN: {
- SAY("EAGAIN\n");
+ SAM("ERROR: -EAGAIN=" \
+ "usb_submit_urb()\n");
break;
}
case -EFBIG: {
- SAY("EFBIG\n");
+ SAM("ERROR: -EFBIG=" \
+ "usb_submit_urb()\n");
break;
}
case -EPIPE: {
- SAY("EPIPE\n");
+ SAM("ERROR: -EPIPE=" \
+ "usb_submit_urb()\n");
break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n");
+ SAM("ERROR: -EMSGSIZE=" \
+ "usb_submit_urb()\n");
+ break;
+ }
+ case -ENOSPC: {
+ nospc++;
break;
}
default: {
- SAY("unknown error code %i\n",\
- rc);
+ SAM("ERROR: %i=" \
+ "usb_submit_urb()\n",\
+ rc);
break;
}
}
@@ -434,14 +744,20 @@ if (!peasycap->video_isoc_streaming) {
m++;
}
} else {
- isbad++;
+ isbad++;
}
} else {
isbad++;
}
}
+ if (nospc) {
+ SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
+ SAM("..... possibly inadequate USB bandwidth\n");
+ peasycap->video_eof = 1;
+ }
+
if (isbad) {
- JOT(4, "attempting cleanup instead of submitting\n");
+ JOM(4, "attempting cleanup instead of submitting\n");
list_for_each(plist_head, (peasycap->purb_video_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, \
list_head);
@@ -454,16 +770,10 @@ if (!peasycap->video_isoc_streaming) {
peasycap->video_isoc_streaming = 0;
} else {
peasycap->video_isoc_streaming = 1;
- JOT(4, "submitted %i video urbs\n", m);
+ JOM(4, "submitted %i video urbs\n", m);
}
-
-
-
-
-
-
} else {
- JOT(4, "already streaming video urbs\n");
+ JOM(4, "already streaming video urbs\n");
}
return 0;
}
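
The -ENOSPC case singled out above is a bandwidth condition rather than an ordinary failure: for periodic (isochronous) endpoints usb_submit_urb() returns -ENOSPC when the host controller cannot reserve the bus time the pipe requires, which is why those refusals are counted separately, reported once with a hint about inadequate USB bandwidth, and answered by raising video_eof instead of being folded into isbad. A minimal stand-alone sketch of the same accounting; the submit_all() helper is hypothetical and not part of the driver:

#include <linux/usb.h>
#include <linux/kernel.h>

/* Count -ENOSPC refusals separately from genuine submission failures. */
static int submit_all(struct urb **urbs, int nurbs)
{
        int i, rc, isbad = 0, nospc = 0;

        for (i = 0; i < nurbs; i++) {
                rc = usb_submit_urb(urbs[i], GFP_KERNEL);
                if (rc == -ENOSPC)
                        nospc++;        /* periodic bandwidth exhausted  */
                else if (rc)
                        isbad++;        /* some other submission failure */
        }
        if (nospc)
                printk(KERN_WARNING
                       "%i isoc urbs refused: possibly inadequate USB bandwidth\n",
                       nospc);
        return isbad ? -EIO : 0;
}
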
@@ -475,35 +785,32 @@ int m;
struct list_head *plist_head;
struct data_urb *pdata_urb;
-if ((struct easycap *)NULL == peasycap) {
+if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
if (peasycap->video_isoc_streaming) {
-
-
-
if ((struct list_head *)NULL != peasycap->purb_video_head) {
peasycap->video_isoc_streaming = 0;
- JOT(4, "killing video urbs\n");
+ JOM(4, "killing video urbs\n");
m = 0;
list_for_each(plist_head, (peasycap->purb_video_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, \
list_head);
- if ((struct data_urb *)NULL != pdata_urb) {
- if ((struct urb *)NULL != pdata_urb->purb) {
+ if (NULL != pdata_urb) {
+ if (NULL != pdata_urb->purb) {
usb_kill_urb(pdata_urb->purb);
m++;
}
}
}
- JOT(4, "%i video urbs killed\n", m);
+ JOM(4, "%i video urbs killed\n", m);
} else {
- SAY("ERROR: peasycap->purb_video_head is NULL\n");
+ SAM("ERROR: peasycap->purb_video_head is NULL\n");
return -EFAULT;
}
} else {
- JOT(8, "%i=video_isoc_streaming, no video urbs killed\n", \
+ JOM(8, "%i=video_isoc_streaming, no video urbs killed\n", \
peasycap->video_isoc_streaming);
}
return 0;
@@ -533,11 +840,15 @@ if (NULL == peasycap) {
SAY("ending unsuccessfully\n");
return -EFAULT;
}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
if (0 != kill_video_urbs(peasycap)) {
- SAY("ERROR: kill_video_urbs() failed\n");
+ SAM("ERROR: kill_video_urbs() failed\n");
return -EFAULT;
}
-JOT(4, "ending successfully\n");
+JOM(4, "ending successfully\n");
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#else
#
@@ -550,63 +861,45 @@ return 0;
/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
int
-videodev_release(struct video_device *pvd)
+videodev_release(struct video_device *pvideo_device)
{
struct easycap *peasycap;
-int i, j, k;
JOT(4, "\n");
-k = 0;
-for (i = 0; i < video_device_many; i++) {
- pvideo_device = pvideo_array[i];
- if ((struct video_device *)NULL != pvideo_device) {
- if (pvd->minor == pvideo_device->minor) {
- peasycap = (struct easycap *)\
- video_get_drvdata(pvideo_device);
- if ((struct easycap *)NULL == peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- SAY("ending unsuccessfully\n");
- return -EFAULT;
- }
- if (0 != kill_video_urbs(peasycap)) {
- SAY("ERROR: kill_video_urbs() failed\n");
- return -EFAULT;
- }
- JOT(4, "freeing video_device structure: " \
- "/dev/video%i\n", i);
- kfree((void *)pvideo_device);
- for (j = i; j < (VIDEO_DEVICE_MANY - 1); j++)
- pvideo_array[j] = pvideo_array[j + 1];
- video_device_many--; k++;
- break;
- }
- }
-}
-if (!k) {
- SAY("ERROR: lost video_device structure for %i=minor\n", pvd->minor);
- SAY("cannot free: may cause memory leak\n");
+peasycap = video_get_drvdata(pvideo_device);
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
SAY("ending unsuccessfully\n");
return -EFAULT;
}
-
-JOT(4, "ending successfully\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
+if (0 != kill_video_urbs(peasycap)) {
+ SAM("ERROR: kill_video_urbs() failed\n");
+ return -EFAULT;
+}
+JOM(4, "ending successfully\n");
return 0;
}
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-/****************************************************************************/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*****************************************************************************/
/*--------------------------------------------------------------------------*/
/*
- * THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect().
- * BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED.
- * peasycap->pusb_device IS NO LONGER VALID AND SHOULD HAVE BEEN SET TO NULL.
+ * THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect() AND IS
+ * PROTECTED BY SEMAPHORES SET AND CLEARED BY easycap_usb_disconnect().
+ *
+ * BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED, SO
+ * peasycap->pusb_device IS NO LONGER VALID.
*/
/*---------------------------------------------------------------------------*/
void
easycap_delete(struct kref *pkref)
{
-int k, m, lost;
+int k, m, gone, kd;
int allocation_video_urb, allocation_video_page, allocation_video_struct;
int allocation_audio_urb, allocation_audio_page, allocation_audio_struct;
int registered_video, registered_audio;
@@ -617,22 +910,27 @@ struct list_head *plist_head, *plist_next;
JOT(4, "\n");
peasycap = container_of(pkref, struct easycap, kref);
-if ((struct easycap *)NULL == peasycap) {
- SAY("ERROR: peasycap is NULL: cannot perform deletions\n");
+if (NULL == peasycap) {
+ SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
+ return;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
return;
}
+kd = isdongle(peasycap);
/*---------------------------------------------------------------------------*/
/*
* FREE VIDEO.
*/
/*---------------------------------------------------------------------------*/
if ((struct list_head *)NULL != peasycap->purb_video_head) {
- JOT(4, "freeing video urbs\n");
+ JOM(4, "freeing video urbs\n");
m = 0;
list_for_each(plist_head, (peasycap->purb_video_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
if (NULL == pdata_urb)
- JOT(4, "ERROR: pdata_urb is NULL\n");
+ JOM(4, "ERROR: pdata_urb is NULL\n");
else {
if ((struct urb *)NULL != pdata_urb->purb) {
usb_free_urb(pdata_urb->purb);
@@ -643,9 +941,9 @@ if ((struct list_head *)NULL != peasycap->purb_video_head) {
}
}
- JOT(4, "%i video urbs freed\n", m);
+ JOM(4, "%i video urbs freed\n", m);
/*---------------------------------------------------------------------------*/
- JOT(4, "freeing video data_urb structures.\n");
+ JOM(4, "freeing video data_urb structures.\n");
m = 0;
list_for_each_safe(plist_head, plist_next, peasycap->purb_video_head) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
@@ -656,14 +954,12 @@ if ((struct list_head *)NULL != peasycap->purb_video_head) {
m++;
}
}
- JOT(4, "%i video data_urb structures freed\n", m);
- JOT(4, "setting peasycap->purb_video_head=NULL\n");
+ JOM(4, "%i video data_urb structures freed\n", m);
+ JOM(4, "setting peasycap->purb_video_head=NULL\n");
peasycap->purb_video_head = (struct list_head *)NULL;
- } else {
-JOT(4, "peasycap->purb_video_head is NULL\n");
}
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing video isoc buffers.\n");
+JOM(4, "freeing video isoc buffers.\n");
m = 0;
for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
if ((void *)NULL != peasycap->video_isoc_buffer[k].pgo) {
@@ -676,10 +972,10 @@ for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
m++;
}
}
-JOT(4, "isoc video buffers freed: %i pages\n", m * (0x01 << VIDEO_ISOC_ORDER));
+JOM(4, "isoc video buffers freed: %i pages\n", m * (0x01 << VIDEO_ISOC_ORDER));
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing video field buffers.\n");
-lost = 0;
+JOM(4, "freeing video field buffers.\n");
+gone = 0;
for (k = 0; k < FIELD_BUFFER_MANY; k++) {
for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
@@ -687,14 +983,14 @@ for (k = 0; k < FIELD_BUFFER_MANY; k++) {
(peasycap->field_buffer[k][m].pgo));
peasycap->field_buffer[k][m].pgo = (void *)NULL;
peasycap->allocation_video_page -= 1;
- lost++;
+ gone++;
}
}
}
-JOT(4, "video field buffers freed: %i pages\n", lost);
+JOM(4, "video field buffers freed: %i pages\n", gone);
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing video frame buffers.\n");
-lost = 0;
+JOM(4, "freeing video frame buffers.\n");
+gone = 0;
for (k = 0; k < FRAME_BUFFER_MANY; k++) {
for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
if ((void *)NULL != peasycap->frame_buffer[k][m].pgo) {
@@ -702,23 +998,23 @@ for (k = 0; k < FRAME_BUFFER_MANY; k++) {
(peasycap->frame_buffer[k][m].pgo));
peasycap->frame_buffer[k][m].pgo = (void *)NULL;
peasycap->allocation_video_page -= 1;
- lost++;
+ gone++;
}
}
}
-JOT(4, "video frame buffers freed: %i pages\n", lost);
+JOM(4, "video frame buffers freed: %i pages\n", gone);
/*---------------------------------------------------------------------------*/
/*
* FREE AUDIO.
*/
/*---------------------------------------------------------------------------*/
if ((struct list_head *)NULL != peasycap->purb_audio_head) {
- JOT(4, "freeing audio urbs\n");
+ JOM(4, "freeing audio urbs\n");
m = 0;
list_for_each(plist_head, (peasycap->purb_audio_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
if (NULL == pdata_urb)
- JOT(4, "ERROR: pdata_urb is NULL\n");
+ JOM(4, "ERROR: pdata_urb is NULL\n");
else {
if ((struct urb *)NULL != pdata_urb->purb) {
usb_free_urb(pdata_urb->purb);
@@ -728,9 +1024,9 @@ if ((struct list_head *)NULL != peasycap->purb_audio_head) {
}
}
}
- JOT(4, "%i audio urbs freed\n", m);
+ JOM(4, "%i audio urbs freed\n", m);
/*---------------------------------------------------------------------------*/
- JOT(4, "freeing audio data_urb structures.\n");
+ JOM(4, "freeing audio data_urb structures.\n");
m = 0;
list_for_each_safe(plist_head, plist_next, peasycap->purb_audio_head) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
@@ -741,14 +1037,12 @@ if ((struct list_head *)NULL != peasycap->purb_audio_head) {
m++;
}
}
-JOT(4, "%i audio data_urb structures freed\n", m);
-JOT(4, "setting peasycap->purb_audio_head=NULL\n");
+JOM(4, "%i audio data_urb structures freed\n", m);
+JOM(4, "setting peasycap->purb_audio_head=NULL\n");
peasycap->purb_audio_head = (struct list_head *)NULL;
-} else {
-JOT(4, "peasycap->purb_audio_head is NULL\n");
}
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing audio isoc buffers.\n");
+JOM(4, "freeing audio isoc buffers.\n");
m = 0;
for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
if ((void *)NULL != peasycap->audio_isoc_buffer[k].pgo) {
@@ -761,22 +1055,22 @@ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
m++;
}
}
-JOT(4, "easysnd_delete(): isoc audio buffers freed: %i pages\n", \
+JOM(4, "easysnd_delete(): isoc audio buffers freed: %i pages\n", \
m * (0x01 << AUDIO_ISOC_ORDER));
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing audio buffers.\n");
-lost = 0;
+JOM(4, "freeing audio buffers.\n");
+gone = 0;
for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
free_page((unsigned long)(peasycap->audio_buffer[k].pgo));
peasycap->audio_buffer[k].pgo = (void *)NULL;
peasycap->allocation_audio_page -= 1;
- lost++;
+ gone++;
}
}
-JOT(4, "easysnd_delete(): audio buffers freed: %i pages\n", lost);
+JOM(4, "easysnd_delete(): audio buffers freed: %i pages\n", gone);
/*---------------------------------------------------------------------------*/
-JOT(4, "freeing easycap structure.\n");
+JOM(4, "freeing easycap structure.\n");
allocation_video_urb = peasycap->allocation_video_urb;
allocation_video_page = peasycap->allocation_video_page;
allocation_video_struct = peasycap->allocation_video_struct;
@@ -785,15 +1079,16 @@ allocation_audio_urb = peasycap->allocation_audio_urb;
allocation_audio_page = peasycap->allocation_audio_page;
allocation_audio_struct = peasycap->allocation_audio_struct;
registered_audio = peasycap->registered_audio;
-m = 0;
-if ((struct easycap *)NULL != peasycap) {
- kfree(peasycap); peasycap = (struct easycap *)NULL;
+
+kfree(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+ easycap_dongle[kd].peasycap = (struct easycap *)NULL;
+ JOT(4, " null-->easycap_dongle[%i].peasycap\n", kd);
allocation_video_struct -= sizeof(struct easycap);
- m++;
+} else {
+ SAY("ERROR: cannot purge easycap_dongle[].peasycap");
}
-JOT(4, "%i easycap structure freed\n", m);
/*---------------------------------------------------------------------------*/
-
SAY("%8i= video urbs after all deletions\n", allocation_video_urb);
SAY("%8i= video pages after all deletions\n", allocation_video_page);
SAY("%8i= video structs after all deletions\n", allocation_video_struct);
@@ -810,27 +1105,85 @@ return;
unsigned int easycap_poll(struct file *file, poll_table *wait)
{
struct easycap *peasycap;
+int rc, kd;
JOT(8, "\n");
if (NULL == ((poll_table *)wait))
JOT(8, "WARNING: poll table pointer is NULL ... continuing\n");
-if (NULL == ((struct file *)file)) {
+if ((struct file *)NULL == file) {
SAY("ERROR: file pointer is NULL\n");
- return -EFAULT;
+ return -ERESTARTSYS;
}
peasycap = file->private_data;
if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
+if (NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+ SAY("ERROR: cannot down easycap_dongle[%i].mutex_video\n", kd);
+ return -ERESTARTSYS;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+ /*-------------------------------------------------------------------*/
+ /*
+ * MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER
+ * peasycap, IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ * IF NECESSARY, BAIL OUT.
+ */
+ /*-------------------------------------------------------------------*/
+ if (kd != isdongle(peasycap))
+ return -ERESTARTSYS;
+ if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+ peasycap = file->private_data;
+ if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+ if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", \
+ (unsigned long int) peasycap);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+ if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return -ERESTARTSYS;
+ }
+} else
+ /*-------------------------------------------------------------------*/
+ /*
+ * IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap
+ * BEFORE THE ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL
+ * HAVE FAILED. BAIL OUT.
+ */
+ /*-------------------------------------------------------------------*/
+ return -ERESTARTSYS;
+/*---------------------------------------------------------------------------*/
+rc = easycap_dqbuf(peasycap, 0);
peasycap->polled = 1;
-
-if (0 == easycap_dqbuf(peasycap, 0))
+mutex_unlock(&easycap_dongle[kd].mutex_video);
+if (0 == rc)
return POLLIN | POLLRDNORM;
else
return POLLERR;
-
}
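
The comments inside easycap_poll() spell out the race this rewrite guards against: easycap_usb_disconnect() can free peasycap at any point before the dongle mutex is taken, so the pointer obtained from file->private_data has to be re-derived and re-validated (dongle index, telltale signature, pusb_device) once the lock is held. A stripped-down sketch of that lock-and-revalidate pattern, with hypothetical names (struct dev, dongle_of(), dongles[]) standing in for the driver's own; initialization of the mutexes is assumed to happen at module load:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/errno.h>

#define DONGLE_MANY 8
#define TELLTALE "easycap"

struct dev { char telltale[16]; /* ... per-device state ... */ };

static struct { struct dev *pdev; struct mutex mutex_video; } dongles[DONGLE_MANY];

static int dongle_of(struct dev *pdev)  /* -1 if not (or no longer) registered */
{
        int k;

        for (k = 0; k < DONGLE_MANY; k++)
                if (dongles[k].pdev == pdev)
                        return k;
        return -1;
}

static int guarded_op(struct file *file)
{
        struct dev *pdev = file->private_data;
        int kd = dongle_of(pdev);

        if (kd < 0)
                return -ERESTARTSYS;            /* already disconnected */
        if (mutex_lock_interruptible(&dongles[kd].mutex_video))
                return -ERESTARTSYS;
        /*
         * The structure may have been freed while we slept on the mutex.
         * Only the pointer comparison in dongle_of() is safe until the
         * slot is confirmed; dereferencing (the telltale check) comes after.
         */
        pdev = file->private_data;
        if (!pdev || dongle_of(pdev) != kd ||
            memcmp(pdev->telltale, TELLTALE, strlen(TELLTALE))) {
                mutex_unlock(&dongles[kd].mutex_video);
                return -ERESTARTSYS;
        }
        /* ... operate on *pdev under the mutex ... */
        mutex_unlock(&dongles[kd].mutex_video);
        return 0;
}

Note that the sketch drops the mutex on every bail-out path; the essential ordering in either case is compare-then-dereference.
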
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
@@ -841,7 +1194,7 @@ else
int
easycap_dqbuf(struct easycap *peasycap, int mode)
{
-int miss, rc;
+int input, ifield, miss, rc;
JOT(8, "\n");
@@ -849,129 +1202,188 @@ if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
+if (NULL == peasycap->pusb_device) {
+ SAY("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+ifield = 0;
+JOM(8, "%i=ifield\n", ifield);
+/*---------------------------------------------------------------------------*/
+/*
+ * CHECK FOR LOST INPUT SIGNAL.
+ *
+ * FOR THE FOUR-CVBS EasyCAP, THIS DOES NOT WORK AS EXPECTED.
+ * IF INPUT 0 IS PRESENT AND SYNC ACQUIRED, UNPLUGGING INPUT 4 DOES NOT
+ * RESULT IN SETTING BIT 0x40 ON REGISTER 0x1F, PRESUMABLY BECAUSE THERE
+ * IS FLYWHEELING ON INPUT 0. THE UPSHOT IS:
+ *
+ * INPUT 0 PLUGGED, INPUT 4 PLUGGED => SCREEN 0 OK, SCREEN 4 OK
+ * INPUT 0 PLUGGED, INPUT 4 UNPLUGGED => SCREEN 0 OK, SCREEN 4 BLACK
+ * INPUT 0 UNPLUGGED, INPUT 4 PLUGGED => SCREEN 0 BARS, SCREEN 4 OK
+ * INPUT 0 UNPLUGGED, INPUT 4 UNPLUGGED => SCREEN 0 BARS, SCREEN 4 BARS
+*/
+/*---------------------------------------------------------------------------*/
+input = peasycap->input;
+if (0 <= input && INPUT_MANY > input) {
+ rc = read_saa(peasycap->pusb_device, 0x1F);
+ if (0 <= rc) {
+ if (rc & 0x40)
+ peasycap->lost[input] += 1;
+ else
+ peasycap->lost[input] -= 2;
+
+ if (0 > peasycap->lost[input])
+ peasycap->lost[input] = 0;
+ else if ((2 * VIDEO_LOST_TOLERATE) < peasycap->lost[input])
+ peasycap->lost[input] = (2 * VIDEO_LOST_TOLERATE);
+ }
+}
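
The lost[] arithmetic above is an asymmetric saturating counter: a frame whose SAA status register 0x1F has bit 0x40 set (the no-sync condition described in the comment) adds one, a good frame subtracts two, and the result is clamped to the range 0 .. 2*VIDEO_LOST_TOLERATE. field2frame() substitutes the test card only once the counter reaches VIDEO_LOST_TOLERATE, so an isolated glitch never blanks the picture and a recovered signal clears the bars roughly twice as fast as they appeared. The same counter in isolation; the VIDEO_LOST_TOLERATE value here is assumed for illustration, not taken from the driver:

#define VIDEO_LOST_TOLERATE 6   /* assumed for illustration */

static int lost;                /* the driver keeps one counter per input */

/* Returns nonzero once sync has been missing persistently. */
static int update_lost(int sync_missing)
{
        if (sync_missing)
                lost += 1;      /* climb slowly on bad frames   */
        else
                lost -= 2;      /* recover quickly on good ones */

        if (lost < 0)
                lost = 0;
        else if (lost > 2 * VIDEO_LOST_TOLERATE)
                lost = 2 * VIDEO_LOST_TOLERATE;

        return lost >= VIDEO_LOST_TOLERATE;
}
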
/*---------------------------------------------------------------------------*/
/*
- * WAIT FOR FIELD 0
+ * WAIT FOR FIELD ifield (0 => TOP, 1 => BOTTOM)
*/
/*---------------------------------------------------------------------------*/
miss = 0;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
- return -ERESTARTSYS;
while ((peasycap->field_read == peasycap->field_fill) || \
(0 != (0xFF00 & peasycap->field_buffer\
[peasycap->field_read][0].kount)) || \
- (0 != (0x00FF & peasycap->field_buffer\
+ (ifield != (0x00FF & peasycap->field_buffer\
[peasycap->field_read][0].kount))) {
- mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-
if (mode)
return -EAGAIN;
- JOT(8, "first wait on wq_video, " \
+ JOM(8, "first wait on wq_video, " \
"%i=field_read %i=field_fill\n", \
peasycap->field_read, peasycap->field_fill);
- msleep(1);
if (0 != (wait_event_interruptible(peasycap->wq_video, \
(peasycap->video_idle || peasycap->video_eof || \
((peasycap->field_read != peasycap->field_fill) && \
(0 == (0xFF00 & peasycap->field_buffer\
[peasycap->field_read][0].kount)) && \
- (0 == (0x00FF & peasycap->field_buffer\
- [peasycap->field_read][0].kount))))))){
- SAY("aborted by signal\n");
+ (ifield == (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].kount))))))) {
+ SAM("aborted by signal\n");
return -EIO;
}
if (peasycap->video_idle) {
- JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
- return -EIO;
+ JOM(8, "%i=peasycap->video_idle ... returning -EAGAIN\n", \
+ peasycap->video_idle);
+ return -EAGAIN;
}
if (peasycap->video_eof) {
- JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
- debrief(peasycap);
+ JOM(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+ #if defined(PERSEVERE)
+ if (1 == peasycap->status) {
+ JOM(8, "persevering ...\n");
+ peasycap->video_eof = 0;
+ peasycap->audio_eof = 0;
+ if (0 != reset(peasycap)) {
+ JOM(8, " ... failed ... returning -EIO\n");
+ peasycap->video_eof = 1;
+ peasycap->audio_eof = 1;
+ kill_video_urbs(peasycap);
+ return -EIO;
+ }
+ peasycap->status = 0;
+ JOM(8, " ... OK ... returning -EAGAIN\n");
+ return -EAGAIN;
+ }
+ #endif /*PERSEVERE*/
+ peasycap->video_eof = 1;
+ peasycap->audio_eof = 1;
kill_video_urbs(peasycap);
+ JOM(8, "returning -EIO\n");
return -EIO;
}
miss++;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
- return -ERESTARTSYS;
}
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-JOT(8, "first awakening on wq_video after %i waits\n", miss);
+JOM(8, "first awakening on wq_video after %i waits\n", miss);
rc = field2frame(peasycap);
if (0 != rc)
- SAY("ERROR: field2frame() returned %i\n", rc);
-
-if (true == peasycap->offerfields) {
- peasycap->frame_read = peasycap->frame_fill;
- (peasycap->frame_fill)++;
- if (peasycap->frame_buffer_many <= peasycap->frame_fill)
- peasycap->frame_fill = 0;
-
- if (0x01 & easycap_standard[peasycap->standard_offset].mask) {
- peasycap->frame_buffer[peasycap->frame_read][0].kount = \
- V4L2_FIELD_BOTTOM;
- } else {
- peasycap->frame_buffer[peasycap->frame_read][0].kount = \
- V4L2_FIELD_TOP;
- }
-JOT(8, "setting: %i=peasycap->frame_read\n", peasycap->frame_read);
-JOT(8, "bumped to: %i=peasycap->frame_fill\n", peasycap->frame_fill);
-}
+ SAM("ERROR: field2frame() returned %i\n", rc);
/*---------------------------------------------------------------------------*/
/*
- * WAIT FOR FIELD 1
+ * WAIT FOR THE OTHER FIELD
*/
/*---------------------------------------------------------------------------*/
+if (ifield)
+ ifield = 0;
+else
+ ifield = 1;
miss = 0;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
- return -ERESTARTSYS;
while ((peasycap->field_read == peasycap->field_fill) || \
(0 != (0xFF00 & peasycap->field_buffer\
[peasycap->field_read][0].kount)) || \
- (0 == (0x00FF & peasycap->field_buffer\
+ (ifield != (0x00FF & peasycap->field_buffer\
[peasycap->field_read][0].kount))) {
- mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-
if (mode)
return -EAGAIN;
- JOT(8, "second wait on wq_video, " \
+ JOM(8, "second wait on wq_video, " \
"%i=field_read %i=field_fill\n", \
peasycap->field_read, peasycap->field_fill);
- msleep(1);
if (0 != (wait_event_interruptible(peasycap->wq_video, \
(peasycap->video_idle || peasycap->video_eof || \
((peasycap->field_read != peasycap->field_fill) && \
(0 == (0xFF00 & peasycap->field_buffer\
[peasycap->field_read][0].kount)) && \
- (0 != (0x00FF & peasycap->field_buffer\
- [peasycap->field_read][0].kount))))))){
- SAY("aborted by signal\n");
+ (ifield == (0x00FF & peasycap->field_buffer\
+ [peasycap->field_read][0].\
+ kount))))))) {
+ SAM("aborted by signal\n");
return -EIO;
}
if (peasycap->video_idle) {
- JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
- return -EIO;
+ JOM(8, "%i=peasycap->video_idle ... returning -EAGAIN\n", \
+ peasycap->video_idle);
+ return -EAGAIN;
}
if (peasycap->video_eof) {
- JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
- debrief(peasycap);
+ JOM(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+ #if defined(PERSEVERE)
+ if (1 == peasycap->status) {
+ JOM(8, "persevering ...\n");
+ peasycap->video_eof = 0;
+ peasycap->audio_eof = 0;
+ if (0 != reset(peasycap)) {
+ JOM(8, " ... failed ... returning -EIO\n");
+ peasycap->video_eof = 1;
+ peasycap->audio_eof = 1;
+ kill_video_urbs(peasycap);
+ return -EIO;
+ }
+ peasycap->status = 0;
+ JOM(8, " ... OK ... returning -EAGAIN\n");
+ return -EAGAIN;
+ }
+ #endif /*PERSEVERE*/
+ peasycap->video_eof = 1;
+ peasycap->audio_eof = 1;
kill_video_urbs(peasycap);
+ JOM(8, "returning -EIO\n");
return -EIO;
}
miss++;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
- return -ERESTARTSYS;
}
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-JOT(8, "second awakening on wq_video after %i waits\n", miss);
+JOM(8, "second awakening on wq_video after %i waits\n", miss);
rc = field2frame(peasycap);
if (0 != rc)
- SAY("ERROR: field2frame() returned %i\n", rc);
-
+ SAM("ERROR: field2frame() returned %i\n", rc);
+/*---------------------------------------------------------------------------*/
+/*
+ * WASTE THIS FRAME
+*/
+/*---------------------------------------------------------------------------*/
+if (0 != peasycap->skip) {
+ peasycap->skipped++;
+ if (peasycap->skip != peasycap->skipped)
+ return peasycap->skip - peasycap->skipped;
+ peasycap->skipped = 0;
+}
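
The skip/skipped pair above decimates the frame rate: with skip set to N, the first N-1 completed frames return early with a positive countdown, so the frame bookkeeping below never runs and those frames are quietly dropped, and every Nth frame falls through to be delivered. For example, with skip = 3 successive calls return 2, 1, 0, 2, 1, 0, ... and only every third frame reaches the reader. A self-contained sketch of the counter; how callers interpret the positive return value is left to the driver, and the way skip gets configured is assumed here:

static int skip = 3, skipped;   /* skip is assumed to be set elsewhere */

/* Returns 0 when the current frame should be delivered, otherwise the
 * number of further frames that will be wasted before the next delivery. */
static int waste_this_frame(void)
{
        if (skip != 0) {
                skipped++;
                if (skipped != skip)
                        return skip - skipped;
                skipped = 0;
        }
        return 0;
}
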
+/*---------------------------------------------------------------------------*/
peasycap->frame_read = peasycap->frame_fill;
peasycap->queued[peasycap->frame_read] = 0;
peasycap->done[peasycap->frame_read] = V4L2_BUF_FLAG_DONE;
@@ -988,8 +1400,8 @@ if (0x01 & easycap_standard[peasycap->standard_offset].mask) {
V4L2_FIELD_BOTTOM;
}
-JOT(8, "setting: %i=peasycap->frame_read\n", peasycap->frame_read);
-JOT(8, "bumped to: %i=peasycap->frame_fill\n", peasycap->frame_fill);
+JOM(8, "setting: %i=peasycap->frame_read\n", peasycap->frame_read);
+JOM(8, "bumped to: %i=peasycap->frame_fill\n", peasycap->frame_fill);
return 0;
}
@@ -1003,14 +1415,12 @@ return 0;
* odd==false IS TRANSFERRED TO THE FRAME BUFFER.
*
* THE BOOLEAN PARAMETER offerfields IS true ONLY WHEN THE USER PROGRAM
- * CHOOSES THE OPTION V4L2_FIELD_ALTERNATE. NO USERSPACE PROGRAM TESTED
- * TO DATE HAS DONE THIS. BUGS ARE LIKELY.
+ * CHOOSES THE OPTION V4L2_FIELD_INTERLACED.
*/
/*---------------------------------------------------------------------------*/
int
field2frame(struct easycap *peasycap)
{
-static struct timeval timeval0;
struct timeval timeval;
long long int above, below;
__u32 remainder;
@@ -1019,16 +1429,26 @@ struct signed_div_result sdr;
void *pex, *pad;
int kex, kad, mex, mad, rex, rad, rad2;
int c2, c3, w2, w3, cz, wz;
-int rc, bytesperpixel, multiplier, much, more, over, rump, caches;
+int rc, bytesperpixel, multiplier, much, more, over, rump, caches, input;
__u8 mask, margin;
-bool odd, isuy, decimatepixel, offerfields;
+bool odd, isuy, decimatepixel, offerfields, badinput;
+
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
-JOT(8, "===== parity %i, field buffer %i --> frame buffer %i\n", \
+badinput = false;
+input = 0x07 & peasycap->field_buffer[peasycap->field_read][0].input;
+
+JOM(8, "===== parity %i, input 0x%02X, field buffer %i --> " \
+ "frame buffer %i\n", \
peasycap->field_buffer[peasycap->field_read][0].kount,\
+ peasycap->field_buffer[peasycap->field_read][0].input,\
peasycap->field_read, peasycap->frame_fill);
-JOT(8, "===== %i=bytesperpixel\n", peasycap->bytesperpixel);
+JOM(8, "===== %i=bytesperpixel\n", peasycap->bytesperpixel);
if (true == peasycap->offerfields)
- JOT(8, "===== offerfields\n");
+ JOM(8, "===== offerfields\n");
/*---------------------------------------------------------------------------*/
/*
@@ -1036,15 +1456,17 @@ if (true == peasycap->offerfields)
*/
/*---------------------------------------------------------------------------*/
if (peasycap->field_read == peasycap->field_fill) {
- SAY("ERROR: on entry, still filling field buffer %i\n", \
+ SAM("ERROR: on entry, still filling field buffer %i\n", \
peasycap->field_read);
return 0;
}
#if defined(EASYCAP_TESTCARD)
easycap_testcard(peasycap, peasycap->field_read);
#else
-if (0 != (0x0400 & peasycap->field_buffer[peasycap->field_read][0].kount))
- easycap_testcard(peasycap, peasycap->field_read);
+if (0 <= input && INPUT_MANY > input) {
+ if (easycap_bars && VIDEO_LOST_TOLERATE <= peasycap->lost[input])
+ easycap_testcard(peasycap, peasycap->field_read);
+}
#endif /*EASYCAP_TESTCARD*/
/*---------------------------------------------------------------------------*/
@@ -1055,7 +1477,7 @@ decimatepixel = peasycap->decimatepixel;
if ((2 != bytesperpixel) && \
(3 != bytesperpixel) && \
(4 != bytesperpixel)) {
- SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+ SAM("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
return -EFAULT;
}
if (true == decimatepixel)
@@ -1082,8 +1504,8 @@ if (peasycap->field_buffer[kex][0].kount)
else
odd = false;
-if ((true == odd) && (false == offerfields) &&(false == decimatepixel)) {
- JOT(8, " initial skipping %4i bytes p.%4i\n", \
+if ((true == odd) && (false == decimatepixel)) {
+ JOM(8, " initial skipping %4i bytes p.%4i\n", \
w3/multiplier, mad);
pad += (w3 / multiplier); rad -= (w3 / multiplier);
}
@@ -1108,7 +1530,7 @@ while (cz < wz) {
rump = 0;
if (much % 2) {
- SAY("MISTAKE: much is odd\n");
+ SAM("MISTAKE: much is odd\n");
return -EFAULT;
}
@@ -1116,13 +1538,11 @@ while (cz < wz) {
much) / 2;
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
if (1 < bytesperpixel) {
- if ((rad * \
- 2) < (much * \
- bytesperpixel)) {
+ if (rad * 2 < much * bytesperpixel) {
/*
** INJUDICIOUS ALTERATION OF THIS
- ** BLOCK WILL CAUSE BREAKAGE.
- ** BEWARE.
+ ** STATEMENT BLOCK WILL CAUSE
+ ** BREAKAGE. BEWARE.
**/
rad2 = rad + bytesperpixel - 1;
much = ((((2 * \
@@ -1145,18 +1565,25 @@ while (cz < wz) {
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
} else {
- SAY("MISTAKE: %i=bytesperpixel\n", \
+ SAM("MISTAKE: %i=bytesperpixel\n", \
bytesperpixel);
return -EFAULT;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
if (rump)
caches++;
-
+ if (true == badinput) {
+ JOM(8, "ERROR: 0x%02X=->field_buffer" \
+ "[%i][%i].input, " \
+ "0x%02X=(0x08|->input)\n", \
+ peasycap->field_buffer\
+ [kex][mex].input, kex, mex, \
+ (0x08|peasycap->input));
+ }
rc = redaub(peasycap, pad, pex, much, more, \
mask, margin, isuy);
if (0 > rc) {
- SAY("ERROR: redaub() failed\n");
+ SAM("ERROR: redaub() failed\n");
return -EFAULT;
}
if (much % 4) {
@@ -1171,6 +1598,9 @@ while (cz < wz) {
mex++;
pex = peasycap->field_buffer[kex][mex].pgo;
rex = PAGE_SIZE;
+ if (peasycap->field_buffer[kex][mex].input != \
+ (0x08|peasycap->input))
+ badinput = true;
}
pad += more;
rad -= more;
@@ -1190,7 +1620,7 @@ while (cz < wz) {
* UNLESS IT IS THE LAST LINE OF AN ODD FRAME
*/
/*---------------------------------------------------------------------------*/
- if (((false == odd) || (cz != wz))&&(false == offerfields)) {
+ if ((false == odd) || (cz != wz)) {
over = w3;
do {
if (!rad) {
@@ -1224,7 +1654,7 @@ while (cz < wz) {
rump = 0;
if (much % 2) {
- SAY("MISTAKE: much is odd\n");
+ SAM("MISTAKE: much is odd\n");
return -EFAULT;
}
@@ -1232,12 +1662,11 @@ while (cz < wz) {
much) / 4;
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
if (1 < bytesperpixel) {
- if ((rad * 4) < (much * \
- bytesperpixel)) {
+ if (rad * 4 < much * bytesperpixel) {
/*
** INJUDICIOUS ALTERATION OF THIS
- ** BLOCK WILL CAUSE BREAKAGE.
- ** BEWARE.
+ ** STATEMENT BLOCK WILL CAUSE
+ ** BREAKAGE. BEWARE.
**/
rad2 = rad + bytesperpixel - 1;
much = ((((2 * rad2)/bytesperpixel)/2)\
@@ -1261,7 +1690,7 @@ while (cz < wz) {
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
} else {
- SAY("MISTAKE: %i=bytesperpixel\n", \
+ SAM("MISTAKE: %i=bytesperpixel\n", \
bytesperpixel);
return -EFAULT;
}
@@ -1269,10 +1698,18 @@ while (cz < wz) {
if (rump)
caches++;
+ if (true == badinput) {
+ JOM(8, "ERROR: 0x%02X=->field_buffer" \
+ "[%i][%i].input, " \
+ "0x%02X=(0x08|->input)\n", \
+ peasycap->field_buffer\
+ [kex][mex].input, kex, mex, \
+ (0x08|peasycap->input));
+ }
rc = redaub(peasycap, pad, pex, much, more, \
mask, margin, isuy);
if (0 > rc) {
- SAY("ERROR: redaub() failed\n");
+ SAM("ERROR: redaub() failed\n");
return -EFAULT;
}
over -= much; cz += much;
@@ -1281,6 +1718,9 @@ while (cz < wz) {
mex++;
pex = peasycap->field_buffer[kex][mex].pgo;
rex = PAGE_SIZE;
+ if (peasycap->field_buffer[kex][mex].input != \
+ (0x08|peasycap->input))
+ badinput = true;
}
pad += more;
rad -= more;
@@ -1307,6 +1747,16 @@ while (cz < wz) {
mex++;
pex = peasycap->field_buffer[kex][mex].pgo;
rex = PAGE_SIZE;
+ if (peasycap->field_buffer[kex][mex].input != \
+ (0x08|peasycap->input)) {
+ JOM(8, "ERROR: 0x%02X=->field_buffer"\
+ "[%i][%i].input, " \
+ "0x%02X=(0x08|->input)\n", \
+ peasycap->field_buffer\
+ [kex][mex].input, kex, mex, \
+ (0x08|peasycap->input));
+ badinput = true;
+ }
}
much = over;
if (rex < much)
@@ -1325,39 +1775,39 @@ while (cz < wz) {
/*---------------------------------------------------------------------------*/
c2 = (mex + 1)*PAGE_SIZE - rex;
if (cz != c2)
- SAY("ERROR: discrepancy %i in bytes read\n", c2 - cz);
+ SAM("ERROR: discrepancy %i in bytes read\n", c2 - cz);
c3 = (mad + 1)*PAGE_SIZE - rad;
if (false == decimatepixel) {
if (bytesperpixel * \
cz != c3) \
- SAY("ERROR: discrepancy %i in bytes written\n", \
+ SAM("ERROR: discrepancy %i in bytes written\n", \
c3 - (bytesperpixel * \
cz));
} else {
if (false == odd) {
if (bytesperpixel * \
cz != (4 * c3))
- SAY("ERROR: discrepancy %i in bytes written\n", \
+ SAM("ERROR: discrepancy %i in bytes written\n", \
(2*c3)-(bytesperpixel * \
cz));
} else {
if (0 != c3)
- SAY("ERROR: discrepancy %i " \
+ SAM("ERROR: discrepancy %i " \
"in bytes written\n", c3);
}
}
if (rump)
- SAY("ERROR: undischarged cache at end of line in frame buffer\n");
+ SAM("WORRY: undischarged cache at end of line in frame buffer\n");
-JOT(8, "===== field2frame(): %i bytes --> %i bytes (incl skip)\n", c2, c3);
-JOT(8, "===== field2frame(): %i=mad %i=rad\n", mad, rad);
+JOM(8, "===== field2frame(): %i bytes --> %i bytes (incl skip)\n", c2, c3);
+JOM(8, "===== field2frame(): %i=mad %i=rad\n", mad, rad);
if (true == odd)
- JOT(8, "+++++ field2frame(): frame buffer %i is full\n", kad);
+ JOM(8, "+++++ field2frame(): frame buffer %i is full\n", kad);
if (peasycap->field_read == peasycap->field_fill)
- SAY("WARNING: on exit, filling field buffer %i\n", \
+ SAM("WARNING: on exit, filling field buffer %i\n", \
peasycap->field_read);
/*---------------------------------------------------------------------------*/
/*
@@ -1365,23 +1815,24 @@ if (peasycap->field_read == peasycap->field_fill)
*/
/*---------------------------------------------------------------------------*/
do_gettimeofday(&timeval);
-if (timeval0.tv_sec) {
+if (peasycap->timeval6.tv_sec) {
below = ((long long int)(1000000)) * \
- ((long long int)(timeval.tv_sec - timeval0.tv_sec)) + \
- (long long int)(timeval.tv_usec - timeval0.tv_usec);
+ ((long long int)(timeval.tv_sec - \
+ peasycap->timeval6.tv_sec)) + \
+ (long long int)(timeval.tv_usec - peasycap->timeval6.tv_usec);
above = (long long int)1000000;
sdr = signed_div(above, below);
above = sdr.quotient;
remainder = (__u32)sdr.remainder;
- JOT(8, "video streaming at %3lli.%03i fields per second\n", above, \
+ JOM(8, "video streaming at %3lli.%03i fields per second\n", above, \
(remainder/1000));
}
-timeval0 = timeval;
+peasycap->timeval6 = timeval;
if (caches)
- JOT(8, "%i=caches\n", caches);
+ JOM(8, "%i=caches\n", caches);
return 0;
}
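
The timing block near the end of field2frame() measures the achieved field rate: the time of the previous call is kept in peasycap->timeval6, a per-device field replacing the old function-static timeval0 (which would have been shared by all devices), and one second is divided by the elapsed microseconds with signed_div(). The same arithmetic as a plain user-space illustration, with the fractional part computed explicitly:

#include <stdio.h>
#include <sys/time.h>

/* Print the field rate implied by two successive completion times. */
static void report_rate(struct timeval *prev, const struct timeval *now)
{
        long long us = 1000000LL * (now->tv_sec - prev->tv_sec) +
                       (now->tv_usec - prev->tv_usec);

        if (prev->tv_sec && us > 0)
                printf("%lld.%03lld fields per second\n",
                       1000000LL / us, (1000000LL % us) * 1000 / us);
        *prev = *now;
}
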
/*****************************************************************************/
@@ -1434,7 +1885,7 @@ redaub(struct easycap *peasycap, void *pad, void *pex, int much, int more, \
__u8 mask, __u8 margin, bool isuy)
{
static __s32 ay[256], bu[256], rv[256], gu[256], gv[256];
-static __u8 cache[8], *pcache;
+__u8 *pcache;
__u8 r, g, b, y, u, v, c, *p2, *p3, *pz, *pr;
int bytesperpixel;
bool byteswaporder, decimatepixel, last;
@@ -1442,7 +1893,7 @@ int j, rump;
__s32 s32;
if (much % 2) {
- SAY("MISTAKE: much is odd\n");
+ SAM("MISTAKE: much is odd\n");
return -EFAULT;
}
bytesperpixel = peasycap->bytesperpixel;
@@ -1475,30 +1926,31 @@ if (!bu[255]) {
ay[j] = ay[16];
for (j = 236; j < 256; j++)
ay[j] = ay[235];
- JOT(8, "lookup tables are prepared\n");
+ JOM(8, "lookup tables are prepared\n");
}
-if ((__u8 *)NULL == pcache)
- pcache = &cache[0];
+pcache = peasycap->pcache;
+if (NULL == pcache)
+ pcache = &peasycap->cache[0];
/*---------------------------------------------------------------------------*/
/*
* TRANSFER CONTENTS OF CACHE TO THE FRAME BUFFER
*/
/*---------------------------------------------------------------------------*/
if (!pcache) {
- SAY("MISTAKE: pcache is NULL\n");
+ SAM("MISTAKE: pcache is NULL\n");
return -EFAULT;
}
-if (pcache != &cache[0])
- JOT(16, "cache has %i bytes\n", (int)(pcache - &cache[0]));
-p2 = &cache[0];
-p3 = (__u8 *)pad - (int)(pcache - &cache[0]);
+if (pcache != &peasycap->cache[0])
+ JOM(16, "cache has %i bytes\n", (int)(pcache - &peasycap->cache[0]));
+p2 = &peasycap->cache[0];
+p3 = (__u8 *)pad - (int)(pcache - &peasycap->cache[0]);
while (p2 < pcache) {
*p3++ = *p2; p2++;
}
-pcache = &cache[0];
+pcache = &peasycap->cache[0];
if (p3 != pad) {
- SAY("MISTAKE: pointer misalignment\n");
+ SAM("MISTAKE: pointer misalignment\n");
return -EFAULT;
}
/*---------------------------------------------------------------------------*/
@@ -1513,7 +1965,7 @@ else
v = *(p2 - 1);
if (rump)
- JOT(16, "%4i=much %4i=more %i=rump\n", much, more, rump);
+ JOM(16, "%4i=much %4i=more %i=rump\n", much, more, rump);
/*---------------------------------------------------------------------------*/
switch (bytesperpixel) {
@@ -1619,7 +2071,7 @@ case 3:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = r;
@@ -1634,7 +2086,7 @@ case 3:
break;
}
default: {
- SAY("MISTAKE: %i=rump\n", \
+ SAM("MISTAKE: %i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
}
@@ -1692,7 +2144,7 @@ case 3:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = b;
@@ -1707,7 +2159,7 @@ case 3:
break;
}
default: {
- SAY("MISTAKE: %i=rump\n", \
+ SAM("MISTAKE: %i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
}
@@ -1768,7 +2220,7 @@ case 3:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = r;
@@ -1783,7 +2235,7 @@ case 3:
break;
}
default: {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"%i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
@@ -1844,7 +2296,7 @@ case 3:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = b;
@@ -1859,7 +2311,7 @@ case 3:
break;
}
default: {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"%i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
@@ -1924,7 +2376,7 @@ case 4:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = r;
@@ -1948,7 +2400,7 @@ case 4:
break;
}
default: {
- SAY("MISTAKE: %i=rump\n", \
+ SAM("MISTAKE: %i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
}
@@ -2006,7 +2458,7 @@ case 4:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = b;
@@ -2030,7 +2482,7 @@ case 4:
break;
}
default: {
- SAY("MISTAKE: %i=rump\n", \
+ SAM("MISTAKE: %i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
}
@@ -2093,7 +2545,7 @@ case 4:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = r;
@@ -2117,7 +2569,7 @@ case 4:
break;
}
default: {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"%i=rump\n", \
bytesperpixel - \
rump);
@@ -2178,7 +2630,7 @@ case 4:
0 : (__u8)s32);
if ((true == last) && rump) {
- pcache = &cache[0];
+ pcache = &peasycap->cache[0];
switch (bytesperpixel - rump) {
case 1: {
*p3 = b;
@@ -2202,7 +2654,7 @@ case 4:
break;
}
default: {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"%i=rump\n", \
bytesperpixel - rump);
return -EFAULT;
@@ -2226,48 +2678,13 @@ case 4:
break;
}
default: {
- SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+ SAM("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
return -EFAULT;
}
}
return 0;
}
/*****************************************************************************/
-void
-debrief(struct easycap *peasycap)
-{
-if ((struct usb_device *)NULL != peasycap->pusb_device) {
- check_stk(peasycap->pusb_device);
- check_saa(peasycap->pusb_device);
- sayreadonly(peasycap);
- SAY("%i=peasycap->field_fill\n", peasycap->field_fill);
- SAY("%i=peasycap->field_read\n", peasycap->field_read);
- SAY("%i=peasycap->frame_fill\n", peasycap->frame_fill);
- SAY("%i=peasycap->frame_read\n", peasycap->frame_read);
-}
-return;
-}
-/*****************************************************************************/
-void
-sayreadonly(struct easycap *peasycap)
-{
-static int done;
-int got00, got1F, got60, got61, got62;
-
-if ((!done) && ((struct usb_device *)NULL != peasycap->pusb_device)) {
- done = 1;
- got00 = read_saa(peasycap->pusb_device, 0x00);
- got1F = read_saa(peasycap->pusb_device, 0x1F);
- got60 = read_saa(peasycap->pusb_device, 0x60);
- got61 = read_saa(peasycap->pusb_device, 0x61);
- got62 = read_saa(peasycap->pusb_device, 0x62);
- SAY("0x%02X=reg0x00 0x%02X=reg0x1F\n", got00, got1F);
- SAY("0x%02X=reg0x60 0x%02X=reg0x61 0x%02X=reg0x62\n", \
- got60, got61, got62);
-}
-return;
-}
-/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* SEE CORBET ET AL. "LINUX DEVICE DRIVERS", 3rd EDITION, PAGES 430-434
@@ -2292,11 +2709,16 @@ easycap_vma_open(struct vm_area_struct *pvma)
struct easycap *peasycap;
peasycap = pvma->vm_private_data;
-if (NULL != peasycap)
- peasycap->vma_many++;
-
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return;
+}
+peasycap->vma_many++;
JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
-
return;
}
/*****************************************************************************/
@@ -2306,10 +2728,16 @@ easycap_vma_close(struct vm_area_struct *pvma)
struct easycap *peasycap;
peasycap = pvma->vm_private_data;
-if (NULL != peasycap) {
- peasycap->vma_many--;
- JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return;
}
+peasycap->vma_many--;
+JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
return;
}
/*****************************************************************************/
@@ -2355,24 +2783,22 @@ if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return retcode;
}
-mutex_lock(&(peasycap->mutex_mmap_video[0]));
/*---------------------------------------------------------------------------*/
pbuf = peasycap->frame_buffer[k][m].pgo;
if (NULL == pbuf) {
- SAY("ERROR: pbuf is NULL\n");
+ SAM("ERROR: pbuf is NULL\n");
goto finish;
}
page = virt_to_page(pbuf);
if (NULL == page) {
- SAY("ERROR: page is NULL\n");
+ SAM("ERROR: page is NULL\n");
goto finish;
}
get_page(page);
/*---------------------------------------------------------------------------*/
finish:
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
if (NULL == page) {
- SAY("ERROR: page is NULL after get_page(page)\n");
+ SAM("ERROR: page is NULL after get_page(page)\n");
} else {
pvmf->page = page;
retcode = VM_FAULT_MINOR;
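
The vma open/close/fault trio whose conversions appear above follows the mmap pattern from the LDD3 reference cited earlier: open and close keep a mapping count in the driver structure, and the fault handler translates the faulting page into one of the driver's frame-buffer pages, takes a reference with get_page(), and hands the page back to the memory-management core. A condensed sketch using the callback signatures of kernels from this era, assuming the driver's own header for struct easycap; the sketch_ names are illustrative, and the page lookup in the real handler involves more offset arithmetic than shown:

#include <linux/mm.h>

static void sketch_vma_open(struct vm_area_struct *pvma)
{
        struct easycap *peasycap = pvma->vm_private_data;

        peasycap->vma_many++;                   /* one more active mapping */
}

static void sketch_vma_close(struct vm_area_struct *pvma)
{
        struct easycap *peasycap = pvma->vm_private_data;

        peasycap->vma_many--;
}

static int sketch_vma_fault(struct vm_area_struct *pvma, struct vm_fault *pvmf)
{
        struct easycap *peasycap = pvma->vm_private_data;
        void *pbuf = peasycap->frame_buffer[0][pvmf->pgoff].pgo;  /* illustrative */
        struct page *page;

        if (!pbuf)
                return VM_FAULT_SIGBUS;
        page = virt_to_page(pbuf);
        get_page(page);                         /* reference dropped by the mm core */
        pvmf->page = page;
        return VM_FAULT_MINOR;
}

static const struct vm_operations_struct sketch_vm_ops = {
        .open  = sketch_vma_open,
        .close = sketch_vma_close,
        .fault = sketch_vma_fault,
};
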
@@ -2383,7 +2809,7 @@ return retcode;
/*---------------------------------------------------------------------------*/
/*
* ON COMPLETION OF A VIDEO URB ITS DATA IS COPIED TO THE FIELD BUFFERS
- * PROVIDED peasycap->video_idle IS ZER0. REGARDLESS OF THIS BEING TRUE,
+ * PROVIDED peasycap->video_idle IS ZERO. REGARDLESS OF THIS BEING TRUE,
* IT IS RESUBMITTED PROVIDED peasycap->video_isoc_streaming IS NOT ZERO.
*
* THIS FUNCTION IS AN INTERRUPT SERVICE ROUTINE AND MUST NOT SLEEP.
@@ -2400,7 +2826,8 @@ return retcode;
* 0 != (kount & 0x8000) => AT LEAST ONE URB COMPLETED WITH ERRORS
* 0 != (kount & 0x4000) => BUFFER HAS TOO MUCH DATA
* 0 != (kount & 0x2000) => BUFFER HAS NOT ENOUGH DATA
- * 0 != (kount & 0x0400) => FIELD WAS SUBMITTED BY BRIDGER ROUTINE
+ * 0 != (kount & 0x1000) => BUFFER HAS DATA FROM DISPARATE INPUTS
+ * 0 != (kount & 0x0400) => RESERVED
* 0 != (kount & 0x0200) => FIELD BUFFER NOT YET CHECKED
* 0 != (kount & 0x0100) => BUFFER HAS TWO EXTRA BYTES - WHY?
*/
@@ -2408,19 +2835,14 @@ return retcode;
void
easycap_complete(struct urb *purb)
{
-static int mt;
struct easycap *peasycap;
struct data_buffer *pfield_buffer;
char errbuf[16];
int i, more, much, leap, rc, last;
int videofieldamount;
-unsigned int override;
+unsigned int override, bad;
int framestatus, framelength, frameactual, frameoffset;
__u8 *pu;
-#if defined(BRIDGER)
-struct timeval timeval;
-long long usec;
-#endif /*BRIDGER*/
if (NULL == purb) {
SAY("ERROR: easycap_complete(): purb is NULL\n");
@@ -2431,74 +2853,78 @@ if (NULL == peasycap) {
SAY("ERROR: easycap_complete(): peasycap is NULL\n");
return;
}
-
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return;
+}
if (peasycap->video_eof)
return;
-
for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++)
if (purb->transfer_buffer == peasycap->video_isoc_buffer[i].pgo)
break;
-JOT(16, "%2i=urb\n", i);
+JOM(16, "%2i=urb\n", i);
last = peasycap->video_isoc_sequence;
if ((((VIDEO_ISOC_BUFFER_MANY - 1) == last) && \
(0 != i)) || \
(((VIDEO_ISOC_BUFFER_MANY - 1) != last) && \
((last + 1) != i))) {
- SAY("ERROR: out-of-order urbs %i,%i ... continuing\n", last, i);
+ JOM(16, "ERROR: out-of-order urbs %i,%i ... continuing\n", last, i);
}
peasycap->video_isoc_sequence = i;
if (peasycap->video_idle) {
- JOT(16, "%i=video_idle %i=video_isoc_streaming\n", \
+ JOM(16, "%i=video_idle %i=video_isoc_streaming\n", \
peasycap->video_idle, peasycap->video_isoc_streaming);
if (peasycap->video_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_ATOMIC);
if (0 != rc) {
- SAY("ERROR: while %i=video_idle, " \
- "usb_submit_urb() failed with rc:\n", \
- peasycap->video_idle);
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n");
+ SAM("ENOMEM\n");
break;
}
case -ENODEV: {
- SAY("ENODEV\n");
+ SAM("ENODEV\n");
break;
}
case -ENXIO: {
- SAY("ENXIO\n");
+ SAM("ENXIO\n");
break;
}
case -EINVAL: {
- SAY("EINVAL\n");
+ SAM("EINVAL\n");
break;
}
case -EAGAIN: {
- SAY("EAGAIN\n");
+ SAM("EAGAIN\n");
break;
}
case -EFBIG: {
- SAY("EFBIG\n");
+ SAM("EFBIG\n");
break;
}
case -EPIPE: {
- SAY("EPIPE\n");
+ SAM("EPIPE\n");
break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n");
+ SAM("EMSGSIZE\n");
break;
}
case -ENOSPC: {
- SAY("ENOSPC\n");
+ SAM("ENOSPC\n");
break;
}
default: {
- SAY("0x%08X\n", rc);
+ SAM("0x%08X\n", rc);
break;
}
}
+ if (-ENODEV != rc) \
+ SAM("ERROR: while %i=video_idle, " \
+ "usb_submit_urb() " \
+ "failed with rc:\n", \
+ peasycap->video_idle);
}
}
return;
@@ -2506,80 +2932,80 @@ return;
override = 0;
/*---------------------------------------------------------------------------*/
if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
- SAY("ERROR: bad peasycap->field_fill\n");
+ SAM("ERROR: bad peasycap->field_fill\n");
return;
}
if (purb->status) {
if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
- JOT(8, "urb status -ESHUTDOWN or -ENOENT\n");
+ JOM(8, "urb status -ESHUTDOWN or -ENOENT\n");
return;
}
(peasycap->field_buffer[peasycap->field_fill][0].kount) |= 0x8000 ;
- SAY("ERROR: bad urb status:\n");
+ SAM("ERROR: bad urb status:\n");
switch (purb->status) {
case -EINPROGRESS: {
- SAY("-EINPROGRESS\n"); break;
+ SAM("-EINPROGRESS\n"); break;
}
case -ENOSR: {
- SAY("-ENOSR\n"); break;
+ SAM("-ENOSR\n"); break;
}
case -EPIPE: {
- SAY("-EPIPE\n"); break;
+ SAM("-EPIPE\n"); break;
}
case -EOVERFLOW: {
- SAY("-EOVERFLOW\n"); break;
+ SAM("-EOVERFLOW\n"); break;
}
case -EPROTO: {
- SAY("-EPROTO\n"); break;
+ SAM("-EPROTO\n"); break;
}
case -EILSEQ: {
- SAY("-EILSEQ\n"); break;
+ SAM("-EILSEQ\n"); break;
}
case -ETIMEDOUT: {
- SAY("-ETIMEDOUT\n"); break;
+ SAM("-ETIMEDOUT\n"); break;
}
case -EMSGSIZE: {
- SAY("-EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n"); break;
}
case -EOPNOTSUPP: {
- SAY("-EOPNOTSUPP\n"); break;
+ SAM("-EOPNOTSUPP\n"); break;
}
case -EPFNOSUPPORT: {
- SAY("-EPFNOSUPPORT\n"); break;
+ SAM("-EPFNOSUPPORT\n"); break;
}
case -EAFNOSUPPORT: {
- SAY("-EAFNOSUPPORT\n"); break;
+ SAM("-EAFNOSUPPORT\n"); break;
}
case -EADDRINUSE: {
- SAY("-EADDRINUSE\n"); break;
+ SAM("-EADDRINUSE\n"); break;
}
case -EADDRNOTAVAIL: {
- SAY("-EADDRNOTAVAIL\n"); break;
+ SAM("-EADDRNOTAVAIL\n"); break;
}
case -ENOBUFS: {
- SAY("-ENOBUFS\n"); break;
+ SAM("-ENOBUFS\n"); break;
}
case -EISCONN: {
- SAY("-EISCONN\n"); break;
+ SAM("-EISCONN\n"); break;
}
case -ENOTCONN: {
- SAY("-ENOTCONN\n"); break;
+ SAM("-ENOTCONN\n"); break;
}
case -ESHUTDOWN: {
- SAY("-ESHUTDOWN\n"); break;
+ SAM("-ESHUTDOWN\n"); break;
}
case -ENOENT: {
- SAY("-ENOENT\n"); break;
+ SAM("-ENOENT\n"); break;
}
case -ECONNRESET: {
- SAY("-ECONNRESET\n"); break;
+ SAM("-ECONNRESET\n"); break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("ENOSPC\n"); break;
}
default: {
- SAY("unknown error code 0x%08X\n", purb->status); break;
+ SAM("unknown error code 0x%08X\n", purb->status); break;
}
}
/*---------------------------------------------------------------------------*/
@@ -2638,7 +3064,7 @@ if (purb->status) {
strcpy(&errbuf[0], "-ECONNRESET"); break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("ENOSPC\n"); break;
}
case -ESHUTDOWN: {
strcpy(&errbuf[0], "-ESHUTDOWN"); break;
@@ -2653,7 +3079,7 @@ if (purb->status) {
frameactual = purb->iso_frame_desc[i].actual_length;
frameoffset = purb->iso_frame_desc[i].offset;
- JOT(16, "frame[%2i]:" \
+ JOM(16, "frame[%2i]:" \
"%4i=status " \
"%4i=actual " \
"%4i=length " \
@@ -2667,19 +3093,20 @@ if (purb->status) {
PAGE_SIZE) + \
(int)(pfield_buffer->pto - pfield_buffer->pgo);
if (4 == more)
- mt++;
+ peasycap->video_mt++;
if (4 < more) {
- if (mt) {
- JOT(8, "%4i empty video urb frames\n", mt);
- mt = 0;
+ if (peasycap->video_mt) {
+ JOM(8, "%4i empty video urb frames\n", \
+ peasycap->video_mt);
+ peasycap->video_mt = 0;
}
if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
- SAY("ERROR: bad peasycap->field_fill\n");
+ SAM("ERROR: bad peasycap->field_fill\n");
return;
}
if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
peasycap->field_page) {
- SAY("ERROR: bad peasycap->field_page\n");
+ SAM("ERROR: bad peasycap->field_page\n");
return;
}
pfield_buffer = &peasycap->field_buffer\
@@ -2712,11 +3139,13 @@ if (purb->status) {
peasycap->videofieldamount) {
if (2 == videofieldamount - \
peasycap->\
- videofieldamount)
+ videofieldamount) {
(peasycap->field_buffer\
[peasycap->field_fill]\
[0].kount) |= 0x0100;
- else
+ peasycap->video_junk += (1 + \
+ VIDEO_JUNK_TOLERATE);
+ } else
(peasycap->field_buffer\
[peasycap->field_fill]\
[0].kount) |= 0x4000;
@@ -2727,53 +3156,74 @@ if (purb->status) {
[peasycap->field_fill]\
[0].kount) |= 0x2000;
}
- if (!(0xFF00 & peasycap->field_buffer\
+ bad = 0xFF00 & peasycap->field_buffer\
[peasycap->field_fill]\
- [0].kount)) {
- (peasycap->video_junk)--;
- if (-16 > peasycap->video_junk)
- peasycap->video_junk = -16;
- peasycap->field_read = \
+ [0].kount;
+ if (!bad) {
+ (peasycap->video_junk)--;
+ if (-VIDEO_JUNK_TOLERATE > \
+ peasycap->video_junk) \
+ peasycap->video_junk =\
+ -VIDEO_JUNK_TOLERATE;
+ peasycap->field_read = \
(peasycap->\
field_fill)++;
-
- if (FIELD_BUFFER_MANY <= \
- peasycap->field_fill)
- peasycap->field_fill = 0;
- peasycap->field_page = 0;
- pfield_buffer = &peasycap->\
- field_buffer\
- [peasycap->field_fill]\
- [peasycap->field_page];
- pfield_buffer->pto = \
+ if (FIELD_BUFFER_MANY <= \
+ peasycap->\
+ field_fill)
+ peasycap->\
+ field_fill = 0;
+ peasycap->field_page = 0;
+ pfield_buffer = &peasycap->\
+ field_buffer\
+ [peasycap->\
+ field_fill]\
+ [peasycap->\
+ field_page];
+ pfield_buffer->pto = \
pfield_buffer->pgo;
-
- JOT(8, "bumped to: %i=peasycap->" \
- "field_fill %i=parity\n", \
- peasycap->field_fill, \
- 0x00FF & pfield_buffer->kount);
- JOT(8, "field buffer %i has %i " \
- "bytes fit to be read\n", \
- peasycap->field_read, \
- videofieldamount);
- JOT(8, "wakeup call to wq_video, " \
- "%i=field_read %i=field_fill "\
- "%i=parity\n", \
- peasycap->field_read, \
- peasycap->field_fill, \
- 0x00FF & peasycap->\
- field_buffer[peasycap->\
- field_read][0].kount);
- wake_up_interruptible(&(peasycap->\
- wq_video));
- do_gettimeofday(&peasycap->timeval7);
+ JOM(8, "bumped to: %i="\
+ "peasycap->" \
+ "field_fill %i="\
+ "parity\n", \
+ peasycap->field_fill, \
+ 0x00FF & \
+ pfield_buffer->kount);
+ JOM(8, "field buffer %i has "\
+ "%i bytes fit to be "\
+ "read\n", \
+ peasycap->field_read, \
+ videofieldamount);
+ JOM(8, "wakeup call to "\
+ "wq_video, " \
+ "%i=field_read "\
+ "%i=field_fill "\
+ "%i=parity\n", \
+ peasycap->field_read, \
+ peasycap->field_fill, \
+ 0x00FF & peasycap->\
+ field_buffer\
+ [peasycap->\
+ field_read][0].kount);
+ wake_up_interruptible\
+ (&(peasycap->\
+ wq_video));
+ do_gettimeofday\
+ (&peasycap->timeval7);
} else {
peasycap->video_junk++;
- JOT(8, "field buffer %i had %i " \
- "bytes, now discarded\n", \
+				if (bad & 0x1000) \
+ peasycap->video_junk += \
+ (1 + VIDEO_JUNK_TOLERATE/2);
+ JOM(8, "field buffer %i had %i " \
+ "bytes, now discarded: "\
+ "0x%04X\n", \
peasycap->field_fill, \
- videofieldamount);
-
+ videofieldamount,\
+ (0xFF00 & \
+ peasycap->field_buffer\
+ [peasycap->field_fill][0].\
+ kount));
(peasycap->field_fill)++;
if (FIELD_BUFFER_MANY <= \
@@ -2787,20 +3237,22 @@ if (purb->status) {
pfield_buffer->pto = \
pfield_buffer->pgo;
- JOT(8, "bumped to: %i=peasycap->" \
+ JOM(8, "bumped to: %i=peasycap->" \
"field_fill %i=parity\n", \
peasycap->field_fill, \
0x00FF & pfield_buffer->kount);
}
if (8 == more) {
- JOT(8, "end-of-field: received " \
+ JOM(8, "end-of-field: received " \
"parity byte 0x%02X\n", \
(0xFF & *pu));
if (0x40 & *pu)
pfield_buffer->kount = 0x0000;
else
pfield_buffer->kount = 0x0001;
- JOT(8, "end-of-field: 0x%02X=kount\n",\
+ pfield_buffer->input = 0x08 | \
+ (0x07 & peasycap->input);
+ JOM(8, "end-of-field: 0x%02X=kount\n",\
0xFF & pfield_buffer->kount);
}
}
@@ -2813,12 +3265,12 @@ if (purb->status) {
more -= leap;
if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
- SAY("ERROR: bad peasycap->field_fill\n");
+ SAM("ERROR: bad peasycap->field_fill\n");
return;
}
if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
peasycap->field_page) {
- SAY("ERROR: bad peasycap->field_page\n");
+ SAM("ERROR: bad peasycap->field_page\n");
return;
}
pfield_buffer = &peasycap->field_buffer\
@@ -2829,7 +3281,7 @@ if (purb->status) {
[peasycap->field_page];
if (PAGE_SIZE < (pfield_buffer->pto - \
pfield_buffer->pgo)) {
- SAY("ERROR: bad pfield_buffer->pto\n");
+ SAM("ERROR: bad pfield_buffer->pto\n");
return;
}
if (PAGE_SIZE == (pfield_buffer->pto - \
@@ -2837,7 +3289,7 @@ if (purb->status) {
(peasycap->field_page)++;
if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
peasycap->field_page) {
- JOT(16, "wrapping peasycap->" \
+ JOM(16, "wrapping peasycap->" \
"field_page\n");
peasycap->field_page = 0;
}
@@ -2847,6 +3299,15 @@ if (purb->status) {
[peasycap->field_page];
pfield_buffer->pto = \
pfield_buffer->pgo;
+ pfield_buffer->input = 0x08 | \
+ (0x07 & peasycap->input);
+ if ((peasycap->field_buffer[peasycap->\
+ field_fill][0]).\
+ input != \
+ pfield_buffer->input)
+ (peasycap->field_buffer\
+ [peasycap->field_fill]\
+ [0]).kount |= 0x1000;
}
much = PAGE_SIZE - (int)(pfield_buffer->pto - \
@@ -2865,55 +3326,6 @@ if (purb->status) {
}
/*---------------------------------------------------------------------------*/
/*
- *
- *
- * *** UNDER DEVELOPMENT/TESTING - NOT READY YET! ***
- *
- *
- *
- * VIDEOTAPES MAY HAVE BEEN MANUALLY PAUSED AND RESTARTED DURING RECORDING.
- * THIS CAUSES LOSS OF SYNC, CONFUSING DOWNSTREAM USERSPACE PROGRAMS WHICH
- * MAY INTERPRET THE INTERRUPTION AS A SYMPTOM OF LATENCY. TO OVERCOME THIS
- * THE DRIVER BRIDGES THE HIATUS BY SENDING DUMMY VIDEO FRAMES AT ROUGHLY
- * THE RIGHT TIME INTERVALS IN THE HOPE OF PERSUADING THE DOWNSTREAM USERSPACE
- * PROGRAM TO RESUME NORMAL SERVICE WHEN THE INTERRUPTION IS OVER.
- */
-/*---------------------------------------------------------------------------*/
-#if defined(BRIDGER)
-do_gettimeofday(&timeval);
-if (peasycap->timeval7.tv_sec) {
- usec = 1000000*(timeval.tv_sec - peasycap->timeval7.tv_sec) + \
- (timeval.tv_usec - peasycap->timeval7.tv_usec);
- if (usec > (peasycap->usec + peasycap->tolerate)) {
- JOT(8, "bridging hiatus\n");
- peasycap->video_junk = 0;
- peasycap->field_buffer[peasycap->field_fill][0].kount |= 0x0400;
-
- peasycap->field_read = (peasycap->field_fill)++;
-
- if (FIELD_BUFFER_MANY <= peasycap->field_fill) \
- peasycap->field_fill = 0;
- peasycap->field_page = 0;
- pfield_buffer = &peasycap->field_buffer\
- [peasycap->field_fill][peasycap->field_page];
- pfield_buffer->pto = pfield_buffer->pgo;
-
- JOT(8, "bumped to: %i=peasycap->field_fill %i=parity\n", \
- peasycap->field_fill, 0x00FF & pfield_buffer->kount);
- JOT(8, "field buffer %i has %i bytes to be overwritten\n", \
- peasycap->field_read, videofieldamount);
- JOT(8, "wakeup call to wq_video, " \
- "%i=field_read %i=field_fill %i=parity\n", \
- peasycap->field_read, peasycap->field_fill, \
- 0x00FF & \
- peasycap->field_buffer[peasycap->field_read][0].kount);
- wake_up_interruptible(&(peasycap->wq_video));
- do_gettimeofday(&peasycap->timeval7);
- }
-}
-#endif /*BRIDGER*/
-/*---------------------------------------------------------------------------*/
-/*
* RESUBMIT THIS URB, UNLESS A SEVERE PERSISTENT ERROR CONDITION EXISTS.
*
* IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO AN ERROR CONDITION
@@ -2921,51 +3333,57 @@ if (peasycap->timeval7.tv_sec) {
*/
/*---------------------------------------------------------------------------*/
if (VIDEO_ISOC_BUFFER_MANY <= peasycap->video_junk) {
- SAY("easycap driver shutting down on condition green\n");
+ SAM("easycap driver shutting down on condition green\n");
+ peasycap->status = 1;
peasycap->video_eof = 1;
+ peasycap->video_junk = 0;
+ wake_up_interruptible(&peasycap->wq_video);
+#if !defined(PERSEVERE)
peasycap->audio_eof = 1;
- peasycap->video_junk = -VIDEO_ISOC_BUFFER_MANY;
- wake_up_interruptible(&(peasycap->wq_video));
- wake_up_interruptible(&(peasycap->wq_audio));
+ wake_up_interruptible(&peasycap->wq_audio);
+#endif /*PERSEVERE*/
return;
}
if (peasycap->video_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_ATOMIC);
if (0 != rc) {
- SAY("ERROR: while %i=video_idle, usb_submit_urb() failed " \
- "with rc:\n", peasycap->video_idle);
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n"); break;
+ SAM("ENOMEM\n"); break;
}
case -ENODEV: {
- SAY("ENODEV\n"); break;
+ SAM("ENODEV\n"); break;
}
case -ENXIO: {
- SAY("ENXIO\n"); break;
+ SAM("ENXIO\n"); break;
}
case -EINVAL: {
- SAY("EINVAL\n"); break;
+ SAM("EINVAL\n"); break;
}
case -EAGAIN: {
- SAY("EAGAIN\n"); break;
+ SAM("EAGAIN\n"); break;
}
case -EFBIG: {
- SAY("EFBIG\n"); break;
+ SAM("EFBIG\n"); break;
}
case -EPIPE: {
- SAY("EPIPE\n"); break;
+ SAM("EPIPE\n"); break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n"); break;
+ SAM("EMSGSIZE\n"); break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("ENOSPC\n"); break;
}
default: {
- SAY("0x%08X\n", rc); break;
+ SAM("0x%08X\n", rc); break;
}
}
+ if (-ENODEV != rc) \
+ SAM("ERROR: while %i=video_idle, " \
+ "usb_submit_urb() " \
+ "failed with rc:\n", \
+ peasycap->video_idle);
}
}
return;
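The shutdown branch above implements a simple badness budget: damaged transfers bump peasycap->video_junk, and once the count reaches VIDEO_ISOC_BUFFER_MANY the handler stops resubmitting, marks end-of-stream and wakes every sleeper so nothing stays blocked on the wait queues. A minimal user-space sketch of that policy follows; JUNK_THRESHOLD, wake_all_waiters() and the decay-on-good-traffic rule are illustrative choices rather than the driver's exact behaviour.

#include <stdbool.h>
#include <stdio.h>

#define JUNK_THRESHOLD 16              /* plays the role of VIDEO_ISOC_BUFFER_MANY */

static int junk;                       /* running badness score */
static bool stream_eof;                /* plays the role of peasycap->video_eof */

static void wake_all_waiters(void)     /* stand-in for wake_up_interruptible() */
{
        printf("waking sleepers so they can observe eof\n");
}

/* Called once per completed transfer. */
static void account_transfer(bool bad)
{
        if (bad)
                junk++;
        else if (junk > 0)
                junk--;                /* good traffic slowly repays the budget */

        if (junk >= JUNK_THRESHOLD && !stream_eof) {
                stream_eof = true;     /* stop resubmitting */
                junk = 0;
                wake_all_waiters();    /* essential: otherwise readers sleep forever */
        }
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                account_transfer(true);        /* a persistently bad stream */
        printf("eof=%d junk=%d\n", stream_eof, junk);
        return 0;
}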
@@ -2977,8 +3395,8 @@ return;
* FIXME
*
*
- * THIS FUNCTION ASSUMES THAT, ON EACH AND EVERY OCCASION THAT THE DEVICE IS
- * PHYSICALLY PLUGGED IN, INTERFACE 0 IS PROBED FIRST.
+ * THIS FUNCTION ASSUMES THAT, ON EACH AND EVERY OCCASION THAT THE EasyCAP
+ * IS PHYSICALLY PLUGGED IN, INTERFACE 0 IS PROBED FIRST.
* IF THIS IS NOT TRUE, THERE IS THE POSSIBILITY OF AN Oops.
*
* THIS HAS NEVER BEEN A PROBLEM IN PRACTICE, BUT SOMETHING SEEMS WRONG HERE.
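The FIXME above is an ordering assumption: the driver's shared state exists only after interface 0 has been probed, so a probe of interface 1 or 2 arriving first would dereference a NULL pointer. The fragment below sketches the defensive shape such a probe can take; shared_state and probe_interface() are invented names used only for illustration.

#include <stdio.h>
#include <stdlib.h>

struct shared_state { int opened; };

static struct shared_state *shared_state;       /* created while probing interface 0 */

static int probe_interface(int ifnum)
{
        if (ifnum == 0) {
                shared_state = calloc(1, sizeof(*shared_state));
                return shared_state ? 0 : -1;
        }
        if (!shared_state) {                    /* interface 0 has not been probed yet */
                fprintf(stderr, "interface %d probed before interface 0\n", ifnum);
                return -1;                      /* refuse rather than risk an Oops */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", probe_interface(1));     /* fails: out-of-order probe */
        printf("%d\n", probe_interface(0));     /* allocates the shared state */
        printf("%d\n", probe_interface(2));     /* now succeeds */
        return 0;
}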
@@ -2994,7 +3412,7 @@ struct usb_endpoint_descriptor *pepd;
struct usb_interface_descriptor *pusb_interface_descriptor;
struct usb_interface_assoc_descriptor *pusb_interface_assoc_descriptor;
struct urb *purb;
-static struct easycap *peasycap /*=NULL*/;
+struct easycap *peasycap;
struct data_urb *pdata_urb;
size_t wMaxPacketSize;
int ISOCwMaxPacketSize;
@@ -3004,19 +3422,32 @@ int CTRLwMaxPacketSize;
__u8 bEndpointAddress;
__u8 ISOCbEndpointAddress;
__u8 INTbEndpointAddress;
-int isin, i, j, k, m;
+int isin, i, j, k, m, rc;
__u8 bInterfaceNumber;
__u8 bInterfaceClass;
__u8 bInterfaceSubClass;
void *pbuf;
int okalt[8], isokalt;
-int okepn[8], isokepn;
-int okmps[8], isokmps;
+int okepn[8];
+int okmps[8];
int maxpacketsize;
-int rc;
+__u16 mask;
+__s32 value;
+struct easycap_format *peasycap_format;
JOT(4, "\n");
+if (!dongle_done) {
+ dongle_done = 1;
+ for (k = 0; k < DONGLE_MANY; k++) {
+ easycap_dongle[k].peasycap = (struct easycap *)NULL;
+ mutex_init(&easycap_dongle[k].mutex_video);
+ mutex_init(&easycap_dongle[k].mutex_audio);
+ }
+}
+
+peasycap = (struct easycap *)NULL;
+
if ((struct usb_interface *)NULL == pusb_interface) {
SAY("ERROR: pusb_interface is NULL\n");
return -EFAULT;
@@ -3117,46 +3548,83 @@ JOT(4, "intf[%i]: pusb_interface_assoc_descriptor is NULL\n", \
/*
* A NEW struct easycap IS ALWAYS ALLOCATED WHEN INTERFACE 0 IS PROBED.
* IT IS NOT POSSIBLE HERE TO FREE ANY EXISTING struct easycap. THIS
- * SHOULD HAVE BEEN DONE BY easycap_delete() WHEN THE DEVICE WAS PHYSICALLY
- * UNPLUGGED.
- */
+ * SHOULD HAVE BEEN DONE BY easycap_delete() WHEN THE EasyCAP WAS
+ * PHYSICALLY UNPLUGGED.
+ *
+ * THE POINTER peasycap TO THE struct easycap IS REMEMBERED WHEN
+ * INTERFACES 1 AND 2 ARE PROBED.
+ *
+ * IF TWO EasyCAPs ARE PLUGGED IN NEARLY SIMULTANEOUSLY THERE WILL
+ * BE TROUBLE. BEWARE.
+*/
/*---------------------------------------------------------------------------*/
if (0 == bInterfaceNumber) {
peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
if (NULL == peasycap) {
SAY("ERROR: Could not allocate peasycap\n");
return -ENOMEM;
- } else {
- peasycap->allocation_video_struct = sizeof(struct easycap);
- peasycap->allocation_video_page = 0;
- peasycap->allocation_video_urb = 0;
- peasycap->allocation_audio_struct = 0;
- peasycap->allocation_audio_page = 0;
- peasycap->allocation_audio_urb = 0;
}
+ SAM("allocated 0x%08lX=peasycap\n", (unsigned long int) peasycap);
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+ SAM("where 0x%08lX=&peasycap->video_device\n", \
+ (unsigned long int) &peasycap->video_device);
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+ SAM("and 0x%08lX=&peasycap->v4l2_device\n", \
+ (unsigned long int) &peasycap->v4l2_device);
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
/*---------------------------------------------------------------------------*/
/*
- * INITIALIZE THE NEW easycap STRUCTURE.
- * NO PARAMETERS ARE SPECIFIED HERE REQUIRING THE SETTING OF REGISTERS.
- * THAT IS DONE FIRST BY easycap_open() AND LATER BY easycap_ioctl().
- */
+ * PERFORM URGENT INITIALIZATIONS ...
+*/
/*---------------------------------------------------------------------------*/
- peasycap->pusb_device = pusb_device;
- peasycap->pusb_interface = pusb_interface;
-
+ strcpy(&peasycap->telltale[0], TELLTALE);
kref_init(&peasycap->kref);
- JOT(8, "intf[%i]: after kref_init(..._video) " \
+ JOM(8, "intf[%i]: after kref_init(..._video) " \
"%i=peasycap->kref.refcount.counter\n", \
bInterfaceNumber, peasycap->kref.refcount.counter);
- init_waitqueue_head(&(peasycap->wq_video));
- init_waitqueue_head(&(peasycap->wq_audio));
+ init_waitqueue_head(&peasycap->wq_video);
+ init_waitqueue_head(&peasycap->wq_audio);
+
+ for (dongle_this = 0; dongle_this < DONGLE_MANY; dongle_this++) {
+ if (NULL == easycap_dongle[dongle_this].peasycap) {
+ if (0 == mutex_is_locked(&easycap_dongle\
+ [dongle_this].mutex_video)) {
+ if (0 == mutex_is_locked(&easycap_dongle\
+ [dongle_this].mutex_audio)) {
+ easycap_dongle\
+ [dongle_this].peasycap = \
+ peasycap;
+ JOM(8, "intf[%i]: peasycap-->easycap" \
+ "_dongle[%i].peasycap\n", \
+ bInterfaceNumber, dongle_this);
+ break;
+ }
+ }
+ }
+ }
+ if (DONGLE_MANY <= dongle_this) {
+ SAM("ERROR: too many dongles\n");
+ return -ENOMEM;
+ }
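The loop above claims the first free entry in a fixed table of dongle slots, skipping entries whose mutexes are currently held, and bails out with -ENOMEM when the table is exhausted. The user-space sketch below shows the same shape with pthread primitives; DONGLE_SLOTS, struct dongle_slot and claim_slot() are invented names. Using trylock keeps the free-check and the claim in one step, which sidesteps the small check-then-claim window that the comment's warning about two nearly simultaneous EasyCAPs alludes to.

#include <pthread.h>
#include <stdio.h>

#define DONGLE_SLOTS 8                    /* plays the role of DONGLE_MANY */

struct dongle_slot {
        void *owner;                      /* NULL while the slot is free */
        pthread_mutex_t lock_video;
        pthread_mutex_t lock_audio;
};

static struct dongle_slot slots[DONGLE_SLOTS];

static void init_slots(void)              /* one-time setup, like the dongle_done block */
{
        for (int k = 0; k < DONGLE_SLOTS; k++) {
                slots[k].owner = NULL;
                pthread_mutex_init(&slots[k].lock_video, NULL);
                pthread_mutex_init(&slots[k].lock_audio, NULL);
        }
}

/* Scan for a slot that is unowned and whose locks are free; return its
 * index, or -1 when the table is full (the -ENOMEM case above). */
static int claim_slot(void *owner)
{
        for (int k = 0; k < DONGLE_SLOTS; k++) {
                if (slots[k].owner)
                        continue;
                if (pthread_mutex_trylock(&slots[k].lock_video))
                        continue;                         /* video lock busy */
                if (pthread_mutex_trylock(&slots[k].lock_audio)) {
                        pthread_mutex_unlock(&slots[k].lock_video);
                        continue;                         /* audio lock busy */
                }
                slots[k].owner = owner;                   /* claim while holding both locks */
                pthread_mutex_unlock(&slots[k].lock_audio);
                pthread_mutex_unlock(&slots[k].lock_video);
                return k;
        }
        return -1;
}

int main(void)
{
        int probe_token;

        init_slots();
        printf("claimed slot %d\n", claim_slot(&probe_token));
        return 0;
}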
- mutex_init(&(peasycap->mutex_timeval0));
- mutex_init(&(peasycap->mutex_timeval1));
+ peasycap->allocation_video_struct = sizeof(struct easycap);
+ peasycap->allocation_video_page = 0;
+ peasycap->allocation_video_urb = 0;
+ peasycap->allocation_audio_struct = 0;
+ peasycap->allocation_audio_page = 0;
+ peasycap->allocation_audio_urb = 0;
- for (k = 0; k < FRAME_BUFFER_MANY; k++)
- mutex_init(&(peasycap->mutex_mmap_video[k]));
+/*---------------------------------------------------------------------------*/
+/*
+ * ... AND FURTHER INITIALIZE THE STRUCTURE
+*/
+/*---------------------------------------------------------------------------*/
+ peasycap->pusb_device = pusb_device;
+ peasycap->pusb_interface = pusb_interface;
peasycap->ilk = 0;
peasycap->microphone = false;
@@ -3177,46 +3645,172 @@ if (0 == bInterfaceNumber) {
peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
- if ((struct mutex *)NULL == &(peasycap->mutex_mmap_video[0])) {
- SAY("ERROR: &(peasycap->mutex_mmap_video[%i]) is NULL\n", 0);
- return -EFAULT;
- }
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->lost[k] = 0;
+ peasycap->skip = 0;
+ peasycap->skipped = 0;
+ peasycap->offerfields = 0;
/*---------------------------------------------------------------------------*/
/*
- * DYNAMICALLY FILL IN THE AVAILABLE FORMATS.
+ * DYNAMICALLY FILL IN THE AVAILABLE FORMATS ...
*/
/*---------------------------------------------------------------------------*/
rc = fillin_formats();
if (0 > rc) {
- SAY("ERROR: fillin_formats() returned %i\n", rc);
+ SAM("ERROR: fillin_formats() returned %i\n", rc);
return -EFAULT;
}
- JOT(4, "%i formats available\n", rc);
- } else {
+ JOM(4, "%i formats available\n", rc);
/*---------------------------------------------------------------------------*/
- if ((struct easycap *)NULL == peasycap) {
- SAY("ERROR: peasycap is NULL " \
- "when probing interface %i\n", \
- bInterfaceNumber);
- return -EFAULT;
+/*
+ * ... AND POPULATE easycap.inputset[]
+*/
+/*---------------------------------------------------------------------------*/
+ for (k = 0; k < INPUT_MANY; k++) {
+ peasycap->inputset[k].input_ok = 0;
+ peasycap->inputset[k].standard_offset_ok = 0;
+ peasycap->inputset[k].format_offset_ok = 0;
+ peasycap->inputset[k].brightness_ok = 0;
+ peasycap->inputset[k].contrast_ok = 0;
+ peasycap->inputset[k].saturation_ok = 0;
+ peasycap->inputset[k].hue_ok = 0;
+ }
+ if (true == peasycap->ntsc) {
+ i = 0;
+ m = 0;
+ mask = 0;
+ while (0xFFFF != easycap_standard[i].mask) {
+ if (NTSC_M == easycap_standard[i].\
+ v4l2_standard.index) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++) {
+ peasycap->inputset[k].\
+ standard_offset = i;
+ }
+ mask = easycap_standard[i].mask;
+ }
+ i++;
}
+ } else {
+ i = 0;
+ m = 0;
+ mask = 0;
+ while (0xFFFF != easycap_standard[i].mask) {
+ if (PAL_BGHIN == easycap_standard[i].\
+ v4l2_standard.index) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++) {
+ peasycap->inputset[k].\
+ standard_offset = i;
+ }
+ mask = easycap_standard[i].mask;
+ }
+ i++;
+ }
+ }
- JOT(8, "kref_get() with %i=peasycap->kref.refcount.counter\n", \
- (int)peasycap->kref.refcount.counter);
- kref_get(&peasycap->kref);
+ if (1 != m) {
+ SAM("MISTAKE: easycap.inputset[].standard_offset " \
+ "unpopulated, %i=m\n", m);
+ return -ENOENT;
+ }
+
+ peasycap_format = &easycap_format[0];
+ i = 0;
+ m = 0;
+ while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
+ if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+ (peasycap_format->\
+ v4l2_format.fmt.pix.field == \
+ V4L2_FIELD_NONE) && \
+ (peasycap_format->\
+ v4l2_format.fmt.pix.pixelformat == \
+ V4L2_PIX_FMT_UYVY) && \
+ (peasycap_format->\
+ v4l2_format.fmt.pix.width == \
+ 640) && \
+ (peasycap_format->\
+ v4l2_format.fmt.pix.height == 480)) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].format_offset = i;
+ break;
+ }
+ peasycap_format++;
+ i++;
+ }
+ if (1 != m) {
+ SAM("MISTAKE: easycap.inputset[].format_offset unpopulated\n");
+ return -ENOENT;
+ }
+
+ i = 0;
+ m = 0;
+ while (0xFFFFFFFF != easycap_control[i].id) {
+ value = easycap_control[i].default_value;
+ if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].brightness = value;
+ } else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].contrast = value;
+ } else if (V4L2_CID_SATURATION == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].saturation = value;
+ } else if (V4L2_CID_HUE == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].hue = value;
+ }
+ i++;
+ }
+ if (4 != m) {
+ SAM("MISTAKE: easycap.inputset[].brightness,... " \
+ "underpopulated\n");
+ return -ENOENT;
+ }
+ for (k = 0; k < INPUT_MANY; k++)
+ peasycap->inputset[k].input = k;
+ JOM(4, "populated easycap.inputset[]\n");
+ JOM(4, "finished initialization\n");
+} else {
+/*---------------------------------------------------------------------------*/
+ /*
+ * FOR INTERFACES 1 AND 2 THE POINTER peasycap IS OBTAINED BY ASSUMING
+ * THAT dongle_this HAS NOT CHANGED SINCE INTERFACE 0 WAS PROBED. IF
+ * THIS IS NOT THE CASE, FOR EXAMPLE WHEN TWO EASYCAPs ARE PLUGGED IN
+ * SIMULTANEOUSLY, THERE WILL BE SERIOUS TROUBLE.
+ */
+/*---------------------------------------------------------------------------*/
+ if ((0 > dongle_this) || (DONGLE_MANY <= dongle_this)) {
+ SAY("ERROR: bad dongle count\n");
+ return -EFAULT;
+ }
+ peasycap = easycap_dongle[dongle_this].peasycap;
+ JOT(8, "intf[%i]: easycap_dongle[%i].peasycap-->peasycap\n", \
+ bInterfaceNumber, dongle_this);
+
+ if ((struct easycap *)NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL when probing interface %i\n", \
+ bInterfaceNumber);
+ return -EFAULT;
+ }
}
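The easycap.inputset[] population above is three scans over sentinel-terminated tables (easycap_standard[], easycap_format[], easycap_control[]): each scan remembers the offset of the single matching entry, and a final count check ("1 != m", "4 != m") rejects a table that no longer contains what is expected. The sketch below reproduces that pattern for one table; struct std_entry, STD_SENTINEL and pick_standard() are illustrative names only.

#include <stdio.h>

#define STD_SENTINEL 0xFFFFu

struct std_entry {
        unsigned int mask;      /* STD_SENTINEL terminates the table */
        int index;              /* e.g. an NTSC_M / PAL_BGHIN style id */
};

static const struct std_entry standards[] = {
        { 0x0001, 10 },
        { 0x0002, 20 },         /* suppose this is the wanted standard */
        { 0x0004, 30 },
        { STD_SENTINEL, 0 },
};

/* Returns the table offset of the wanted standard, or -1 if it is missing
 * or duplicated, mirroring the "1 != m" sanity checks above. */
static int pick_standard(int wanted_index)
{
        int offset = -1, matches = 0;

        for (int i = 0; standards[i].mask != STD_SENTINEL; i++) {
                if (standards[i].index == wanted_index) {
                        offset = i;
                        matches++;
                }
        }
        return (matches == 1) ? offset : -1;
}

int main(void)
{
        printf("offset of standard 20: %d\n", pick_standard(20));
        printf("offset of standard 99: %d\n", pick_standard(99));
        return 0;
}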
/*---------------------------------------------------------------------------*/
if ((USB_CLASS_VIDEO == bInterfaceClass) || \
- (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
+ (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
if (-1 == peasycap->video_interface) {
peasycap->video_interface = bInterfaceNumber;
- JOT(4, "setting peasycap->video_interface=%i\n", \
+ JOM(4, "setting peasycap->video_interface=%i\n", \
peasycap->video_interface);
} else {
if (peasycap->video_interface != bInterfaceNumber) {
- SAY("ERROR: attempting to reset " \
+ SAM("ERROR: attempting to reset " \
"peasycap->video_interface\n");
- SAY("...... continuing with " \
+ SAM("...... continuing with " \
"%i=peasycap->video_interface\n", \
peasycap->video_interface);
}
@@ -3225,13 +3819,13 @@ if ((USB_CLASS_VIDEO == bInterfaceClass) || \
(0x02 == bInterfaceSubClass)) {
if (-1 == peasycap->audio_interface) {
peasycap->audio_interface = bInterfaceNumber;
- JOT(4, "setting peasycap->audio_interface=%i\n", \
+ JOM(4, "setting peasycap->audio_interface=%i\n", \
peasycap->audio_interface);
} else {
if (peasycap->audio_interface != bInterfaceNumber) {
- SAY("ERROR: attempting to reset " \
+ SAM("ERROR: attempting to reset " \
"peasycap->audio_interface\n");
- SAY("...... continuing with " \
+ SAM("...... continuing with " \
"%i=peasycap->audio_interface\n", \
peasycap->audio_interface);
}
@@ -3244,37 +3838,35 @@ if ((USB_CLASS_VIDEO == bInterfaceClass) || \
*/
/*---------------------------------------------------------------------------*/
isokalt = 0;
-isokepn = 0;
-isokmps = 0;
for (i = 0; i < pusb_interface->num_altsetting; i++) {
pusb_host_interface = &(pusb_interface->altsetting[i]);
if ((struct usb_host_interface *)NULL == pusb_host_interface) {
- SAY("ERROR: pusb_host_interface is NULL\n");
+ SAM("ERROR: pusb_host_interface is NULL\n");
return -EFAULT;
}
pusb_interface_descriptor = &(pusb_host_interface->desc);
if ((struct usb_interface_descriptor *)NULL == \
pusb_interface_descriptor) {
- SAY("ERROR: pusb_interface_descriptor is NULL\n");
+ SAM("ERROR: pusb_interface_descriptor is NULL\n");
return -EFAULT;
}
- JOT(4, "intf[%i]alt[%i]: desc.bDescriptorType=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bDescriptorType=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bDescriptorType);
- JOT(4, "intf[%i]alt[%i]: desc.bInterfaceNumber=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bInterfaceNumber=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceNumber);
- JOT(4, "intf[%i]alt[%i]: desc.bAlternateSetting=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bAlternateSetting=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bAlternateSetting);
- JOT(4, "intf[%i]alt[%i]: desc.bNumEndpoints=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bNumEndpoints=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bNumEndpoints);
- JOT(4, "intf[%i]alt[%i]: desc.bInterfaceClass=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bInterfaceClass=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceClass);
- JOT(4, "intf[%i]alt[%i]: desc.bInterfaceSubClass=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bInterfaceSubClass=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceSubClass);
- JOT(4, "intf[%i]alt[%i]: desc.bInterfaceProtocol=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.bInterfaceProtocol=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceProtocol);
- JOT(4, "intf[%i]alt[%i]: desc.iInterface=0x%02X\n", \
+ JOM(4, "intf[%i]alt[%i]: desc.iInterface=0x%02X\n", \
bInterfaceNumber, i, pusb_interface_descriptor->iInterface);
ISOCwMaxPacketSize = -1;
@@ -3285,86 +3877,80 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
INTbEndpointAddress = 0;
if (0 == pusb_interface_descriptor->bNumEndpoints)
- JOT(4, "intf[%i]alt[%i] has no endpoints\n", \
+ JOM(4, "intf[%i]alt[%i] has no endpoints\n", \
bInterfaceNumber, i);
/*---------------------------------------------------------------------------*/
for (j = 0; j < pusb_interface_descriptor->bNumEndpoints; j++) {
pepd = &(pusb_host_interface->endpoint[j].desc);
if ((struct usb_endpoint_descriptor *)NULL == pepd) {
- SAY("ERROR: pepd is NULL.\n");
- SAY("...... skipping\n");
+ SAM("ERROR: pepd is NULL.\n");
+ SAM("...... skipping\n");
continue;
}
wMaxPacketSize = le16_to_cpu(pepd->wMaxPacketSize);
bEndpointAddress = pepd->bEndpointAddress;
- JOT(4, "intf[%i]alt[%i]end[%i]: bEndpointAddress=0x%X\n", \
+ JOM(4, "intf[%i]alt[%i]end[%i]: bEndpointAddress=0x%X\n", \
bInterfaceNumber, i, j, \
pepd->bEndpointAddress);
- JOT(4, "intf[%i]alt[%i]end[%i]: bmAttributes=0x%X\n", \
+ JOM(4, "intf[%i]alt[%i]end[%i]: bmAttributes=0x%X\n", \
bInterfaceNumber, i, j, \
pepd->bmAttributes);
- JOT(4, "intf[%i]alt[%i]end[%i]: wMaxPacketSize=%i\n", \
+ JOM(4, "intf[%i]alt[%i]end[%i]: wMaxPacketSize=%i\n", \
bInterfaceNumber, i, j, \
pepd->wMaxPacketSize);
- JOT(4, "intf[%i]alt[%i]end[%i]: bInterval=%i\n",
+ JOM(4, "intf[%i]alt[%i]end[%i]: bInterval=%i\n",
bInterfaceNumber, i, j, \
pepd->bInterval);
if (pepd->bEndpointAddress & USB_DIR_IN) {
- JOT(4, "intf[%i]alt[%i]end[%i] is an IN endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is an IN endpoint\n",\
bInterfaceNumber, i, j);
isin = 1;
} else {
- JOT(4, "intf[%i]alt[%i]end[%i] is an OUT endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is an OUT endpoint\n",\
bInterfaceNumber, i, j);
- SAY("ERROR: OUT endpoint unexpected\n");
- SAY("...... continuing\n");
+ SAM("ERROR: OUT endpoint unexpected\n");
+ SAM("...... continuing\n");
isin = 0;
}
if ((pepd->bmAttributes & \
USB_ENDPOINT_XFERTYPE_MASK) == \
USB_ENDPOINT_XFER_ISOC) {
- JOT(4, "intf[%i]alt[%i]end[%i] is an ISOC endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is an ISOC endpoint\n",\
bInterfaceNumber, i, j);
if (isin) {
switch (bInterfaceClass) {
case USB_CLASS_VIDEO:
case USB_CLASS_VENDOR_SPEC: {
if (!peasycap) {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"peasycap is NULL\n");
return -EFAULT;
}
if (pepd->wMaxPacketSize) {
if (8 > isokalt) {
okalt[isokalt] = i;
- JOT(4,\
+ JOM(4,\
"%i=okalt[%i]\n", \
okalt[isokalt], \
isokalt);
- isokalt++;
- }
- if (8 > isokepn) {
- okepn[isokepn] = \
+ okepn[isokalt] = \
pepd->\
bEndpointAddress & \
0x0F;
- JOT(4,\
+ JOM(4,\
"%i=okepn[%i]\n", \
- okepn[isokepn], \
- isokepn);
- isokepn++;
- }
- if (8 > isokmps) {
- okmps[isokmps] = \
+ okepn[isokalt], \
+ isokalt);
+ okmps[isokalt] = \
le16_to_cpu(pepd->\
wMaxPacketSize);
- JOT(4,\
+ JOM(4,\
"%i=okmps[%i]\n", \
- okmps[isokmps], \
- isokmps);
- isokmps++;
+ okmps[isokalt], \
+ isokalt);
+ isokalt++;
}
} else {
if (-1 == peasycap->\
@@ -3372,16 +3958,16 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
peasycap->\
video_altsetting_off =\
i;
- JOT(4, "%i=video_" \
+ JOM(4, "%i=video_" \
"altsetting_off " \
"<====\n", \
peasycap->\
video_altsetting_off);
} else {
- SAY("ERROR: peasycap" \
+ SAM("ERROR: peasycap" \
"->video_altsetting_" \
"off already set\n");
- SAY("...... " \
+ SAM("...... " \
"continuing with " \
"%i=peasycap->video_" \
"altsetting_off\n", \
@@ -3395,39 +3981,33 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
if (0x02 != bInterfaceSubClass)
break;
if (!peasycap) {
- SAY("MISTAKE: " \
+ SAM("MISTAKE: " \
"peasycap is NULL\n");
return -EFAULT;
}
if (pepd->wMaxPacketSize) {
if (8 > isokalt) {
okalt[isokalt] = i ;
- JOT(4,\
+ JOM(4,\
"%i=okalt[%i]\n", \
okalt[isokalt], \
isokalt);
- isokalt++;
- }
- if (8 > isokepn) {
- okepn[isokepn] = \
+ okepn[isokalt] = \
pepd->\
bEndpointAddress & \
0x0F;
- JOT(4,\
+ JOM(4,\
"%i=okepn[%i]\n", \
- okepn[isokepn], \
- isokepn);
- isokepn++;
- }
- if (8 > isokmps) {
- okmps[isokmps] = \
+ okepn[isokalt], \
+ isokalt);
+ okmps[isokalt] = \
le16_to_cpu(pepd->\
wMaxPacketSize);
- JOT(4,\
+ JOM(4,\
"%i=okmps[%i]\n",\
- okmps[isokmps], \
- isokmps);
- isokmps++;
+ okmps[isokalt], \
+ isokalt);
+ isokalt++;
}
} else {
if (-1 == peasycap->\
@@ -3435,16 +4015,16 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
peasycap->\
audio_altsetting_off =\
i;
- JOT(4, "%i=audio_" \
+ JOM(4, "%i=audio_" \
"altsetting_off " \
"<====\n", \
peasycap->\
audio_altsetting_off);
} else {
- SAY("ERROR: peasycap" \
+ SAM("ERROR: peasycap" \
"->audio_altsetting_" \
"off already set\n");
- SAY("...... " \
+ SAM("...... " \
"continuing with " \
"%i=peasycap->\
audio_altsetting_" \
@@ -3462,19 +4042,19 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
} else if ((pepd->bmAttributes & \
USB_ENDPOINT_XFERTYPE_MASK) ==\
USB_ENDPOINT_XFER_BULK) {
- JOT(4, "intf[%i]alt[%i]end[%i] is a BULK endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is a BULK endpoint\n",\
bInterfaceNumber, i, j);
} else if ((pepd->bmAttributes & \
USB_ENDPOINT_XFERTYPE_MASK) ==\
USB_ENDPOINT_XFER_INT) {
- JOT(4, "intf[%i]alt[%i]end[%i] is an INT endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is an INT endpoint\n",\
bInterfaceNumber, i, j);
} else {
- JOT(4, "intf[%i]alt[%i]end[%i] is a CTRL endpoint\n",\
+ JOM(4, "intf[%i]alt[%i]end[%i] is a CTRL endpoint\n",\
bInterfaceNumber, i, j);
}
if (0 == pepd->wMaxPacketSize) {
- JOT(4, "intf[%i]alt[%i]end[%i] " \
+ JOM(4, "intf[%i]alt[%i]end[%i] " \
"has zero packet size\n", \
bInterfaceNumber, i, j);
}
@@ -3485,7 +4065,7 @@ for (i = 0; i < pusb_interface->num_altsetting; i++) {
* PERFORM INITIALIZATION OF THE PROBED INTERFACE
*/
/*---------------------------------------------------------------------------*/
-JOT(4, "initialization begins for interface %i\n", \
+JOM(4, "initialization begins for interface %i\n", \
pusb_interface_descriptor->bInterfaceNumber);
switch (bInterfaceNumber) {
/*---------------------------------------------------------------------------*/
@@ -3495,89 +4075,78 @@ switch (bInterfaceNumber) {
/*---------------------------------------------------------------------------*/
case 0: {
if (!peasycap) {
- SAY("MISTAKE: peasycap is NULL\n");
+ SAM("MISTAKE: peasycap is NULL\n");
return -EFAULT;
}
if (!isokalt) {
- SAY("ERROR: no viable video_altsetting_on\n");
+ SAM("ERROR: no viable video_altsetting_on\n");
return -ENOENT;
} else {
peasycap->video_altsetting_on = okalt[isokalt - 1];
- JOT(4, "%i=video_altsetting_on <====\n", \
+ JOM(4, "%i=video_altsetting_on <====\n", \
peasycap->video_altsetting_on);
}
- if (!isokepn) {
- SAY("ERROR: no viable video_endpointnumber\n");
- return -ENOENT;
- } else {
- peasycap->video_endpointnumber = okepn[isokepn - 1];
- JOT(4, "%i=video_endpointnumber\n", \
- peasycap->video_endpointnumber);
- }
- if (!isokmps) {
- SAY("ERROR: no viable video_maxpacketsize\n");
- return -ENOENT;
/*---------------------------------------------------------------------------*/
/*
* DECIDE THE VIDEO STREAMING PARAMETERS
*/
/*---------------------------------------------------------------------------*/
+ peasycap->video_endpointnumber = okepn[isokalt - 1];
+ JOM(4, "%i=video_endpointnumber\n", peasycap->video_endpointnumber);
+ maxpacketsize = okmps[isokalt - 1];
+ if (USB_2_0_MAXPACKETSIZE > maxpacketsize) {
+ peasycap->video_isoc_maxframesize = maxpacketsize;
} else {
- maxpacketsize = okmps[isokmps - 1] - 1024;
- if (USB_2_0_MAXPACKETSIZE > maxpacketsize) {
- peasycap->video_isoc_maxframesize = maxpacketsize;
- } else {
- peasycap->video_isoc_maxframesize = \
- USB_2_0_MAXPACKETSIZE;
- }
- JOT(4, "%i=video_isoc_maxframesize\n", \
- peasycap->video_isoc_maxframesize);
- if (0 >= peasycap->video_isoc_maxframesize) {
- SAY("ERROR: bad video_isoc_maxframesize\n");
- return -ENOENT;
- }
- peasycap->video_isoc_framesperdesc = VIDEO_ISOC_FRAMESPERDESC;
- JOT(4, "%i=video_isoc_framesperdesc\n", \
- peasycap->video_isoc_framesperdesc);
- if (0 >= peasycap->video_isoc_framesperdesc) {
- SAY("ERROR: bad video_isoc_framesperdesc\n");
- return -ENOENT;
- }
- peasycap->video_isoc_buffer_size = \
- peasycap->video_isoc_maxframesize * \
- peasycap->video_isoc_framesperdesc;
- JOT(4, "%i=video_isoc_buffer_size\n", \
- peasycap->video_isoc_buffer_size);
- if ((PAGE_SIZE << VIDEO_ISOC_ORDER) < \
- peasycap->video_isoc_buffer_size) {
- SAY("MISTAKE: " \
- "peasycap->video_isoc_buffer_size too big\n");
- return -EFAULT;
- }
+ peasycap->video_isoc_maxframesize = \
+ USB_2_0_MAXPACKETSIZE;
+ }
+ JOM(4, "%i=video_isoc_maxframesize\n", \
+ peasycap->video_isoc_maxframesize);
+ if (0 >= peasycap->video_isoc_maxframesize) {
+ SAM("ERROR: bad video_isoc_maxframesize\n");
+ SAM(" possibly because port is USB 1.1\n");
+ return -ENOENT;
+ }
+ peasycap->video_isoc_framesperdesc = VIDEO_ISOC_FRAMESPERDESC;
+ JOM(4, "%i=video_isoc_framesperdesc\n", \
+ peasycap->video_isoc_framesperdesc);
+ if (0 >= peasycap->video_isoc_framesperdesc) {
+ SAM("ERROR: bad video_isoc_framesperdesc\n");
+ return -ENOENT;
+ }
+ peasycap->video_isoc_buffer_size = \
+ peasycap->video_isoc_maxframesize * \
+ peasycap->video_isoc_framesperdesc;
+ JOM(4, "%i=video_isoc_buffer_size\n", \
+ peasycap->video_isoc_buffer_size);
+ if ((PAGE_SIZE << VIDEO_ISOC_ORDER) < \
+ peasycap->video_isoc_buffer_size) {
+ SAM("MISTAKE: peasycap->video_isoc_buffer_size too big\n");
+ return -EFAULT;
}
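The streaming parameters above reduce to short arithmetic: the endpoint's packet size is clamped to the USB 2.0 maximum, the per-descriptor buffer is that frame size times VIDEO_ISOC_FRAMESPERDESC, and the result must fit inside the PAGE_SIZE << VIDEO_ISOC_ORDER allocation made later. A worked example with made-up numbers follows; USB2_MAXPACKET, FRAMES_PER_DESC and ISOC_ORDER are illustrative constants, not the driver's real values.

#include <stdio.h>

#define PAGE_SZ         4096
#define USB2_MAXPACKET  3072    /* 1024 bytes x 3 transactions per microframe */
#define FRAMES_PER_DESC 128
#define ISOC_ORDER      7       /* buffer allocated as PAGE_SZ << ISOC_ORDER */

int main(void)
{
        int maxpacket = 3072;                   /* as reported by the endpoint */
        int framesize = (maxpacket < USB2_MAXPACKET) ? maxpacket
                                                     : USB2_MAXPACKET;
        long buffer_size = (long)framesize * FRAMES_PER_DESC;
        long allocation  = (long)PAGE_SZ << ISOC_ORDER;

        printf("framesize=%d buffer=%ld allocation=%ld fits=%s\n",
               framesize, buffer_size, allocation,
               buffer_size <= allocation ? "yes" : "no");
        return 0;
}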
/*---------------------------------------------------------------------------*/
if (-1 == peasycap->video_interface) {
- SAY("MISTAKE: video_interface is unset\n");
+ SAM("MISTAKE: video_interface is unset\n");
return -EFAULT;
}
if (-1 == peasycap->video_altsetting_on) {
- SAY("MISTAKE: video_altsetting_on is unset\n");
+ SAM("MISTAKE: video_altsetting_on is unset\n");
return -EFAULT;
}
if (-1 == peasycap->video_altsetting_off) {
- SAY("MISTAKE: video_interface_off is unset\n");
+ SAM("MISTAKE: video_interface_off is unset\n");
return -EFAULT;
}
if (-1 == peasycap->video_endpointnumber) {
- SAY("MISTAKE: video_endpointnumber is unset\n");
+ SAM("MISTAKE: video_endpointnumber is unset\n");
return -EFAULT;
}
if (-1 == peasycap->video_isoc_maxframesize) {
- SAY("MISTAKE: video_isoc_maxframesize is unset\n");
+ SAM("MISTAKE: video_isoc_maxframesize is unset\n");
return -EFAULT;
}
if (-1 == peasycap->video_isoc_buffer_size) {
- SAY("MISTAKE: video_isoc_buffer_size is unset\n");
+ SAM("MISTAKE: video_isoc_buffer_size is unset\n");
return -EFAULT;
}
/*---------------------------------------------------------------------------*/
@@ -3588,20 +4157,20 @@ case 0: {
INIT_LIST_HEAD(&(peasycap->urb_video_head));
peasycap->purb_video_head = &(peasycap->urb_video_head);
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i frame buffers of size %li\n", \
+ JOM(4, "allocating %i frame buffers of size %li\n", \
FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
- JOT(4, ".... each scattered over %li pages\n", \
+ JOM(4, ".... each scattered over %li pages\n", \
FRAME_BUFFER_SIZE/PAGE_SIZE);
for (k = 0; k < FRAME_BUFFER_MANY; k++) {
for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
if ((void *)NULL != peasycap->frame_buffer[k][m].pgo)
- SAY("attempting to reallocate frame " \
+ SAM("attempting to reallocate frame " \
" buffers\n");
else {
pbuf = (void *)__get_free_page(GFP_KERNEL);
if ((void *)NULL == pbuf) {
- SAY("ERROR: Could not allocate frame "\
+ SAM("ERROR: Could not allocate frame "\
"buffer %i page %i\n", k, m);
return -ENOMEM;
} else
@@ -3615,23 +4184,23 @@ case 0: {
peasycap->frame_fill = 0;
peasycap->frame_read = 0;
- JOT(4, "allocation of frame buffers done: %i pages\n", k * \
+ JOM(4, "allocation of frame buffers done: %i pages\n", k * \
m);
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i field buffers of size %li\n", \
+ JOM(4, "allocating %i field buffers of size %li\n", \
FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
- JOT(4, ".... each scattered over %li pages\n", \
+ JOM(4, ".... each scattered over %li pages\n", \
FIELD_BUFFER_SIZE/PAGE_SIZE);
for (k = 0; k < FIELD_BUFFER_MANY; k++) {
for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
- SAY("ERROR: attempting to reallocate " \
+ SAM("ERROR: attempting to reallocate " \
"field buffers\n");
} else {
pbuf = (void *) __get_free_page(GFP_KERNEL);
if ((void *)NULL == pbuf) {
- SAY("ERROR: Could not allocate field" \
+ SAM("ERROR: Could not allocate field" \
" buffer %i page %i\n", k, m);
return -ENOMEM;
}
@@ -3647,18 +4216,18 @@ case 0: {
peasycap->field_fill = 0;
peasycap->field_page = 0;
peasycap->field_read = 0;
- JOT(4, "allocation of field buffers done: %i pages\n", k * \
+ JOM(4, "allocation of field buffers done: %i pages\n", k * \
m);
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i isoc video buffers of size %i\n", \
+ JOM(4, "allocating %i isoc video buffers of size %i\n", \
VIDEO_ISOC_BUFFER_MANY, \
peasycap->video_isoc_buffer_size);
- JOT(4, ".... each occupying contiguous memory pages\n");
+ JOM(4, ".... each occupying contiguous memory pages\n");
for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
pbuf = (void *)__get_free_pages(GFP_KERNEL, VIDEO_ISOC_ORDER);
if (NULL == pbuf) {
- SAY("ERROR: Could not allocate isoc video buffer " \
+ SAM("ERROR: Could not allocate isoc video buffer " \
"%i\n", k);
return -ENOMEM;
} else
@@ -3670,26 +4239,26 @@ case 0: {
peasycap->video_isoc_buffer_size;
peasycap->video_isoc_buffer[k].kount = k;
}
- JOT(4, "allocation of isoc video buffers done: %i pages\n", \
+ JOM(4, "allocation of isoc video buffers done: %i pages\n", \
k * (0x01 << VIDEO_ISOC_ORDER));
/*---------------------------------------------------------------------------*/
/*
* ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
*/
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
- JOT(4, "using %i=peasycap->video_isoc_framesperdesc\n", \
+ JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n", \
peasycap->video_isoc_framesperdesc);
- JOT(4, "using %i=peasycap->video_isoc_maxframesize\n", \
+ JOM(4, "using %i=peasycap->video_isoc_maxframesize\n", \
peasycap->video_isoc_maxframesize);
- JOT(4, "using %i=peasycap->video_isoc_buffer_sizen", \
+ JOM(4, "using %i=peasycap->video_isoc_buffer_sizen", \
peasycap->video_isoc_buffer_size);
for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc, \
GFP_KERNEL);
if (NULL == purb) {
- SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer " \
"%i\n", k);
return -ENOMEM;
} else
@@ -3697,7 +4266,7 @@ case 0: {
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (NULL == pdata_urb) {
- SAY("ERROR: Could not allocate struct data_urb.\n");
+ SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
} else
peasycap->allocation_video_struct += \
@@ -3714,30 +4283,30 @@ case 0: {
*/
/*---------------------------------------------------------------------------*/
if (!k) {
- JOT(4, "initializing video urbs thus:\n");
- JOT(4, " purb->interval = 1;\n");
- JOT(4, " purb->dev = peasycap->pusb_device;\n");
- JOT(4, " purb->pipe = usb_rcvisocpipe" \
+ JOM(4, "initializing video urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe" \
"(peasycap->pusb_device,%i);\n", \
peasycap->video_endpointnumber);
- JOT(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOT(4, " purb->transfer_buffer = peasycap->" \
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = peasycap->" \
"video_isoc_buffer[.].pgo;\n");
- JOT(4, " purb->transfer_buffer_length = %i;\n", \
+ JOM(4, " purb->transfer_buffer_length = %i;\n", \
peasycap->video_isoc_buffer_size);
- JOT(4, " purb->complete = easycap_complete;\n");
- JOT(4, " purb->context = peasycap;\n");
- JOT(4, " purb->start_frame = 0;\n");
- JOT(4, " purb->number_of_packets = %i;\n", \
+ JOM(4, " purb->complete = easycap_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n", \
peasycap->video_isoc_framesperdesc);
- JOT(4, " for (j = 0; j < %i; j++)\n", \
+ JOM(4, " for (j = 0; j < %i; j++)\n", \
peasycap->video_isoc_framesperdesc);
- JOT(4, " {\n");
- JOT(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
peasycap->video_isoc_maxframesize);
- JOT(4, " purb->iso_frame_desc[j].length = %i;\n", \
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n", \
peasycap->video_isoc_maxframesize);
- JOT(4, " }\n");
+ JOM(4, " }\n");
}
purb->interval = 1;
@@ -3759,13 +4328,33 @@ case 0: {
peasycap->video_isoc_maxframesize;
}
}
- JOT(4, "allocation of %i struct urb done.\n", k);
+ JOM(4, "allocation of %i struct urb done.\n", k);
/*--------------------------------------------------------------------------*/
/*
* SAVE POINTER peasycap IN THIS INTERFACE.
*/
/*--------------------------------------------------------------------------*/
usb_set_intfdata(pusb_interface, peasycap);
+/*---------------------------------------------------------------------------*/
+/*
+ * IT IS ESSENTIAL TO INITIALIZE THE HARDWARE BEFORE, RATHER THAN AFTER,
+ * THE DEVICE IS REGISTERED, BECAUSE SOME VERSIONS OF THE videodev MODULE
+ * CALL easycap_open() IMMEDIATELY AFTER REGISTRATION, CAUSING A CLASH.
+ * BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
+#if defined(PREFER_NTSC)
+ peasycap->ntsc = true;
+ JOM(8, "defaulting initially to NTSC\n");
+#else
+ peasycap->ntsc = false;
+ JOM(8, "defaulting initially to PAL\n");
+#endif /*PREFER_NTSC*/
+ rc = reset(peasycap);
+ if (0 != rc) {
+ SAM("ERROR: reset() returned %i\n", rc);
+ return -EFAULT;
+ }
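The comment above states an ordering rule: the hardware (default standard, then reset()) must be ready before registration, because open() may run the instant the node appears. The toy sketch below only illustrates that rule; device_ready, do_register() and do_open() are invented stand-ins.

#include <stdbool.h>
#include <stdio.h>

static bool device_ready;

static int do_open(void)                /* may run immediately after registration */
{
        return device_ready ? 0 : -1;   /* -1 models the clash warned about above */
}

static void do_register(void)
{
        /* registration makes the node visible; pretend open() races in now */
        printf("open right after register: %d\n", do_open());
}

int main(void)
{
        device_ready = true;            /* the reset()/default-standard step comes first */
        do_register();                  /* only then expose the device */
        return 0;
}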
/*--------------------------------------------------------------------------*/
/*
* THE VIDEO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
@@ -3776,48 +4365,58 @@ case 0: {
err("Not able to get a minor for this device");
usb_set_intfdata(pusb_interface, NULL);
return -ENODEV;
- } else
+ } else {
(peasycap->registered_video)++;
- SAY("easycap attached to minor #%d\n", pusb_interface->minor);
- break;
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
-#else
- pvideo_device = (struct video_device *)\
- kzalloc(sizeof(struct video_device), GFP_KERNEL);
- if ((struct video_device *)NULL == pvideo_device) {
- SAY("ERROR: Could not allocate structure video_device\n");
- return -ENOMEM;
+ SAM("easycap attached to minor #%d\n", pusb_interface->minor);
+ break;
}
- if (VIDEO_DEVICE_MANY <= video_device_many) {
- SAY("ERROR: Too many /dev/videos\n");
- return -ENOMEM;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+ if (0 != (v4l2_device_register(&(pusb_interface->dev), \
+ &(peasycap->v4l2_device)))) {
+ SAM("v4l2_device_register() failed\n");
+ return -ENODEV;
+ } else {
+ JOM(4, "registered device instance: %s\n", \
+ &(peasycap->v4l2_device.name[0]));
}
- pvideo_array[video_device_many] = pvideo_device; video_device_many++;
+/*---------------------------------------------------------------------------*/
+/*
+ * FIXME
+ *
+ *
+ * THIS IS BELIEVED TO BE HARMLESS, BUT MAY WELL BE UNNECESSARY OR WRONG:
+*/
+/*---------------------------------------------------------------------------*/
+ peasycap->video_device.v4l2_dev = (struct v4l2_device *)NULL;
+/*---------------------------------------------------------------------------*/
+
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
- strcpy(&pvideo_device->name[0], "easycapdc60");
+ strcpy(&peasycap->video_device.name[0], "easycapdc60");
#if defined(EASYCAP_NEEDS_V4L2_FOPS)
- pvideo_device->fops = &v4l2_fops;
+ peasycap->video_device.fops = &v4l2_fops;
#else
- pvideo_device->fops = &easycap_fops;
+ peasycap->video_device.fops = &easycap_fops;
#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
- pvideo_device->minor = -1;
- pvideo_device->release = (void *)(&videodev_release);
+ peasycap->video_device.minor = -1;
+ peasycap->video_device.release = (void *)(&videodev_release);
- video_set_drvdata(pvideo_device, (void *)peasycap);
+ video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
- rc = video_register_device(pvideo_device, VFL_TYPE_GRABBER, -1);
- if (0 != rc) {
+ if (0 != (video_register_device(&(peasycap->video_device), \
+ VFL_TYPE_GRABBER, -1))) {
err("Not able to register with videodev");
- videodev_release(pvideo_device);
+ videodev_release(&(peasycap->video_device));
return -ENODEV;
} else {
- peasycap->pvideo_device = pvideo_device;
(peasycap->registered_video)++;
- JOT(4, "registered with videodev: %i=minor\n", \
- pvideo_device->minor);
+ SAM("registered with videodev: %i=minor\n", \
+ peasycap->video_device.minor);
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
break;
}
/*--------------------------------------------------------------------------*/
@@ -3827,125 +4426,118 @@ case 0: {
*/
/*--------------------------------------------------------------------------*/
case 1: {
+ if (!peasycap) {
+ SAM("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+ }
/*--------------------------------------------------------------------------*/
/*
* SAVE POINTER peasycap IN INTERFACE 1
*/
/*--------------------------------------------------------------------------*/
usb_set_intfdata(pusb_interface, peasycap);
- JOT(4, "no initialization required for interface %i\n", \
+ JOM(4, "no initialization required for interface %i\n", \
pusb_interface_descriptor->bInterfaceNumber);
break;
}
/*--------------------------------------------------------------------------*/
case 2: {
if (!peasycap) {
- SAY("MISTAKE: peasycap is NULL\n");
+ SAM("MISTAKE: peasycap is NULL\n");
return -EFAULT;
}
if (!isokalt) {
- SAY("ERROR: no viable audio_altsetting_on\n");
+ SAM("ERROR: no viable audio_altsetting_on\n");
return -ENOENT;
} else {
peasycap->audio_altsetting_on = okalt[isokalt - 1];
- JOT(4, "%i=audio_altsetting_on <====\n", \
+ JOM(4, "%i=audio_altsetting_on <====\n", \
peasycap->audio_altsetting_on);
}
- if (!isokepn) {
- SAY("ERROR: no viable audio_endpointnumber\n");
+
+ peasycap->audio_endpointnumber = okepn[isokalt - 1];
+ JOM(4, "%i=audio_endpointnumber\n", peasycap->audio_endpointnumber);
+
+ peasycap->audio_isoc_maxframesize = okmps[isokalt - 1];
+ JOM(4, "%i=audio_isoc_maxframesize\n", \
+ peasycap->audio_isoc_maxframesize);
+ if (0 >= peasycap->audio_isoc_maxframesize) {
+ SAM("ERROR: bad audio_isoc_maxframesize\n");
return -ENOENT;
- } else {
- peasycap->audio_endpointnumber = okepn[isokepn - 1];
- JOT(4, "%i=audio_endpointnumber\n", \
- peasycap->audio_endpointnumber);
}
- if (!isokmps) {
- SAY("ERROR: no viable audio_maxpacketsize\n");
- return -ENOENT;
+ if (9 == peasycap->audio_isoc_maxframesize) {
+ peasycap->ilk |= 0x02;
+ SAM("hardware is FOUR-CVBS\n");
+ peasycap->microphone = true;
+ peasycap->audio_pages_per_fragment = 4;
+ } else if (256 == peasycap->audio_isoc_maxframesize) {
+ peasycap->ilk &= ~0x02;
+ SAM("hardware is CVBS+S-VIDEO\n");
+ peasycap->microphone = false;
+ peasycap->audio_pages_per_fragment = 4;
} else {
- peasycap->audio_isoc_maxframesize = okmps[isokmps - 1];
- JOT(4, "%i=audio_isoc_maxframesize\n", \
- peasycap->audio_isoc_maxframesize);
- if (0 >= peasycap->audio_isoc_maxframesize) {
- SAY("ERROR: bad audio_isoc_maxframesize\n");
- return -ENOENT;
- }
- if (9 == peasycap->audio_isoc_maxframesize) {
- peasycap->ilk |= 0x02;
- SAY("hardware is FOUR-CVBS\n");
- peasycap->microphone = true;
- peasycap->audio_pages_per_fragment = 4;
- } else if (256 == peasycap->audio_isoc_maxframesize) {
- peasycap->ilk &= ~0x02;
- SAY("hardware is CVBS+S-VIDEO\n");
- peasycap->microphone = false;
- peasycap->audio_pages_per_fragment = 4;
- } else {
- SAY("hardware is unidentified:\n");
- SAY("%i=audio_isoc_maxframesize\n", \
+ SAM("hardware is unidentified:\n");
+ SAM("%i=audio_isoc_maxframesize\n", \
peasycap->audio_isoc_maxframesize);
- return -ENOENT;
- }
+ return -ENOENT;
+ }
- peasycap->audio_bytes_per_fragment = \
+ peasycap->audio_bytes_per_fragment = \
peasycap->audio_pages_per_fragment * \
PAGE_SIZE ;
- peasycap->audio_buffer_page_many = (AUDIO_FRAGMENT_MANY * \
+ peasycap->audio_buffer_page_many = (AUDIO_FRAGMENT_MANY * \
peasycap->audio_pages_per_fragment);
- JOT(4, "%6i=AUDIO_FRAGMENT_MANY\n", AUDIO_FRAGMENT_MANY);
- JOT(4, "%6i=audio_pages_per_fragment\n", \
+ JOM(4, "%6i=AUDIO_FRAGMENT_MANY\n", AUDIO_FRAGMENT_MANY);
+ JOM(4, "%6i=audio_pages_per_fragment\n", \
peasycap->audio_pages_per_fragment);
- JOT(4, "%6i=audio_bytes_per_fragment\n", \
+ JOM(4, "%6i=audio_bytes_per_fragment\n", \
peasycap->audio_bytes_per_fragment);
- JOT(4, "%6i=audio_buffer_page_many\n", \
+ JOM(4, "%6i=audio_buffer_page_many\n", \
peasycap->audio_buffer_page_many);
- peasycap->audio_isoc_framesperdesc = 128;
+ peasycap->audio_isoc_framesperdesc = 128;
- JOT(4, "%i=audio_isoc_framesperdesc\n", \
+ JOM(4, "%i=audio_isoc_framesperdesc\n", \
peasycap->audio_isoc_framesperdesc);
- if (0 >= peasycap->audio_isoc_framesperdesc) {
- SAY("ERROR: bad audio_isoc_framesperdesc\n");
- return -ENOENT;
- }
+ if (0 >= peasycap->audio_isoc_framesperdesc) {
+ SAM("ERROR: bad audio_isoc_framesperdesc\n");
+ return -ENOENT;
+ }
- peasycap->audio_isoc_buffer_size = \
+ peasycap->audio_isoc_buffer_size = \
peasycap->audio_isoc_maxframesize * \
peasycap->audio_isoc_framesperdesc;
- JOT(4, "%i=audio_isoc_buffer_size\n", \
+ JOM(4, "%i=audio_isoc_buffer_size\n", \
peasycap->audio_isoc_buffer_size);
- if (AUDIO_ISOC_BUFFER_SIZE < \
- peasycap->audio_isoc_buffer_size) {
- SAY("MISTAKE: audio_isoc_buffer_size bigger "
+ if (AUDIO_ISOC_BUFFER_SIZE < peasycap->audio_isoc_buffer_size) {
+ SAM("MISTAKE: audio_isoc_buffer_size bigger "
"than %li=AUDIO_ISOC_BUFFER_SIZE\n", \
AUDIO_ISOC_BUFFER_SIZE);
- return -EFAULT;
- }
+ return -EFAULT;
}
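Above, the audio endpoint's maximum frame size doubles as a hardware fingerprint: 9 identifies the FOUR-CVBS variant (which has a microphone), 256 identifies CVBS+S-VIDEO, and anything else is rejected with -ENOENT. A compact sketch of that decision follows; enum easycap_variant and identify_variant() are illustrative names.

#include <stdio.h>

enum easycap_variant { VARIANT_UNKNOWN, VARIANT_FOUR_CVBS, VARIANT_CVBS_SVIDEO };

static enum easycap_variant identify_variant(int audio_maxframesize)
{
        switch (audio_maxframesize) {
        case 9:                         /* model with a microphone */
                return VARIANT_FOUR_CVBS;
        case 256:                       /* model without a microphone */
                return VARIANT_CVBS_SVIDEO;
        default:
                return VARIANT_UNKNOWN; /* matches the -ENOENT path above */
        }
}

int main(void)
{
        printf("%d %d %d\n", identify_variant(9), identify_variant(256),
               identify_variant(192));
        return 0;
}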
-
if (-1 == peasycap->audio_interface) {
- SAY("MISTAKE: audio_interface is unset\n");
+ SAM("MISTAKE: audio_interface is unset\n");
return -EFAULT;
}
if (-1 == peasycap->audio_altsetting_on) {
- SAY("MISTAKE: audio_altsetting_on is unset\n");
+ SAM("MISTAKE: audio_altsetting_on is unset\n");
return -EFAULT;
}
if (-1 == peasycap->audio_altsetting_off) {
- SAY("MISTAKE: audio_interface_off is unset\n");
+ SAM("MISTAKE: audio_interface_off is unset\n");
return -EFAULT;
}
if (-1 == peasycap->audio_endpointnumber) {
- SAY("MISTAKE: audio_endpointnumber is unset\n");
+ SAM("MISTAKE: audio_endpointnumber is unset\n");
return -EFAULT;
}
if (-1 == peasycap->audio_isoc_maxframesize) {
- SAY("MISTAKE: audio_isoc_maxframesize is unset\n");
+ SAM("MISTAKE: audio_isoc_maxframesize is unset\n");
return -EFAULT;
}
if (-1 == peasycap->audio_isoc_buffer_size) {
- SAY("MISTAKE: audio_isoc_buffer_size is unset\n");
+ SAM("MISTAKE: audio_isoc_buffer_size is unset\n");
return -EFAULT;
}
/*---------------------------------------------------------------------------*/
@@ -3956,17 +4548,17 @@ case 2: {
INIT_LIST_HEAD(&(peasycap->urb_audio_head));
peasycap->purb_audio_head = &(peasycap->urb_audio_head);
- JOT(4, "allocating an audio buffer\n");
- JOT(4, ".... scattered over %i pages\n", \
+ JOM(4, "allocating an audio buffer\n");
+ JOM(4, ".... scattered over %i pages\n", \
peasycap->audio_buffer_page_many);
for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
- SAY("ERROR: attempting to reallocate audio buffers\n");
+ SAM("ERROR: attempting to reallocate audio buffers\n");
} else {
pbuf = (void *) __get_free_page(GFP_KERNEL);
if ((void *)NULL == pbuf) {
- SAY("ERROR: Could not allocate audio " \
+ SAM("ERROR: Could not allocate audio " \
"buffer page %i\n", k);
return -ENOMEM;
} else
@@ -3979,16 +4571,16 @@ case 2: {
peasycap->audio_fill = 0;
peasycap->audio_read = 0;
- JOT(4, "allocation of audio buffer done: %i pages\n", k);
+ JOM(4, "allocation of audio buffer done: %i pages\n", k);
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i isoc audio buffers of size %i\n", \
+ JOM(4, "allocating %i isoc audio buffers of size %i\n", \
AUDIO_ISOC_BUFFER_MANY, peasycap->audio_isoc_buffer_size);
- JOT(4, ".... each occupying contiguous memory pages\n");
+ JOM(4, ".... each occupying contiguous memory pages\n");
for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
pbuf = (void *)__get_free_pages(GFP_KERNEL, AUDIO_ISOC_ORDER);
if (NULL == pbuf) {
- SAY("ERROR: Could not allocate isoc audio buffer " \
+ SAM("ERROR: Could not allocate isoc audio buffer " \
"%i\n", k);
return -ENOMEM;
} else
@@ -4000,25 +4592,25 @@ case 2: {
peasycap->audio_isoc_buffer_size;
peasycap->audio_isoc_buffer[k].kount = k;
}
- JOT(4, "allocation of isoc audio buffers done.\n");
+ JOM(4, "allocation of isoc audio buffers done.\n");
/*---------------------------------------------------------------------------*/
/*
* ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
*/
/*---------------------------------------------------------------------------*/
- JOT(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
- JOT(4, "using %i=peasycap->audio_isoc_framesperdesc\n", \
+ JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n", \
peasycap->audio_isoc_framesperdesc);
- JOT(4, "using %i=peasycap->audio_isoc_maxframesize\n", \
+ JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n", \
peasycap->audio_isoc_maxframesize);
- JOT(4, "using %i=peasycap->audio_isoc_buffer_size\n", \
+ JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n", \
peasycap->audio_isoc_buffer_size);
for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc, \
GFP_KERNEL);
if (NULL == purb) {
- SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer " \
"%i\n", k);
return -ENOMEM;
} else
@@ -4026,7 +4618,7 @@ case 2: {
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (NULL == pdata_urb) {
- SAY("ERROR: Could not allocate struct data_urb.\n");
+ SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
} else
peasycap->allocation_audio_struct += \
@@ -4043,30 +4635,30 @@ case 2: {
*/
/*---------------------------------------------------------------------------*/
if (!k) {
- JOT(4, "initializing audio urbs thus:\n");
- JOT(4, " purb->interval = 1;\n");
- JOT(4, " purb->dev = peasycap->pusb_device;\n");
- JOT(4, " purb->pipe = usb_rcvisocpipe(peasycap->" \
+ JOM(4, "initializing audio urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe(peasycap->" \
"pusb_device,%i);\n", \
peasycap->audio_endpointnumber);
- JOT(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOT(4, " purb->transfer_buffer = " \
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = " \
"peasycap->audio_isoc_buffer[.].pgo;\n");
- JOT(4, " purb->transfer_buffer_length = %i;\n", \
+ JOM(4, " purb->transfer_buffer_length = %i;\n", \
peasycap->audio_isoc_buffer_size);
- JOT(4, " purb->complete = easysnd_complete;\n");
- JOT(4, " purb->context = peasycap;\n");
- JOT(4, " purb->start_frame = 0;\n");
- JOT(4, " purb->number_of_packets = %i;\n", \
+ JOM(4, " purb->complete = easysnd_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n", \
peasycap->audio_isoc_framesperdesc);
- JOT(4, " for (j = 0; j < %i; j++)\n", \
+ JOM(4, " for (j = 0; j < %i; j++)\n", \
peasycap->audio_isoc_framesperdesc);
- JOT(4, " {\n");
- JOT(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",\
peasycap->audio_isoc_maxframesize);
- JOT(4, " purb->iso_frame_desc[j].length = %i;\n", \
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n", \
peasycap->audio_isoc_maxframesize);
- JOT(4, " }\n");
+ JOM(4, " }\n");
}
purb->interval = 1;
@@ -4088,7 +4680,7 @@ case 2: {
peasycap->audio_isoc_maxframesize;
}
}
- JOT(4, "allocation of %i struct urb done.\n", k);
+ JOM(4, "allocation of %i struct urb done.\n", k);
/*---------------------------------------------------------------------------*/
/*
* SAVE POINTER peasycap IN THIS INTERFACE.
@@ -4105,14 +4697,18 @@ case 2: {
err("Not able to get a minor for this device.");
usb_set_intfdata(pusb_interface, NULL);
return -ENODEV;
- } else
+ } else {
+ JOM(8, "kref_get() with %i=peasycap->kref.refcount.counter\n",\
+ (int)peasycap->kref.refcount.counter);
+ kref_get(&peasycap->kref);
(peasycap->registered_audio)++;
+ }
/*---------------------------------------------------------------------------*/
/*
* LET THE USER KNOW WHAT NODE THE AUDIO DEVICE IS ATTACHED TO.
*/
/*---------------------------------------------------------------------------*/
- SAY("easysnd attached to minor #%d\n", pusb_interface->minor);
+ SAM("easysnd attached to minor #%d\n", pusb_interface->minor);
break;
}
/*---------------------------------------------------------------------------*/
@@ -4121,20 +4717,19 @@ case 2: {
*/
/*---------------------------------------------------------------------------*/
default: {
- JOT(4, "ERROR: unexpected interface %i\n", bInterfaceNumber);
+ JOM(4, "ERROR: unexpected interface %i\n", bInterfaceNumber);
return -EINVAL;
}
}
-JOT(4, "ends successfully for interface %i\n", \
+JOM(4, "ends successfully for interface %i\n", \
pusb_interface_descriptor->bInterfaceNumber);
return 0;
}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
- * WHEN THIS FUNCTION IS CALLED THE DEVICE HAS ALREADY BEEN PHYSICALLY
- * UNPLUGGED.
- * HENCE peasycap->pusb_device IS NO LONGER VALID AND MUST BE SET TO NULL.
+ * WHEN THIS FUNCTION IS CALLED THE EasyCAP HAS ALREADY BEEN PHYSICALLY
+ * UNPLUGGED. HENCE peasycap->pusb_device IS NO LONGER VALID.
*/
/*---------------------------------------------------------------------------*/
void
@@ -4147,7 +4742,14 @@ struct easycap *peasycap;
struct list_head *plist_head;
struct data_urb *pdata_urb;
-int minor, m;
+int minor, m, kd;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device *pv4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
JOT(4, "\n");
@@ -4169,107 +4771,188 @@ bInterfaceNumber = pusb_interface_descriptor->bInterfaceNumber;
minor = pusb_interface->minor;
JOT(4, "intf[%i]: minor=%i\n", bInterfaceNumber, minor);
+if (1 == bInterfaceNumber)
+ return;
+
peasycap = usb_get_intfdata(pusb_interface);
-if ((struct easycap *)NULL == peasycap)
+if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
-else {
- peasycap->pusb_device = (struct usb_device *)NULL;
- switch (bInterfaceNumber) {
-/*---------------------------------------------------------------------------*/
- case 0: {
- if ((struct list_head *)NULL != peasycap->purb_video_head) {
- JOT(4, "killing video urbs\n");
- m = 0;
- list_for_each(plist_head, (peasycap->purb_video_head))
- {
- pdata_urb = list_entry(plist_head, \
- struct data_urb, list_head);
- if ((struct data_urb *)NULL != pdata_urb) {
- if ((struct urb *)NULL != \
- pdata_urb->purb) {
- usb_kill_urb(pdata_urb->purb);
- m++;
- }
+ return;
+}
+/*---------------------------------------------------------------------------*/
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+#
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+/*---------------------------------------------------------------------------*/
+/*
+ * SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
+ * BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
+ * REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
+ * TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
+*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ pv4l2_device = usb_get_intfdata(pusb_interface);
+ if ((struct v4l2_device *)NULL == pv4l2_device) {
+ SAY("ERROR: pv4l2_device is NULL\n");
+ return;
+ }
+ peasycap = (struct easycap *) \
+ container_of(pv4l2_device, struct easycap, v4l2_device);
+}
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return;
+}
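The recovery above relies on the classic container_of() idiom: when the videodev layer has replaced the interface data with a pointer to the embedded v4l2_device, the enclosing struct easycap is recomputed from that member, and the telltale string is checked before the pointer is trusted. The self-contained sketch below shows the same idiom in user space; struct outer, struct inner and MAGIC are invented names, and container_of is spelled out with offsetof so the example builds outside the kernel.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define MAGIC "outer-v1"

struct inner { int dummy; };

struct outer {
        char telltale[16];      /* checked before the pointer is trusted */
        struct inner member;    /* what the middle layer hands back */
};

int main(void)
{
        struct outer o;
        struct inner *handed_back = &o.member;
        struct outer *recovered;

        strcpy(o.telltale, MAGIC);
        recovered = container_of(handed_back, struct outer, member);
        printf("recovered %s object, telltale %s\n",
               recovered == &o ? "the right" : "the wrong",
               memcmp(recovered->telltale, MAGIC, strlen(MAGIC)) ? "bad" : "ok");
        return 0;
}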
+/*---------------------------------------------------------------------------*/
+/*
+ * IF THE WAIT QUEUES ARE NOT CLEARED A DEADLOCK IS POSSIBLE. BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
+peasycap->video_eof = 1;
+peasycap->audio_eof = 1;
+wake_up_interruptible(&(peasycap->wq_video));
+wake_up_interruptible(&(peasycap->wq_audio));
+/*---------------------------------------------------------------------------*/
+switch (bInterfaceNumber) {
+case 0: {
+ if ((struct list_head *)NULL != peasycap->purb_video_head) {
+ JOM(4, "killing video urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_video_head))
+ {
+ pdata_urb = list_entry(plist_head, \
+ struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != \
+ pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
}
}
- JOT(4, "%i video urbs killed\n", m);
- } else
- SAY("ERROR: peasycap->purb_video_head is NULL\n");
- break;
+ }
+ JOM(4, "%i video urbs killed\n", m);
}
+ break;
+}
/*---------------------------------------------------------------------------*/
- case 2: {
- if ((struct list_head *)NULL != peasycap->purb_audio_head) {
- JOT(4, "killing audio urbs\n");
- m = 0;
- list_for_each(plist_head, \
- (peasycap->purb_audio_head)) {
- pdata_urb = list_entry(plist_head, \
- struct data_urb, list_head);
- if ((struct data_urb *)NULL != pdata_urb) {
- if ((struct urb *)NULL != \
- pdata_urb->purb) {
- usb_kill_urb(pdata_urb->purb);
- m++;
- }
+case 2: {
+ if ((struct list_head *)NULL != peasycap->purb_audio_head) {
+ JOM(4, "killing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, \
+ (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head, \
+ struct data_urb, list_head);
+ if ((struct data_urb *)NULL != pdata_urb) {
+ if ((struct urb *)NULL != \
+ pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
}
}
- JOT(4, "%i audio urbs killed\n", m);
- } else
- SAY("ERROR: peasycap->purb_audio_head is NULL\n");
- break;
+ }
+ JOM(4, "%i audio urbs killed\n", m);
}
+ break;
+}
/*---------------------------------------------------------------------------*/
- default:
- break;
- }
+default:
+ break;
}
/*--------------------------------------------------------------------------*/
/*
* DEREGISTER
+ *
+ * THIS PROCEDURE WILL BLOCK UNTIL easycap_poll(), VIDEO IOCTL AND AUDIO
+ * IOCTL ARE ALL UNLOCKED. IF THIS IS NOT DONE AN Oops CAN OCCUR WHEN
+ * AN EasyCAP IS UNPLUGGED WHILE THE URBS ARE RUNNING. BEWARE.
*/
/*--------------------------------------------------------------------------*/
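
In outline, the blocking described above pairs the disconnect path with every I/O path through the same per-dongle mutex. A compressed, non-literal sketch of the two sides (the I/O side appears in full in easysnd_read() later in this patch):

	/* disconnect side: wake any sleeper, then exclude readers */
	kd = isdongle(peasycap);
	if (0 <= kd && DONGLE_MANY > kd) {
		wake_up_interruptible(&peasycap->wq_video);
		if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video))
			return;
		/* ... unregister devices while readers are excluded ... */
		mutex_unlock(&easycap_dongle[kd].mutex_video);
	}

	/* reader side (poll/ioctl/read): hold the same mutex for as long as
	 * peasycap is in use, so disconnect cannot free it mid-operation */
	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio))
		return -ERESTARTSYS;
	/* ... use peasycap ... */
	mutex_unlock(&easycap_dongle[kd].mutex_audio);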
+kd = isdongle(peasycap);
switch (bInterfaceNumber) {
case 0: {
+ if (0 <= kd && DONGLE_MANY > kd) {
+ wake_up_interruptible(&peasycap->wq_video);
+ JOM(4, "about to lock easycap_dongle[%i].mutex_video\n", kd);
+ if (mutex_lock_interruptible(&easycap_dongle[kd].\
+ mutex_video)) {
+ SAY("ERROR: cannot lock easycap_dongle[%i]." \
+ "mutex_video\n", kd);
+ return;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+ } else
+ SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
+/*---------------------------------------------------------------------------*/
#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
if ((struct easycap *)NULL == peasycap) {
- SAY("ERROR: peasycap has become NULL\n");
+ SAM("ERROR: peasycap has become NULL\n");
} else {
- lock_kernel();
usb_deregister_dev(pusb_interface, &easycap_class);
(peasycap->registered_video)--;
-
- JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
- unlock_kernel();
- SAY("easycap detached from minor #%d\n", minor);
+ JOM(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+ SAM("easycap detached from minor #%d\n", minor);
}
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
#else
- if ((struct easycap *)NULL == peasycap)
- SAY("ERROR: peasycap has become NULL\n");
- else {
- lock_kernel();
- video_unregister_device(peasycap->pvideo_device);
- (peasycap->registered_video)--;
- unlock_kernel();
- JOT(4, "unregistered with videodev: %i=minor\n", \
- pvideo_device->minor);
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+ if (!peasycap->v4l2_device.name[0]) {
+ SAM("ERROR: peasycap->v4l2_device.name is empty\n");
+ if (0 <= kd && DONGLE_MANY > kd)
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ return;
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+ v4l2_device_disconnect(&peasycap->v4l2_device);
+ JOM(4, "v4l2_device_disconnect() OK\n");
+ v4l2_device_unregister(&peasycap->v4l2_device);
+ JOM(4, "v4l2_device_unregister() OK\n");
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+
+ video_unregister_device(&peasycap->video_device);
+ JOM(4, "intf[%i]: video_unregister_device() OK\n", bInterfaceNumber);
+ (peasycap->registered_video)--;
+ JOM(4, "unregistered with videodev: %i=minor\n", minor);
#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+ if (0 <= kd && DONGLE_MANY > kd) {
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+ }
break;
}
case 2: {
- lock_kernel();
+ if (0 <= kd && DONGLE_MANY > kd) {
+ wake_up_interruptible(&peasycap->wq_audio);
+ JOM(4, "about to lock easycap_dongle[%i].mutex_audio\n", kd);
+ if (mutex_lock_interruptible(&easycap_dongle[kd].\
+ mutex_audio)) {
+ SAY("ERROR: cannot lock easycap_dongle[%i]." \
+ "mutex_audio\n", kd);
+ return;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+ } else
+ SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
usb_deregister_dev(pusb_interface, &easysnd_class);
- if ((struct easycap *)NULL != peasycap)
- (peasycap->registered_audio)--;
+ (peasycap->registered_audio)--;
- JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
- unlock_kernel();
+ JOM(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+ SAM("easysnd detached from minor #%d\n", minor);
- SAY("easysnd detached from minor #%d\n", minor);
+ if (0 <= kd && DONGLE_MANY > kd) {
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ JOM(4, "unlocked easycap_dongle[%i].mutex_audio\n", kd);
+ }
break;
}
default:
@@ -4280,25 +4963,42 @@ default:
* CALL easycap_delete() IF NO REMAINING REFERENCES TO peasycap
*/
/*---------------------------------------------------------------------------*/
-if ((struct easycap *)NULL == peasycap) {
- SAY("ERROR: peasycap has become NULL\n");
- SAY("cannot call kref_put()\n");
- SAY("ending unsuccessfully: may cause memory leak\n");
- return;
-}
if (!peasycap->kref.refcount.counter) {
- SAY("ERROR: peasycap->kref.refcount.counter is zero " \
+ SAM("ERROR: peasycap->kref.refcount.counter is zero "
"so cannot call kref_put()\n");
- SAY("ending unsuccessfully: may cause memory leak\n");
+ SAM("ending unsuccessfully: may cause memory leak\n");
return;
}
-JOT(4, "intf[%i]: kref_put() with %i=peasycap->kref.refcount.counter\n", \
+if (0 <= kd && DONGLE_MANY > kd) {
+ JOM(4, "about to lock easycap_dongle[%i].mutex_video\n", kd);
+ if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+ SAY("ERROR: cannot down easycap_dongle[%i].mutex_video\n", kd);
+ SAM("ending unsuccessfully: may cause memory leak\n");
+ return;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+ JOM(4, "about to lock easycap_dongle[%i].mutex_audio\n", kd);
+ if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio)) {
+ SAY("ERROR: cannot down easycap_dongle[%i].mutex_audio\n", kd);
+ mutex_unlock(&(easycap_dongle[kd].mutex_video));
+ JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+ SAM("ending unsuccessfully: may cause memory leak\n");
+ return;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+}
+JOM(4, "intf[%i]: %i=peasycap->kref.refcount.counter\n", \
bInterfaceNumber, (int)peasycap->kref.refcount.counter);
kref_put(&peasycap->kref, easycap_delete);
JOT(4, "intf[%i]: kref_put() done.\n", bInterfaceNumber);
+if (0 <= kd && DONGLE_MANY > kd) {
+ mutex_unlock(&(easycap_dongle[kd].mutex_audio));
+ JOT(4, "unlocked easycap_dongle[%i].mutex_audio\n", kd);
+ mutex_unlock(&easycap_dongle[kd].mutex_video);
+ JOT(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+}
/*---------------------------------------------------------------------------*/
-
-JOT(4, "ends\n");
+JOM(4, "ends\n");
return;
}
/*****************************************************************************/
@@ -4308,7 +5008,8 @@ easycap_module_init(void)
int result;
SAY("========easycap=======\n");
-JOT(4, "begins. %i=debug\n", easycap_debug);
+JOT(4, "begins. %i=debug %i=bars %i=gain\n", easycap_debug, easycap_bars, \
+ easycap_gain);
SAY("version: " EASYCAP_DRIVER_VERSION "\n");
/*---------------------------------------------------------------------------*/
/*
@@ -4349,6 +5050,9 @@ MODULE_AUTHOR("R.M. Thomas <rmthomas@sciolus.org>");
MODULE_DESCRIPTION(EASYCAP_DRIVER_DESCRIPTION);
MODULE_VERSION(EASYCAP_DRIVER_VERSION);
#if defined(EASYCAP_DEBUG)
-MODULE_PARM_DESC(easycap_debug, "debug: 0 (default), 1, 2,...");
+MODULE_PARM_DESC(debug, "Debug level: 0(default),1,2,...,9");
#endif /*EASYCAP_DEBUG*/
+MODULE_PARM_DESC(bars, \
+ "Testcard bars on input signal failure: 0=>no, 1=>yes(default)");
+MODULE_PARM_DESC(gain, "Audio gain: 0,...,16(default),...31");
/*****************************************************************************/
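
For reference, the two new options are ordinary module parameters; assuming the module is built as easycap.ko and the parameter names match the descriptors above, they would be set at load time with something like `modprobe easycap debug=4 bars=0 gain=12` (values chosen arbitrarily here).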
diff --git a/drivers/staging/easycap/easycap_settings.c b/drivers/staging/easycap/easycap_settings.c
index 38d94051241d..df3f17d361b1 100644
--- a/drivers/staging/easycap/easycap_settings.c
+++ b/drivers/staging/easycap/easycap_settings.c
@@ -33,11 +33,15 @@
* THE LEAST SIGNIFICANT BIT OF easycap_standard.mask HAS MEANING:
* 0 => 25 fps
* 1 => 30 fps
+ *
+ * THE MOST SIGNIFICANT BIT OF easycap_standard.mask HAS MEANING:
+ * 0 => full framerate
+ * 1 => 20% framerate
*/
/*---------------------------------------------------------------------------*/
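
Concretely, a reduced-rate entry sets the top bit on top of the existing low byte, e.g. 0x8000 | (0x00FF & PAL_BGHIN_SLOW) in the table below. A standalone check of the two documented flag bits (plain C, independent of the driver; the sample index values are invented for the illustration):

#include <stdio.h>
#include <stdint.h>

/* Decode the two flag bits documented above.  In the driver the low byte
 * comes from the PAL/NTSC standard enumeration. */
static void describe(uint16_t mask)
{
	printf("30fps=%d reduced-rate=%d\n",
	       (int)(mask & 0x0001),		/* LSB: 0 => 25 fps, 1 => 30 fps */
	       (int)((mask & 0x8000) != 0));	/* MSB: 1 => 20 percent framerate */
}

int main(void)
{
	describe(0x00FF & 0x0004);		/* a full-rate, 25 fps entry     */
	describe(0x8000 | (0x00FF & 0x000D));	/* a reduced-rate, 30 fps entry  */
	return 0;
}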
const struct easycap_standard easycap_standard[] = {
{
-.mask = 0x000F & PAL_BGHIN ,
+.mask = 0x00FF & PAL_BGHIN ,
.v4l2_standard = {
.index = PAL_BGHIN,
.id = (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
@@ -50,7 +54,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & NTSC_N_443 ,
+.mask = 0x00FF & NTSC_N_443 ,
.v4l2_standard = {
.index = NTSC_N_443,
.id = V4L2_STD_UNKNOWN,
@@ -62,7 +66,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & PAL_Nc ,
+.mask = 0x00FF & PAL_Nc ,
.v4l2_standard = {
.index = PAL_Nc,
.id = V4L2_STD_PAL_Nc,
@@ -74,7 +78,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & NTSC_N ,
+.mask = 0x00FF & NTSC_N ,
.v4l2_standard = {
.index = NTSC_N,
.id = V4L2_STD_UNKNOWN,
@@ -86,7 +90,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & SECAM ,
+.mask = 0x00FF & SECAM ,
.v4l2_standard = {
.index = SECAM,
.id = V4L2_STD_SECAM,
@@ -98,7 +102,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & NTSC_M ,
+.mask = 0x00FF & NTSC_M ,
.v4l2_standard = {
.index = NTSC_M,
.id = V4L2_STD_NTSC_M,
@@ -110,7 +114,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & NTSC_M_JP ,
+.mask = 0x00FF & NTSC_M_JP ,
.v4l2_standard = {
.index = NTSC_M_JP,
.id = V4L2_STD_NTSC_M_JP,
@@ -122,7 +126,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & PAL_60 ,
+.mask = 0x00FF & PAL_60 ,
.v4l2_standard = {
.index = PAL_60,
.id = V4L2_STD_PAL_60,
@@ -134,7 +138,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & NTSC_443 ,
+.mask = 0x00FF & NTSC_443 ,
.v4l2_standard = {
.index = NTSC_443,
.id = V4L2_STD_NTSC_443,
@@ -146,7 +150,7 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
-.mask = 0x000F & PAL_M ,
+.mask = 0x00FF & PAL_M ,
.v4l2_standard = {
.index = PAL_M,
.id = V4L2_STD_PAL_M,
@@ -158,6 +162,128 @@ const struct easycap_standard easycap_standard[] = {
},
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
{
+.mask = 0x8000 | (0x00FF & PAL_BGHIN_SLOW),
+.v4l2_standard = {
+ .index = PAL_BGHIN_SLOW,
+ .id = (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
+ V4L2_STD_PAL_I | V4L2_STD_PAL_N | \
+ (((v4l2_std_id)0x01) << 32)),
+ .name = "PAL_BGHIN_SLOW",
+ .frameperiod = {1, 5},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_N_443_SLOW),
+.v4l2_standard = {
+ .index = NTSC_N_443_SLOW,
+ .id = (V4L2_STD_UNKNOWN | (((v4l2_std_id)0x11) << 32)),
+ .name = "NTSC_N_443_SLOW",
+ .frameperiod = {1, 5},
+ .framelines = 480,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_Nc_SLOW),
+.v4l2_standard = {
+ .index = PAL_Nc_SLOW,
+ .id = (V4L2_STD_PAL_Nc | (((v4l2_std_id)0x01) << 32)),
+ .name = "PAL_Nc_SLOW",
+ .frameperiod = {1, 5},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_N_SLOW),
+.v4l2_standard = {
+ .index = NTSC_N_SLOW,
+ .id = (V4L2_STD_UNKNOWN | (((v4l2_std_id)0x21) << 32)),
+ .name = "NTSC_N_SLOW",
+ .frameperiod = {1, 5},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & SECAM_SLOW),
+.v4l2_standard = {
+ .index = SECAM_SLOW,
+ .id = (V4L2_STD_SECAM | (((v4l2_std_id)0x01) << 32)),
+ .name = "SECAM_SLOW",
+ .frameperiod = {1, 5},
+ .framelines = 625,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_M_SLOW),
+.v4l2_standard = {
+ .index = NTSC_M_SLOW,
+ .id = (V4L2_STD_NTSC_M | (((v4l2_std_id)0x01) << 32)),
+ .name = "NTSC_M_SLOW",
+ .frameperiod = {1, 6},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_M_JP_SLOW),
+.v4l2_standard = {
+ .index = NTSC_M_JP_SLOW,
+ .id = (V4L2_STD_NTSC_M_JP | (((v4l2_std_id)0x01) << 32)),
+ .name = "NTSC_M_JP_SLOW",
+ .frameperiod = {1, 6},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_60_SLOW),
+.v4l2_standard = {
+ .index = PAL_60_SLOW,
+ .id = (V4L2_STD_PAL_60 | (((v4l2_std_id)0x01) << 32)),
+ .name = "PAL_60_SLOW",
+ .frameperiod = {1, 6},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_443_SLOW),
+.v4l2_standard = {
+ .index = NTSC_443_SLOW,
+ .id = (V4L2_STD_NTSC_443 | (((v4l2_std_id)0x01) << 32)),
+ .name = "NTSC_443_SLOW",
+ .frameperiod = {1, 6},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_M_SLOW),
+.v4l2_standard = {
+ .index = PAL_M_SLOW,
+ .id = (V4L2_STD_PAL_M | (((v4l2_std_id)0x01) << 32)),
+ .name = "PAL_M_SLOW",
+ .frameperiod = {1, 6},
+ .framelines = 525,
+ .reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
.mask = 0xFFFF
}
};
@@ -165,15 +291,16 @@ const struct easycap_standard easycap_standard[] = {
/*
* THE 16-BIT easycap_format.mask HAS MEANING:
* (least significant) BIT 0: 0 => PAL, 25 FPS; 1 => NTSC, 30 FPS
- * BITS 1-3: RESERVED FOR DIFFERENTIATING STANDARDS
- * BITS 4-7: NUMBER OF BYTES PER PIXEL
+ * BITS 2-4: RESERVED FOR DIFFERENTIATING STANDARDS
+ * BITS 5-7: NUMBER OF BYTES PER PIXEL
* BIT 8: 0 => NATIVE BYTE ORDER; 1 => SWAPPED
* BITS 9-10: RESERVED FOR OTHER BYTE PERMUTATIONS
- * BIT 11: 0 => UNDECIMATED; 1 => DECIMATED
- * BIT 12: 0 => OFFER FRAMES; 1 => OFFER FIELDS
- * (most significant) BITS 13-15: RESERVED FOR OTHER FIELD ORDER OPTIONS
+ * BIT 11: 0 => UNDECIMATED; 1 => DECIMATED
+ * BIT 12: 0 => OFFER FRAMES; 1 => OFFER FIELDS
+ * BIT 13: 0 => FULL FRAMERATE; 1 => REDUCED
+ * (most significant) BITS 14-15: RESERVED FOR OTHER FIELD/FRAME OPTIONS
* IT FOLLOWS THAT:
- * bytesperpixel IS ((0x00F0 & easycap_format.mask) >> 4)
+ * bytesperpixel IS ((0x00E0 & easycap_format.mask) >> 5)
* byteswaporder IS true IF (0 != (0x0100 & easycap_format.mask))
*
* decimatepixel IS true IF (0 != (0x0800 & easycap_format.mask))
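
As a worked example of the revised layout (a standalone sketch, not driver code; the sample mask value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* e.g. an NTSC, 3-bytes-per-pixel, byte-swapped, decimated format */
	uint16_t mask = 0x0001 | (0x03 << 5) | 0x0100 | 0x0800;

	printf("ntsc          = %d\n", mask & 0x0001);
	printf("bytesperpixel = %d\n", (0x00E0 & mask) >> 5);	/* -> 3 */
	printf("byteswaporder = %d\n", 0 != (0x0100 & mask));
	printf("decimatepixel = %d\n", 0 != (0x0800 & mask));
	printf("offer fields  = %d\n", 0 != (0x1000 & mask));	/* bit 12 */
	printf("reduced rate  = %d\n", 0 != (0x2000 & mask));	/* bit 13 */
	return 0;
}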
@@ -197,65 +324,135 @@ for (i = 0, n = 0; i < STANDARD_MANY; i++) {
mask1 = 0x0000;
switch (i) {
case PAL_BGHIN: {
- mask1 = PAL_BGHIN;
+ mask1 = 0x1F & PAL_BGHIN;
strcpy(&name1[0], "PAL_BGHIN");
colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
break;
}
case SECAM: {
- mask1 = SECAM;
+ mask1 = 0x1F & SECAM;
strcpy(&name1[0], "SECAM");
colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
break;
}
case PAL_Nc: {
- mask1 = PAL_Nc;
+ mask1 = 0x1F & PAL_Nc;
strcpy(&name1[0], "PAL_Nc");
colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
break;
}
case PAL_60: {
- mask1 = PAL_60;
+ mask1 = 0x1F & PAL_60;
strcpy(&name1[0], "PAL_60");
colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
break;
}
case PAL_M: {
- mask1 = PAL_M;
+ mask1 = 0x1F & PAL_M;
strcpy(&name1[0], "PAL_M");
colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
break;
}
case NTSC_M: {
- mask1 = NTSC_M;
+ mask1 = 0x1F & NTSC_M;
strcpy(&name1[0], "NTSC_M");
colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
break;
}
case NTSC_443: {
- mask1 = NTSC_443;
+ mask1 = 0x1F & NTSC_443;
strcpy(&name1[0], "NTSC_443");
colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
break;
}
case NTSC_M_JP: {
- mask1 = NTSC_M_JP;
+ mask1 = 0x1F & NTSC_M_JP;
strcpy(&name1[0], "NTSC_M_JP");
colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
break;
}
case NTSC_N: {
- mask1 = NTSC_M;
+ mask1 = 0x1F & NTSC_M;
strcpy(&name1[0], "NTSC_N");
colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
break;
}
case NTSC_N_443: {
- mask1 = NTSC_N_443;
+ mask1 = 0x1F & NTSC_N_443;
strcpy(&name1[0], "NTSC_N_443");
colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
break;
}
+ case PAL_BGHIN_SLOW: {
+ mask1 = 0x001F & PAL_BGHIN_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "PAL_BGHIN_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case SECAM_SLOW: {
+ mask1 = 0x001F & SECAM_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "SECAM_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_Nc_SLOW: {
+ mask1 = 0x001F & PAL_Nc_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "PAL_Nc_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_60_SLOW: {
+ mask1 = 0x001F & PAL_60_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "PAL_60_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case PAL_M_SLOW: {
+ mask1 = 0x001F & PAL_M_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "PAL_M_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+ break;
+ }
+ case NTSC_M_SLOW: {
+ mask1 = 0x001F & NTSC_M_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "NTSC_M_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_443_SLOW: {
+ mask1 = 0x001F & NTSC_443_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "NTSC_443_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_M_JP_SLOW: {
+ mask1 = 0x001F & NTSC_M_JP_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "NTSC_M_JP_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_N_SLOW: {
+ mask1 = 0x001F & NTSC_N_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "NTSC_N_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
+ case NTSC_N_443_SLOW: {
+ mask1 = 0x001F & NTSC_N_443_SLOW;
+ mask1 |= 0x0200;
+ strcpy(&name1[0], "NTSC_N_443_SLOW");
+ colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+ break;
+ }
default:
return -1;
}
@@ -311,39 +508,39 @@ for (i = 0, n = 0; i < STANDARD_MANY; i++) {
case FMT_UYVY: {
strcpy(&name3[0], "_" STRINGIZE(FMT_UYVY));
pixelformat = V4L2_PIX_FMT_UYVY;
- mask3 |= (0x02 << 4);
+ mask3 |= (0x02 << 5);
break;
}
case FMT_YUY2: {
strcpy(&name3[0], "_" STRINGIZE(FMT_YUY2));
pixelformat = V4L2_PIX_FMT_YUYV;
- mask3 |= (0x02 << 4);
+ mask3 |= (0x02 << 5);
mask3 |= 0x0100;
break;
}
case FMT_RGB24: {
strcpy(&name3[0], "_" STRINGIZE(FMT_RGB24));
pixelformat = V4L2_PIX_FMT_RGB24;
- mask3 |= (0x03 << 4);
+ mask3 |= (0x03 << 5);
break;
}
case FMT_RGB32: {
strcpy(&name3[0], "_" STRINGIZE(FMT_RGB32));
pixelformat = V4L2_PIX_FMT_RGB32;
- mask3 |= (0x04 << 4);
+ mask3 |= (0x04 << 5);
break;
}
case FMT_BGR24: {
strcpy(&name3[0], "_" STRINGIZE(FMT_BGR24));
pixelformat = V4L2_PIX_FMT_BGR24;
- mask3 |= (0x03 << 4);
+ mask3 |= (0x03 << 5);
mask3 |= 0x0100;
break;
}
case FMT_BGR32: {
strcpy(&name3[0], "_" STRINGIZE(FMT_BGR32));
pixelformat = V4L2_PIX_FMT_BGR32;
- mask3 |= (0x04 << 4);
+ mask3 |= (0x04 << 5);
mask3 |= 0x0100;
break;
}
@@ -363,13 +560,8 @@ for (i = 0, n = 0; i < STANDARD_MANY; i++) {
}
case FIELD_INTERLACED: {
strcpy(&name4[0], "-i");
- field = V4L2_FIELD_INTERLACED;
- break;
- }
- case FIELD_ALTERNATE: {
- strcpy(&name4[0], "-a");
mask4 |= 0x1000;
- field = V4L2_FIELD_ALTERNATE;
+ field = V4L2_FIELD_INTERLACED;
break;
}
default:
@@ -413,7 +605,7 @@ return n;
}
/*---------------------------------------------------------------------------*/
struct v4l2_queryctrl easycap_control[] = \
- {{
+{{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
@@ -485,5 +677,5 @@ struct v4l2_queryctrl easycap_control[] = \
{
.id = 0xFFFFFFFF
}
- };
+};
/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/easycap/easycap_sound.c
index 63562bda738e..24d8bb4e449e 100644
--- a/drivers/staging/easycap/easycap_sound.c
+++ b/drivers/staging/easycap/easycap_sound.c
@@ -36,17 +36,15 @@
/*---------------------------------------------------------------------------*/
/*
* ON COMPLETION OF AN AUDIO URB ITS DATA IS COPIED TO THE AUDIO BUFFERS
- * PROVIDED peasycap->audio_idle IS ZER0. REGARDLESS OF THIS BEING TRUE,
+ * PROVIDED peasycap->audio_idle IS ZERO. REGARDLESS OF THIS BEING TRUE,
* IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
*/
/*---------------------------------------------------------------------------*/
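
Stripped of its error reporting, the completion policy described above reduces to two decisions; a non-literal sketch (copy_to_audio_buffers() is an invented placeholder for the copy loop in the real handler below, and peasycap is assumed to be recovered from purb->context as there):

static void easysnd_complete_sketch(struct urb *purb)
{
	struct easycap *peasycap = purb->context;

	/* copy only when not idle and the urb completed cleanly */
	if (!peasycap->audio_idle && !purb->status)
		copy_to_audio_buffers(peasycap, purb);	/* invented helper */

	/* regardless of idle or status, keep the isochronous stream alive */
	if (peasycap->audio_isoc_streaming)
		usb_submit_urb(purb, GFP_ATOMIC);
}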
void
easysnd_complete(struct urb *purb)
{
-static int mt;
struct easycap *peasycap;
struct data_buffer *paudio_buffer;
-char errbuf[16];
__u8 *p1, *p2;
__s16 s16;
int i, j, more, much, leap, rc;
@@ -66,48 +64,62 @@ if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL\n");
return;
}
-much = 0;
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap\n");
+ return;
+}
+much = 0;
if (peasycap->audio_idle) {
- JOT(16, "%i=audio_idle %i=audio_isoc_streaming\n", \
+ JOM(16, "%i=audio_idle %i=audio_isoc_streaming\n", \
peasycap->audio_idle, peasycap->audio_isoc_streaming);
if (peasycap->audio_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_ATOMIC);
if (0 != rc) {
- SAY("ERROR: while %i=audio_idle, " \
+ if (-ENODEV != rc)
+ SAM("ERROR: while %i=audio_idle, " \
"usb_submit_urb() failed with rc:\n", \
peasycap->audio_idle);
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n"); break;
+ SAM("-ENOMEM\n");
+ break;
}
case -ENODEV: {
- SAY("ENODEV\n"); break;
+ break;
}
case -ENXIO: {
- SAY("ENXIO\n"); break;
+ SAM("-ENXIO\n");
+ break;
}
case -EINVAL: {
- SAY("EINVAL\n"); break;
+ SAM("-EINVAL\n");
+ break;
}
case -EAGAIN: {
- SAY("EAGAIN\n"); break;
+ SAM("-EAGAIN\n");
+ break;
}
case -EFBIG: {
- SAY("EFBIG\n"); break;
+ SAM("-EFBIG\n");
+ break;
}
case -EPIPE: {
- SAY("EPIPE\n"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n");
+ break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("-ENOSPC\n");
+ break;
}
default: {
- SAY("0x%08X\n", rc); break;
+ SAM("unknown error: 0x%08X\n", rc);
+ break;
}
}
}
@@ -116,74 +128,95 @@ return;
}
/*---------------------------------------------------------------------------*/
if (purb->status) {
- if (-ESHUTDOWN == purb->status) {
- JOT(16, "immediate return because -ESHUTDOWN=purb->status\n");
+ if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
+ JOM(16, "urb status -ESHUTDOWN or -ENOENT\n");
return;
}
- SAY("ERROR: non-zero urb status:\n");
+ SAM("ERROR: non-zero urb status:\n");
switch (purb->status) {
case -EINPROGRESS: {
- SAY("-EINPROGRESS\n"); break;
+ SAM("-EINPROGRESS\n");
+ break;
}
case -ENOSR: {
- SAY("-ENOSR\n"); break;
+ SAM("-ENOSR\n");
+ break;
}
case -EPIPE: {
- SAY("-EPIPE\n"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -EOVERFLOW: {
- SAY("-EOVERFLOW\n"); break;
+ SAM("-EOVERFLOW\n");
+ break;
}
case -EPROTO: {
- SAY("-EPROTO\n"); break;
+ SAM("-EPROTO\n");
+ break;
}
case -EILSEQ: {
- SAY("-EILSEQ\n"); break;
+ SAM("-EILSEQ\n");
+ break;
}
case -ETIMEDOUT: {
- SAY("-ETIMEDOUT\n"); break;
+ SAM("-ETIMEDOUT\n");
+ break;
}
case -EMSGSIZE: {
- SAY("-EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n");
+ break;
}
case -EOPNOTSUPP: {
- SAY("-EOPNOTSUPP\n"); break;
+ SAM("-EOPNOTSUPP\n");
+ break;
}
case -EPFNOSUPPORT: {
- SAY("-EPFNOSUPPORT\n"); break;
+ SAM("-EPFNOSUPPORT\n");
+ break;
}
case -EAFNOSUPPORT: {
- SAY("-EAFNOSUPPORT\n"); break;
+ SAM("-EAFNOSUPPORT\n");
+ break;
}
case -EADDRINUSE: {
- SAY("-EADDRINUSE\n"); break;
+ SAM("-EADDRINUSE\n");
+ break;
}
case -EADDRNOTAVAIL: {
- SAY("-EADDRNOTAVAIL\n"); break;
+ SAM("-EADDRNOTAVAIL\n");
+ break;
}
case -ENOBUFS: {
- SAY("-ENOBUFS\n"); break;
+ SAM("-ENOBUFS\n");
+ break;
}
case -EISCONN: {
- SAY("-EISCONN\n"); break;
+ SAM("-EISCONN\n");
+ break;
}
case -ENOTCONN: {
- SAY("-ENOTCONN\n"); break;
+ SAM("-ENOTCONN\n");
+ break;
}
case -ESHUTDOWN: {
- SAY("-ESHUTDOWN\n"); break;
+ SAM("-ESHUTDOWN\n");
+ break;
}
case -ENOENT: {
- SAY("-ENOENT\n"); break;
+ SAM("-ENOENT\n");
+ break;
}
case -ECONNRESET: {
- SAY("-ECONNRESET\n"); break;
+ SAM("-ECONNRESET\n");
+ break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("ENOSPC\n");
+ break;
}
default: {
- SAY("unknown error code 0x%08X\n", purb->status); break;
+ SAM("unknown error code 0x%08X\n", purb->status);
+ break;
}
}
/*---------------------------------------------------------------------------*/
@@ -196,35 +229,43 @@ if (purb->status) {
if (peasycap->audio_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_ATOMIC);
if (0 != rc) {
- SAY("ERROR: while %i=audio_idle, usb_submit_urb() "
+ SAM("ERROR: while %i=audio_idle, usb_submit_urb() "
"failed with rc:\n", peasycap->audio_idle);
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n"); break;
+ SAM("-ENOMEM\n");
+ break;
}
case -ENODEV: {
- SAY("ENODEV\n"); break;
+ SAM("-ENODEV\n");
+ break;
}
case -ENXIO: {
- SAY("ENXIO\n"); break;
+ SAM("-ENXIO\n");
+ break;
}
case -EINVAL: {
- SAY("EINVAL\n"); break;
+ SAM("-EINVAL\n");
+ break;
}
case -EAGAIN: {
- SAY("EAGAIN\n"); break;
+ SAM("-EAGAIN\n");
+ break;
}
case -EFBIG: {
- SAY("EFBIG\n"); break;
+ SAM("-EFBIG\n");
+ break;
}
case -EPIPE: {
- SAY("EPIPE\n"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n");
+ break;
}
default: {
- SAY("0x%08X\n", rc); break;
+ SAM("0x%08X\n", rc); break;
}
}
}
@@ -243,72 +284,80 @@ oldaudio = peasycap->oldaudio;
for (i = 0; i < purb->number_of_packets; i++) {
switch (purb->iso_frame_desc[i].status) {
case 0: {
- strcpy(&errbuf[0], "OK"); break;
+ break;
}
case -ENOENT: {
- strcpy(&errbuf[0], "-ENOENT"); break;
+ SAM("-ENOENT\n");
+ break;
}
case -EINPROGRESS: {
- strcpy(&errbuf[0], "-EINPROGRESS"); break;
+ SAM("-EINPROGRESS\n");
+ break;
}
case -EPROTO: {
- strcpy(&errbuf[0], "-EPROTO"); break;
+ SAM("-EPROTO\n");
+ break;
}
case -EILSEQ: {
- strcpy(&errbuf[0], "-EILSEQ"); break;
+ SAM("-EILSEQ\n");
+ break;
}
case -ETIME: {
- strcpy(&errbuf[0], "-ETIME"); break;
+ SAM("-ETIME\n");
+ break;
}
case -ETIMEDOUT: {
- strcpy(&errbuf[0], "-ETIMEDOUT"); break;
+ SAM("-ETIMEDOUT\n");
+ break;
}
case -EPIPE: {
- strcpy(&errbuf[0], "-EPIPE"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -ECOMM: {
- strcpy(&errbuf[0], "-ECOMM"); break;
+ SAM("-ECOMM\n");
+ break;
}
case -ENOSR: {
- strcpy(&errbuf[0], "-ENOSR"); break;
+ SAM("-ENOSR\n");
+ break;
}
case -EOVERFLOW: {
- strcpy(&errbuf[0], "-EOVERFLOW"); break;
+ SAM("-EOVERFLOW\n");
+ break;
}
case -EREMOTEIO: {
- strcpy(&errbuf[0], "-EREMOTEIO"); break;
+ SAM("-EREMOTEIO\n");
+ break;
}
case -ENODEV: {
- strcpy(&errbuf[0], "-ENODEV"); break;
+ SAM("-ENODEV\n");
+ break;
}
case -EXDEV: {
- strcpy(&errbuf[0], "-EXDEV"); break;
+ SAM("-EXDEV\n");
+ break;
}
case -EINVAL: {
- strcpy(&errbuf[0], "-EINVAL"); break;
+ SAM("-EINVAL\n");
+ break;
}
case -ECONNRESET: {
- strcpy(&errbuf[0], "-ECONNRESET"); break;
+ SAM("-ECONNRESET\n");
+ break;
}
case -ENOSPC: {
- strcpy(&errbuf[0], "-ENOSPC"); break;
+ SAM("-ENOSPC\n");
+ break;
}
case -ESHUTDOWN: {
- strcpy(&errbuf[0], "-ESHUTDOWN"); break;
+ SAM("-ESHUTDOWN\n");
+ break;
}
default: {
- strcpy(&errbuf[0], "UNKNOWN"); break;
- }
+ SAM("unknown error:0x%08X\n", purb->iso_frame_desc[i].status);
+ break;
}
- if ((!purb->iso_frame_desc[i].status) && 0) {
- JOT(16, "frame[%2i]: %i=status{=%16s} " \
- "%5i=actual " \
- "%5i=length " \
- "%3i=offset\n", \
- i, purb->iso_frame_desc[i].status, &errbuf[0],
- purb->iso_frame_desc[i].actual_length,
- purb->iso_frame_desc[i].length,
- purb->iso_frame_desc[i].offset);
}
if (!purb->iso_frame_desc[i].status) {
more = purb->iso_frame_desc[i].actual_length;
@@ -319,11 +368,12 @@ for (i = 0; i < purb->number_of_packets; i++) {
#endif
if (!more)
- mt++;
+ peasycap->audio_mt++;
else {
- if (mt) {
- JOT(16, "%4i empty audio urb frames\n", mt);
- mt = 0;
+ if (peasycap->audio_mt) {
+ JOM(16, "%4i empty audio urb frames\n", \
+ peasycap->audio_mt);
+ peasycap->audio_mt = 0;
}
p1 = (__u8 *)(purb->transfer_buffer + \
@@ -340,13 +390,13 @@ for (i = 0; i < purb->number_of_packets; i++) {
/*---------------------------------------------------------------------------*/
while (more) {
if (0 > more) {
- SAY("easysnd_complete: MISTAKE: " \
+ SAM("easysnd_complete: MISTAKE: " \
"more is negative\n");
return;
}
if (peasycap->audio_buffer_page_many <= \
peasycap->audio_fill) {
- SAY("ERROR: bad " \
+ SAM("ERROR: bad " \
"peasycap->audio_fill\n");
return;
}
@@ -355,7 +405,7 @@ for (i = 0; i < purb->number_of_packets; i++) {
[peasycap->audio_fill];
if (PAGE_SIZE < (paudio_buffer->pto - \
paudio_buffer->pgo)) {
- SAY("ERROR: bad paudio_buffer->pto\n");
+ SAM("ERROR: bad paudio_buffer->pto\n");
return;
}
if (PAGE_SIZE == (paudio_buffer->pto - \
@@ -374,7 +424,7 @@ for (i = 0; i < purb->number_of_packets; i++) {
peasycap->audio_fill)
peasycap->audio_fill = 0;
- JOT(12, "bumped peasycap->" \
+ JOM(12, "bumped peasycap->" \
"audio_fill to %i\n", \
peasycap->audio_fill);
@@ -387,7 +437,7 @@ for (i = 0; i < purb->number_of_packets; i++) {
if (!(peasycap->audio_fill % \
peasycap->\
audio_pages_per_fragment)) {
- JOT(12, "wakeup call on wq_" \
+ JOM(12, "wakeup call on wq_" \
"audio, %i=frag reading %i" \
"=fragment fill\n", \
(peasycap->audio_read / \
@@ -414,7 +464,7 @@ for (i = 0; i < purb->number_of_packets; i++) {
} else {
#if defined(UPSAMPLE)
if (much % 16)
- JOT(8, "MISTAKE? much" \
+ JOM(8, "MISTAKE? much" \
" is not divisible by 16\n");
if (much > (16 * \
more))
@@ -468,7 +518,7 @@ for (i = 0; i < purb->number_of_packets; i++) {
}
}
} else {
- JOT(12, "discarding audio samples because " \
+ JOM(12, "discarding audio samples because " \
"%i=purb->iso_frame_desc[i].status\n", \
purb->iso_frame_desc[i].status);
}
@@ -486,38 +536,50 @@ peasycap->oldaudio = oldaudio;
if (peasycap->audio_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_ATOMIC);
if (0 != rc) {
- SAY("ERROR: while %i=audio_idle, usb_submit_urb() failed " \
+ if (-ENODEV != rc) {
+ SAM("ERROR: while %i=audio_idle, " \
+ "usb_submit_urb() failed " \
"with rc:\n", peasycap->audio_idle);
+ }
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n"); break;
+ SAM("-ENOMEM\n");
+ break;
}
case -ENODEV: {
- SAY("ENODEV\n"); break;
+ break;
}
case -ENXIO: {
- SAY("ENXIO\n"); break;
+ SAM("-ENXIO\n");
+ break;
}
case -EINVAL: {
- SAY("EINVAL\n"); break;
+ SAM("-EINVAL\n");
+ break;
}
case -EAGAIN: {
- SAY("EAGAIN\n"); break;
+ SAM("-EAGAIN\n");
+ break;
}
case -EFBIG: {
- SAY("EFBIG\n"); break;
+ SAM("-EFBIG\n");
+ break;
}
case -EPIPE: {
- SAY("EPIPE\n"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n");
+ break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ SAM("-ENOSPC\n");
+ break;
}
default: {
- SAY("0x%08X\n", rc); break;
+ SAM("unknown error: 0x%08X\n", rc);
+ break;
}
}
}
@@ -529,8 +591,7 @@ return;
/*
* THE AUDIO URBS ARE SUBMITTED AT THIS EARLY STAGE SO THAT IT IS POSSIBLE TO
* STREAM FROM /dev/easysnd1 WITH SIMPLE PROGRAMS SUCH AS cat WHICH DO NOT
- * HAVE AN IOCTL INTERFACE. THE VIDEO URBS, BY CONTRAST, MUST BE SUBMITTED
- * MUCH LATER: SEE COMMENTS IN FILE easycap_main.c.
+ * HAVE AN IOCTL INTERFACE.
*/
/*---------------------------------------------------------------------------*/
int
@@ -539,8 +600,15 @@ easysnd_open(struct inode *inode, struct file *file)
struct usb_interface *pusb_interface;
struct easycap *peasycap;
int subminor, rc;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device *pv4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-JOT(4, "begins.\n");
+JOT(4, "begins\n");
subminor = iminor(inode);
@@ -556,70 +624,90 @@ if (NULL == peasycap) {
SAY("ending unsuccessfully\n");
return -1;
}
+/*---------------------------------------------------------------------------*/
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+#
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+/*---------------------------------------------------------------------------*/
+/*
+ * SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
+ * BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
+ * REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
+ * TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
+*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ pv4l2_device = usb_get_intfdata(pusb_interface);
+ if ((struct v4l2_device *)NULL == pv4l2_device) {
+ SAY("ERROR: pv4l2_device is NULL\n");
+ return -EFAULT;
+ }
+ peasycap = (struct easycap *) \
+ container_of(pv4l2_device, struct easycap, v4l2_device);
+}
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
file->private_data = peasycap;
/*---------------------------------------------------------------------------*/
/*
- * INITIALIZATION.
+ * INITIALIZATION
*/
/*---------------------------------------------------------------------------*/
-JOT(4, "starting initialization\n");
+JOM(4, "starting initialization\n");
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
- return -EFAULT;
-} else {
- JOT(16, "0x%08lX=peasycap->pusb_device\n", \
- (long int)peasycap->pusb_device);
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -ENODEV;
}
+JOM(16, "0x%08lX=peasycap->pusb_device\n", (long int)peasycap->pusb_device);
rc = audio_setup(peasycap);
if (0 <= rc)
- JOT(8, "audio_setup() returned %i\n", rc);
+ JOM(8, "audio_setup() returned %i\n", rc);
else
- JOT(8, "easysnd open(): ERROR: audio_setup() returned %i\n", rc);
+ JOM(8, "easysnd open(): ERROR: audio_setup() returned %i\n", rc);
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device has become NULL\n");
- return -EFAULT;
-}
-rc = adjust_volume(peasycap, -8192);
-if (0 != rc) {
- SAY("ERROR: adjust_volume(default) returned %i\n", rc);
- return -EFAULT;
+ SAM("ERROR: peasycap->pusb_device has become NULL\n");
+ return -ENODEV;
}
/*---------------------------------------------------------------------------*/
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device has become NULL\n");
- return -EFAULT;
+ SAM("ERROR: peasycap->pusb_device has become NULL\n");
+ return -ENODEV;
}
rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface, \
peasycap->audio_altsetting_on);
-JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface, \
+JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface, \
peasycap->audio_altsetting_on, rc);
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device has become NULL\n");
- return -EFAULT;
-}
rc = wakeup_device(peasycap->pusb_device);
if (0 == rc)
- JOT(8, "wakeup_device() returned %i\n", rc);
+ JOM(8, "wakeup_device() returned %i\n", rc);
else
- JOT(8, "easysnd open(): ERROR: wakeup_device() returned %i\n", rc);
+ JOM(8, "ERROR: wakeup_device() returned %i\n", rc);
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device has become NULL\n");
- return -EFAULT;
-}
-submit_audio_urbs(peasycap);
+peasycap->audio_eof = 0;
peasycap->audio_idle = 0;
peasycap->timeval1.tv_sec = 0;
peasycap->timeval1.tv_usec = 0;
-JOT(4, "finished initialization\n");
+submit_audio_urbs(peasycap);
+
+JOM(4, "finished initialization\n");
return 0;
}
/*****************************************************************************/
@@ -635,11 +723,15 @@ if (NULL == peasycap) {
SAY("ERROR: peasycap is NULL.\n");
return -EFAULT;
}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
if (0 != kill_audio_urbs(peasycap)) {
- SAY("ERROR: kill_audio_urbs() failed\n");
+ SAM("ERROR: kill_audio_urbs() failed\n");
return -EFAULT;
}
-JOT(4, "ending successfully\n");
+JOM(4, "ending successfully\n");
return 0;
}
/*****************************************************************************/
@@ -648,12 +740,11 @@ easysnd_read(struct file *file, char __user *puserspacebuffer, \
size_t kount, loff_t *poff)
{
struct timeval timeval;
-static struct timeval timeval1;
-static long long int audio_bytes, above, below, mean;
+long long int above, below, mean;
struct signed_div_result sdr;
unsigned char *p0;
long int kount1, more, rc, l0, lm;
-int fragment;
+int fragment, kd;
struct easycap *peasycap;
struct data_buffer *pdata_buffer;
size_t szret;
@@ -671,23 +762,89 @@ size_t szret;
JOT(8, "===== easysnd_read(): kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
-peasycap = (struct easycap *)(file->private_data);
+if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ return -ERESTARTSYS;
+}
+peasycap = file->private_data;
if (NULL == peasycap) {
SAY("ERROR in easysnd_read(): peasycap is NULL\n");
return -EFAULT;
}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+ return -EFAULT;
+}
+if (NULL == peasycap->pusb_device) {
+ SAY("ERROR in easysnd_read(): peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&(easycap_dongle[kd].mutex_audio))) {
+ SAY("ERROR: cannot lock easycap_dongle[%i].mutex_audio\n", kd);
+ return -ERESTARTSYS;
+ }
+ JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
/*---------------------------------------------------------------------------*/
+/*
+ * MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ * IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ if (kd != isdongle(peasycap))
+ return -ERESTARTSYS;
+ if (NULL == file) {
+ SAY("ERROR: file is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+ peasycap = file->private_data;
+ if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+ if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+ SAY("ERROR: bad peasycap: 0x%08lX\n", \
+ (unsigned long int) peasycap);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+ if (NULL == peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
+ return -ERESTARTSYS;
+ }
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ * IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ * ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL HAVE FAILED. BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+ return -ERESTARTSYS;
+}
+/*---------------------------------------------------------------------------*/
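
The guard sequence above (take the dongle mutex, then re-validate everything captured before sleeping) is the patch's general answer to the disconnect race; reduced to its skeleton (non-literal, error messages omitted, every exit path unlocking):

	kd = isdongle(peasycap);
	if (0 > kd || DONGLE_MANY <= kd)
		return -ERESTARTSYS;		/* peasycap already gone */
	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio))
		return -ERESTARTSYS;
	/* disconnect may have freed peasycap while we slept on the mutex:
	 * re-derive it and re-check before trusting any earlier pointer */
	peasycap = file->private_data;
	if (NULL == peasycap || kd != isdongle(peasycap) ||
	    memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
		mutex_unlock(&easycap_dongle[kd].mutex_audio);
		return -ERESTARTSYS;
	}
	/* ... safe to proceed; unlock before every return ... */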
+if (file->f_flags & O_NONBLOCK)
+ JOT(16, "NONBLOCK kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
+else
+ JOT(8, "BLOCKING kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
+
if ((0 > peasycap->audio_read) || \
(peasycap->audio_buffer_page_many <= peasycap->audio_read)) {
- SAY("ERROR: peasycap->audio_read out of range\n");
+ SAM("ERROR: peasycap->audio_read out of range\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
if ((struct data_buffer *)NULL == pdata_buffer) {
- SAY("ERROR: pdata_buffer is NULL\n");
+ SAM("ERROR: pdata_buffer is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
-JOT(12, "before wait, %i=frag read %i=frag fill\n", \
+JOM(12, "before wait, %i=frag read %i=frag fill\n", \
(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
fragment = (peasycap->audio_read / peasycap->audio_pages_per_fragment);
@@ -695,7 +852,8 @@ while ((fragment == (peasycap->audio_fill / \
peasycap->audio_pages_per_fragment)) || \
(0 == (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo)))) {
if (file->f_flags & O_NONBLOCK) {
- JOT(16, "returning -EAGAIN as instructed\n");
+ JOM(16, "returning -EAGAIN as instructed\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EAGAIN;
}
rc = wait_event_interruptible(peasycap->wq_audio, \
@@ -704,50 +862,56 @@ while ((fragment == (peasycap->audio_fill / \
peasycap->audio_pages_per_fragment)) && \
(0 < (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo))))));
if (0 != rc) {
- SAY("aborted by signal\n");
+ SAM("aborted by signal\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -ERESTARTSYS;
}
if (peasycap->audio_eof) {
- JOT(8, "returning 0 because %i=audio_eof\n", \
+ JOM(8, "returning 0 because %i=audio_eof\n", \
peasycap->audio_eof);
kill_audio_urbs(peasycap);
- msleep(500);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return 0;
}
if (peasycap->audio_idle) {
- JOT(16, "returning 0 because %i=audio_idle\n", \
+ JOM(16, "returning 0 because %i=audio_idle\n", \
peasycap->audio_idle);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return 0;
}
if (!peasycap->audio_isoc_streaming) {
- JOT(16, "returning 0 because audio urbs not streaming\n");
+ JOM(16, "returning 0 because audio urbs not streaming\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return 0;
}
}
-JOT(12, "after wait, %i=frag read %i=frag fill\n", \
+JOM(12, "after wait, %i=frag read %i=frag fill\n", \
(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
szret = (size_t)0;
while (fragment == (peasycap->audio_read / \
peasycap->audio_pages_per_fragment)) {
if (NULL == pdata_buffer->pgo) {
- SAY("ERROR: pdata_buffer->pgo is NULL\n");
+ SAM("ERROR: pdata_buffer->pgo is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
if (NULL == pdata_buffer->pto) {
- SAY("ERROR: pdata_buffer->pto is NULL\n");
+ SAM("ERROR: pdata_buffer->pto is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
if (0 > kount1) {
- SAY("easysnd_read: MISTAKE: kount1 is negative\n");
+ SAM("easysnd_read: MISTAKE: kount1 is negative\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -ERESTARTSYS;
}
if (!kount1) {
(peasycap->audio_read)++;
if (peasycap->audio_buffer_page_many <= peasycap->audio_read)
peasycap->audio_read = 0;
- JOT(12, "bumped peasycap->audio_read to %i\n", \
+ JOM(12, "bumped peasycap->audio_read to %i\n", \
peasycap->audio_read);
if (fragment != (peasycap->audio_read / \
@@ -757,30 +921,34 @@ while (fragment == (peasycap->audio_read / \
if ((0 > peasycap->audio_read) || \
(peasycap->audio_buffer_page_many <= \
peasycap->audio_read)) {
- SAY("ERROR: peasycap->audio_read out of range\n");
+ SAM("ERROR: peasycap->audio_read out of range\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
if ((struct data_buffer *)NULL == pdata_buffer) {
- SAY("ERROR: pdata_buffer is NULL\n");
+ SAM("ERROR: pdata_buffer is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
if (NULL == pdata_buffer->pgo) {
- SAY("ERROR: pdata_buffer->pgo is NULL\n");
+ SAM("ERROR: pdata_buffer->pgo is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
if (NULL == pdata_buffer->pto) {
- SAY("ERROR: pdata_buffer->pto is NULL\n");
+ SAM("ERROR: pdata_buffer->pto is NULL\n");
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
}
- JOT(12, "ready to send %li bytes\n", (long int) kount1);
- JOT(12, "still to send %li bytes\n", (long int) kount);
+ JOM(12, "ready to send %li bytes\n", (long int) kount1);
+ JOM(12, "still to send %li bytes\n", (long int) kount);
more = kount1;
if (more > kount)
more = kount;
- JOT(12, "agreed to send %li bytes from page %i\n", \
+ JOM(12, "agreed to send %li bytes from page %i\n", \
more, peasycap->audio_read);
if (!more)
break;
@@ -798,7 +966,8 @@ while (fragment == (peasycap->audio_read / \
/*---------------------------------------------------------------------------*/
rc = copy_to_user(puserspacebuffer, pdata_buffer->pto, more);
if (0 != rc) {
- SAY("ERROR: copy_to_user() returned %li\n", rc);
+ SAM("ERROR: copy_to_user() returned %li\n", rc);
+ mutex_unlock(&easycap_dongle[kd].mutex_audio);
return -EFAULT;
}
*poff += (loff_t)more;
@@ -807,11 +976,11 @@ while (fragment == (peasycap->audio_read / \
puserspacebuffer += more;
kount -= (size_t)more;
}
-JOT(12, "after read, %i=frag read %i=frag fill\n", \
+JOM(12, "after read, %i=frag read %i=frag fill\n", \
(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
if (kount < 0) {
- SAY("MISTAKE: %li=kount %li=szret\n", \
+ SAM("MISTAKE: %li=kount %li=szret\n", \
(long int)kount, (long int)szret);
}
/*---------------------------------------------------------------------------*/
@@ -827,11 +996,11 @@ if (peasycap->audio_sample) {
mean = peasycap->audio_niveau;
sdr = signed_div(mean, peasycap->audio_sample);
- JOT(8, "%8lli=mean %8lli=meansquare after %lli samples, =>\n", \
+ JOM(8, "%8lli=mean %8lli=meansquare after %lli samples, =>\n", \
sdr.quotient, above, peasycap->audio_sample);
sdr = signed_div(above, 32768);
- JOT(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
+ JOM(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
}
/*---------------------------------------------------------------------------*/
/*
@@ -840,33 +1009,28 @@ if (peasycap->audio_sample) {
/*---------------------------------------------------------------------------*/
do_gettimeofday(&timeval);
if (!peasycap->timeval1.tv_sec) {
- audio_bytes = 0;
- timeval1 = timeval;
-
- if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
- return -ERESTARTSYS;
- peasycap->timeval1 = timeval1;
- mutex_unlock(&(peasycap->mutex_timeval1));
+ peasycap->audio_bytes = 0;
+ peasycap->timeval3 = timeval;
+ peasycap->timeval1 = peasycap->timeval3;
sdr.quotient = 192000;
} else {
- audio_bytes += (long long int) szret;
+ peasycap->audio_bytes += (long long int) szret;
below = ((long long int)(1000000)) * \
- ((long long int)(timeval.tv_sec - timeval1.tv_sec)) + \
- (long long int)(timeval.tv_usec - timeval1.tv_usec);
- above = 1000000 * ((long long int) audio_bytes);
+ ((long long int)(timeval.tv_sec - \
+ peasycap->timeval3.tv_sec)) + \
+ (long long int)(timeval.tv_usec - peasycap->timeval3.tv_usec);
+ above = 1000000 * ((long long int) peasycap->audio_bytes);
if (below)
sdr = signed_div(above, below);
else
sdr.quotient = 192000;
}
-JOT(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
-if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
- return -ERESTARTSYS;
+JOM(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
peasycap->dnbydt = sdr.quotient;
-mutex_unlock(&(peasycap->mutex_timeval1));
-JOT(8, "returning %li\n", (long int)szret);
+JOM(8, "returning %li\n", (long int)szret);
+mutex_unlock(&easycap_dongle[kd].mutex_audio);
return szret;
}
/*****************************************************************************/
@@ -881,27 +1045,31 @@ submit_audio_urbs(struct easycap *peasycap)
struct data_urb *pdata_urb;
struct urb *purb;
struct list_head *plist_head;
-int j, isbad, m, rc;
+int j, isbad, nospc, m, rc;
int isbuf;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if ((struct list_head *)NULL == peasycap->purb_audio_head) {
- SAY("ERROR: peasycap->urb_audio_head uninitialized\n");
+ SAM("ERROR: peasycap->urb_audio_head uninitialized\n");
return -EFAULT;
}
if ((struct usb_device *)NULL == peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
if (!peasycap->audio_isoc_streaming) {
- JOT(4, "initial submission of all audio urbs\n");
+ JOM(4, "initial submission of all audio urbs\n");
rc = usb_set_interface(peasycap->pusb_device,
peasycap->audio_interface, \
peasycap->audio_altsetting_on);
- JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", \
+ JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", \
peasycap->audio_interface, \
peasycap->audio_altsetting_on, rc);
- isbad = 0; m = 0;
+ isbad = 0; nospc = 0; m = 0;
list_for_each(plist_head, (peasycap->purb_audio_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, list_head);
if (NULL != pdata_urb) {
@@ -938,39 +1106,49 @@ if (!peasycap->audio_isoc_streaming) {
rc = usb_submit_urb(purb, GFP_KERNEL);
if (0 != rc) {
isbad++;
- SAY("ERROR: usb_submit_urb() failed" \
+ SAM("ERROR: usb_submit_urb() failed" \
" for urb with rc:\n");
switch (rc) {
case -ENOMEM: {
- SAY("ENOMEM\n"); break;
+ SAM("-ENOMEM\n");
+ break;
}
case -ENODEV: {
- SAY("ENODEV\n"); break;
+ SAM("-ENODEV\n");
+ break;
}
case -ENXIO: {
- SAY("ENXIO\n"); break;
+ SAM("-ENXIO\n");
+ break;
}
case -EINVAL: {
- SAY("EINVAL\n"); break;
+ SAM("-EINVAL\n");
+ break;
}
case -EAGAIN: {
- SAY("EAGAIN\n"); break;
+ SAM("-EAGAIN\n");
+ break;
}
case -EFBIG: {
- SAY("EFBIG\n"); break;
+ SAM("-EFBIG\n");
+ break;
}
case -EPIPE: {
- SAY("EPIPE\n"); break;
+ SAM("-EPIPE\n");
+ break;
}
case -EMSGSIZE: {
- SAY("EMSGSIZE\n"); break;
+ SAM("-EMSGSIZE\n");
+ break;
}
case -ENOSPC: {
- SAY("ENOSPC\n"); break;
+ nospc++;
+ break;
}
default: {
- SAY("unknown error code %i\n",\
- rc); break;
+ SAM("unknown error code %i\n",\
+ rc);
+ break;
}
}
} else {
@@ -983,8 +1161,13 @@ if (!peasycap->audio_isoc_streaming) {
isbad++;
}
}
+ if (nospc) {
+ SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
+ SAM("..... possibly inadequate USB bandwidth\n");
+ peasycap->audio_eof = 1;
+ }
if (isbad) {
- JOT(4, "attempting cleanup instead of submitting\n");
+ JOM(4, "attempting cleanup instead of submitting\n");
list_for_each(plist_head, (peasycap->purb_audio_head)) {
pdata_urb = list_entry(plist_head, struct data_urb, \
list_head);
@@ -997,10 +1180,10 @@ if (!peasycap->audio_isoc_streaming) {
peasycap->audio_isoc_streaming = 0;
} else {
peasycap->audio_isoc_streaming = 1;
- JOT(4, "submitted %i audio urbs\n", m);
+ JOM(4, "submitted %i audio urbs\n", m);
}
} else
- JOT(4, "already streaming audio urbs\n");
+ JOM(4, "already streaming audio urbs\n");
return 0;
}
@@ -1017,10 +1200,14 @@ int m;
struct list_head *plist_head;
struct data_urb *pdata_urb;
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return -EFAULT;
+}
if (peasycap->audio_isoc_streaming) {
if ((struct list_head *)NULL != peasycap->purb_audio_head) {
peasycap->audio_isoc_streaming = 0;
- JOT(4, "killing audio urbs\n");
+ JOM(4, "killing audio urbs\n");
m = 0;
list_for_each(plist_head, (peasycap->purb_audio_head)) {
pdata_urb = list_entry(plist_head, struct data_urb,
@@ -1032,13 +1219,13 @@ if (peasycap->audio_isoc_streaming) {
}
}
}
- JOT(4, "%i audio urbs killed\n", m);
+ JOM(4, "%i audio urbs killed\n", m);
} else {
- SAY("ERROR: peasycap->purb_audio_head is NULL\n");
+ SAM("ERROR: peasycap->purb_audio_head is NULL\n");
return -EFAULT;
}
} else {
- JOT(8, "%i=audio_isoc_streaming, no audio urbs killed\n", \
+ JOM(8, "%i=audio_isoc_streaming, no audio urbs killed\n", \
peasycap->audio_isoc_streaming);
}
return 0;
diff --git a/drivers/staging/easycap/easycap_testcard.c b/drivers/staging/easycap/easycap_testcard.c
index 3c2ce28fab95..e27dfe9a9ba3 100644
--- a/drivers/staging/easycap/easycap_testcard.c
+++ b/drivers/staging/easycap/easycap_testcard.c
@@ -29,37 +29,69 @@
#include "easycap_debug.h"
/*****************************************************************************/
-#define TESTCARD_BYTESPERLINE (2 * 1440)
+#define TESTCARD_BYTESPERLINE (2 * 720)
void
-easycap_testcard(struct easycap *peasycap, int field_fill)
+easycap_testcard(struct easycap *peasycap, int field)
{
int total;
int y, u, v, r, g, b;
unsigned char uyvy[4];
-
-int i1, line, k, m, n, more, much, barwidth;
+int i1, line, k, m, n, more, much, barwidth, barheight;
unsigned char bfbar[TESTCARD_BYTESPERLINE / 8], *p1, *p2;
struct data_buffer *pfield_buffer;
-JOT(8, "%i=field_fill\n", field_fill);
-
-if ((TESTCARD_BYTESPERLINE / 2) < peasycap->width) {
- SAY("ERROR: image is too wide\n");
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return;
+}
+JOM(8, "%i=field\n", field);
+switch (peasycap->width) {
+case 720:
+case 360: {
+ barwidth = (2 * 720) / 8;
+ break;
+}
+case 704:
+case 352: {
+ barwidth = (2 * 704) / 8;
+ break;
+}
+case 640:
+case 320: {
+ barwidth = (2 * 640) / 8;
+ break;
+}
+default: {
+ SAM("ERROR: cannot set barwidth\n");
return;
}
-if (peasycap->width % 16) {
- SAY("ERROR: indivisible image width\n");
+}
+if (TESTCARD_BYTESPERLINE < barwidth) {
+ SAM("ERROR: barwidth is too large\n");
return;
}
-
+switch (peasycap->height) {
+case 576:
+case 288: {
+ barheight = 576;
+ break;
+}
+case 480:
+case 240: {
+ barheight = 480;
+ break;
+}
+default: {
+ SAM("ERROR: cannot set barheight\n");
+ return;
+}
+}
total = 0;
-barwidth = (2 * peasycap->width) / 8;
-
-k = field_fill;
+k = field;
m = 0;
n = 0;
-for (line = 0; line < (peasycap->height / 2); line++) {
+for (line = 0; line < (barheight / 2); line++) {
for (i1 = 0; i1 < 8; i1++) {
r = (i1 * 256)/8;
g = (i1 * 256)/8;
@@ -88,15 +120,15 @@ for (line = 0; line < (peasycap->height / 2); line++) {
while (more) {
if ((FIELD_BUFFER_SIZE/PAGE_SIZE) <= m) {
- SAY("ERROR: bad m reached\n");
+ SAM("ERROR: bad m reached\n");
return;
}
if (PAGE_SIZE < n) {
- SAY("ERROR: bad n reached\n"); return;
+ SAM("ERROR: bad n reached\n"); return;
}
if (0 > more) {
- SAY("ERROR: internal fault\n");
+ SAM("ERROR: internal fault\n");
return;
}
@@ -117,10 +149,6 @@ for (line = 0; line < (peasycap->height / 2); line++) {
}
}
}
-
-JOT(8, "%i=total\n", total);
-if (total != peasycap->width * peasycap->height)
- SAY("ERROR: wrong number of bytes written: %i\n", total);
return;
}
/*****************************************************************************/
@@ -157,35 +185,35 @@ for (i1 = 0; i1 <= last; i1++)
printf("%6i, ", i2); printf("%6i\n};\n", i2);
}
}
-return(0);
+return 0;
}
-----------------------------------------------------------------------------*/
int tones[2048] = {
- 0, 0, 502, 502, 1004, 1004, 1505, 1505, 2005, 2005,
- 2503, 2503, 2998, 2998, 3491, 3491, 3980, 3980, 4466, 4466,
- 4948, 4948, 5424, 5424, 5896, 5896, 6362, 6362, 6822, 6822,
- 7276, 7276, 7723, 7723, 8162, 8162, 8594, 8594, 9018, 9018,
- 9434, 9434, 9840, 9840, 10237, 10237, 10625, 10625, 11002, 11002,
- 11370, 11370, 11726, 11726, 12072, 12072, 12406, 12406, 12728, 12728,
- 13038, 13038, 13337, 13337, 13622, 13622, 13895, 13895, 14155, 14155,
- 14401, 14401, 14634, 14634, 14853, 14853, 15058, 15058, 15249, 15249,
- 15426, 15426, 15588, 15588, 15735, 15735, 15868, 15868, 15985, 15985,
- 16088, 16088, 16175, 16175, 16248, 16248, 16305, 16305, 16346, 16346,
- 16372, 16372, 16383, 16383, 16379, 16379, 16359, 16359, 16323, 16323,
- 16272, 16272, 16206, 16206, 16125, 16125, 16028, 16028, 15917, 15917,
- 15790, 15790, 15649, 15649, 15492, 15492, 15322, 15322, 15136, 15136,
- 14937, 14937, 14723, 14723, 14496, 14496, 14255, 14255, 14001, 14001,
- 13733, 13733, 13452, 13452, 13159, 13159, 12854, 12854, 12536, 12536,
- 12207, 12207, 11866, 11866, 11513, 11513, 11150, 11150, 10777, 10777,
- 10393, 10393, 10000, 10000, 9597, 9597, 9185, 9185, 8765, 8765,
- 8336, 8336, 7900, 7900, 7456, 7456, 7005, 7005, 6547, 6547,
- 6083, 6083, 5614, 5614, 5139, 5139, 4659, 4659, 4175, 4175,
- 3687, 3687, 3196, 3196, 2701, 2701, 2204, 2204, 1705, 1705,
- 1205, 1205, 703, 703, 201, 201, -301, -301, -803, -803,
- -1305, -1305, -1805, -1805, -2304, -2304, -2801, -2801, -3294, -3294,
- -3785, -3785, -4272, -4272, -4756, -4756, -5234, -5234, -5708, -5708,
- -6176, -6176, -6639, -6639, -7095, -7095, -7545, -7545, -7988, -7988,
- -8423, -8423, -8850, -8850, -9268, -9268, -9679, -9679, -10079, -10079,
+0, 0, 502, 502, 1004, 1004, 1505, 1505, 2005, 2005,
+2503, 2503, 2998, 2998, 3491, 3491, 3980, 3980, 4466, 4466,
+4948, 4948, 5424, 5424, 5896, 5896, 6362, 6362, 6822, 6822,
+7276, 7276, 7723, 7723, 8162, 8162, 8594, 8594, 9018, 9018,
+9434, 9434, 9840, 9840, 10237, 10237, 10625, 10625, 11002, 11002,
+11370, 11370, 11726, 11726, 12072, 12072, 12406, 12406, 12728, 12728,
+13038, 13038, 13337, 13337, 13622, 13622, 13895, 13895, 14155, 14155,
+14401, 14401, 14634, 14634, 14853, 14853, 15058, 15058, 15249, 15249,
+15426, 15426, 15588, 15588, 15735, 15735, 15868, 15868, 15985, 15985,
+16088, 16088, 16175, 16175, 16248, 16248, 16305, 16305, 16346, 16346,
+16372, 16372, 16383, 16383, 16379, 16379, 16359, 16359, 16323, 16323,
+16272, 16272, 16206, 16206, 16125, 16125, 16028, 16028, 15917, 15917,
+15790, 15790, 15649, 15649, 15492, 15492, 15322, 15322, 15136, 15136,
+14937, 14937, 14723, 14723, 14496, 14496, 14255, 14255, 14001, 14001,
+13733, 13733, 13452, 13452, 13159, 13159, 12854, 12854, 12536, 12536,
+12207, 12207, 11866, 11866, 11513, 11513, 11150, 11150, 10777, 10777,
+10393, 10393, 10000, 10000, 9597, 9597, 9185, 9185, 8765, 8765,
+8336, 8336, 7900, 7900, 7456, 7456, 7005, 7005, 6547, 6547,
+6083, 6083, 5614, 5614, 5139, 5139, 4659, 4659, 4175, 4175,
+3687, 3687, 3196, 3196, 2701, 2701, 2204, 2204, 1705, 1705,
+1205, 1205, 703, 703, 201, 201, -301, -301, -803, -803,
+-1305, -1305, -1805, -1805, -2304, -2304, -2801, -2801, -3294, -3294,
+-3785, -3785, -4272, -4272, -4756, -4756, -5234, -5234, -5708, -5708,
+-6176, -6176, -6639, -6639, -7095, -7095, -7545, -7545, -7988, -7988,
+-8423, -8423, -8850, -8850, -9268, -9268, -9679, -9679, -10079, -10079,
-10471, -10471, -10853, -10853, -11224, -11224, -11585, -11585, -11935, -11935,
-12273, -12273, -12600, -12600, -12916, -12916, -13219, -13219, -13510, -13510,
-13788, -13788, -14053, -14053, -14304, -14304, -14543, -14543, -14767, -14767,
@@ -198,35 +226,35 @@ int tones[2048] = {
-14353, -14353, -14104, -14104, -13842, -13842, -13566, -13566, -13278, -13278,
-12977, -12977, -12665, -12665, -12340, -12340, -12003, -12003, -11656, -11656,
-11297, -11297, -10928, -10928, -10548, -10548, -10159, -10159, -9759, -9759,
- -9351, -9351, -8934, -8934, -8509, -8509, -8075, -8075, -7634, -7634,
- -7186, -7186, -6731, -6731, -6269, -6269, -5802, -5802, -5329, -5329,
- -4852, -4852, -4369, -4369, -3883, -3883, -3393, -3393, -2900, -2900,
- -2404, -2404, -1905, -1905, -1405, -1405, -904, -904, -402, -402,
- 100, 100, 603, 603, 1105, 1105, 1605, 1605, 2105, 2105,
- 2602, 2602, 3097, 3097, 3589, 3589, 4078, 4078, 4563, 4563,
- 5043, 5043, 5519, 5519, 5990, 5990, 6455, 6455, 6914, 6914,
- 7366, 7366, 7811, 7811, 8249, 8249, 8680, 8680, 9102, 9102,
- 9516, 9516, 9920, 9920, 10315, 10315, 10701, 10701, 11077, 11077,
- 11442, 11442, 11796, 11796, 12139, 12139, 12471, 12471, 12791, 12791,
- 13099, 13099, 13395, 13395, 13678, 13678, 13948, 13948, 14205, 14205,
- 14449, 14449, 14679, 14679, 14895, 14895, 15098, 15098, 15286, 15286,
- 15459, 15459, 15618, 15618, 15763, 15763, 15892, 15892, 16007, 16007,
- 16107, 16107, 16191, 16191, 16260, 16260, 16314, 16314, 16353, 16353,
- 16376, 16376, 16384, 16384, 16376, 16376, 16353, 16353, 16314, 16314,
- 16260, 16260, 16191, 16191, 16107, 16107, 16007, 16007, 15892, 15892,
- 15763, 15763, 15618, 15618, 15459, 15459, 15286, 15286, 15098, 15098,
- 14895, 14895, 14679, 14679, 14449, 14449, 14205, 14205, 13948, 13948,
- 13678, 13678, 13395, 13395, 13099, 13099, 12791, 12791, 12471, 12471,
- 12139, 12139, 11796, 11796, 11442, 11442, 11077, 11077, 10701, 10701,
- 10315, 10315, 9920, 9920, 9516, 9516, 9102, 9102, 8680, 8680,
- 8249, 8249, 7811, 7811, 7366, 7366, 6914, 6914, 6455, 6455,
- 5990, 5990, 5519, 5519, 5043, 5043, 4563, 4563, 4078, 4078,
- 3589, 3589, 3097, 3097, 2602, 2602, 2105, 2105, 1605, 1605,
- 1105, 1105, 603, 603, 100, 100, -402, -402, -904, -904,
- -1405, -1405, -1905, -1905, -2404, -2404, -2900, -2900, -3393, -3393,
- -3883, -3883, -4369, -4369, -4852, -4852, -5329, -5329, -5802, -5802,
- -6269, -6269, -6731, -6731, -7186, -7186, -7634, -7634, -8075, -8075,
- -8509, -8509, -8934, -8934, -9351, -9351, -9759, -9759, -10159, -10159,
+-9351, -9351, -8934, -8934, -8509, -8509, -8075, -8075, -7634, -7634,
+-7186, -7186, -6731, -6731, -6269, -6269, -5802, -5802, -5329, -5329,
+-4852, -4852, -4369, -4369, -3883, -3883, -3393, -3393, -2900, -2900,
+-2404, -2404, -1905, -1905, -1405, -1405, -904, -904, -402, -402,
+100, 100, 603, 603, 1105, 1105, 1605, 1605, 2105, 2105,
+2602, 2602, 3097, 3097, 3589, 3589, 4078, 4078, 4563, 4563,
+5043, 5043, 5519, 5519, 5990, 5990, 6455, 6455, 6914, 6914,
+7366, 7366, 7811, 7811, 8249, 8249, 8680, 8680, 9102, 9102,
+9516, 9516, 9920, 9920, 10315, 10315, 10701, 10701, 11077, 11077,
+11442, 11442, 11796, 11796, 12139, 12139, 12471, 12471, 12791, 12791,
+13099, 13099, 13395, 13395, 13678, 13678, 13948, 13948, 14205, 14205,
+14449, 14449, 14679, 14679, 14895, 14895, 15098, 15098, 15286, 15286,
+15459, 15459, 15618, 15618, 15763, 15763, 15892, 15892, 16007, 16007,
+16107, 16107, 16191, 16191, 16260, 16260, 16314, 16314, 16353, 16353,
+16376, 16376, 16384, 16384, 16376, 16376, 16353, 16353, 16314, 16314,
+16260, 16260, 16191, 16191, 16107, 16107, 16007, 16007, 15892, 15892,
+15763, 15763, 15618, 15618, 15459, 15459, 15286, 15286, 15098, 15098,
+14895, 14895, 14679, 14679, 14449, 14449, 14205, 14205, 13948, 13948,
+13678, 13678, 13395, 13395, 13099, 13099, 12791, 12791, 12471, 12471,
+12139, 12139, 11796, 11796, 11442, 11442, 11077, 11077, 10701, 10701,
+10315, 10315, 9920, 9920, 9516, 9516, 9102, 9102, 8680, 8680,
+8249, 8249, 7811, 7811, 7366, 7366, 6914, 6914, 6455, 6455,
+5990, 5990, 5519, 5519, 5043, 5043, 4563, 4563, 4078, 4078,
+3589, 3589, 3097, 3097, 2602, 2602, 2105, 2105, 1605, 1605,
+1105, 1105, 603, 603, 100, 100, -402, -402, -904, -904,
+-1405, -1405, -1905, -1905, -2404, -2404, -2900, -2900, -3393, -3393,
+-3883, -3883, -4369, -4369, -4852, -4852, -5329, -5329, -5802, -5802,
+-6269, -6269, -6731, -6731, -7186, -7186, -7634, -7634, -8075, -8075,
+-8509, -8509, -8934, -8934, -9351, -9351, -9759, -9759, -10159, -10159,
-10548, -10548, -10928, -10928, -11297, -11297, -11656, -11656, -12003, -12003,
-12340, -12340, -12665, -12665, -12977, -12977, -13278, -13278, -13566, -13566,
-13842, -13842, -14104, -14104, -14353, -14353, -14589, -14589, -14810, -14810,
@@ -239,35 +267,35 @@ int tones[2048] = {
-14304, -14304, -14053, -14053, -13788, -13788, -13510, -13510, -13219, -13219,
-12916, -12916, -12600, -12600, -12273, -12273, -11935, -11935, -11585, -11585,
-11224, -11224, -10853, -10853, -10471, -10471, -10079, -10079, -9679, -9679,
- -9268, -9268, -8850, -8850, -8423, -8423, -7988, -7988, -7545, -7545,
- -7095, -7095, -6639, -6639, -6176, -6176, -5708, -5708, -5234, -5234,
- -4756, -4756, -4272, -4272, -3785, -3785, -3294, -3294, -2801, -2801,
- -2304, -2304, -1805, -1805, -1305, -1305, -803, -803, -301, -301,
- 201, 201, 703, 703, 1205, 1205, 1705, 1705, 2204, 2204,
- 2701, 2701, 3196, 3196, 3687, 3687, 4175, 4175, 4659, 4659,
- 5139, 5139, 5614, 5614, 6083, 6083, 6547, 6547, 7005, 7005,
- 7456, 7456, 7900, 7900, 8336, 8336, 8765, 8765, 9185, 9185,
- 9597, 9597, 10000, 10000, 10393, 10393, 10777, 10777, 11150, 11150,
- 11513, 11513, 11866, 11866, 12207, 12207, 12536, 12536, 12854, 12854,
- 13159, 13159, 13452, 13452, 13733, 13733, 14001, 14001, 14255, 14255,
- 14496, 14496, 14723, 14723, 14937, 14937, 15136, 15136, 15322, 15322,
- 15492, 15492, 15649, 15649, 15790, 15790, 15917, 15917, 16028, 16028,
- 16125, 16125, 16206, 16206, 16272, 16272, 16323, 16323, 16359, 16359,
- 16379, 16379, 16383, 16383, 16372, 16372, 16346, 16346, 16305, 16305,
- 16248, 16248, 16175, 16175, 16088, 16088, 15985, 15985, 15868, 15868,
- 15735, 15735, 15588, 15588, 15426, 15426, 15249, 15249, 15058, 15058,
- 14853, 14853, 14634, 14634, 14401, 14401, 14155, 14155, 13895, 13895,
- 13622, 13622, 13337, 13337, 13038, 13038, 12728, 12728, 12406, 12406,
- 12072, 12072, 11726, 11726, 11370, 11370, 11002, 11002, 10625, 10625,
- 10237, 10237, 9840, 9840, 9434, 9434, 9018, 9018, 8594, 8594,
- 8162, 8162, 7723, 7723, 7276, 7276, 6822, 6822, 6362, 6362,
- 5896, 5896, 5424, 5424, 4948, 4948, 4466, 4466, 3980, 3980,
- 3491, 3491, 2998, 2998, 2503, 2503, 2005, 2005, 1505, 1505,
- 1004, 1004, 502, 502, 0, 0, -502, -502, -1004, -1004,
- -1505, -1505, -2005, -2005, -2503, -2503, -2998, -2998, -3491, -3491,
- -3980, -3980, -4466, -4466, -4948, -4948, -5424, -5424, -5896, -5896,
- -6362, -6362, -6822, -6822, -7276, -7276, -7723, -7723, -8162, -8162,
- -8594, -8594, -9018, -9018, -9434, -9434, -9840, -9840, -10237, -10237,
+-9268, -9268, -8850, -8850, -8423, -8423, -7988, -7988, -7545, -7545,
+-7095, -7095, -6639, -6639, -6176, -6176, -5708, -5708, -5234, -5234,
+-4756, -4756, -4272, -4272, -3785, -3785, -3294, -3294, -2801, -2801,
+-2304, -2304, -1805, -1805, -1305, -1305, -803, -803, -301, -301,
+201, 201, 703, 703, 1205, 1205, 1705, 1705, 2204, 2204,
+2701, 2701, 3196, 3196, 3687, 3687, 4175, 4175, 4659, 4659,
+5139, 5139, 5614, 5614, 6083, 6083, 6547, 6547, 7005, 7005,
+7456, 7456, 7900, 7900, 8336, 8336, 8765, 8765, 9185, 9185,
+9597, 9597, 10000, 10000, 10393, 10393, 10777, 10777, 11150, 11150,
+11513, 11513, 11866, 11866, 12207, 12207, 12536, 12536, 12854, 12854,
+13159, 13159, 13452, 13452, 13733, 13733, 14001, 14001, 14255, 14255,
+14496, 14496, 14723, 14723, 14937, 14937, 15136, 15136, 15322, 15322,
+15492, 15492, 15649, 15649, 15790, 15790, 15917, 15917, 16028, 16028,
+16125, 16125, 16206, 16206, 16272, 16272, 16323, 16323, 16359, 16359,
+16379, 16379, 16383, 16383, 16372, 16372, 16346, 16346, 16305, 16305,
+16248, 16248, 16175, 16175, 16088, 16088, 15985, 15985, 15868, 15868,
+15735, 15735, 15588, 15588, 15426, 15426, 15249, 15249, 15058, 15058,
+14853, 14853, 14634, 14634, 14401, 14401, 14155, 14155, 13895, 13895,
+13622, 13622, 13337, 13337, 13038, 13038, 12728, 12728, 12406, 12406,
+12072, 12072, 11726, 11726, 11370, 11370, 11002, 11002, 10625, 10625,
+10237, 10237, 9840, 9840, 9434, 9434, 9018, 9018, 8594, 8594,
+8162, 8162, 7723, 7723, 7276, 7276, 6822, 6822, 6362, 6362,
+5896, 5896, 5424, 5424, 4948, 4948, 4466, 4466, 3980, 3980,
+3491, 3491, 2998, 2998, 2503, 2503, 2005, 2005, 1505, 1505,
+1004, 1004, 502, 502, 0, 0, -502, -502, -1004, -1004,
+-1505, -1505, -2005, -2005, -2503, -2503, -2998, -2998, -3491, -3491,
+-3980, -3980, -4466, -4466, -4948, -4948, -5424, -5424, -5896, -5896,
+-6362, -6362, -6822, -6822, -7276, -7276, -7723, -7723, -8162, -8162,
+-8594, -8594, -9018, -9018, -9434, -9434, -9840, -9840, -10237, -10237,
-10625, -10625, -11002, -11002, -11370, -11370, -11726, -11726, -12072, -12072,
-12406, -12406, -12728, -12728, -13038, -13038, -13337, -13337, -13622, -13622,
-13895, -13895, -14155, -14155, -14401, -14401, -14634, -14634, -14853, -14853,
@@ -280,35 +308,35 @@ int tones[2048] = {
-14255, -14255, -14001, -14001, -13733, -13733, -13452, -13452, -13159, -13159,
-12854, -12854, -12536, -12536, -12207, -12207, -11866, -11866, -11513, -11513,
-11150, -11150, -10777, -10777, -10393, -10393, -10000, -10000, -9597, -9597,
- -9185, -9185, -8765, -8765, -8336, -8336, -7900, -7900, -7456, -7456,
- -7005, -7005, -6547, -6547, -6083, -6083, -5614, -5614, -5139, -5139,
- -4659, -4659, -4175, -4175, -3687, -3687, -3196, -3196, -2701, -2701,
- -2204, -2204, -1705, -1705, -1205, -1205, -703, -703, -201, -201,
- 301, 301, 803, 803, 1305, 1305, 1805, 1805, 2304, 2304,
- 2801, 2801, 3294, 3294, 3785, 3785, 4272, 4272, 4756, 4756,
- 5234, 5234, 5708, 5708, 6176, 6176, 6639, 6639, 7095, 7095,
- 7545, 7545, 7988, 7988, 8423, 8423, 8850, 8850, 9268, 9268,
- 9679, 9679, 10079, 10079, 10471, 10471, 10853, 10853, 11224, 11224,
- 11585, 11585, 11935, 11935, 12273, 12273, 12600, 12600, 12916, 12916,
- 13219, 13219, 13510, 13510, 13788, 13788, 14053, 14053, 14304, 14304,
- 14543, 14543, 14767, 14767, 14978, 14978, 15175, 15175, 15357, 15357,
- 15525, 15525, 15678, 15678, 15817, 15817, 15940, 15940, 16049, 16049,
- 16142, 16142, 16221, 16221, 16284, 16284, 16331, 16331, 16364, 16364,
- 16381, 16381, 16382, 16382, 16368, 16368, 16339, 16339, 16294, 16294,
- 16234, 16234, 16159, 16159, 16069, 16069, 15963, 15963, 15842, 15842,
- 15707, 15707, 15557, 15557, 15392, 15392, 15212, 15212, 15018, 15018,
- 14810, 14810, 14589, 14589, 14353, 14353, 14104, 14104, 13842, 13842,
- 13566, 13566, 13278, 13278, 12977, 12977, 12665, 12665, 12340, 12340,
- 12003, 12003, 11656, 11656, 11297, 11297, 10928, 10928, 10548, 10548,
- 10159, 10159, 9759, 9759, 9351, 9351, 8934, 8934, 8509, 8509,
- 8075, 8075, 7634, 7634, 7186, 7186, 6731, 6731, 6269, 6269,
- 5802, 5802, 5329, 5329, 4852, 4852, 4369, 4369, 3883, 3883,
- 3393, 3393, 2900, 2900, 2404, 2404, 1905, 1905, 1405, 1405,
- 904, 904, 402, 402, -100, -100, -603, -603, -1105, -1105,
- -1605, -1605, -2105, -2105, -2602, -2602, -3097, -3097, -3589, -3589,
- -4078, -4078, -4563, -4563, -5043, -5043, -5519, -5519, -5990, -5990,
- -6455, -6455, -6914, -6914, -7366, -7366, -7811, -7811, -8249, -8249,
- -8680, -8680, -9102, -9102, -9516, -9516, -9920, -9920, -10315, -10315,
+-9185, -9185, -8765, -8765, -8336, -8336, -7900, -7900, -7456, -7456,
+-7005, -7005, -6547, -6547, -6083, -6083, -5614, -5614, -5139, -5139,
+-4659, -4659, -4175, -4175, -3687, -3687, -3196, -3196, -2701, -2701,
+-2204, -2204, -1705, -1705, -1205, -1205, -703, -703, -201, -201,
+301, 301, 803, 803, 1305, 1305, 1805, 1805, 2304, 2304,
+2801, 2801, 3294, 3294, 3785, 3785, 4272, 4272, 4756, 4756,
+5234, 5234, 5708, 5708, 6176, 6176, 6639, 6639, 7095, 7095,
+7545, 7545, 7988, 7988, 8423, 8423, 8850, 8850, 9268, 9268,
+9679, 9679, 10079, 10079, 10471, 10471, 10853, 10853, 11224, 11224,
+11585, 11585, 11935, 11935, 12273, 12273, 12600, 12600, 12916, 12916,
+13219, 13219, 13510, 13510, 13788, 13788, 14053, 14053, 14304, 14304,
+14543, 14543, 14767, 14767, 14978, 14978, 15175, 15175, 15357, 15357,
+15525, 15525, 15678, 15678, 15817, 15817, 15940, 15940, 16049, 16049,
+16142, 16142, 16221, 16221, 16284, 16284, 16331, 16331, 16364, 16364,
+16381, 16381, 16382, 16382, 16368, 16368, 16339, 16339, 16294, 16294,
+16234, 16234, 16159, 16159, 16069, 16069, 15963, 15963, 15842, 15842,
+15707, 15707, 15557, 15557, 15392, 15392, 15212, 15212, 15018, 15018,
+14810, 14810, 14589, 14589, 14353, 14353, 14104, 14104, 13842, 13842,
+13566, 13566, 13278, 13278, 12977, 12977, 12665, 12665, 12340, 12340,
+12003, 12003, 11656, 11656, 11297, 11297, 10928, 10928, 10548, 10548,
+10159, 10159, 9759, 9759, 9351, 9351, 8934, 8934, 8509, 8509,
+8075, 8075, 7634, 7634, 7186, 7186, 6731, 6731, 6269, 6269,
+5802, 5802, 5329, 5329, 4852, 4852, 4369, 4369, 3883, 3883,
+3393, 3393, 2900, 2900, 2404, 2404, 1905, 1905, 1405, 1405,
+904, 904, 402, 402, -100, -100, -603, -603, -1105, -1105,
+-1605, -1605, -2105, -2105, -2602, -2602, -3097, -3097, -3589, -3589,
+-4078, -4078, -4563, -4563, -5043, -5043, -5519, -5519, -5990, -5990,
+-6455, -6455, -6914, -6914, -7366, -7366, -7811, -7811, -8249, -8249,
+-8680, -8680, -9102, -9102, -9516, -9516, -9920, -9920, -10315, -10315,
-10701, -10701, -11077, -11077, -11442, -11442, -11796, -11796, -12139, -12139,
-12471, -12471, -12791, -12791, -13099, -13099, -13395, -13395, -13678, -13678,
-13948, -13948, -14205, -14205, -14449, -14449, -14679, -14679, -14895, -14895,
@@ -321,35 +349,35 @@ int tones[2048] = {
-14205, -14205, -13948, -13948, -13678, -13678, -13395, -13395, -13099, -13099,
-12791, -12791, -12471, -12471, -12139, -12139, -11796, -11796, -11442, -11442,
-11077, -11077, -10701, -10701, -10315, -10315, -9920, -9920, -9516, -9516,
- -9102, -9102, -8680, -8680, -8249, -8249, -7811, -7811, -7366, -7366,
- -6914, -6914, -6455, -6455, -5990, -5990, -5519, -5519, -5043, -5043,
- -4563, -4563, -4078, -4078, -3589, -3589, -3097, -3097, -2602, -2602,
- -2105, -2105, -1605, -1605, -1105, -1105, -603, -603, -100, -100,
- 402, 402, 904, 904, 1405, 1405, 1905, 1905, 2404, 2404,
- 2900, 2900, 3393, 3393, 3883, 3883, 4369, 4369, 4852, 4852,
- 5329, 5329, 5802, 5802, 6269, 6269, 6731, 6731, 7186, 7186,
- 7634, 7634, 8075, 8075, 8509, 8509, 8934, 8934, 9351, 9351,
- 9759, 9759, 10159, 10159, 10548, 10548, 10928, 10928, 11297, 11297,
- 11656, 11656, 12003, 12003, 12340, 12340, 12665, 12665, 12977, 12977,
- 13278, 13278, 13566, 13566, 13842, 13842, 14104, 14104, 14353, 14353,
- 14589, 14589, 14810, 14810, 15018, 15018, 15212, 15212, 15392, 15392,
- 15557, 15557, 15707, 15707, 15842, 15842, 15963, 15963, 16069, 16069,
- 16159, 16159, 16234, 16234, 16294, 16294, 16339, 16339, 16368, 16368,
- 16382, 16382, 16381, 16381, 16364, 16364, 16331, 16331, 16284, 16284,
- 16221, 16221, 16142, 16142, 16049, 16049, 15940, 15940, 15817, 15817,
- 15678, 15678, 15525, 15525, 15357, 15357, 15175, 15175, 14978, 14978,
- 14767, 14767, 14543, 14543, 14304, 14304, 14053, 14053, 13788, 13788,
- 13510, 13510, 13219, 13219, 12916, 12916, 12600, 12600, 12273, 12273,
- 11935, 11935, 11585, 11585, 11224, 11224, 10853, 10853, 10471, 10471,
- 10079, 10079, 9679, 9679, 9268, 9268, 8850, 8850, 8423, 8423,
- 7988, 7988, 7545, 7545, 7095, 7095, 6639, 6639, 6176, 6176,
- 5708, 5708, 5234, 5234, 4756, 4756, 4272, 4272, 3785, 3785,
- 3294, 3294, 2801, 2801, 2304, 2304, 1805, 1805, 1305, 1305,
- 803, 803, 301, 301, -201, -201, -703, -703, -1205, -1205,
- -1705, -1705, -2204, -2204, -2701, -2701, -3196, -3196, -3687, -3687,
- -4175, -4175, -4659, -4659, -5139, -5139, -5614, -5614, -6083, -6083,
- -6547, -6547, -7005, -7005, -7456, -7456, -7900, -7900, -8336, -8336,
- -8765, -8765, -9185, -9185, -9597, -9597, -10000, -10000, -10393, -10393,
+-9102, -9102, -8680, -8680, -8249, -8249, -7811, -7811, -7366, -7366,
+-6914, -6914, -6455, -6455, -5990, -5990, -5519, -5519, -5043, -5043,
+-4563, -4563, -4078, -4078, -3589, -3589, -3097, -3097, -2602, -2602,
+-2105, -2105, -1605, -1605, -1105, -1105, -603, -603, -100, -100,
+402, 402, 904, 904, 1405, 1405, 1905, 1905, 2404, 2404,
+2900, 2900, 3393, 3393, 3883, 3883, 4369, 4369, 4852, 4852,
+5329, 5329, 5802, 5802, 6269, 6269, 6731, 6731, 7186, 7186,
+7634, 7634, 8075, 8075, 8509, 8509, 8934, 8934, 9351, 9351,
+9759, 9759, 10159, 10159, 10548, 10548, 10928, 10928, 11297, 11297,
+11656, 11656, 12003, 12003, 12340, 12340, 12665, 12665, 12977, 12977,
+13278, 13278, 13566, 13566, 13842, 13842, 14104, 14104, 14353, 14353,
+14589, 14589, 14810, 14810, 15018, 15018, 15212, 15212, 15392, 15392,
+15557, 15557, 15707, 15707, 15842, 15842, 15963, 15963, 16069, 16069,
+16159, 16159, 16234, 16234, 16294, 16294, 16339, 16339, 16368, 16368,
+16382, 16382, 16381, 16381, 16364, 16364, 16331, 16331, 16284, 16284,
+16221, 16221, 16142, 16142, 16049, 16049, 15940, 15940, 15817, 15817,
+15678, 15678, 15525, 15525, 15357, 15357, 15175, 15175, 14978, 14978,
+14767, 14767, 14543, 14543, 14304, 14304, 14053, 14053, 13788, 13788,
+13510, 13510, 13219, 13219, 12916, 12916, 12600, 12600, 12273, 12273,
+11935, 11935, 11585, 11585, 11224, 11224, 10853, 10853, 10471, 10471,
+10079, 10079, 9679, 9679, 9268, 9268, 8850, 8850, 8423, 8423,
+7988, 7988, 7545, 7545, 7095, 7095, 6639, 6639, 6176, 6176,
+5708, 5708, 5234, 5234, 4756, 4756, 4272, 4272, 3785, 3785,
+3294, 3294, 2801, 2801, 2304, 2304, 1805, 1805, 1305, 1305,
+803, 803, 301, 301, -201, -201, -703, -703, -1205, -1205,
+-1705, -1705, -2204, -2204, -2701, -2701, -3196, -3196, -3687, -3687,
+-4175, -4175, -4659, -4659, -5139, -5139, -5614, -5614, -6083, -6083,
+-6547, -6547, -7005, -7005, -7456, -7456, -7900, -7900, -8336, -8336,
+-8765, -8765, -9185, -9185, -9597, -9597, -10000, -10000, -10393, -10393,
-10777, -10777, -11150, -11150, -11513, -11513, -11866, -11866, -12207, -12207,
-12536, -12536, -12854, -12854, -13159, -13159, -13452, -13452, -13733, -13733,
-14001, -14001, -14255, -14255, -14496, -14496, -14723, -14723, -14937, -14937,
@@ -362,10 +390,10 @@ int tones[2048] = {
-14155, -14155, -13895, -13895, -13622, -13622, -13337, -13337, -13038, -13038,
-12728, -12728, -12406, -12406, -12072, -12072, -11726, -11726, -11370, -11370,
-11002, -11002, -10625, -10625, -10237, -10237, -9840, -9840, -9434, -9434,
- -9018, -9018, -8594, -8594, -8162, -8162, -7723, -7723, -7276, -7276,
- -6822, -6822, -6362, -6362, -5896, -5896, -5424, -5424, -4948, -4948,
- -4466, -4466, -3980, -3980, -3491, -3491, -2998, -2998, -2503, -2503,
- -2005, -2005, -1505, -1505, -1004, -1004, -502, -502
+-9018, -9018, -8594, -8594, -8162, -8162, -7723, -7723, -7276, -7276,
+-6822, -6822, -6362, -6362, -5896, -5896, -5424, -5424, -4948, -4948,
+-4466, -4466, -3980, -3980, -3491, -3491, -2998, -2998, -2503, -2503,
+-2005, -2005, -1505, -1505, -1004, -1004, -502, -502
};
/*****************************************************************************/
void
@@ -375,10 +403,12 @@ int i1;
unsigned char *p2;
struct data_buffer *paudio_buffer;
-JOT(8, "%i=audio_fill\n", audio_fill);
-
+if (NULL == peasycap) {
+ SAY("ERROR: peasycap is NULL\n");
+ return;
+}
+JOM(8, "%i=audio_fill\n", audio_fill);
paudio_buffer = &peasycap->audio_buffer[audio_fill];
-
p2 = (unsigned char *)(paudio_buffer->pgo);
for (i1 = 0; i1 < PAGE_SIZE; i1 += 4, p2 += 4) {
*p2 = (unsigned char) (0x00FF & tones[i1/2]);
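Note on the easycap_testcard.c hunks above: the bar width is no longer derived from peasycap->width at run time; each supported capture width (720/704/640 and their halves) maps to a fixed one eighth of the full-resolution line length, a matching barheight is chosen from the standard 576/480 line counts, and NULL guards on peasycap are added to both the testcard and the tone-fill routines. A minimal standalone sketch of the selection logic (the helper name is illustrative, not part of the driver):

/* Sketch only: mirrors the switch added in easycap_testcard(); two bytes
 * per pixel (UYVY) and eight colour bars per line, so barwidth is in bytes. */
static int testcard_barwidth(int width)
{
	switch (width) {
	case 720:
	case 360:
		return (2 * 720) / 8;
	case 704:
	case 352:
		return (2 * 704) / 8;
	case 640:
	case 320:
		return (2 * 640) / 8;
	default:
		return -1;	/* caller logs "cannot set barwidth" and bails out */
	}
}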
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 10bcb45d73a3..f62ba7a68f34 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -783,7 +783,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
/* Retrieve the net_device pointer from the pci_dev struct, as well
* as the private adapter struct
*/
- netdev = (struct net_device *) pci_get_drvdata(pdev);
+ netdev = pci_get_drvdata(pdev);
adapter = netdev_priv(netdev);
/* Perform device cleanup */
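The et131x hunk above only drops a redundant cast: pci_get_drvdata() returns void *, which converts implicitly to any object pointer type in C. The resulting idiom looks like this (the adapter type name is assumed here, not taken from the hunk):

	/* No cast needed on a void * return value: */
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);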
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index ef7fbf8b069a..2babb034a254 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -89,7 +89,7 @@ static int debug = ALPHATRACK_DEBUG;
/* Use our own dbg macro */
#define dbg_info(dev, format, arg...) do \
- { if (debug) dev_info(dev , format , ## arg); } while (0)
+ { if (debug) dev_info(dev , format , ## arg); } while (0)
#define alphatrack_ocmd_info(dev, cmd, format, arg...)
@@ -769,7 +769,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
}
dev->write_buffer =
- kmalloc(sizeof(struct alphatrack_ocmd) * true_size, GFP_KERNEL);
+ kmalloc(true_size * sizeof(struct alphatrack_ocmd), GFP_KERNEL);
if (!dev->write_buffer) {
dev_err(&intf->dev, "Couldn't allocate write_buffer\n");
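The alphatrack hunk swaps the kmalloc() size expression into the conventional count * sizeof(element) order; behaviour is unchanged. An overflow-checked alternative (not what this patch does, shown only for comparison, and note that it also zeroes the buffer) would be:

	dev->write_buffer = kcalloc(true_size, sizeof(struct alphatrack_ocmd),
				    GFP_KERNEL);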
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index eed7e94308db..588afd5a5ddb 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -132,7 +132,7 @@ inline void ft1000_asic_write(struct net_device *dev, u16 offset, u16 value)
//---------------------------------------------------------------------------
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
if (info->AsicID == ELECTRABUZZ_ID) {
return (ft1000_read_reg(dev, FT1000_REG_UFIFO_STAT) - 16);
@@ -155,7 +155,7 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
//---------------------------------------------------------------------------
u16 ft1000_read_dpram(struct net_device * dev, int offset)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
u16 data;
@@ -184,7 +184,7 @@ u16 ft1000_read_dpram(struct net_device * dev, int offset)
static inline void ft1000_write_dpram(struct net_device *dev,
int offset, u16 value)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -208,7 +208,7 @@ static inline void ft1000_write_dpram(struct net_device *dev,
//---------------------------------------------------------------------------
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
u16 data;
@@ -242,7 +242,7 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
int offset, u16 value, int Index)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -270,7 +270,7 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
//---------------------------------------------------------------------------
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
u32 data;
@@ -298,7 +298,7 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
//---------------------------------------------------------------------------
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -320,7 +320,7 @@ void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
//---------------------------------------------------------------------------
static void ft1000_enable_interrupts(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
@@ -345,7 +345,7 @@ static void ft1000_enable_interrupts(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_disable_interrupts(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
@@ -370,7 +370,7 @@ static void ft1000_disable_interrupts(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_reset_asic(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
@@ -414,7 +414,7 @@ static void ft1000_reset_asic(struct net_device *dev)
//---------------------------------------------------------------------------
static int ft1000_reset_card(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
int i;
unsigned long flags;
@@ -618,7 +618,7 @@ static void ft1000_hbchk(u_long data)
FT1000_INFO *info;
USHORT tempword;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
if (info->CardReady == 1) {
// Perform dsp heartbeat check
@@ -831,7 +831,7 @@ static void ft1000_hbchk(u_long data)
//---------------------------------------------------------------------------
void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
int i;
u16 tempword;
unsigned long flags;
@@ -916,7 +916,7 @@ void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qt
//---------------------------------------------------------------------------
BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16 *pnxtph)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 size;
u16 *ppseudohdr;
int i;
@@ -1009,7 +1009,7 @@ BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16
//---------------------------------------------------------------------------
void ft1000_proc_drvmsg(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 msgtype;
u16 tempword;
PMEDIAMSG pmediamsg;
@@ -1292,7 +1292,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
//---------------------------------------------------------------------------
int ft1000_parse_dpram_msg(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 doorbell;
u16 portid;
u16 nxtph;
@@ -1449,7 +1449,7 @@ int ft1000_parse_dpram_msg(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 i;
u32 templong;
u16 tempword;
@@ -1596,7 +1596,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
int ft1000_copy_up_pkt(struct net_device *dev)
{
u16 tempword;
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 len;
struct sk_buff *skb;
u16 i;
@@ -1783,7 +1783,7 @@ int ft1000_copy_up_pkt(struct net_device *dev)
//---------------------------------------------------------------------------
int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
union {
PSEUDO_HDR blk;
u16 buff[sizeof(PSEUDO_HDR) >> 1];
@@ -1943,7 +1943,7 @@ int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
static struct net_device_stats *ft1000_stats(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
return (&info->stats);
}
@@ -1967,7 +1967,7 @@ static int ft1000_open(struct net_device *dev)
static int ft1000_close(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
DEBUG(0, "ft1000_hw: ft1000_close()\n");
@@ -1989,7 +1989,7 @@ static int ft1000_close(struct net_device *dev)
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u8 *pdata;
DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n");
@@ -2026,7 +2026,7 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
u16 inttype;
int cnt;
@@ -2091,7 +2091,7 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
void stop_ft1000_card(struct net_device *dev)
{
- FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+ FT1000_INFO *info = netdev_priv(dev);
PPROV_RECORD ptr;
// int cnt;
@@ -2127,7 +2127,7 @@ static void ft1000_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
FT1000_INFO *ft_info;
- ft_info = (FT1000_INFO *) netdev_priv(dev);
+ ft_info = netdev_priv(dev);
snprintf(info->driver, 32, "ft1000");
snprintf(info->bus_info, ETHTOOL_BUSINFO_LEN, "PCMCIA 0x%lx",
@@ -2139,7 +2139,7 @@ static void ft1000_get_drvinfo(struct net_device *dev,
static u32 ft1000_get_link(struct net_device *dev)
{
FT1000_INFO *info;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
return info->mediastate;
}
@@ -2185,7 +2185,7 @@ struct net_device *init_ft1000_card(unsigned short irq, int port,
}
SET_NETDEV_DEV(dev, fdev);
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
memset(info, 0, sizeof(FT1000_INFO));
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index b45de9bc1b20..935608e72007 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -65,7 +65,7 @@ int ft1000ReadProc(char *page, char **start, off_t off,
time_t delta;
dev = (struct net_device *)data;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
if (off > 0) {
*eof = 1;
@@ -174,7 +174,7 @@ static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
struct net_device *dev = ptr;
FT1000_INFO *info;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
@@ -195,7 +195,7 @@ void ft1000InitProc(struct net_device *dev)
{
FT1000_INFO *info;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
info->proc_ft1000 = proc_mkdir(FT1000_PROC, init_net.proc_net);
create_proc_read_entry(dev->name, 0644, info->proc_ft1000,
@@ -208,7 +208,7 @@ void ft1000CleanupProc(struct net_device *dev)
{
FT1000_INFO *info;
- info = (FT1000_INFO *) netdev_priv(dev);
+ info = netdev_priv(dev);
remove_proc_entry(dev->name, info->proc_ft1000);
remove_proc_entry(FT1000_PROC, init_net.proc_net);
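Every ft1000-pcmcia hunk above is the same mechanical cleanup: netdev_priv() already returns void *, so the (FT1000_INFO *) casts in ft1000_hw.c and ft1000_proc.c are dropped. The resulting form, taken straight from the diff:

	FT1000_INFO *info = netdev_priv(dev);	/* no cast on a void * return */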
diff --git a/drivers/staging/ft1000/ft1000-usb/Makefile b/drivers/staging/ft1000/ft1000-usb/Makefile
index dd87ecd7918e..f0f524015888 100644
--- a/drivers/staging/ft1000/ft1000-usb/Makefile
+++ b/drivers/staging/ft1000/ft1000-usb/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_FT1000_USB) += ft1000.o
-ft1000-y := ft1000_chdev.o ft1000_download.o ft1000_hw.o ft1000_proc.o ft1000_usb.o
+ft1000-y := ft1000_debug.o ft1000_download.o ft1000_hw.o ft1000_proc.o ft1000_usb.o
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 20d509836d9e..149ba59f96bf 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -27,33 +27,22 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/kmod.h>
#include <linux/ioctl.h>
-#include <linux/unistd.h>
-
+#include <linux/debugfs.h>
#include "ft1000_usb.h"
-//#include "ft1000_ioctl.h"
static int ft1000_flarion_cnt = 0;
-//need to looking usage of ft1000Handle
-
-static int ft1000_ChOpen (struct inode *Inode, struct file *File);
-static unsigned int ft1000_ChPoll(struct file *file, poll_table *wait);
-static long ft1000_ChIoctl(struct file *File, unsigned int Command,
- unsigned long Argument);
-static int ft1000_ChRelease (struct inode *Inode, struct file *File);
-
-// Global pointer to device object
-static struct ft1000_device *pdevobj[MAX_NUM_CARDS + 2];
-//static devfs_handle_t ft1000Handle[MAX_NUM_CARDS];
+static int ft1000_open (struct inode *inode, struct file *file);
+static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
+static long ft1000_ioctl(struct file *file, unsigned int command,
+ unsigned long argument);
+static int ft1000_release (struct inode *inode, struct file *file);
// List to free receive command buffer pool
struct list_head freercvpool;
@@ -63,103 +52,18 @@ spinlock_t free_buff_lock;
int numofmsgbuf = 0;
-// Global variable to indicate that all provisioning data is sent to DSP
-//BOOLEAN fProvComplete;
-
//
// Table of entry-point routines for char device
//
static struct file_operations ft1000fops =
{
- .unlocked_ioctl = ft1000_ChIoctl,
- .poll = ft1000_ChPoll,
- .open = ft1000_ChOpen,
- .release = ft1000_ChRelease,
+ .unlocked_ioctl = ft1000_ioctl,
+ .poll = ft1000_poll_dev,
+ .open = ft1000_open,
+ .release = ft1000_release,
.llseek = no_llseek,
};
-
-
-
-//---------------------------------------------------------------------------
-// Function: exec_mknod
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int exec_mknod (void *pdata)
-{
- struct ft1000_info *info;
- char mjnum[4];
- char minornum[4];
- char temp[32];
- int retcode;
-// int i; //aelias [-] reason : unused variable
- char *envp[] = { "HOME=/", "PATH=/usr/bin:/bin", NULL };
- char *argv[]={"-m 666",temp,"c",mjnum,minornum,NULL};
-
- info = pdata;
- DEBUG("ft1000_chdev:exec_mknod is called with major number = %d\n", info->DeviceMajor);
- sprintf(temp, "%s%s", "/dev/", info->DeviceName) ;
- sprintf(mjnum, "%d", info->DeviceMajor);
- sprintf(minornum, "%d", info->CardNumber);
-
- //char *argv[]={"mknod","-m 666",temp,"c",mjnum,minornum,NULL};
-// char *argv[]={"-m 666",temp,"c",mjnum,minornum,NULL};
-
- //for (i=0; i<7;i++)
- // DEBUG("argv[%d]=%s\n", i, argv[i]);
-
-
- retcode = call_usermodehelper ("/bin/mknod", argv, envp, 1);
- if (retcode) {
- DEBUG("ft1000_chdev:exec_mknod failed to make the node: retcode = %d\n", retcode);
- }
-
-
-
- return retcode;
-
-}
-
-//---------------------------------------------------------------------------
-// Function: rm_mknod
-//
-// Description: This module removes the FT1000 device file
-//
-//---------------------------------------------------------------------------
-static int rm_mknod (void *pdata)
-{
-
- struct ft1000_info *info;
- //char *argv[4]={"rm", "-f", "/dev/FT1000", NULL};
- int retcode;
- char temp[32];
- char *argv[]={"rm", "-f", temp, NULL};
-
- info = (struct ft1000_info *)pdata;
- DEBUG("ft1000_chdev:rm_mknod is called for device %s\n", info->DeviceName);
- sprintf(temp, "%s%s", "/dev/", info->DeviceName) ;
-
-// char *argv[]={"rm", "-f", temp, NULL};
-
- retcode = call_usermodehelper ("/bin/rm", argv, NULL, 1);
- if (retcode) {
- DEBUG("ft1000_chdev:rm_mknod failed to remove the node: retcode = %d\n", retcode);
- }
- else
- DEBUG("ft1000_chdev:rm_mknod done!\n");
-
-
- return retcode;
-
-}
//---------------------------------------------------------------------------
// Function: ft1000_get_buffer
//
@@ -233,80 +137,55 @@ void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
// Notes: Only called by init_module().
//
//---------------------------------------------------------------------------
-int ft1000_CreateDevice(struct ft1000_device *dev)
+int ft1000_create_dev(struct ft1000_device *dev)
{
struct ft1000_info *info = netdev_priv(dev->net);
int result;
int i;
- pid_t pid;
+ struct dentry *dir, *file;
+ struct ft1000_debug_dirs *tmp;
// make a new device name
- sprintf(info->DeviceName, "%s%d", "FT100", info->CardNumber);
-
- // Delete any existing FT1000 node
- pid = kernel_thread (rm_mknod,(void *)info, 0);
- msleep(1000);
+ sprintf(info->DeviceName, "%s%d", "FT1000_", info->CardNumber);
- DEBUG("ft1000_CreateDevice: number of instance = %d\n", ft1000_flarion_cnt);
+ DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
DEBUG("DeviceCreated = %x\n", info->DeviceCreated);
- //save the device info to global array
- pdevobj[info->CardNumber] = dev;
-
- DEBUG("ft1000_CreateDevice: ******SAVED pdevobj[%d]=%p\n", info->CardNumber, pdevobj[info->CardNumber]); //aelias [+] reason:up
-
if (info->DeviceCreated)
{
- DEBUG("ft1000_CreateDevice: \"%s\" already registered\n", info->DeviceName);
+ DEBUG("%s: \"%s\" already registered\n", __func__, info->DeviceName);
return -EIO;
}
// register the device
- DEBUG("ft1000_CreateDevice: \"%s\" device registration\n", info->DeviceName);
- info->DeviceMajor = 0;
-
- result = register_chrdev(info->DeviceMajor, info->DeviceName, &ft1000fops);
- if (result < 0)
- {
- DEBUG("ft1000_CreateDevice: unable to get major %d\n", info->DeviceMajor);
- return result;
- }
-
- DEBUG("ft1000_CreateDevice: registered char device \"%s\"\n", info->DeviceName);
+ DEBUG("%s: \"%s\" debugfs device registration\n", __func__, info->DeviceName);
- // save a dynamic device major number
- if (info->DeviceMajor == 0)
- {
- info->DeviceMajor = result;
- DEBUG("ft1000_PcdCreateDevice: device major = %d\n", info->DeviceMajor);
- }
+ tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
+ if (tmp == NULL) {
+ result = -1;
+ goto fail;
+ }
- // Create a thread to call user mode app to mknod
- pid = kernel_thread (exec_mknod, (void *)info, 0);
+ dir = debugfs_create_dir(info->DeviceName, 0);
+ if (IS_ERR(dir)) {
+ result = PTR_ERR(dir);
+ goto debug_dir_fail;
+ }
- // initialize application information
+ file = debugfs_create_file("device", S_IRUGO | S_IWUSR, dir,
+ dev, &ft1000fops);
+ if (IS_ERR(file)) {
+ result = PTR_ERR(file);
+ goto debug_file_fail;
+ }
-// if (ft1000_flarion_cnt == 0) {
-//
-// DEBUG("Initialize free_buff_lock and freercvpool\n");
-// spin_lock_init(&free_buff_lock);
-//
-// // initialize a list of buffers to be use for queuing up receive command data
-// INIT_LIST_HEAD (&freercvpool);
-//
-// // create list of free buffers
-// for (i=0; i<NUM_OF_FREE_BUFFERS; i++) {
-// // Get memory for DPRAM_DATA link list
-// pdpram_blk = kmalloc ( sizeof(struct dpram_blk), GFP_KERNEL );
-// // Get a block of memory to store command data
-// pdpram_blk->pbuffer = kmalloc ( MAX_CMD_SQSIZE, GFP_KERNEL );
-// // link provisioning data
-// list_add_tail (&pdpram_blk->list, &freercvpool);
-// }
-// numofmsgbuf = NUM_OF_FREE_BUFFERS;
-// }
+ tmp->dent = dir;
+ tmp->file = file;
+ tmp->int_number = info->CardNumber;
+ list_add(&(tmp->list), &(info->nodes.list));
+ DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, info->DeviceName);
// initialize application information
info->appcnt = 0;
@@ -323,17 +202,17 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
INIT_LIST_HEAD (&info->app_info[i].app_sqlist);
}
-
-
-
-// ft1000Handle[info->CardNumber] = devfs_register(NULL, info->DeviceName, DEVFS_FL_AUTO_DEVNUM, 0, 0,
-// S_IFCHR | S_IRUGO | S_IWUGO, &ft1000fops, NULL);
-
-
info->DeviceCreated = TRUE;
ft1000_flarion_cnt++;
- return result;
+ return 0;
+
+debug_file_fail:
+ debugfs_remove(dir);
+debug_dir_fail:
+ kfree(tmp);
+fail:
+ return result;
}
//---------------------------------------------------------------------------
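The ft1000_chdev.c -> ft1000_debug.c rework above replaces the old character-device registration (register_chrdev() plus kernel threads running /bin/mknod and /bin/rm via call_usermodehelper(), and the pdevobj[] global table) with a per-card debugfs directory and file, tracked on info->nodes.list so ft1000_destroy_dev() can unwind it later. A minimal sketch of the create/teardown pairing, with illustrative names and the same IS_ERR-style error handling the patch uses:

#include <linux/fs.h>
#include <linux/debugfs.h>

static struct dentry *dbg_dir, *dbg_file;

static int example_debugfs_create(const char *name, void *priv,
				  const struct file_operations *fops)
{
	dbg_dir = debugfs_create_dir(name, NULL);
	if (IS_ERR(dbg_dir))
		return PTR_ERR(dbg_dir);

	dbg_file = debugfs_create_file("device", S_IRUGO | S_IWUSR,
				       dbg_dir, priv, fops);
	if (IS_ERR(dbg_file)) {
		debugfs_remove(dbg_dir);	/* unwind, as the patch's goto labels do */
		return PTR_ERR(dbg_file);
	}
	return 0;
}

static void example_debugfs_destroy(void)
{
	debugfs_remove(dbg_file);
	debugfs_remove(dbg_dir);
}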
@@ -346,27 +225,33 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
// Notes: Only called by cleanup_module().
//
//---------------------------------------------------------------------------
-void ft1000_DestroyDevice(struct net_device *dev)
+void ft1000_destroy_dev(struct net_device *dev)
{
struct ft1000_info *info = netdev_priv(dev);
- int result = 0;
- pid_t pid;
int i;
struct dpram_blk *pdpram_blk;
struct dpram_blk *ptr;
+ struct list_head *pos, *q;
+ struct ft1000_debug_dirs *dir;
- DEBUG("ft1000_chdev:ft1000_DestroyDevice called\n");
+ DEBUG("%s called\n", __func__);
if (info->DeviceCreated)
{
ft1000_flarion_cnt--;
- unregister_chrdev(info->DeviceMajor, info->DeviceName);
- DEBUG("ft1000_DestroyDevice: unregistered device \"%s\", result = %d\n",
- info->DeviceName, result);
-
- pid = kernel_thread (rm_mknod, (void *)info, 0);
+ list_for_each_safe(pos, q, &info->nodes.list) {
+ dir = list_entry(pos, struct ft1000_debug_dirs, list);
+ if (dir->int_number == info->CardNumber) {
+ debugfs_remove(dir->file);
+ debugfs_remove(dir->dent);
+ list_del(pos);
+ kfree(dir);
+ }
+ }
+ DEBUG("%s: unregistered device \"%s\"\n", __func__,
+ info->DeviceName);
// Make sure we free any memory reserve for slow Queue
for (i=0; i<MAX_NUM_APP; i++) {
@@ -388,19 +273,14 @@ void ft1000_DestroyDevice(struct net_device *dev)
kfree(ptr);
}
}
-
-// devfs_unregister(ft1000Handle[info->CardNumber]);
-
info->DeviceCreated = FALSE;
-
- pdevobj[info->CardNumber] = NULL;
}
}
//---------------------------------------------------------------------------
-// Function: ft1000_ChOpen
+// Function: ft1000_open
//
// Parameters:
//
@@ -409,28 +289,19 @@ void ft1000_DestroyDevice(struct net_device *dev)
// Notes:
//
//---------------------------------------------------------------------------
-static int ft1000_ChOpen (struct inode *Inode, struct file *File)
+static int ft1000_open (struct inode *inode, struct file *file)
{
struct ft1000_info *info;
+ struct ft1000_device *dev = (struct ft1000_device *)inode->i_private;
int i,num;
- DEBUG("ft1000_ChOpen called\n");
- num = (MINOR(Inode->i_rdev) & 0xf);
- DEBUG("ft1000_ChOpen: minor number=%d\n", num);
+ DEBUG("%s called\n", __func__);
+ num = (MINOR(inode->i_rdev) & 0xf);
+ DEBUG("ft1000_open: minor number=%d\n", num);
- for (i=0; i<5; i++)
- DEBUG("pdevobj[%d]=%p\n", i, pdevobj[i]); //aelias [+] reason: down
+ info = file->private_data = netdev_priv(dev->net);
- if ( pdevobj[num] != NULL )
- //info = (struct ft1000_info *)(pdevobj[num]->net->priv);
- info = (struct ft1000_info *)netdev_priv(pdevobj[num]->net);
- else
- {
- DEBUG("ft1000_ChOpen: can not find device object %d\n", num);
- return -1;
- }
-
- DEBUG("f_owner = %p number of application = %d\n", (&File->f_owner), info->appcnt );
+ DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), info->appcnt );
// Check if maximum number of application exceeded
if (info->appcnt > MAX_NUM_APP) {
@@ -452,21 +323,19 @@ static int ft1000_ChOpen (struct inode *Inode, struct file *File)
}
info->appcnt++;
- info->app_info[i].fileobject = &File->f_owner;
+ info->app_info[i].fileobject = &file->f_owner;
info->app_info[i].nTxMsg = 0;
info->app_info[i].nRxMsg = 0;
info->app_info[i].nTxMsgReject = 0;
info->app_info[i].nRxMsgMiss = 0;
- File->private_data = pdevobj[num]->net;
-
- nonseekable_open(Inode, File);
+ nonseekable_open(inode, file);
return 0;
}
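ft1000_open above no longer scans the removed pdevobj[] array by minor number: the pointer passed as the data argument of debugfs_create_file() comes back as inode->i_private, and the per-card info is stashed in file->private_data for the other entry points. Stripped-down sketch (names illustrative):

	static int example_open(struct inode *inode, struct file *file)
	{
		struct ft1000_device *dev = inode->i_private;	/* set by debugfs_create_file() */

		file->private_data = netdev_priv(dev->net);	/* the ft1000_info */
		return nonseekable_open(inode, file);
	}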
//---------------------------------------------------------------------------
-// Function: ft1000_ChPoll
+// Function: ft1000_poll_dev
//
// Parameters:
//
@@ -476,47 +345,47 @@ static int ft1000_ChOpen (struct inode *Inode, struct file *File)
//
//---------------------------------------------------------------------------
-static unsigned int ft1000_ChPoll(struct file *file, poll_table *wait)
+static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
{
struct net_device *dev = file->private_data;
struct ft1000_info *info;
int i;
- //DEBUG("ft1000_ChPoll called\n");
+ //DEBUG("ft1000_poll_dev called\n");
if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_ChPoll called when ft1000_flarion_cnt is zero\n");
+ DEBUG("FT1000:ft1000_poll_dev called when ft1000_flarion_cnt is zero\n");
return (-EBADF);
}
- info = (struct ft1000_info *) netdev_priv(dev);
+ info = netdev_priv(dev);
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
if ( info->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
break;
}
}
// Could not find application info block
if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ChIoctl:Could not find application info block\n");
+ DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
return ( -EACCES );
}
if (list_empty(&info->app_info[i].app_sqlist) == 0) {
- DEBUG("FT1000:ft1000_ChPoll:Message detected in slow queue\n");
+ DEBUG("FT1000:ft1000_poll_dev:Message detected in slow queue\n");
return(POLLIN | POLLRDNORM | POLLPRI);
}
poll_wait (file, &info->app_info[i].wait_dpram_msg, wait);
- //DEBUG("FT1000:ft1000_ChPoll:Polling for data from DSP\n");
+ //DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n");
return (0);
}
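The poll handler above keeps its original logic under the new ft1000_poll_dev name: report the slow queue as readable if it already holds a message, otherwise register on the per-application wait queue and return 0 so the caller sleeps until the driver wakes it when a message arrives. The generic shape of that pattern, with a hypothetical state structure for illustration:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>

/* Hypothetical state, not the driver's struct. */
struct example_app {
	struct list_head  queue;	/* pending slow-queue messages */
	wait_queue_head_t waitq;	/* woken when a message is queued */
};

static unsigned int example_poll(struct file *file, poll_table *wait)
{
	struct example_app *app = file->private_data;

	if (!list_empty(&app->queue))
		return POLLIN | POLLRDNORM | POLLPRI;

	poll_wait(file, &app->waitq, wait);
	return 0;
}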
//---------------------------------------------------------------------------
-// Function: ft1000_ChIoctl
+// Function: ft1000_ioctl
//
// Parameters:
//
@@ -525,11 +394,10 @@ static unsigned int ft1000_ChPoll(struct file *file, poll_table *wait)
// Notes:
//
//---------------------------------------------------------------------------
-static long ft1000_ChIoctl (struct file *File, unsigned int Command,
- unsigned long Argument)
+static long ft1000_ioctl (struct file *file, unsigned int command,
+ unsigned long argument)
{
- void __user *argp = (void __user *)Argument;
- struct net_device *dev;
+ void __user *argp = (void __user *)argument;
struct ft1000_info *info;
struct ft1000_device *ft1000dev;
int result=0;
@@ -550,25 +418,24 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
unsigned short ledStat=0;
unsigned short conStat=0;
- //DEBUG("ft1000_ChIoctl called\n");
+ //DEBUG("ft1000_ioctl called\n");
if (ft1000_flarion_cnt == 0) {
- DEBUG("FT1000:ft1000_ChIoctl called when ft1000_flarion_cnt is zero\n");
+ DEBUG("FT1000:ft1000_ioctl called when ft1000_flarion_cnt is zero\n");
return (-EBADF);
}
- //DEBUG("FT1000:ft1000_ChIoctl:Command = 0x%x Argument = 0x%8x\n", Command, (u32)Argument);
+ //DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument);
- dev = File->private_data;
- info = (struct ft1000_info *) netdev_priv(dev);
- ft1000dev = info->pFt1000Dev;
- cmd = _IOC_NR(Command);
- //DEBUG("FT1000:ft1000_ChIoctl:cmd = 0x%x\n", cmd);
+ info = file->private_data;
+ ft1000dev = info->pFt1000Dev;
+ cmd = _IOC_NR(command);
+ //DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd);
// process the command
switch (cmd) {
case IOCTL_REGISTER_CMD:
- DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_REGISTER called\n");
+ DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_REGISTER called\n");
result = get_user(tempword, (__u16 __user*)argp);
if (result) {
DEBUG("result = %d failed to get_user\n", result);
@@ -577,9 +444,9 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
if (tempword == DSPBCMSGID) {
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &File->f_owner) {
+ if ( info->app_info[i].fileobject == &file->f_owner) {
info->app_info[i].DspBCMsgFlag = 1;
- DEBUG("FT1000:ft1000_ChIoctl:Registered for broadcast messages\n");
+ DEBUG("FT1000:ft1000_ioctl:Registered for broadcast messages\n");
break;
}
}
@@ -587,34 +454,34 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
break;
case IOCTL_GET_VER_CMD:
- DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_VER called\n");
+ DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_VER called\n");
get_ver_data.drv_ver = FT1000_DRV_VER;
if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data)) ) {
- DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
+ DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
result = -EFAULT;
break;
}
- DEBUG("FT1000:ft1000_ChIoctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
+ DEBUG("FT1000:ft1000_ioctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
break;
case IOCTL_CONNECT:
// Connect Message
- DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_CONNECT\n");
+ DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_CONNECT\n");
ConnectionMsg[79] = 0xfc;
CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
break;
case IOCTL_DISCONNECT:
// Disconnect Message
- DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_DISCONNECT\n");
+ DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_DISCONNECT\n");
ConnectionMsg[79] = 0xfd;
CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
break;
case IOCTL_GET_DSP_STAT_CMD:
- //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DSP_STAT called\n");
+ //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n");
memset(&get_stat_data, 0, sizeof(get_stat_data));
memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
@@ -622,12 +489,12 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
memcpy(get_stat_data.eui64, info->eui64, EUISZ);
if (info->ProgConStat != 0xFF) {
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (PUCHAR)&ledStat, FT1000_MAG_DSP_LED_INDX);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
get_stat_data.LedStat = ntohs(ledStat);
- DEBUG("FT1000:ft1000_ChIoctl: LedStat = 0x%x\n", get_stat_data.LedStat);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (PUCHAR)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+ DEBUG("FT1000:ft1000_ioctl: LedStat = 0x%x\n", get_stat_data.LedStat);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
get_stat_data.ConStat = ntohs(conStat);
- DEBUG("FT1000:ft1000_ChIoctl: ConStat = 0x%x\n", get_stat_data.ConStat);
+ DEBUG("FT1000:ft1000_ioctl: ConStat = 0x%x\n", get_stat_data.ConStat);
}
else {
get_stat_data.ConStat = 0x0f;
@@ -642,7 +509,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data)) ) {
- DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
+ DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
result = -EFAULT;
break;
}
@@ -650,17 +517,17 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
break;
case IOCTL_SET_DPRAM_CMD:
{
- IOCTL_DPRAM_BLK *dpram_data;
+ IOCTL_DPRAM_BLK *dpram_data = NULL;
//IOCTL_DPRAM_COMMAND dpram_command;
- USHORT qtype;
- USHORT msgsz;
+ u16 qtype;
+ u16 msgsz;
struct pseudo_hdr *ppseudo_hdr;
- PUSHORT pmsg;
- USHORT total_len;
- USHORT app_index;
+ u16 *pmsg;
+ u16 total_len;
+ u16 app_index;
u16 status;
- //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_SET_DPRAM called\n");
+ //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");
if (ft1000_flarion_cnt == 0) {
@@ -679,15 +546,15 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
if (info->CardReady) {
- //DEBUG("FT1000:ft1000_ChIoctl: try to SET_DPRAM \n");
+ //DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n");
// Get the length field to see how many bytes to copy
result = get_user(msgsz, (__u16 __user *)argp);
msgsz = ntohs (msgsz);
- //DEBUG("FT1000:ft1000_ChIoctl: length of message = %d\n", msgsz);
+ //DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz);
if (msgsz > MAX_CMD_SQSIZE) {
- DEBUG("FT1000:ft1000_ChIoctl: bad message length = %d\n", msgsz);
+ DEBUG("FT1000:ft1000_ioctl: bad message length = %d\n", msgsz);
result = -EINVAL;
break;
}
@@ -697,22 +564,14 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
if (!dpram_data)
break;
- //if ( copy_from_user(&(dpram_command.dpram_blk), (PIOCTL_DPRAM_BLK)Argument, msgsz+2) ) {
- if ( copy_from_user(&dpram_data, argp, msgsz+2) ) {
+ if ( copy_from_user(dpram_data, argp, msgsz+2) ) {
DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
result = -EFAULT;
}
else {
-#if 0
- // whc - for debugging only
- ptr = (char *)&dpram_data;
- for (i=0; i<msgsz; i++) {
- DEBUG(1,"FT1000:ft1000_ChIoctl: data %d = 0x%x\n", i, *ptr++);
- }
-#endif
// Check if this message came from a registered application
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &File->f_owner) {
+ if ( info->app_info[i].fileobject == &file->f_owner) {
break;
}
}
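Besides swapping the remaining Win32-style typedefs (USHORT, PUSHORT, PUCHAR) for the kernel's u16 and u8 types, the IOCTL_SET_DPRAM_CMD hunk above fixes the copy_from_user() destination: the old code passed &dpram_data, the address of the local pointer itself, so the user data overwrote the pointer rather than landing in the kmalloc'd block. The corrected shape, roughly (error handling condensed for illustration):

	IOCTL_DPRAM_BLK *dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);

	if (!dpram_data)
		return -ENOMEM;
	if (copy_from_user(dpram_data, argp, msgsz + 2)) {	/* buffer, not &buffer */
		kfree(dpram_data);
		return -EFAULT;
	}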
@@ -725,16 +584,15 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
app_index = i;
// Check message qtype type which is the lower byte within qos_class
- //qtype = ntohs(dpram_command.dpram_blk.pseudohdr.qos_class) & 0xff;
qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
- //DEBUG("FT1000_ft1000_ChIoctl: qtype = %d\n", qtype);
+ //DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype);
if (qtype) {
}
else {
// Put message into Slow Queue
// Only put a message into the DPRAM if msg doorbell is available
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- //DEBUG("FT1000_ft1000_ChIoctl: READ REGISTER tempword=%x\n", tempword);
+ //DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword);
if (tempword & FT1000_DB_DPRAM_TX) {
// Suspend for 2ms and try again due to DSP doorbell busy
mdelay(2);
@@ -750,7 +608,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
mdelay(3);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
- DEBUG("FT1000:ft1000_ChIoctl:Doorbell not available\n");
+ DEBUG("FT1000:ft1000_ioctl:Doorbell not available\n");
result = -ENOTTY;
kfree(dpram_data);
break;
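// The SET_DPRAM path above only writes a message into DPRAM once the DSP's
// doorbell bit (FT1000_DB_DPRAM_TX) reads back clear, retrying a few times
// with short mdelay()s before giving up. A minimal sketch of that bounded
// poll; read_doorbell(), wait_doorbell_clear() and the retry count are
// hypothetical stand-ins for the ft1000_read_register() calls above.
#include <linux/types.h>
#include <linux/delay.h>

static u16 read_doorbell(void)
{
        return 0;       /* stub: a real driver reads FT1000_REG_DOORBELL */
}

static bool wait_doorbell_clear(u16 busy_bit, int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (!(read_doorbell() & busy_bit))
                        return true;    /* doorbell free, safe to write */
                mdelay(2);              /* DSP still owns the TX doorbell */
        }
        return false;
}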
@@ -760,13 +618,12 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
}
}
- //DEBUG("FT1000_ft1000_ChIoctl: finished reading register\n");
+ //DEBUG("FT1000_ft1000_ioctl: finished reading register\n");
// Make sure we are within the limits of the slow queue memory limitation
if ( (msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ) ) {
// Need to put sequence number plus new checksum for message
- //pmsg = (PUSHORT)&dpram_command.dpram_blk.pseudohdr;
- pmsg = (PUSHORT)&dpram_data->pseudohdr;
+ pmsg = (u16 *)&dpram_data->pseudohdr;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
total_len = msgsz+2;
if (total_len & 0x1) {
@@ -785,17 +642,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
}
pmsg++;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
-#if 0
- ptr = dpram_data;
- DEBUG("FT1000:ft1000_ChIoctl: Command Send\n");
- for (i=0; i<total_len; i++) {
- DEBUG("FT1000:ft1000_ChIoctl: data %d = 0x%x\n", i, *ptr++);
- }
-#endif
- //dpram_command.extra = 0;
-
- //CardSendCommand(ft1000dev,(unsigned char*)&dpram_command,total_len+2);
- CardSendCommand(ft1000dev,(unsigned short*)dpram_data,total_len+2);
+ CardSendCommand(ft1000dev,(unsigned short*)dpram_data,total_len+2);
info->app_info[app_index].nTxMsg++;
@@ -807,7 +654,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
}
}
else {
- DEBUG("FT1000:ft1000_ChIoctl: Card not ready take messages\n");
+ DEBUG("FT1000:ft1000_ioctl: Card not ready take messages\n");
result = -EACCES;
}
kfree(dpram_data);
@@ -820,7 +667,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
IOCTL_DPRAM_BLK __user *pioctl_dpram;
int msglen;
- //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DPRAM called\n");
+ //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n");
if (ft1000_flarion_cnt == 0) {
return (-EBADF);
@@ -828,15 +675,15 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &File->f_owner) {
- //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ if ( info->app_info[i].fileobject == &file->f_owner) {
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
break;
}
}
// Could not find application info block
if (i == MAX_NUM_APP) {
- DEBUG("FT1000:ft1000_ChIoctl:Could not find application info block\n");
+ DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
result = -EBADF;
break;
}
@@ -844,22 +691,22 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
result = 0;
pioctl_dpram = argp;
if (list_empty(&info->app_info[i].app_sqlist) == 0) {
- //DEBUG("FT1000:ft1000_ChIoctl:Message detected in slow queue\n");
+ //DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n");
spin_lock_irqsave(&free_buff_lock, flags);
pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
list_del(&pdpram_blk->list);
info->app_info[i].NumOfMsg--;
- //DEBUG("FT1000:ft1000_ChIoctl:NumOfMsg for app %d = %d\n", i, info->app_info[i].NumOfMsg);
+ //DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, info->app_info[i].NumOfMsg);
spin_unlock_irqrestore(&free_buff_lock, flags);
msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
result = get_user(msglen, &pioctl_dpram->total_len);
if (result)
break;
msglen = htons(msglen);
- //DEBUG("FT1000:ft1000_ChIoctl:msg length = %x\n", msglen);
+ //DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen);
if(copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen))
{
- DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
+ DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
result = -EFAULT;
break;
}
@@ -867,12 +714,12 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
ft1000_free_buffer(pdpram_blk, &freercvpool);
result = msglen;
}
- //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DPRAM no message\n");
+ //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n");
}
break;
default:
- DEBUG("FT1000:ft1000_ChIoctl:unknown command: 0x%x\n", Command);
+ DEBUG("FT1000:ft1000_ioctl:unknown command: 0x%x\n", command);
result = -ENOTTY;
break;
}
@@ -881,7 +728,7 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
}
//---------------------------------------------------------------------------
-// Function: ft1000_ChRelease
+// Function: ft1000_release
//
// Parameters:
//
@@ -890,17 +737,17 @@ static long ft1000_ChIoctl (struct file *File, unsigned int Command,
// Notes:
//
//---------------------------------------------------------------------------
-static int ft1000_ChRelease (struct inode *Inode, struct file *File)
+static int ft1000_release (struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct net_device *dev;
int i;
struct dpram_blk *pdpram_blk;
- DEBUG("ft1000_ChRelease called\n");
+ DEBUG("ft1000_release called\n");
- dev = File->private_data;
- info = (struct ft1000_info *) netdev_priv(dev);
+ dev = file->private_data;
+ info = netdev_priv(dev);
if (ft1000_flarion_cnt == 0) {
info->appcnt--;
@@ -909,8 +756,8 @@ static int ft1000_ChRelease (struct inode *Inode, struct file *File)
// Search for matching file object
for (i=0; i<MAX_NUM_APP; i++) {
- if ( info->app_info[i].fileobject == &File->f_owner) {
- //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+ if ( info->app_info[i].fileobject == &file->f_owner) {
+ //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
break;
}
}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 4dd456fbab9b..17546d8ec08d 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -123,11 +123,11 @@ struct dsp_image_info {
// Notes:
//
//---------------------------------------------------------------------------
-static ULONG check_usb_db (struct ft1000_device *ft1000dev)
+static u32 check_usb_db (struct ft1000_device *ft1000dev)
{
int loopcnt;
- USHORT temp;
- ULONG status;
+ u16 temp;
+ u32 status;
loopcnt = 0;
while (loopcnt < 10)
@@ -190,7 +190,7 @@ static ULONG check_usb_db (struct ft1000_device *ft1000dev)
// Function: get_handshake
//
// Parameters: struct ft1000_device - device structure
-// USHORT expected_value - the handshake value expected
+// u16 expected_value - the handshake value expected
//
// Returns: handshakevalue - success
// HANDSHAKE_TIMEOUT_VALUE - failure
@@ -200,11 +200,11 @@ static ULONG check_usb_db (struct ft1000_device *ft1000dev)
// Notes:
//
//---------------------------------------------------------------------------
-static USHORT get_handshake(struct ft1000_device *ft1000dev, USHORT expected_value)
+static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
{
- USHORT handshake;
+ u16 handshake;
int loopcnt;
- ULONG status=0;
+ u32 status=0;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
loopcnt = 0;
@@ -228,7 +228,7 @@ static USHORT get_handshake(struct ft1000_device *ft1000dev, USHORT expected_val
status = ft1000_write_register (ft1000dev, FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL);
}
- status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+ status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
//DEBUG("get_handshake: handshake is %x\n", tempx);
handshake = ntohs(handshake);
//DEBUG("get_handshake: after swap, handshake is %x\n", handshake);
@@ -259,7 +259,7 @@ static USHORT get_handshake(struct ft1000_device *ft1000dev, USHORT expected_val
// Function: put_handshake
//
// Parameters: struct ft1000_device - device structure
-// USHORT handshake_value - handshake to be written
+// u16 handshake_value - handshake to be written
//
// Returns: none
//
@@ -269,30 +269,30 @@ static USHORT get_handshake(struct ft1000_device *ft1000dev, USHORT expected_val
// Notes:
//
//---------------------------------------------------------------------------
-static void put_handshake(struct ft1000_device *ft1000dev,USHORT handshake_value)
+static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value)
{
- ULONG tempx;
- USHORT tempword;
- ULONG status;
+ u32 tempx;
+ u16 tempword;
+ u32 status;
- tempx = (ULONG)handshake_value;
+ tempx = (u32)handshake_value;
tempx = ntohl(tempx);
- tempword = (USHORT)(tempx & 0xffff);
+ tempword = (u16)(tempx & 0xffff);
status = ft1000_write_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 0);
- tempword = (USHORT)(tempx >> 16);
+ tempword = (u16)(tempx >> 16);
status = ft1000_write_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 1);
status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL);
}
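// The DPRAM interface only takes 16-bit accesses, so put_handshake() above
// byte-swaps the 32-bit value and writes it as a low half (highlow == 0)
// followed by a high half (highlow == 1). A minimal sketch of that split
// using the u16/u32 types this patch introduces; split_u32() is a
// hypothetical name.
#include <linux/types.h>

static void split_u32(u32 value, u16 *lo, u16 *hi)
{
        *lo = (u16)(value & 0xffff);    /* first ft1000_write_dpram16(), highlow == 0 */
        *hi = (u16)(value >> 16);       /* second ft1000_write_dpram16(), highlow == 1 */
}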
-static USHORT get_handshake_usb(struct ft1000_device *ft1000dev, USHORT expected_value)
+static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 expected_value)
{
- USHORT handshake;
+ u16 handshake;
int loopcnt;
- USHORT temp;
- ULONG status=0;
+ u16 temp;
+ u32 status=0;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
loopcnt = 0;
@@ -300,10 +300,10 @@ static USHORT get_handshake_usb(struct ft1000_device *ft1000dev, USHORT expected
while (loopcnt < 100)
{
if (pft1000info->usbboot == 2) {
- status = ft1000_read_dpram32 (ft1000dev, 0, (PUCHAR)&(pft1000info->tempbuf[0]), 64);
+ status = ft1000_read_dpram32 (ft1000dev, 0, (u8 *)&(pft1000info->tempbuf[0]), 64);
for (temp=0; temp<16; temp++)
DEBUG("tempbuf %d = 0x%x\n", temp, pft1000info->tempbuf[temp]);
- status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+ status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
DEBUG("handshake from read_dpram16 = 0x%x\n", handshake);
if (pft1000info->dspalive == pft1000info->tempbuf[6])
handshake = 0;
@@ -313,7 +313,7 @@ static USHORT get_handshake_usb(struct ft1000_device *ft1000dev, USHORT expected
}
}
else {
- status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+ status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
}
loopcnt++;
msleep(10);
@@ -327,7 +327,7 @@ static USHORT get_handshake_usb(struct ft1000_device *ft1000dev, USHORT expected
return HANDSHAKE_TIMEOUT_VALUE;
}
-static void put_handshake_usb(struct ft1000_device *ft1000dev,USHORT handshake_value)
+static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_value)
{
int i;
@@ -346,44 +346,44 @@ static void put_handshake_usb(struct ft1000_device *ft1000dev,USHORT handshake_v
// Notes:
//
//---------------------------------------------------------------------------
-static USHORT get_request_type(struct ft1000_device *ft1000dev)
+static u16 get_request_type(struct ft1000_device *ft1000dev)
{
- USHORT request_type;
- ULONG status;
- USHORT tempword;
- ULONG tempx;
+ u16 request_type;
+ u32 status;
+ u16 tempword;
+ u32 tempx;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
if ( pft1000info->bootmode == 1)
{
- status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempx);
+ status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
}
else
{
tempx = 0;
- status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempword, 1);
+ status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
tempx |= (tempword << 16);
tempx = ntohl(tempx);
}
- request_type = (USHORT)tempx;
+ request_type = (u16)tempx;
//DEBUG("get_request_type: request_type is %x\n", request_type);
return request_type;
}
-static USHORT get_request_type_usb(struct ft1000_device *ft1000dev)
+static u16 get_request_type_usb(struct ft1000_device *ft1000dev)
{
- USHORT request_type;
- ULONG status;
- USHORT tempword;
- ULONG tempx;
+ u16 request_type;
+ u32 status;
+ u16 tempword;
+ u32 tempx;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
if ( pft1000info->bootmode == 1)
{
- status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempx);
+ status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
tempx = ntohl(tempx);
}
else
@@ -394,12 +394,12 @@ static USHORT get_request_type_usb(struct ft1000_device *ft1000dev)
}
else {
tempx = 0;
- status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempword, 1);
+ status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
}
tempx |= (tempword << 16);
tempx = ntohl(tempx);
}
- request_type = (USHORT)tempx;
+ request_type = (u16)tempx;
//DEBUG("get_request_type: request_type is %x\n", request_type);
return request_type;
@@ -420,22 +420,22 @@ static USHORT get_request_type_usb(struct ft1000_device *ft1000dev)
//---------------------------------------------------------------------------
static long get_request_value(struct ft1000_device *ft1000dev)
{
- ULONG value;
- USHORT tempword;
- ULONG status;
+ u32 value;
+ u16 tempword;
+ u32 status;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
if ( pft1000info->bootmode == 1)
{
- status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&value);
+ status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
value = ntohl(value);
}
else
{
- status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 0);
+ status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
value = tempword;
- status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 1);
+ status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
value |= (tempword << 16);
value = ntohl(value);
}
@@ -449,9 +449,9 @@ static long get_request_value(struct ft1000_device *ft1000dev)
#if 0
static long get_request_value_usb(struct ft1000_device *ft1000dev)
{
- ULONG value;
- USHORT tempword;
- ULONG status;
+ u32 value;
+ u16 tempword;
+ u32 status;
struct ft1000_info * pft1000info = netdev_priv(ft1000dev->net);
if (pft1000info->usbboot == 2) {
@@ -460,7 +460,7 @@ static long get_request_value_usb(struct ft1000_device *ft1000dev)
}
else {
value = 0;
- status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 1);
+ status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
}
value |= (tempword << 16);
@@ -490,11 +490,11 @@ static long get_request_value_usb(struct ft1000_device *ft1000dev)
//---------------------------------------------------------------------------
static void put_request_value(struct ft1000_device *ft1000dev, long lvalue)
{
- ULONG tempx;
- ULONG status;
+ u32 tempx;
+ u32 status;
tempx = ntohl(lvalue);
- status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempx);
+ status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempx);
@@ -516,10 +516,10 @@ static void put_request_value(struct ft1000_device *ft1000dev, long lvalue)
// Notes:
//
//---------------------------------------------------------------------------
-static USHORT hdr_checksum(struct pseudo_hdr *pHdr)
+static u16 hdr_checksum(struct pseudo_hdr *pHdr)
{
- USHORT *usPtr = (USHORT *)pHdr;
- USHORT chksum;
+ u16 *usPtr = (u16 *)pHdr;
+ u16 chksum;
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
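// The checksum expression above is truncated by the hunk, but later hunks in
// this diff (ft1000_copy_up_pkt and the RSP_DRV_ERR_RPT_MSG path) spell out
// the same rule: XOR the first seven 16-bit words of the 16-byte pseudo
// header and keep the result in the eighth word. A minimal sketch of that
// rule; pseudo_hdr_checksum() is a hypothetical name, not a driver symbol.
#include <linux/types.h>

static u16 pseudo_hdr_checksum(const u16 hdr[8])
{
        u16 chksum = hdr[0];
        int i;

        /* words 0..6 are header fields, word 7 holds the checksum */
        for (i = 1; i < 7; i++)
                chksum ^= hdr[i];
        return chksum;
}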
@@ -533,8 +533,8 @@ static USHORT hdr_checksum(struct pseudo_hdr *pHdr)
// Function: write_blk
//
// Parameters: struct ft1000_device - device structure
-// USHORT **pUsFile - DSP image file pointer in USHORT
-// UCHAR **pUcFile - DSP image file pointer in UCHAR
+// u16 **pUsFile - DSP image file pointer in u16
+// u8 **pUcFile - DSP image file pointer in u8
// long word_length - length of the buffer to be written
// to DPRAM
//
@@ -546,20 +546,20 @@ static USHORT hdr_checksum(struct pseudo_hdr *pHdr)
// Notes:
//
//---------------------------------------------------------------------------
-static ULONG write_blk (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR **pUcFile, long word_length)
+static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
{
- ULONG Status = STATUS_SUCCESS;
- USHORT dpram;
+ u32 Status = STATUS_SUCCESS;
+ u16 dpram;
long temp_word_length;
int loopcnt, i, j;
- USHORT *pTempFile;
- USHORT tempword;
- USHORT tempbuffer[64];
- USHORT resultbuffer[64];
+ u16 *pTempFile;
+ u16 tempword;
+ u16 tempbuffer[64];
+ u16 resultbuffer[64];
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
//DEBUG("FT1000:download:start word_length = %d\n",(int)word_length);
- dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
tempword = *(*pUsFile);
(*pUsFile)++;
Status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0);
@@ -569,7 +569,7 @@ static ULONG write_blk (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR
*pUcFile = *pUcFile + 4;
word_length--;
- tempword = (USHORT)word_length;
+ tempword = (u16)word_length;
word_length = (word_length / 16) + 1;
pTempFile = *pUsFile;
temp_word_length = word_length;
@@ -602,24 +602,24 @@ static ULONG write_blk (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR
if (pft1000info->bootmode == 0)
{
if (dpram >= 0x3F4)
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 8);
+ Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8);
else
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 64);
+ Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
}
else
{
for (j=0; j<10; j++)
{
- Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 64);
+ Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
if (Status == STATUS_SUCCESS)
{
// Work around for ASIC bit stuffing problem.
if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
{
- Status = ft1000_write_dpram32(ft1000dev, dpram+12, (PUCHAR)&tempbuffer[24], 64);
+ Status = ft1000_write_dpram32(ft1000dev, dpram+12, (u8 *)&tempbuffer[24], 64);
}
// Let's check the data written
- Status = ft1000_read_dpram32 (ft1000dev, dpram, (PUCHAR)&resultbuffer[0], 64);
+ Status = ft1000_read_dpram32 (ft1000dev, dpram, (u8 *)&resultbuffer[0], 64);
if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
{
for (i=0; i<28; i++)
@@ -633,7 +633,7 @@ static ULONG write_blk (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR
break;
}
}
- Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (PUCHAR)&resultbuffer[0], 64);
+ Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (u8 *)&resultbuffer[0], 64);
for (i=0; i<16; i++)
{
if (resultbuffer[i] != tempbuffer[i+24])
@@ -689,8 +689,8 @@ static void usb_dnld_complete (struct urb *urb)
// Function: write_blk_fifo
//
// Parameters: struct ft1000_device - device structure
-// USHORT **pUsFile - DSP image file pointer in USHORT
-// UCHAR **pUcFile - DSP image file pointer in UCHAR
+// u16 **pUsFile - DSP image file pointer in u16
+// u8 **pUcFile - DSP image file pointer in u8
// long word_length - length of the buffer to be written
// to DPRAM
//
@@ -702,9 +702,9 @@ static void usb_dnld_complete (struct urb *urb)
// Notes:
//
//---------------------------------------------------------------------------
-static ULONG write_blk_fifo (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR **pUcFile, long word_length)
+static u32 write_blk_fifo (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
{
- ULONG Status = STATUS_SUCCESS;
+ u32 Status = STATUS_SUCCESS;
int byte_length;
long aligncnt;
@@ -770,36 +770,36 @@ static ULONG write_blk_fifo (struct ft1000_device *ft1000dev, USHORT **pUsFile,
// Returns: status - return code
//---------------------------------------------------------------------------
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLength)
+u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, u32 FileLength)
{
- u16 Status = STATUS_SUCCESS;
- UINT uiState;
- USHORT handshake;
- struct pseudo_hdr *pHdr;
- USHORT usHdrLength;
+ u16 status = STATUS_SUCCESS;
+ u32 state;
+ u16 handshake;
+ struct pseudo_hdr *pseudo_header;
+ u16 pseudo_header_len;
long word_length;
- USHORT request;
- USHORT temp;
- USHORT tempword;
+ u16 request;
+ u16 temp;
+ u16 tempword;
- struct dsp_file_hdr *pFileHdr5;
- struct dsp_image_info *pDspImageInfoV6 = NULL;
+ struct dsp_file_hdr *file_hdr;
+ struct dsp_image_info *dsp_img_info = NULL;
long requested_version;
- BOOLEAN bGoodVersion;
- struct drv_msg *pMailBoxData;
- USHORT *pUsData = NULL;
- USHORT *pUsFile = NULL;
- UCHAR *pUcFile = NULL;
- UCHAR *pBootEnd = NULL, *pCodeEnd= NULL;
- int imageN;
+ bool correct_version;
+ struct drv_msg *mailbox_data;
+ u16 *data = NULL;
+ u16 *s_file = NULL;
+ u8 *c_file = NULL;
+ u8 *boot_end = NULL, *code_end= NULL;
+ int image;
long loader_code_address, loader_code_size = 0;
long run_address = 0, run_size = 0;
- ULONG templong;
- ULONG image_chksum = 0;
+ u32 templong;
+ u32 image_chksum = 0;
- USHORT dpram = 0;
- PUCHAR pbuffer;
+ u16 dpram = 0;
+ u8 *pbuffer;
struct prov_record *pprov_record;
struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
@@ -814,24 +814,24 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
// Get version id of file, at first 4 bytes of file, for newer files.
//
- uiState = STATE_START_DWNLD;
+ state = STATE_START_DWNLD;
- pFileHdr5 = (struct dsp_file_hdr *)pFileStart;
+ file_hdr = (struct dsp_file_hdr *)pFileStart;
ft1000_write_register (ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);
- pUsFile = (USHORT *)(pFileStart + pFileHdr5->loader_offset);
- pUcFile = (UCHAR *)(pFileStart + pFileHdr5->loader_offset);
+ s_file = (u16 *)(pFileStart + file_hdr->loader_offset);
+ c_file = (u8 *)(pFileStart + file_hdr->loader_offset);
- pBootEnd = (UCHAR *)(pFileStart + pFileHdr5->loader_code_end);
+ boot_end = (u8 *)(pFileStart + file_hdr->loader_code_end);
- loader_code_address = pFileHdr5->loader_code_address;
- loader_code_size = pFileHdr5->loader_code_size;
- bGoodVersion = FALSE;
+ loader_code_address = file_hdr->loader_code_address;
+ loader_code_size = file_hdr->loader_code_size;
+ correct_version = FALSE;
- while ((Status == STATUS_SUCCESS) && (uiState != STATE_DONE_FILE))
+ while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE))
{
- switch (uiState)
+ switch (state)
{
case STATE_START_DWNLD:
DEBUG("FT1000:STATE_START_DWNLD\n");
@@ -848,10 +848,10 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
else
{
DEBUG("FT1000:download:Download error: Handshake failed\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
- uiState = STATE_BOOT_DWNLD;
+ state = STATE_BOOT_DWNLD;
break;
@@ -878,11 +878,11 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
case REQUEST_DONE_BL:
DEBUG("FT1000:REQUEST_DONE_BL\n");
/* Reposition ptrs to beginning of code section */
- pUsFile = (USHORT *)(pBootEnd);
- pUcFile = (UCHAR *)(pBootEnd);
- //DEBUG("FT1000:download:pUsFile = 0x%8x\n", (int)pUsFile);
- //DEBUG("FT1000:download:pUcFile = 0x%8x\n", (int)pUcFile);
- uiState = STATE_CODE_DWNLD;
+ s_file = (u16 *)(boot_end);
+ c_file = (u8 *)(boot_end);
+ //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file);
+ //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file);
+ state = STATE_CODE_DWNLD;
pft1000info->fcodeldr = 1;
break;
case REQUEST_CODE_SEGMENT:
@@ -893,33 +893,33 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
if (word_length > MAX_LENGTH)
{
DEBUG("FT1000:download:Download error: Max length exceeded\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
- if ( (word_length*2 + pUcFile) > pBootEnd)
+ if ( (word_length*2 + c_file) > boot_end)
{
/*
* Error, beyond boot code range.
*/
DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundry.\n",
(int)word_length);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
word_length = word_length / 2;
- Status = write_blk(ft1000dev, &pUsFile, &pUcFile, word_length);
- //DEBUG("write_blk returned %d\n", Status);
+ status = write_blk(ft1000dev, &s_file, &c_file, word_length);
+ //DEBUG("write_blk returned %d\n", status);
break;
default:
DEBUG("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",request);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
if (pft1000info->usbboot)
@@ -930,7 +930,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
else
{
DEBUG("FT1000:download:Download error: Handshake failed\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
break;
@@ -959,7 +959,7 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
break;
case REQUEST_RUN_ADDRESS:
DEBUG("FT1000:download: REQUEST_RUN_ADDRESS\n");
- if (bGoodVersion)
+ if (correct_version)
{
DEBUG("FT1000:download:run_address = 0x%8x\n", (int)run_address);
put_request_value(ft1000dev, run_address);
@@ -967,13 +967,13 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
else
{
DEBUG("FT1000:download:Download error: Got Run address request before image offset request.\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
break;
case REQUEST_CODE_LENGTH:
DEBUG("FT1000:download:REQUEST_CODE_LENGTH\n");
- if (bGoodVersion)
+ if (correct_version)
{
DEBUG("FT1000:download:run_size = 0x%8x\n", (int)run_size);
put_request_value(ft1000dev, run_size);
@@ -981,23 +981,23 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
else
{
DEBUG("FT1000:download:Download error: Got Size request before image offset request.\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
break;
case REQUEST_DONE_CL:
pft1000info->usbboot = 3;
/* Reposition ptrs to beginning of provisioning section */
- pUsFile = (USHORT *)(pFileStart + pFileHdr5->commands_offset);
- pUcFile = (UCHAR *)(pFileStart + pFileHdr5->commands_offset);
- uiState = STATE_DONE_DWNLD;
+ s_file = (u16 *)(pFileStart + file_hdr->commands_offset);
+ c_file = (u8 *)(pFileStart + file_hdr->commands_offset);
+ state = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
//DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n");
- if (!bGoodVersion)
+ if (!correct_version)
{
DEBUG("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
#if 0
@@ -1011,28 +1011,28 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
#endif
{
DEBUG("FT1000:download:Download error: Max length exceeded\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
- if ( (word_length*2 + pUcFile) > pCodeEnd)
+ if ( (word_length*2 + c_file) > code_end)
{
/*
* Error, beyond boot code range.
*/
DEBUG("FT1000:download:Download error: Requested len=%d exceeds DSP code boundry.\n",
(int)word_length);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
word_length = word_length / 2;
- write_blk_fifo (ft1000dev, &pUsFile, &pUcFile, word_length);
+ write_blk_fifo (ft1000dev, &s_file, &c_file, word_length);
if (pft1000info->usbboot == 0)
pft1000info->usbboot++;
if (pft1000info->usbboot == 1) {
@@ -1047,14 +1047,14 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
// Convert length from byte count to word count. Make sure we round up.
word_length = (long)(pft1000info->DSPInfoBlklen + 1)/2;
put_request_value(ft1000dev, word_length);
- pMailBoxData = (struct drv_msg *)&(pft1000info->DSPInfoBlk[0]);
+ mailbox_data = (struct drv_msg *)&(pft1000info->DSPInfoBlk[0]);
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- pUsData = (USHORT *)&pMailBoxData->data[0];
- dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+ data = (u16 *)&mailbox_data->data[0];
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
@@ -1064,25 +1064,25 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
for (; word_length > 0; word_length--) /* In words */
{
- templong = *pUsData++;
- templong |= (*pUsData++ << 16);
- Status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (PUCHAR)&templong);
+ templong = *data++;
+ templong |= (*data++ << 16);
+ status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *)&templong);
}
break;
case REQUEST_VERSION_INFO:
DEBUG("FT1000:download:REQUEST_VERSION_INFO\n");
- word_length = pFileHdr5->version_data_size;
+ word_length = file_hdr->version_data_size;
put_request_value(ft1000dev, word_length);
/*
* Position ASIC DPRAM auto-increment pointer.
*/
- pUsFile = (USHORT *)(pFileStart + pFileHdr5->version_data_offset);
+ s_file = (u16 *)(pFileStart + file_hdr->version_data_offset);
- dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+ dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
if (word_length & 0x1)
word_length++;
@@ -1092,59 +1092,59 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
for (; word_length > 0; word_length--) /* In words */
{
- templong = ntohs(*pUsFile++);
- temp = ntohs(*pUsFile++);
+ templong = ntohs(*s_file++);
+ temp = ntohs(*s_file++);
templong |= (temp << 16);
- Status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (PUCHAR)&templong);
+ status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *)&templong);
}
break;
case REQUEST_CODE_BY_VERSION:
DEBUG("FT1000:download:REQUEST_CODE_BY_VERSION\n");
- bGoodVersion = FALSE;
+ correct_version = FALSE;
requested_version = get_request_value(ft1000dev);
- pDspImageInfoV6 = (struct dsp_image_info *)(pFileStart + sizeof(struct dsp_file_hdr ));
+ dsp_img_info = (struct dsp_image_info *)(pFileStart + sizeof(struct dsp_file_hdr ));
- for (imageN = 0; imageN < pFileHdr5->nDspImages; imageN++)
+ for (image = 0; image < file_hdr->nDspImages; image++)
{
- temp = (USHORT)(pDspImageInfoV6->version);
+ temp = (u16)(dsp_img_info->version);
templong = temp;
- temp = (USHORT)(pDspImageInfoV6->version >> 16);
+ temp = (u16)(dsp_img_info->version >> 16);
templong |= (temp << 16);
- if (templong == (ULONG)requested_version)
+ if (templong == (u32)requested_version)
{
- bGoodVersion = TRUE;
- DEBUG("FT1000:download: bGoodVersion is TRUE\n");
- pUsFile = (USHORT *)(pFileStart + pDspImageInfoV6->begin_offset);
- pUcFile = (UCHAR *)(pFileStart + pDspImageInfoV6->begin_offset);
- pCodeEnd = (UCHAR *)(pFileStart + pDspImageInfoV6->end_offset);
- run_address = pDspImageInfoV6->run_address;
- run_size = pDspImageInfoV6->image_size;
- image_chksum = (ULONG)pDspImageInfoV6->checksum;
+ correct_version = TRUE;
+ DEBUG("FT1000:download: correct_version is TRUE\n");
+ s_file = (u16 *)(pFileStart + dsp_img_info->begin_offset);
+ c_file = (u8 *)(pFileStart + dsp_img_info->begin_offset);
+ code_end = (u8 *)(pFileStart + dsp_img_info->end_offset);
+ run_address = dsp_img_info->run_address;
+ run_size = dsp_img_info->image_size;
+ image_chksum = (u32)dsp_img_info->checksum;
break;
}
- pDspImageInfoV6++;
+ dsp_img_info++;
} //end of for
- if (!bGoodVersion)
+ if (!correct_version)
{
/*
* Error, beyond boot code range.
*/
DEBUG("FT1000:download:Download error: Bad Version Request = 0x%x.\n",(int)requested_version);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
break;
default:
DEBUG("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",request);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
}
if (pft1000info->usbboot)
@@ -1155,94 +1155,94 @@ u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLe
else
{
DEBUG("FT1000:download:Download error: Handshake failed\n");
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
break;
case STATE_DONE_DWNLD:
DEBUG("FT1000:download:Code loader is done...\n");
- uiState = STATE_SECTION_PROV;
+ state = STATE_SECTION_PROV;
break;
case STATE_SECTION_PROV:
DEBUG("FT1000:download:STATE_SECTION_PROV\n");
- pHdr = (struct pseudo_hdr *)pUcFile;
+ pseudo_header = (struct pseudo_hdr *)c_file;
- if (pHdr->checksum == hdr_checksum(pHdr))
+ if (pseudo_header->checksum == hdr_checksum(pseudo_header))
{
- if (pHdr->portdest != 0x80 /* Dsp OAM */)
+ if (pseudo_header->portdest != 0x80 /* Dsp OAM */)
{
- uiState = STATE_DONE_PROV;
+ state = STATE_DONE_PROV;
break;
}
- usHdrLength = ntohs(pHdr->length); /* Byte length for PROV records */
+ pseudo_header_len = ntohs(pseudo_header->length); /* Byte length for PROV records */
// Get buffer for provisioning data
- pbuffer = kmalloc((usHdrLength + sizeof(struct pseudo_hdr)), GFP_ATOMIC);
+ pbuffer = kmalloc((pseudo_header_len + sizeof(struct pseudo_hdr)), GFP_ATOMIC);
if (pbuffer) {
- memcpy(pbuffer, (void *)pUcFile, (UINT)(usHdrLength + sizeof(struct pseudo_hdr)));
+ memcpy(pbuffer, (void *)c_file, (u32)(pseudo_header_len + sizeof(struct pseudo_hdr)));
// link provisioning data
pprov_record = kmalloc(sizeof(struct prov_record), GFP_ATOMIC);
if (pprov_record) {
pprov_record->pprov_data = pbuffer;
list_add_tail (&pprov_record->list, &pft1000info->prov_list);
// Move to next entry if available
- pUcFile = (UCHAR *)((unsigned long)pUcFile + (UINT)((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
- if ( (unsigned long)(pUcFile) - (unsigned long)(pFileStart) >= (unsigned long)FileLength) {
- uiState = STATE_DONE_FILE;
+ c_file = (u8 *)((unsigned long)c_file + (u32)((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+ if ( (unsigned long)(c_file) - (unsigned long)(pFileStart) >= (unsigned long)FileLength) {
+ state = STATE_DONE_FILE;
}
}
else {
kfree(pbuffer);
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
}
else {
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
}
else
{
/* Checksum did not compute */
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
}
- DEBUG("ft1000:download: after STATE_SECTION_PROV, uiState = %d, Status= %d\n", uiState, Status);
+ DEBUG("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n", state, status);
break;
case STATE_DONE_PROV:
DEBUG("FT1000:download:STATE_DONE_PROV\n");
- uiState = STATE_DONE_FILE;
+ state = STATE_DONE_FILE;
break;
default:
- Status = STATUS_FAILURE;
+ status = STATUS_FAILURE;
break;
} /* End Switch */
- if (Status != STATUS_SUCCESS) {
+ if (status != STATUS_SUCCESS) {
break;
}
/****
// Check if Card is present
- Status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
- if ( (Status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
+ status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
break;
}
- Status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
- if ( (Status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
+ status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
+ if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
break;
}
****/
} /* End while */
- DEBUG("Download exiting with status = 0x%8x\n", Status);
+ DEBUG("Download exiting with status = 0x%8x\n", status);
ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL);
- return Status;
+ return status;
}
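// scram_dnldr() above is driven by the DSP: each pass waits for a handshake,
// acknowledges it, then serves whatever request was left in DPRAM, moving
// from boot-loader download through code download to the provisioning
// records at the end of the image. A simplified, self-contained skeleton of
// that control flow; the enum values and helpers below are illustrative
// stand-ins, not driver symbols, and several states are collapsed.
#include <linux/types.h>

enum dl_state   { DL_START, DL_BOOT, DL_CODE, DL_PROV, DL_DONE };
enum dl_request { REQ_BLOCK, REQ_DONE_SECTION, REQ_BAD };

static bool wait_handshake(void)          { return true; }              /* get_handshake() */
static void ack_handshake(void)           { }                           /* put_handshake() */
static enum dl_request next_request(void) { return REQ_DONE_SECTION; }  /* get_request_type() */
static void write_block(void)             { }                           /* write_blk()/write_blk_fifo() */

static bool run_download(void)
{
        enum dl_state state = DL_START;
        bool ok = true;

        while (ok && state != DL_DONE) {
                switch (state) {
                case DL_START:
                        ok = wait_handshake();  /* DSP says it is ready */
                        state = DL_BOOT;
                        break;
                case DL_BOOT:
                case DL_CODE:
                        if (!wait_handshake()) {
                                ok = false;
                                break;
                        }
                        ack_handshake();
                        switch (next_request()) {
                        case REQ_BLOCK:         /* DSP wants the next image block */
                                write_block();
                                break;
                        case REQ_DONE_SECTION:  /* boot loader / code image finished */
                                state = (state == DL_BOOT) ? DL_CODE : DL_PROV;
                                break;
                        default:
                                ok = false;
                                break;
                        }
                        break;
                case DL_PROV:
                        /* walk provisioning records appended to the image */
                        state = DL_DONE;
                        break;
                default:
                        ok = false;
                        break;
                }
        }
        return ok;
}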
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 5b89ee2a2971..643a63794ade 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -41,37 +41,9 @@ static int ft1000_chkcard (struct ft1000_device *dev);
//Jim
static u8 tempbuffer[1600];
-static unsigned long gCardIndex;
#define MAX_RCV_LOOP 100
-/****************************************************************
- * ft1000_control_complete
- ****************************************************************/
-static void ft1000_control_complete(struct urb *urb)
-{
- struct ft1000_device *ft1000dev = (struct ft1000_device *)urb->context;
-
- //DEBUG("FT1000_CONTROL_COMPLETE ENTERED\n");
- if (ft1000dev == NULL )
- {
- DEBUG("NULL ft1000dev, failure\n");
- return ;
- }
- else if ( ft1000dev->dev == NULL )
- {
- DEBUG("NULL ft1000dev->dev, failure\n");
- return ;
- }
-
- if(waitqueue_active(&ft1000dev->control_wait))
- {
- wake_up(&ft1000dev->control_wait);
- }
-
- //DEBUG("FT1000_CONTROL_COMPLETE RETURNED\n");
-}
-
//---------------------------------------------------------------------------
// Function: ft1000_control
//
@@ -187,7 +159,7 @@ u16 ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data, u16 nRegInd
// Notes:
//
//---------------------------------------------------------------------------
-u16 ft1000_write_register(struct ft1000_device *ft1000dev, USHORT value, u16 nRegIndx)
+u16 ft1000_write_register(struct ft1000_device *ft1000dev, u16 value, u16 nRegIndx)
{
u16 ret = STATUS_SUCCESS;
@@ -223,7 +195,7 @@ u16 ft1000_write_register(struct ft1000_device *ft1000dev, USHORT value, u16 nRe
//
//---------------------------------------------------------------------------
-u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt)
+u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt)
{
u16 ret = STATUS_SUCCESS;
@@ -262,7 +234,7 @@ u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buf
// Notes:
//
//---------------------------------------------------------------------------
-u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt)
+u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt)
{
u16 ret = STATUS_SUCCESS;
@@ -299,7 +271,7 @@ u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR bu
// Notes:
//
//---------------------------------------------------------------------------
-u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, u8 highlow)
+u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u8 highlow)
{
u16 ret = STATUS_SUCCESS;
@@ -347,7 +319,7 @@ u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buf
// Notes:
//
//---------------------------------------------------------------------------
-u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, USHORT indx, USHORT value, u8 highlow)
+u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow)
{
u16 ret = STATUS_SUCCESS;
@@ -392,10 +364,10 @@ u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, USHORT indx, USHORT va
// Notes:
//
//---------------------------------------------------------------------------
-u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer)
+u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer)
{
- UCHAR buf[16];
- USHORT pos;
+ u8 buf[16];
+ u16 pos;
u16 ret = STATUS_SUCCESS;
//DEBUG("fix_ft1000_read_dpram32: indx: %d \n", indx);
@@ -441,14 +413,14 @@ u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR
// Notes:
//
//---------------------------------------------------------------------------
-u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer)
+u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer)
{
- USHORT pos1;
- USHORT pos2;
- USHORT i;
- UCHAR buf[32];
- UCHAR resultbuffer[32];
- PUCHAR pdata;
+ u16 pos1;
+ u16 pos2;
+ u16 i;
+ u8 buf[32];
+ u8 resultbuffer[32];
+ u8 *pdata;
u16 ret = STATUS_SUCCESS;
//DEBUG("fix_ft1000_write_dpram32: Entered:\n");
@@ -472,7 +444,7 @@ u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHA
return ret;
}
- ret = ft1000_read_dpram32(ft1000dev, pos1, (PUCHAR)&resultbuffer[0], 16);
+ ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
if (ret == STATUS_SUCCESS)
{
buffer = pdata;
@@ -487,8 +459,8 @@ u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHA
if (ret == STATUS_FAILURE)
{
- ret = ft1000_write_dpram32(ft1000dev, pos1, (PUCHAR)&tempbuffer[0], 16);
- ret = ft1000_read_dpram32(ft1000dev, pos1, (PUCHAR)&resultbuffer[0], 16);
+ ret = ft1000_write_dpram32(ft1000dev, pos1, (u8 *)&tempbuffer[0], 16);
+ ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
if (ret == STATUS_SUCCESS)
{
buffer = pdata;
@@ -518,10 +490,10 @@ u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHA
//
// Returns: None
//-----------------------------------------------------------------------
-static void card_reset_dsp (struct ft1000_device *ft1000dev, BOOLEAN value)
+static void card_reset_dsp (struct ft1000_device *ft1000dev, bool value)
{
u16 status = STATUS_SUCCESS;
- USHORT tempword;
+ u16 tempword;
status = ft1000_write_register (ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_CTRL);
@@ -620,8 +592,8 @@ void CardSendCommand(struct ft1000_device *ft1000dev, void *ptempbuffer, int siz
int dsp_reload(struct ft1000_device *ft1000dev)
{
u16 status;
- USHORT tempword;
- ULONG templong;
+ u16 tempword;
+ u32 templong;
struct ft1000_info *pft1000info;
@@ -648,7 +620,7 @@ int dsp_reload(struct ft1000_device *ft1000dev)
status = ft1000_write_register (ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
// Let's check for FEFE
- status = ft1000_read_dpram32 (ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX, (PUCHAR)&templong, 4);
+ status = ft1000_read_dpram32 (ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX, (u8 *)&templong, 4);
DEBUG("templong (fefe) = 0x%8x\n", templong);
// call codeloader
@@ -753,7 +725,7 @@ static int ft1000_reset_card (struct net_device *dev)
// Initialize DSP heartbeat area to ho
ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag, FT1000_MAG_HI_HO_INDX);
- ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (PCHAR)&tempword, FT1000_MAG_HI_HO_INDX);
+ ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *)&tempword, FT1000_MAG_HI_HO_INDX);
DEBUG("ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n", tempword);
@@ -800,8 +772,7 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
int i, ret_val;
struct list_head *cur, *tmp;
char card_nr[2];
-
- gCardIndex=0; //mbelian
+ unsigned long gCardIndex = 0;
DEBUG("Enter init_ft1000_netdev...\n");
@@ -813,7 +784,7 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
return -ENOMEM;
}
- pInfo = (struct ft1000_info *) netdev_priv(netdev);
+ pInfo = netdev_priv(netdev);
//DEBUG("init_ft1000_netdev: gFt1000Info=%x, netdev=%x, ft1000dev=%x\n", gFt1000Info, netdev, ft1000dev);
@@ -821,9 +792,6 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
dev_alloc_name(netdev, netdev->name);
- //for the first inserted card, decide the card index beginning number, in case there are existing network interfaces
- if ( gCardIndex == 0 )
- {
DEBUG("init_ft1000_netdev: network device name is %s\n", netdev->name);
if ( strncmp(netdev->name,"eth", 3) == 0) {
@@ -843,13 +811,6 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
ret_val = -ENXIO;
goto err_net;
}
- }
- else
- {
- //not the first inserted card, increase card number by 1
- pInfo->CardNumber = gCardIndex;
- /*DEBUG("card number = %d\n", pInfo->CardNumber);*/ //mbelian
- }
memset(&pInfo->stats, 0, sizeof(struct net_device_stats) );
@@ -862,7 +823,6 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
pInfo->mediastate = 0;
pInfo->fifo_cnt = 0;
pInfo->DeviceCreated = FALSE;
- pInfo->DeviceMajor = 0;
pInfo->CurrentInterruptEnableMask = ISR_DEFAULT_MASK;
pInfo->InterruptsEnabled = FALSE;
pInfo->CardReady = 0;
@@ -874,13 +834,11 @@ u16 init_ft1000_netdev(struct ft1000_device *ft1000dev)
pInfo->fCondResetPend = 0;
pInfo->usbboot = 0;
pInfo->dspalive = 0;
- for (i=0;i<32 ;i++ )
- {
- pInfo->tempbuf[i] = 0;
- }
+ memset(&pInfo->tempbuf[0], 0, sizeof(pInfo->tempbuf));
INIT_LIST_HEAD(&pInfo->prov_list);
+ INIT_LIST_HEAD(&pInfo->nodes.list);
//mbelian
#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &ftnet_ops;
@@ -982,7 +940,7 @@ int reg_ft1000_netdev(struct ft1000_device *ft1000dev, struct usb_interface *int
//Create character device, implemented by Jim
- ft1000_CreateDevice(ft1000dev);
+ ft1000_create_dev(ft1000dev);
DEBUG ("reg_ft1000_netdev returned\n");
@@ -1026,178 +984,6 @@ static void ft1000_usb_transmit_complete(struct urb *urb)
//DEBUG("Return from ft1000_usb_transmit_complete\n");
}
-
-/****************************************************************
- * ft1000_control
- ****************************************************************/
-static int ft1000_read_fifo_reg(struct ft1000_device *ft1000dev,unsigned int pipe,
- u8 request,
- u8 requesttype,
- u16 value,
- u16 index,
- void *data,
- u16 size,
- int timeout)
-{
- u16 ret;
-
- DECLARE_WAITQUEUE(wait, current);
- struct urb *urb;
- struct usb_ctrlrequest *dr;
- int status;
-
- if (ft1000dev == NULL )
- {
- DEBUG("NULL ft1000dev, failure\n");
- return STATUS_FAILURE;
- }
- else if ( ft1000dev->dev == NULL )
- {
- DEBUG("NULL ft1000dev->dev, failure\n");
- return STATUS_FAILURE;
- }
-
- spin_lock(&ft1000dev->device_lock);
-
- if(in_interrupt())
- {
- spin_unlock(&ft1000dev->device_lock);
- return -EBUSY;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- dr = kmalloc(sizeof(struct usb_ctrlrequest), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
- if(!urb || !dr)
- {
- kfree(dr);
- usb_free_urb(urb);
- spin_unlock(&ft1000dev->device_lock);
- return -ENOMEM;
- }
-
-
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = value;
- dr->wIndex = index;
- dr->wLength = size;
-
- usb_fill_control_urb(urb, ft1000dev->dev, pipe, (char*)dr, (void*)data, size, (void *)ft1000_control_complete, (void*)ft1000dev);
-
-
- init_waitqueue_head(&ft1000dev->control_wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- add_wait_queue(&ft1000dev->control_wait, &wait);
-
-
-
-
- status = usb_submit_urb(urb, GFP_KERNEL);
-
- if(status)
- {
- usb_free_urb(urb);
- kfree(dr);
- remove_wait_queue(&ft1000dev->control_wait, &wait);
- spin_unlock(&ft1000dev->device_lock);
- return status;
- }
-
- if(urb->status == -EINPROGRESS)
- {
- while(timeout && urb->status == -EINPROGRESS)
- {
- status = timeout = schedule_timeout(timeout);
- }
- }
- else
- {
- status = 1;
- }
-
- remove_wait_queue(&ft1000dev->control_wait, &wait);
-
- if(!status)
- {
- usb_unlink_urb(urb);
- printk("ft1000 timeout\n");
- status = -ETIMEDOUT;
- }
- else
- {
- status = urb->status;
-
- if(urb->status)
- {
- printk("ft1000 control message failed (urb addr: %p) with error number: %i\n", urb, (int)status);
-
- usb_clear_halt(ft1000dev->dev, usb_rcvctrlpipe(ft1000dev->dev, 0));
- usb_clear_halt(ft1000dev->dev, usb_sndctrlpipe(ft1000dev->dev, 0));
- usb_unlink_urb(urb);
- }
- }
-
-
-
- usb_free_urb(urb);
- kfree(dr);
- spin_unlock(&ft1000dev->device_lock);
- return ret;
-
-
-}
-
-//---------------------------------------------------------------------------
-// Function: ft1000_read_fifo_len
-//
-// Parameters: ft1000dev - device structure
-//
-//
-// Returns: none
-//
-// Description: read the fifo length register content
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static inline u16 ft1000_read_fifo_len (struct net_device *dev)
-{
- u16 temp;
- u16 ret;
-
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
- struct ft1000_device *ft1000dev = info->pFt1000Dev;
-// DEBUG("ft1000_read_fifo_len: enter ft1000dev %x\n", ft1000dev); //aelias [-] reason: warning: format ???%x??? expects type ???unsigned int???, but argument 2 has type ???struct ft1000_device *???
- DEBUG("ft1000_read_fifo_len: enter ft1000dev %p\n", ft1000dev); //aelias [+] reason: up
-
- ret = STATUS_SUCCESS;
-
- ret = ft1000_read_fifo_reg(ft1000dev,
- usb_rcvctrlpipe(ft1000dev->dev,0),
- HARLEY_READ_REGISTER,
- HARLEY_READ_OPERATION,
- 0,
- FT1000_REG_MAG_UFSR,
- &temp,
- 2,
- LARGE_TIMEOUT);
-
- if (ret>0)
- ret = STATUS_SUCCESS;
- else
- ret = STATUS_FAILURE;
-
- DEBUG("ft1000_read_fifo_len: returned %d\n", temp);
-
- return (temp- 16);
-
-}
-
-
//---------------------------------------------------------------------------
//
// Function: ft1000_copy_down_pkt
@@ -1219,16 +1005,15 @@ static int ft1000_copy_down_pkt (struct net_device *netdev, u8 *packet, u16 len)
struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev;
- int i, count, ret;
- USHORT *pTemp;
- USHORT checksum;
+ int count, ret;
u8 *t;
+ struct pseudo_hdr hdr;
if (!pInfo->CardReady)
{
DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
- return STATUS_FAILURE;
+ return -ENODEV;
}
@@ -1240,27 +1025,27 @@ static int ft1000_copy_down_pkt (struct net_device *netdev, u8 *packet, u16 len)
{
DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
DEBUG("size = %d\n", count);
- return STATUS_FAILURE;
+ return -EINVAL;
}
if ( count % 4)
count = count + (4- (count %4) );
- pTemp = (PUSHORT)&(pFt1000Dev->tx_buf[0]);
- *pTemp ++ = ntohs(count);
- *pTemp ++ = 0x1020;
- *pTemp ++ = 0x2010;
- *pTemp ++ = 0x9100;
- *pTemp ++ = 0;
- *pTemp ++ = 0;
- *pTemp ++ = 0;
- pTemp = (PUSHORT)&(pFt1000Dev->tx_buf[0]);
- checksum = *pTemp ++;
- for (i=1; i<7; i++)
- {
- checksum ^= *pTemp ++;
- }
- *pTemp++ = checksum;
+ memset(&hdr, 0, sizeof(struct pseudo_hdr));
+
+ hdr.length = ntohs(count);
+ hdr.source = 0x10;
+ hdr.destination = 0x20;
+ hdr.portdest = 0x20;
+ hdr.portsrc = 0x10;
+ hdr.sh_str_id = 0x91;
+ hdr.control = 0x00;
+
+ hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
+ hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^
+ hdr.control;
+
+ memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
netif_stop_queue(netdev);
@@ -1283,25 +1068,18 @@ static int ft1000_copy_down_pkt (struct net_device *netdev, u8 *packet, u16 len)
}*/
- ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
- if(ret)
- {
+ ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
+ if (ret) {
DEBUG("ft1000 failed tx_urb %d\n", ret);
-
- return STATUS_FAILURE;
-
- }
- else
- {
- //DEBUG("ft1000 sucess tx_urb %d\n", ret);
-
- pInfo->stats.tx_packets++;
- pInfo->stats.tx_bytes += (len+14);
- }
+ return ret;
+ } else {
+ pInfo->stats.tx_packets++;
+ pInfo->stats.tx_bytes += (len+14);
+ }
//DEBUG("ft1000_copy_down_pkt() exit\n");
- return STATUS_SUCCESS;
+ return 0;
}
//---------------------------------------------------------------------------
@@ -1331,14 +1109,13 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ( skb == NULL )
{
DEBUG ("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n" );
- return STATUS_FAILURE;
+ return NETDEV_TX_OK;
}
if ( pFt1000Dev->status & FT1000_STATUS_CLOSING)
{
DEBUG("network driver is closed, return\n");
- dev_kfree_skb(skb);
- return STATUS_SUCCESS;
+ goto err;
}
//DEBUG("ft1000_start_xmit 1:length of packet = %d\n", skb->len);
@@ -1357,28 +1134,24 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
/* Drop packet is mediastate is down */
DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
- dev_kfree_skb(skb);
- return STATUS_SUCCESS;
+ goto err;
}
if ( (skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE) )
{
/* Drop packet which has invalid size */
DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
- dev_kfree_skb(skb);
- return STATUS_SUCCESS;
+ goto err;
}
//mbelian
- if(ft1000_copy_down_pkt (dev, (pdata+ENET_HEADER_SIZE-2), skb->len - ENET_HEADER_SIZE + 2) == STATUS_FAILURE)
- {
- dev_kfree_skb(skb);
- return STATUS_SUCCESS;
- }
+ ft1000_copy_down_pkt(dev, (pdata+ENET_HEADER_SIZE-2),
+ skb->len - ENET_HEADER_SIZE + 2);
- dev_kfree_skb(skb);
+err:
+ dev_kfree_skb(skb);
//DEBUG(" ft1000_start_xmit() exit\n");
- return 0;
+ return NETDEV_TX_OK;
}
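// The rework above makes ft1000_start_xmit() follow the ndo_start_xmit
// contract for its drop paths: the skb is consumed (freed) on every exit and
// the function returns NETDEV_TX_OK, since anything other than TX_OK/TX_BUSY
// would make the stack treat an intentionally dropped frame as retryable.
// A minimal sketch of that shape; my_hw_queue_xmit() is a hypothetical
// stand-in for ft1000_copy_down_pkt(), which copies the frame into a driver
// buffer before returning, so the skb can be freed unconditionally.
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int my_hw_queue_xmit(struct net_device *dev, struct sk_buff *skb)
{
        return 0;       /* stub: copy skb->data into a driver buffer and queue it */
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb->len < ETH_HLEN)        /* malformed frame: drop it */
                goto out;

        my_hw_queue_xmit(dev, skb);     /* frame data is copied out here */
out:
        dev_kfree_skb(skb);             /* skb is consumed on every path */
        return NETDEV_TX_OK;
}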
//---------------------------------------------------------------------------
@@ -1424,7 +1197,7 @@ static int ft1000_copy_up_pkt (struct urb *urb)
//DEBUG("ft1000_copy_up_pkt: transfer_buffer_length=%d, actual_buffer_len=%d\n",
// urb->transfer_buffer_length, urb->actual_length);
- chksum = (PUSHORT)ft1000dev->rx_buf;
+ chksum = (u16 *)ft1000dev->rx_buf;
tempword = *chksum++;
for (i=1; i<7; i++)
@@ -1521,7 +1294,7 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
{
DEBUG("network driver is closed, return\n");
//usb_kill_urb(pFt1000Dev->rx_urb); //mbelian
- return STATUS_SUCCESS;
+ return -ENODEV;
}
usb_fill_bulk_urb(pFt1000Dev->rx_urb,
@@ -1536,12 +1309,12 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
if((result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC)))
{
printk("ft1000_submit_rx_urb: submitting rx_urb %d failed\n", result);
- return STATUS_FAILURE;
+ return result;
}
//DEBUG("ft1000_submit_rx_urb exit: result=%d\n", result);
- return STATUS_SUCCESS;
+ return 0;
}
//---------------------------------------------------------------------------
@@ -1560,8 +1333,9 @@ static int ft1000_submit_rx_urb(struct ft1000_info *info)
//---------------------------------------------------------------------------
static int ft1000_open (struct net_device *dev)
{
- struct ft1000_info *pInfo = (struct ft1000_info *)netdev_priv(dev);
+ struct ft1000_info *pInfo = netdev_priv(dev);
struct timeval tv; //mbelian
+ int ret;
DEBUG("ft1000_open is called for card %d\n", pInfo->CardNumber);
//DEBUG("ft1000_open: dev->addr=%x, dev->addr_len=%d\n", dev->addr, dev->addr_len);
@@ -1579,8 +1353,9 @@ static int ft1000_open (struct net_device *dev)
netif_carrier_on(dev); //mbelian
- ft1000_submit_rx_urb(pInfo);
- return 0;
+ ret = ft1000_submit_rx_urb(pInfo);
+
+ return ret;
}
//---------------------------------------------------------------------------
@@ -1599,7 +1374,7 @@ static int ft1000_open (struct net_device *dev)
//---------------------------------------------------------------------------
int ft1000_close(struct net_device *net)
{
- struct ft1000_info *pInfo = (struct ft1000_info *) netdev_priv(net);
+ struct ft1000_info *pInfo = netdev_priv(net);
struct ft1000_device *ft1000dev = pInfo->pFt1000Dev;
//DEBUG ("ft1000_close: netdev->refcnt=%d\n", net->refcnt);
@@ -1622,7 +1397,7 @@ int ft1000_close(struct net_device *net)
static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
{
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
return &(info->stats); //mbelian
}
@@ -1648,7 +1423,7 @@ Jim
static int ft1000_chkcard (struct ft1000_device *dev) {
u16 tempword;
u16 status;
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+ struct ft1000_info *info = netdev_priv(dev->net);
if (info->fCondResetPend)
{
@@ -1692,13 +1467,13 @@ static int ft1000_chkcard (struct ft1000_device *dev) {
// = 1 (successful)
//
//---------------------------------------------------------------------------
-static BOOLEAN ft1000_receive_cmd (struct ft1000_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph) {
+static bool ft1000_receive_cmd (struct ft1000_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph) {
u16 size, ret;
u16 *ppseudohdr;
int i;
u16 tempword;
- ret = ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (PUCHAR)&size, FT1000_MAG_PH_LEN_INDX);
+ ret = ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *)&size, FT1000_MAG_PH_LEN_INDX);
size = ntohs(size) + PSEUDOSZ;
if (size > maxsz) {
DEBUG("FT1000:ft1000_receive_cmd:Invalid command length = %d\n", size);
@@ -1748,15 +1523,15 @@ static BOOLEAN ft1000_receive_cmd (struct ft1000_device *dev, u16 *pbuffer, int
static int ft1000_dsp_prov(void *arg)
{
struct ft1000_device *dev = (struct ft1000_device *)arg;
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+ struct ft1000_info *info = netdev_priv(dev->net);
u16 tempword;
u16 len;
u16 i=0;
struct prov_record *ptr;
struct pseudo_hdr *ppseudo_hdr;
- PUSHORT pmsg;
+ u16 *pmsg;
u16 status;
- USHORT TempShortBuf [256];
+ u16 TempShortBuf [256];
DEBUG("*** DspProv Entered\n");
@@ -1792,7 +1567,7 @@ static int ft1000_dsp_prov(void *arg)
len = htons(len);
len += PSEUDOSZ;
- pmsg = (PUSHORT)ptr->pprov_data;
+ pmsg = (u16 *)ptr->pprov_data;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
// Insert slow queue sequence number
ppseudo_hdr->seq_num = info->squeseqnum++;
@@ -1809,7 +1584,7 @@ static int ft1000_dsp_prov(void *arg)
TempShortBuf[1] = htons (len);
memcpy(&TempShortBuf[2], ppseudo_hdr, len);
- status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&TempShortBuf[0], (unsigned short)(len+2));
+ status = ft1000_write_dpram32 (dev, 0, (u8 *)&TempShortBuf[0], (unsigned short)(len+2));
status = ft1000_write_register (dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL);
list_del(&ptr->list);
@@ -1831,7 +1606,7 @@ static int ft1000_dsp_prov(void *arg)
static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+ struct ft1000_info *info = netdev_priv(dev->net);
u16 msgtype;
u16 tempword;
struct media_msg *pmediamsg;
@@ -1839,7 +1614,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
struct drv_msg *pdrvmsg;
u16 i;
struct pseudo_hdr *ppseudo_hdr;
- PUSHORT pmsg;
+ u16 *pmsg;
u16 status;
union {
u8 byte[2];
@@ -1971,7 +1746,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC-4) ) {
- pmsg = (PUSHORT)&pdrvmsg->data[0];
+ pmsg = (u16 *)&pdrvmsg->data[0];
for (i=0; i<((tempword+1)/2); i++) {
DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
info->DSPInfoBlk[i+10] = *pmsg++;
@@ -2003,10 +1778,10 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
// Put message into Slow Queue
// Form Pseudo header
- pmsg = (PUSHORT)info->DSPInfoBlk;
+ pmsg = (u16 *)info->DSPInfoBlk;
*pmsg++ = 0;
*pmsg++ = htons(info->DSPInfoBlklen+20+info->DSPInfoBlklen);
- ppseudo_hdr = (struct pseudo_hdr *)(PUSHORT)&info->DSPInfoBlk[2];
+ ppseudo_hdr = (struct pseudo_hdr *)(u16 *)&info->DSPInfoBlk[2];
ppseudo_hdr->length = htons(info->DSPInfoBlklen+4+info->DSPInfoBlklen);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
@@ -2028,7 +1803,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
}
info->DSPInfoBlk[10] = 0x7200;
info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
- status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&info->DSPInfoBlk[0], (unsigned short)(info->DSPInfoBlklen+22));
+ status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPInfoBlk[0], (unsigned short)(info->DSPInfoBlklen+22));
status = ft1000_write_register (dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL);
info->DrvMsgPend = 0;
@@ -2053,7 +1828,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
if ( (tempword & FT1000_DB_DPRAM_TX) == 0) {
// Put message into Slow Queue
// Form Pseudo header
- pmsg = (PUSHORT)&tempbuffer[0];
+ pmsg = (u16 *)&tempbuffer[0];
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
@@ -2074,7 +1849,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
for (i=1; i<7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
}
- pmsg = (PUSHORT)&tempbuffer[16];
+ pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -2089,7 +1864,7 @@ static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
*pmsg++ = convert.wrd;
*pmsg++ = htons(info->DrvErrNum);
- CardSendCommand (dev, (unsigned char*)&tempbuffer[0], (USHORT)(0x0012 + PSEUDOSZ));
+ CardSendCommand (dev, (unsigned char*)&tempbuffer[0], (u16)(0x0012 + PSEUDOSZ));
info->DrvErrNum = 0;
}
info->DrvMsgPend = 0;
@@ -2114,15 +1889,15 @@ out:
int ft1000_poll(void* dev_id) {
struct ft1000_device *dev = (struct ft1000_device *)dev_id;
- struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+ struct ft1000_info *info = netdev_priv(dev->net);
u16 tempword;
u16 status;
u16 size;
int i;
- USHORT data;
- USHORT modulo;
- USHORT portid;
+ u16 data;
+ u16 modulo;
+ u16 portid;
u16 nxtph;
struct dpram_blk *pdpram_blk;
struct pseudo_hdr *ppseudo_hdr;
@@ -2143,14 +1918,14 @@ int ft1000_poll(void* dev_id) {
if (tempword & FT1000_DB_DPRAM_RX) {
//DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX\n");
- status = ft1000_read_dpram16(dev, 0x200, (PUCHAR)&data, 0);
+ status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0);
//DEBUG("ft1000_poll:FT1000_DB_DPRAM_RX:ft1000_read_dpram16:size = 0x%x\n", data);
size = ntohs(data) + 16 + 2; //wai
if (size % 4) {
modulo = 4 - (size % 4);
size = size + modulo;
}
- status = ft1000_read_dpram16(dev, 0x201, (PUCHAR)&portid, 1);
+ status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1);
portid &= 0xff;
//DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid 0x%x\n", portid);
@@ -2285,7 +2060,7 @@ int ft1000_poll(void* dev_id) {
status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL);
status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
// copy dsp session record from Adapter block
- status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&info->DSPSess.Rec[0], 1024);
+ status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024);
// Program WMARK register
status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
// ring doorbell to tell DSP that ASIC is out of reset
@@ -2299,10 +2074,10 @@ int ft1000_poll(void* dev_id) {
if (info->fAppMsgPend == 0) {
// Reset ASIC and DSP
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (PUCHAR)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (PUCHAR)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (PUCHAR)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
- status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (PUCHAR)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
+ status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
+ status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
+ status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
+ status = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
info->CardReady = 0;
info->DrvErrNum = DSP_CONDRESET_INFO;
DEBUG("ft1000_hw:DSP conditional reset requested\n");
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
index c58074131014..ab9312f9f326 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
@@ -4,7 +4,7 @@
#include "ft1000_usb.h"
-extern u16 ft1000_read_register(struct usb_device *dev, PUSHORT Data, u8 nRegIndx);
-extern u16 ft1000_write_register(struct usb_device *dev, USHORT value, u8 nRegIndx);
+extern u16 ft1000_read_register(struct usb_device *dev, u16 *Data, u8 nRegIndx);
+extern u16 ft1000_write_register(struct usb_device *dev, u16 value, u8 nRegIndx);
#endif
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index 36cdd588fa98..b87542abbe86 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -1,3 +1,24 @@
+/*
+ * ft1000_proc.c - ft1000 proc interface
+ *
+ * Copyright (C) 2009-2010 Quintec
+ * (C) 2010 Open-nandra
+ * <marek.belisko@open-nandra.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
@@ -14,219 +35,217 @@
#define PUTX_TO_PAGE(len,page,message,size,var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
- for(i = 0; i < (size - 1); i++) \
- { \
+ for (i = 0; i < (size - 1); i++) {\
len += snprintf(page+len, PAGE_SIZE - len, "%02x:", var[i]); \
} \
len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
#define PUTD_TO_PAGE(len,page,message,size,var) \
len += snprintf(page+len, PAGE_SIZE - len, message); \
- for(i = 0; i < (size - 1); i++) \
- { \
+ for (i = 0; i < (size - 1); i++) {\
len += snprintf(page+len, PAGE_SIZE - len, "%d.", var[i]); \
} \
len += snprintf(page+len, PAGE_SIZE - len, "%d\n", var[i])
-
-
-//#ifdef INIT_NET_NS
#define FTNET_PROC init_net.proc_net
-//#else
-//#define FTNET_PROC proc_net
-//#endif
-u16 ft1000_read_dpram16 (struct ft1000_device *ft1000dev, USHORT indx,
- PUCHAR buffer, u8 highlow);
+u16 ft1000_read_dpram16 (struct ft1000_device *ft1000dev, u16 indx,
+ u8 *buffer, u8 highlow);
static int
-ft1000ReadProc (char *page, char **start, off_t off, int count, int *eof,
+ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
void *data)
{
- struct net_device *dev;
- int len;
- int i;
- unsigned short ledStat;
- unsigned short conStat;
+ struct net_device *dev;
+ int len;
+ int i;
+ unsigned short ledStat;
+ unsigned short conStat;
struct ft1000_info *info;
- char *status[] = { "Idle (Disconnect)", "Searching", "Active (Connected)",
- "Waiting for L2", "Sleep", "No Coverage", "", ""
- };
-
- char *signal[] = { "", "*", "**", "***", "****" };
- int strength;
- int quality;
- struct timeval tv;
- time_t delta;
-
- dev = (struct net_device *) data;
- info = (struct ft1000_info *) netdev_priv(dev);
-
- if (off > 0)
- {
- *eof = 1;
- return 0;
- }
-
-
- if (info->ProgConStat != 0xFF)
- {
- ft1000_read_dpram16 (info->pFt1000Dev, FT1000_MAG_DSP_LED,
- (PUCHAR) & ledStat, FT1000_MAG_DSP_LED_INDX);
- info->LedStat = ntohs (ledStat);
-
- ft1000_read_dpram16 (info->pFt1000Dev, FT1000_MAG_DSP_CON_STATE,
- (PUCHAR) & conStat, FT1000_MAG_DSP_CON_STATE_INDX);
- info->ConStat = ntohs (conStat);
- do_gettimeofday (&tv);
- delta = (tv.tv_sec - info->ConTm);
- }
- else
- {
- info->ConStat = 0xf;
- delta = 0;
- }
-
-
-
- i = (info->LedStat) & 0xf;
- switch (i)
- {
- case 0x1:
- strength = 1;
- break;
- case 0x3:
- strength = 2;
- break;
- case 0x7:
- strength = 3;
- break;
- case 0xf:
- strength = 4;
- break;
- default:
- strength = 0;
- }
-
- i = (info->LedStat >> 8) & 0xf;
- switch (i)
- {
- case 0x1:
- quality = 1;
- break;
- case 0x3:
- quality = 2;
- break;
- case 0x7:
- quality = 3;
- break;
- case 0xf:
- quality = 4;
- break;
- default:
- quality = 0;
- }
-
-
- len = 0;
- PUTM_TO_PAGE (len, page, "Connection Time: %02ld:%02ld:%02ld\n",
- ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
- PUTM_TO_PAGE (len, page, "Connection Time[s]: %ld\n", delta);
- PUTM_TO_PAGE (len, page, "Asic ID: %s\n",
- (info->AsicID) ==
- ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
- PUTX_TO_PAGE (len, page, "SKU: ", SKUSZ, info->Sku);
- PUTX_TO_PAGE (len, page, "EUI64: ", EUISZ, info->eui64);
- PUTD_TO_PAGE (len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
- PUTX_TO_PAGE (len, page, "Hardware Serial Number: ", HWSERNUMSZ,
- info->HwSerNum);
- PUTX_TO_PAGE (len, page, "Caliberation Version: ", CALVERSZ,
- info->RfCalVer);
- PUTD_TO_PAGE (len, page, "Caliberation Date: ", CALDATESZ, info->RfCalDate);
- PUTM_TO_PAGE (len, page, "Media State: %s\n",
- (info->mediastate) ? "link" : "no link");
- PUTM_TO_PAGE (len, page, "Connection Status: %s\n",
- status[((info->ConStat) & 0x7)]);
- PUTM_TO_PAGE (len, page, "RX packets: %ld\n", info->stats.rx_packets);
- PUTM_TO_PAGE (len, page, "TX packets: %ld\n", info->stats.tx_packets);
- PUTM_TO_PAGE (len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
- PUTM_TO_PAGE (len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
- PUTM_TO_PAGE (len, page, "Signal Strength: %s\n", signal[strength]);
- PUTM_TO_PAGE (len, page, "Signal Quality: %s\n", signal[quality]);
-
-
-
-
- return len;
+ char *status[] = {
+ "Idle (Disconnect)",
+ "Searching",
+ "Active (Connected)",
+ "Waiting for L2",
+ "Sleep",
+ "No Coverage",
+ "",
+ "",
+ };
+
+ char *signal[] = { "", "*", "**", "***", "****" };
+ int strength;
+ int quality;
+ struct timeval tv;
+ time_t delta;
+
+ dev = (struct net_device *) data;
+ info = netdev_priv(dev);
+
+ if (off > 0) {
+ *eof = 1;
+ return 0;
+ }
+
+
+ if (info->ProgConStat != 0xFF) {
+ ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_LED,
+ (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+ info->LedStat = ntohs(ledStat);
+
+ ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_CON_STATE,
+ (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+ info->ConStat = ntohs(conStat);
+ do_gettimeofday(&tv);
+ delta = (tv.tv_sec - info->ConTm);
+ } else {
+ info->ConStat = 0xf;
+ delta = 0;
+ }
+
+ i = (info->LedStat) & 0xf;
+ switch (i) {
+ case 0x1:
+ strength = 1;
+ break;
+ case 0x3:
+ strength = 2;
+ break;
+ case 0x7:
+ strength = 3;
+ break;
+ case 0xf:
+ strength = 4;
+ break;
+ default:
+ strength = 0;
+ }
+
+ i = (info->LedStat >> 8) & 0xf;
+ switch (i) {
+ case 0x1:
+ quality = 1;
+ break;
+ case 0x3:
+ quality = 2;
+ break;
+ case 0x7:
+ quality = 3;
+ break;
+ case 0xf:
+ quality = 4;
+ break;
+ default:
+ quality = 0;
+ }
+
+ len = 0;
+ PUTM_TO_PAGE(len, page, "Connection Time: %02ld:%02ld:%02ld\n",
+ ((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
+ PUTM_TO_PAGE(len, page, "Connection Time[s]: %ld\n", delta);
+ PUTM_TO_PAGE(len, page, "Asic ID: %s\n",
+ (info->AsicID) ==
+ ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
+ PUTX_TO_PAGE(len, page, "SKU: ", SKUSZ, info->Sku);
+ PUTX_TO_PAGE(len, page, "EUI64: ", EUISZ, info->eui64);
+ PUTD_TO_PAGE(len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
+ PUTX_TO_PAGE(len, page, "Hardware Serial Number: ", HWSERNUMSZ,
+ info->HwSerNum);
+ PUTX_TO_PAGE(len, page, "Caliberation Version: ", CALVERSZ,
+ info->RfCalVer);
+ PUTD_TO_PAGE(len, page, "Caliberation Date: ", CALDATESZ,
+ info->RfCalDate);
+ PUTM_TO_PAGE(len, page, "Media State: %s\n",
+ (info->mediastate) ? "link" : "no link");
+ PUTM_TO_PAGE(len, page, "Connection Status: %s\n",
+ status[((info->ConStat) & 0x7)]);
+ PUTM_TO_PAGE(len, page, "RX packets: %ld\n", info->stats.rx_packets);
+ PUTM_TO_PAGE(len, page, "TX packets: %ld\n", info->stats.tx_packets);
+ PUTM_TO_PAGE(len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
+ PUTM_TO_PAGE(len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
+ PUTM_TO_PAGE(len, page, "Signal Strength: %s\n", signal[strength]);
+ PUTM_TO_PAGE(len, page, "Signal Quality: %s\n", signal[quality]);
+
+ return len;
}
static int
-ft1000NotifyProc (struct notifier_block *this, unsigned long event, void *ptr)
+ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = ptr;
struct ft1000_info *info;
- struct proc_dir_entry *ft1000_proc_file;
-
-info = (struct ft1000_info *) netdev_priv(dev);
-
-
- switch (event)
- {
- case NETDEV_CHANGENAME:
- remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
- ft1000_proc_file = create_proc_read_entry (dev->name, 0644,
- info->ft1000_proc_dir,
- ft1000ReadProc, dev);
- snprintf (info->netdevname, IFNAMSIZ, "%s", dev->name);
- break;
- }
- return NOTIFY_DONE;
+ struct proc_dir_entry *ft1000_proc_file;
+
+ info = netdev_priv(dev);
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+ ft1000_proc_file = create_proc_read_entry(dev->name, 0644,
+ info->ft1000_proc_dir,
+ ft1000ReadProc, dev);
+ snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
+ break;
+ }
+
+ return NOTIFY_DONE;
}
static struct notifier_block ft1000_netdev_notifier = {
- .notifier_call = ft1000NotifyProc
+ .notifier_call = ft1000NotifyProc,
};
-void
-ft1000InitProc (struct net_device *dev)
+int ft1000_init_proc(struct net_device *dev)
{
struct ft1000_info *info;
- struct proc_dir_entry *ft1000_proc_file;
- info = (struct ft1000_info *) netdev_priv(dev);
-
-
- info->ft1000_proc_dir = proc_mkdir (FT1000_PROC_DIR, FTNET_PROC);
- if (info->ft1000_proc_dir == NULL)
- {
- remove_proc_entry (FT1000_PROC_DIR, FTNET_PROC);
- }
-
-
- ft1000_proc_file =
- create_proc_read_entry (dev->name, 0644, info->ft1000_proc_dir,
- ft1000ReadProc, dev);
- if (ft1000_proc_file == NULL)
- {
- remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
- }
-
- snprintf (info->netdevname, IFNAMSIZ, "%s", dev->name);
- register_netdevice_notifier (&ft1000_netdev_notifier);
- return;
+ struct proc_dir_entry *ft1000_proc_file;
+ int ret = 0;
+
+ info = netdev_priv(dev);
+
+ info->ft1000_proc_dir = proc_mkdir(FT1000_PROC_DIR, FTNET_PROC);
+ if (info->ft1000_proc_dir == NULL) {
+ printk(KERN_WARNING "Unable to create %s dir.\n",
+ FT1000_PROC_DIR);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ft1000_proc_file =
+ create_proc_read_entry(dev->name, 0644,
+ info->ft1000_proc_dir, ft1000ReadProc, dev);
+
+ if (ft1000_proc_file == NULL) {
+ printk(KERN_WARNING "Unable to create /proc entry.\n");
+ ret = -EINVAL;
+ goto fail_entry;
+ }
+
+ snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
+
+ ret = register_netdevice_notifier(&ft1000_netdev_notifier);
+ if (ret)
+ goto fail_notif;
+
+ return 0;
+
+fail_notif:
+ remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+fail_entry:
+ remove_proc_entry(FT1000_PROC_DIR, FTNET_PROC);
+fail:
+ return ret;
}
-void
-ft1000CleanupProc(struct ft1000_info *info)
+void ft1000_cleanup_proc(struct ft1000_info *info)
{
- remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
- remove_proc_entry (FT1000_PROC_DIR, FTNET_PROC);
- unregister_netdevice_notifier (&ft1000_netdev_notifier);
-
- return;
+ remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+ remove_proc_entry(FT1000_PROC_DIR, FTNET_PROC);
+ unregister_netdevice_notifier(&ft1000_netdev_notifier);
}
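/*
 * The legacy read_proc contract that ft1000ReadProc relies on: the handler
 * formats at most one page, returns the byte count, and reports completion
 * through *eof on the follow-up call (off > 0).  A minimal sketch assuming
 * that pre-3.10 interface; example_read_proc() and the "status" label are
 * illustrative only.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>

static int example_read_proc(char *page, char **start, off_t off, int count,
			     int *eof, void *data)
{
	/* 'data' is whatever was handed to create_proc_read_entry() */
	struct net_device *dev = data;
	int len = 0;

	if (off > 0) {
		/* everything fit in the first call, nothing left to read */
		*eof = 1;
		return 0;
	}

	len += snprintf(page + len, PAGE_SIZE - len, "status: %s\n",
			netif_carrier_ok(dev) ? "link" : "no link");
	return len;
}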
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 28f55b2030e9..79482ac1c489 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -36,7 +36,7 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
-static BOOLEAN gPollingfailed = FALSE;
+static bool gPollingfailed = FALSE;
int ft1000_poll_thread(void *arg)
{
int ret = STATUS_SUCCESS;
@@ -64,7 +64,7 @@ static int ft1000_probe(struct usb_interface *interface,
int i, ret = 0, size;
struct ft1000_device *ft1000dev;
- struct ft1000_info *pft1000info;
+ struct ft1000_info *pft1000info = NULL;
const struct firmware *dsp_fw;
ft1000dev = kmalloc(sizeof(struct ft1000_device), GFP_KERNEL);
@@ -84,7 +84,6 @@ static int ft1000_probe(struct usb_interface *interface,
ft1000dev->dev = dev;
ft1000dev->status = 0;
ft1000dev->net = NULL;
- spin_lock_init(&ft1000dev->device_lock);
ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
ft1000dev->rx_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -164,7 +163,7 @@ static int ft1000_probe(struct usb_interface *interface,
if (ret)
goto err_load;
- pft1000info = (struct ft1000_info *) netdev_priv(ft1000dev->net);
+ pft1000info = netdev_priv(ft1000dev->net);
DEBUG("In probe: pft1000info=%p\n", pft1000info);
ret = dsp_reload(ft1000dev);
@@ -176,14 +175,18 @@ static int ft1000_probe(struct usb_interface *interface,
gPollingfailed = FALSE;
pft1000info->pPollThread =
kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
+
+ if (IS_ERR(pft1000info->pPollThread)) {
+ ret = PTR_ERR(pft1000info->pPollThread);
+ goto err_load;
+ }
+
msleep(500);
while (!pft1000info->CardReady) {
if (gPollingfailed) {
- if (pft1000info->pPollThread)
- kthread_stop(pft1000info->pPollThread);
ret = -EIO;
- goto err_load;
+ goto err_thread;
}
msleep(100);
DEBUG("ft1000_probe::Waiting for Card Ready\n");
@@ -193,14 +196,21 @@ static int ft1000_probe(struct usb_interface *interface,
ret = reg_ft1000_netdev(ft1000dev, interface);
if (ret)
- goto err_load;
+ goto err_thread;
- pft1000info->NetDevRegDone = 1;
+ ret = ft1000_init_proc(ft1000dev->net);
+ if (ret)
+ goto err_proc;
- ft1000InitProc(ft1000dev->net);
+ pft1000info->NetDevRegDone = 1;
return 0;
+err_proc:
+ unregister_netdev(ft1000dev->net);
+ free_netdev(ft1000dev->net);
+err_thread:
+ kthread_stop(pft1000info->pPollThread);
err_load:
kfree(pFileStart);
err_fw:
@@ -218,7 +228,7 @@ static void ft1000_disconnect(struct usb_interface *interface)
DEBUG("In disconnect pft1000info=%p\n", pft1000info);
if (pft1000info) {
- ft1000CleanupProc(pft1000info);
+ ft1000_cleanup_proc(pft1000info);
if (pft1000info->pPollThread)
kthread_stop(pft1000info->pPollThread);
@@ -226,7 +236,7 @@ static void ft1000_disconnect(struct usb_interface *interface)
if (pft1000info->pFt1000Dev->net) {
DEBUG("ft1000_disconnect: destroy char driver\n");
- ft1000_DestroyDevice(pft1000info->pFt1000Dev->net);
+ ft1000_destroy_dev(pft1000info->pFt1000Dev->net);
unregister_netdev(pft1000info->pFt1000Dev->net);
DEBUG
("ft1000_disconnect: network device unregisterd\n");
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index a9d419a98a06..a143e9ca4f08 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -98,16 +98,6 @@ struct prov_record {
/*end of Jim*/
#define DEBUG(args...) printk(KERN_INFO args)
-#define UCHAR u8
-#define USHORT u16
-#define ULONG u32 /* WTF ??? */
-#define BOOLEAN u8
-#define PULONG u32 *
-#define PUSHORT u16 *
-#define PUCHAR u8 *
-#define PCHAR u8 *
-#define UINT u32
-
#define FALSE 0
#define TRUE 1
@@ -372,15 +362,15 @@ struct prov_record {
-#define ISR_EMPTY (UCHAR)0x00 // no bits set in ISR
+#define ISR_EMPTY (u8)0x00 // no bits set in ISR
-#define ISR_DOORBELL_ACK (UCHAR)0x01 // the doorbell i sent has been recieved.
+#define ISR_DOORBELL_ACK (u8)0x01 // the doorbell I sent has been received.
-#define ISR_DOORBELL_PEND (UCHAR)0x02 // doorbell for me
+#define ISR_DOORBELL_PEND (u8)0x02 // doorbell for me
-#define ISR_RCV (UCHAR)0x04 // packet received with no errors
+#define ISR_RCV (u8)0x04 // packet received with no errors
-#define ISR_WATERMARK (UCHAR)0x08 //
+#define ISR_WATERMARK (u8)0x08 //
@@ -466,12 +456,9 @@ struct ft1000_device
{
struct usb_device *dev;
struct net_device *net;
- spinlock_t device_lock;
u32 status;
- wait_queue_head_t control_wait;
-
struct urb *rx_urb;
struct urb *tx_urb;
@@ -486,6 +473,13 @@ struct ft1000_device
// struct net_device_stats stats; //mbelian
} __attribute__ ((packed));
+struct ft1000_debug_dirs {
+ struct list_head list;
+ struct dentry *dent;
+ struct dentry *file;
+ int int_number;
+};
+
struct ft1000_info {
struct ft1000_device *pFt1000Dev;
struct net_device_stats stats;
@@ -497,9 +491,9 @@ struct ft1000_info {
unsigned char usbboot;
unsigned short dspalive;
u16 ASIC_ID;
- BOOLEAN fProvComplete;
- BOOLEAN fCondResetPend;
- BOOLEAN fAppMsgPend;
+ bool fProvComplete;
+ bool fCondResetPend;
+ bool fAppMsgPend;
char *pfwimg;
int fwimgsz;
u16 DrvErrNum;
@@ -520,7 +514,7 @@ struct ft1000_info {
int NetDevRegDone;
u8 CardNumber;
u8 DeviceName[15];
- int DeviceMajor;
+ struct ft1000_debug_dirs nodes;
int registered;
int mediastate;
int dhcpflg;
@@ -567,26 +561,26 @@ struct dpram_blk {
} __attribute__ ((packed));
u16 ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data, u16 nRegIndx);
-u16 ft1000_write_register(struct ft1000_device *ft1000dev, USHORT value, u16 nRegIndx);
-u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt);
-u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt);
-u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, u8 highlow);
-u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, USHORT indx, USHORT value, u8 highlow);
-u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer);
-u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer);
+u16 ft1000_write_register(struct ft1000_device *ft1000dev, u16 value, u16 nRegIndx);
+u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt);
+u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt);
+u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u8 highlow);
+u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow);
+u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer);
+u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer);
extern void *pFileStart;
extern size_t FileLength;
extern int numofmsgbuf;
int ft1000_close (struct net_device *dev);
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG FileLength);
+u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, u32 FileLength);
extern struct list_head freercvpool;
extern spinlock_t free_buff_lock; // lock to arbitrate free buffer list for receive command data
-int ft1000_CreateDevice(struct ft1000_device *dev);
-void ft1000_DestroyDevice(struct net_device *dev);
+int ft1000_create_dev(struct ft1000_device *dev);
+void ft1000_destroy_dev(struct net_device *dev);
extern void CardSendCommand(struct ft1000_device *ft1000dev, void *ptempbuffer, int size);
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
@@ -600,8 +594,8 @@ struct usb_interface;
int reg_ft1000_netdev(struct ft1000_device *ft1000dev, struct usb_interface *intf);
int ft1000_poll(void* dev_id);
-void ft1000InitProc(struct net_device *dev);
-void ft1000CleanupProc(struct ft1000_info *info);
+int ft1000_init_proc(struct net_device *dev);
+void ft1000_cleanup_proc(struct ft1000_info *info);
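/*
 * The ft1000_debug_dirs node introduced above (list head, two dentry
 * pointers and a card number) reads like bookkeeping for per-card debugfs
 * entries replacing the old DeviceMajor char device -- that interpretation
 * is an assumption.  A hedged sketch of filling in such a node;
 * example_add_node(), the "ft1000_N" directory name and the fops argument
 * are illustrative only.
 */
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include "ft1000_usb.h"		/* struct ft1000_debug_dirs */

static LIST_HEAD(example_nodes);

static int example_add_node(struct ft1000_debug_dirs *node, int card,
			    void *priv, const struct file_operations *fops)
{
	char name[16];

	snprintf(name, sizeof(name), "ft1000_%d", card);
	node->int_number = card;
	node->dent = debugfs_create_dir(name, NULL);
	if (!node->dent)
		return -ENOMEM;
	node->file = debugfs_create_file("device", 0600, node->dent, priv, fops);
	if (!node->file) {
		debugfs_remove(node->dent);
		return -ENOMEM;
	}
	list_add(&node->list, &example_nodes);
	return 0;
}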
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 48d4e483d8a4..6c9279a6d606 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -624,7 +624,7 @@ struct go7007 *go7007_alloc(struct go7007_board_info *board, struct device *dev)
go->dvd_mode = 0;
go->interlace_coding = 0;
for (i = 0; i < 4; ++i)
- go->modet[i].enable = 0;;
+ go->modet[i].enable = 0;
for (i = 0; i < 1624; ++i)
go->modet_map[i] = 0;
go->audio_deliver = NULL;
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index b46349bb43bf..acd39bd75b1c 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
hv_vmbus-y := vmbus_drv.o osd.o \
- vmbus.o hv.o connection.o channel.o \
+ hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
hv_storvsc-y := storvsc_drv.o storvsc.o
hv_blkvsc-y := blkvsc_drv.o blkvsc.o
diff --git a/drivers/staging/hv/blkvsc.c b/drivers/staging/hv/blkvsc.c
index d5b0abd771ab..bc16d9172eb2 100644
--- a/drivers/staging/hv/blkvsc.c
+++ b/drivers/staging/hv/blkvsc.c
@@ -25,24 +25,24 @@
#include "osd.h"
#include "storvsc.c"
-static const char *gBlkDriverName = "blkvsc";
+static const char *g_blk_driver_name = "blkvsc";
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
-static const struct hv_guid gBlkVscDeviceType = {
+static const struct hv_guid g_blk_device_type = {
.data = {
0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
}
};
-static int BlkVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int blk_vsc_on_device_add(struct hv_device *device, void *additional_info)
{
- struct storvsc_device_info *deviceInfo;
+ struct storvsc_device_info *device_info;
int ret = 0;
- deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
+ device_info = (struct storvsc_device_info *)additional_info;
- ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
+ ret = stor_vsc_on_device_add(device, additional_info);
if (ret != 0)
return ret;
@@ -51,31 +51,31 @@ static int BlkVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
* id. For IDE devices, the device instance id is formatted as
* <bus id> * - <device id> - 8899 - 000000000000.
*/
- deviceInfo->PathId = Device->deviceInstance.data[3] << 24 |
- Device->deviceInstance.data[2] << 16 |
- Device->deviceInstance.data[1] << 8 |
- Device->deviceInstance.data[0];
+ device_info->path_id = device->deviceInstance.data[3] << 24 |
+ device->deviceInstance.data[2] << 16 |
+ device->deviceInstance.data[1] << 8 |
+ device->deviceInstance.data[0];
- deviceInfo->TargetId = Device->deviceInstance.data[5] << 8 |
- Device->deviceInstance.data[4];
+ device_info->target_id = device->deviceInstance.data[5] << 8 |
+ device->deviceInstance.data[4];
return ret;
}
-int BlkVscInitialize(struct hv_driver *Driver)
+int blk_vsc_initialize(struct hv_driver *driver)
{
- struct storvsc_driver_object *storDriver;
+ struct storvsc_driver_object *stor_driver;
int ret = 0;
- storDriver = (struct storvsc_driver_object *)Driver;
+ stor_driver = (struct storvsc_driver_object *)driver;
/* Make sure we are at least 2 pages since 1 page is used for control */
- /* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
+ /* ASSERT(stor_driver->RingBufferSize >= (PAGE_SIZE << 1)); */
- Driver->name = gBlkDriverName;
- memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(struct hv_guid));
+ driver->name = g_blk_driver_name;
+ memcpy(&driver->deviceType, &g_blk_device_type, sizeof(struct hv_guid));
- storDriver->RequestExtSize = sizeof(struct storvsc_request_extension);
+ stor_driver->request_ext_size = sizeof(struct storvsc_request_extension);
/*
* Divide the ring buffer data size (which is 1 page less than the ring
@@ -83,20 +83,20 @@ int BlkVscInitialize(struct hv_driver *Driver)
* by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
- storDriver->MaxOutstandingRequestsPerChannel =
- ((storDriver->RingBufferSize - PAGE_SIZE) /
+ stor_driver->max_outstanding_req_per_channel =
+ ((stor_driver->ring_buffer_size - PAGE_SIZE) /
ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET +
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
DPRINT_INFO(BLKVSC, "max io outstd %u",
- storDriver->MaxOutstandingRequestsPerChannel);
+ stor_driver->max_outstanding_req_per_channel);
/* Setup the dispatch table */
- storDriver->Base.OnDeviceAdd = BlkVscOnDeviceAdd;
- storDriver->Base.OnDeviceRemove = StorVscOnDeviceRemove;
- storDriver->Base.OnCleanup = StorVscOnCleanup;
- storDriver->OnIORequest = StorVscOnIORequest;
+ stor_driver->base.OnDeviceAdd = blk_vsc_on_device_add;
+ stor_driver->base.OnDeviceRemove = stor_vsc_on_device_remove;
+ stor_driver->base.OnCleanup = stor_vsc_on_cleanup;
+ stor_driver->on_io_request = stor_vsc_on_io_request;
return ret;
}
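/*
 * The max_outstanding_req_per_channel computation above in plain terms:
 * one PAGE_SIZE of the ring buffer is reserved for control, and what
 * remains is divided by the size of a single on-ring request rounded up
 * to u64 alignment.  A sketch with a stand-in request size -- the real
 * value comes from MAX_MULTIPAGE_BUFFER_PACKET plus struct vstor_packet,
 * whose sizes are not reproduced here.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

#define EXAMPLE_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static u32 example_max_outstanding(u32 ring_buffer_size, u32 request_size)
{
	u32 data_size = ring_buffer_size - PAGE_SIZE;	/* control page removed */

	return data_size / EXAMPLE_ALIGN_UP(request_size + sizeof(u64),
					    sizeof(u64));
}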
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 3f81ca591064..4fb809485d9e 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -177,13 +177,13 @@ static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
int ret;
- storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
+ storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size;
/* Callback to client driver to complete the initialization */
- drv_init(&storvsc_drv_obj->Base);
+ drv_init(&storvsc_drv_obj->base);
- drv_ctx->driver.name = storvsc_drv_obj->Base.name;
- memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
+ drv_ctx->driver.name = storvsc_drv_obj->base.name;
+ memcpy(&drv_ctx->class_id, &storvsc_drv_obj->base.deviceType,
sizeof(struct hv_guid));
drv_ctx->probe = blkvsc_probe;
@@ -230,8 +230,8 @@ static void blkvsc_drv_exit(void)
device_unregister(current_dev);
}
- if (storvsc_drv_obj->Base.OnCleanup)
- storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
+ if (storvsc_drv_obj->base.OnCleanup)
+ storvsc_drv_obj->base.OnCleanup(&storvsc_drv_obj->base);
vmbus_child_driver_unregister(drv_ctx);
@@ -262,7 +262,7 @@ static int blkvsc_probe(struct device *device)
DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
- if (!storvsc_drv_obj->Base.OnDeviceAdd) {
+ if (!storvsc_drv_obj->base.OnDeviceAdd) {
DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
ret = -1;
goto Cleanup;
@@ -284,7 +284,7 @@ static int blkvsc_probe(struct device *device)
blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
sizeof(struct blkvsc_request) +
- storvsc_drv_obj->RequestExtSize, 0,
+ storvsc_drv_obj->request_ext_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!blkdev->request_pool) {
ret = -ENOMEM;
@@ -293,7 +293,7 @@ static int blkvsc_probe(struct device *device)
/* Call to the vsc driver to add the device */
- ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
+ ret = storvsc_drv_obj->base.OnDeviceAdd(device_obj, &device_info);
if (ret != 0) {
DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
goto Cleanup;
@@ -301,9 +301,9 @@ static int blkvsc_probe(struct device *device)
blkdev->device_ctx = device_ctx;
/* this identified the device 0 or 1 */
- blkdev->target = device_info.TargetId;
+ blkdev->target = device_info.target_id;
/* this identified the ide ctrl 0 or 1 */
- blkdev->path = device_info.PathId;
+ blkdev->path = device_info.path_id;
dev_set_drvdata(device, blkdev);
@@ -368,6 +368,7 @@ static int blkvsc_probe(struct device *device)
blkdev->gd->first_minor = 0;
blkdev->gd->fops = &block_ops;
blkdev->gd->private_data = blkdev;
+ blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
blkvsc_do_inquiry(blkdev);
@@ -391,7 +392,7 @@ static int blkvsc_probe(struct device *device)
return ret;
Remove:
- storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+ storvsc_drv_obj->base.OnDeviceRemove(device_obj);
Cleanup:
if (blkdev) {
@@ -459,9 +460,9 @@ static int blkvsc_do_flush(struct block_device_context *blkdev)
blkvsc_req->req = NULL;
blkvsc_req->write = 0;
- blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
- blkvsc_req->request.DataBuffer.Offset = 0;
- blkvsc_req->request.DataBuffer.Length = 0;
+ blkvsc_req->request.data_buffer.PfnArray[0] = 0;
+ blkvsc_req->request.data_buffer.Offset = 0;
+ blkvsc_req->request.data_buffer.Length = 0;
blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
blkvsc_req->cmd_len = 10;
@@ -506,9 +507,9 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
blkvsc_req->req = NULL;
blkvsc_req->write = 0;
- blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
- blkvsc_req->request.DataBuffer.Offset = 0;
- blkvsc_req->request.DataBuffer.Length = 64;
+ blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+ blkvsc_req->request.data_buffer.Offset = 0;
+ blkvsc_req->request.data_buffer.Length = 64;
blkvsc_req->cmnd[0] = INQUIRY;
blkvsc_req->cmnd[1] = 0x1; /* Get product data */
@@ -593,9 +594,9 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
blkvsc_req->req = NULL;
blkvsc_req->write = 0;
- blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
- blkvsc_req->request.DataBuffer.Offset = 0;
- blkvsc_req->request.DataBuffer.Length = 8;
+ blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+ blkvsc_req->request.data_buffer.Offset = 0;
+ blkvsc_req->request.data_buffer.Length = 8;
blkvsc_req->cmnd[0] = READ_CAPACITY;
blkvsc_req->cmd_len = 16;
@@ -614,7 +615,7 @@ static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
/* check error */
- if (blkvsc_req->request.Status) {
+ if (blkvsc_req->request.status) {
scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr);
@@ -670,9 +671,9 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
blkvsc_req->req = NULL;
blkvsc_req->write = 0;
- blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
- blkvsc_req->request.DataBuffer.Offset = 0;
- blkvsc_req->request.DataBuffer.Length = 12;
+ blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+ blkvsc_req->request.data_buffer.Offset = 0;
+ blkvsc_req->request.data_buffer.Length = 12;
blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */
blkvsc_req->cmd_len = 16;
@@ -691,7 +692,7 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
/* check error */
- if (blkvsc_req->request.Status) {
+ if (blkvsc_req->request.status) {
scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (sense_hdr.asc == 0x3A) {
@@ -741,14 +742,14 @@ static int blkvsc_remove(struct device *device)
DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
- if (!storvsc_drv_obj->Base.OnDeviceRemove)
+ if (!storvsc_drv_obj->base.OnDeviceRemove)
return -1;
/*
* Call to the vsc driver to let it know that the device is being
* removed
*/
- ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+ ret = storvsc_drv_obj->base.OnDeviceRemove(device_obj);
if (ret != 0) {
/* TODO: */
DPRINT_ERR(BLKVSC_DRV,
@@ -865,38 +866,38 @@ static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
(blkvsc_req->write) ? "WRITE" : "READ",
(unsigned long) blkvsc_req->sector_start,
blkvsc_req->sector_count,
- blkvsc_req->request.DataBuffer.Offset,
- blkvsc_req->request.DataBuffer.Length);
+ blkvsc_req->request.data_buffer.Offset,
+ blkvsc_req->request.data_buffer.Length);
#if 0
- for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
+ for (i = 0; i < (blkvsc_req->request.data_buffer.Length >> 12); i++) {
DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
"req %p pfn[%d] %llx\n",
blkvsc_req, i,
- blkvsc_req->request.DataBuffer.PfnArray[i]);
+ blkvsc_req->request.data_buffer.PfnArray[i]);
}
#endif
storvsc_req = &blkvsc_req->request;
- storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
+ storvsc_req->extension = (void *)((unsigned long)blkvsc_req +
sizeof(struct blkvsc_request));
- storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
+ storvsc_req->type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
- storvsc_req->OnIOCompletion = request_completion;
- storvsc_req->Context = blkvsc_req;
+ storvsc_req->on_io_completion = request_completion;
+ storvsc_req->context = blkvsc_req;
- storvsc_req->Host = blkdev->port;
- storvsc_req->Bus = blkdev->path;
- storvsc_req->TargetId = blkdev->target;
- storvsc_req->LunId = 0; /* this is not really used at all */
+ storvsc_req->host = blkdev->port;
+ storvsc_req->bus = blkdev->path;
+ storvsc_req->target_id = blkdev->target;
+ storvsc_req->lun_id = 0; /* this is not really used at all */
- storvsc_req->CdbLen = blkvsc_req->cmd_len;
- storvsc_req->Cdb = blkvsc_req->cmnd;
+ storvsc_req->cdb_len = blkvsc_req->cmd_len;
+ storvsc_req->cdb = blkvsc_req->cmnd;
- storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
- storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
+ storvsc_req->sense_buffer = blkvsc_req->sense_buffer;
+ storvsc_req->sense_buffer_size = SCSI_SENSE_BUFFERSIZE;
- ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
+ ret = storvsc_drv_obj->on_io_request(&blkdev->device_ctx->device_obj,
&blkvsc_req->request);
if (ret == 0)
blkdev->num_outstanding_reqs++;
@@ -992,8 +993,10 @@ static int blkvsc_do_request(struct block_device_context *blkdev,
blkvsc_req->dev = blkdev;
blkvsc_req->req = req;
- blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
- blkvsc_req->request.DataBuffer.Length = 0;
+ blkvsc_req->request.data_buffer.Offset
+ = bvec->bv_offset;
+ blkvsc_req->request.data_buffer.Length
+ = 0;
/* Add to the group */
blkvsc_req->group = group;
@@ -1007,8 +1010,11 @@ static int blkvsc_do_request(struct block_device_context *blkdev,
}
/* Add the curr bvec/segment to the curr blkvsc_req */
- blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
- blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
+ blkvsc_req->request.data_buffer.
+ PfnArray[databuf_idx]
+ = page_to_pfn(bvec->bv_page);
+ blkvsc_req->request.data_buffer.Length
+ += bvec->bv_len;
prev_bvec = bvec;
@@ -1073,7 +1079,7 @@ static int blkvsc_do_request(struct block_device_context *blkdev,
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
struct blkvsc_request *blkvsc_req =
- (struct blkvsc_request *)request->Context;
+ (struct blkvsc_request *)request->context;
struct block_device_context *blkdev =
(struct block_device_context *)blkvsc_req->dev;
struct scsi_sense_hdr sense_hdr;
@@ -1083,7 +1089,7 @@ static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
blkdev->num_outstanding_reqs--;
- if (blkvsc_req->request.Status)
+ if (blkvsc_req->request.status)
if (scsi_normalize_sense(blkvsc_req->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("blkvsc", &sense_hdr);
@@ -1095,7 +1101,7 @@ static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
struct blkvsc_request *blkvsc_req =
- (struct blkvsc_request *)request->Context;
+ (struct blkvsc_request *)request->context;
struct block_device_context *blkdev =
(struct block_device_context *)blkvsc_req->dev;
unsigned long flags;
@@ -1110,7 +1116,7 @@ static void blkvsc_request_completion(struct hv_storvsc_request *request)
(blkvsc_req->write) ? "WRITE" : "READ",
(unsigned long)blkvsc_req->sector_start,
blkvsc_req->sector_count,
- blkvsc_req->request.DataBuffer.Length,
+ blkvsc_req->request.data_buffer.Length,
blkvsc_req->group->outstanding,
blkdev->num_outstanding_reqs);
@@ -1137,7 +1143,7 @@ static void blkvsc_request_completion(struct hv_storvsc_request *request)
list_del(&comp_req->req_entry);
if (!__blk_end_request(comp_req->req,
- (!comp_req->request.Status ? 0 : -EIO),
+ (!comp_req->request.status ? 0 : -EIO),
comp_req->sector_count * blkdev->sector_size)) {
/*
* All the sectors have been xferred ie the
@@ -1195,7 +1201,7 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
if (comp_req->req) {
ret = __blk_end_request(comp_req->req,
- (!comp_req->request.Status ? 0 : -EIO),
+ (!comp_req->request.status ? 0 : -EIO),
comp_req->sector_count *
blkdev->sector_size);
@@ -1482,7 +1488,7 @@ static int __init blkvsc_init(void)
DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
- ret = blkvsc_drv_init(BlkVscInitialize);
+ ret = blkvsc_drv_init(blk_vsc_initialize);
return ret;
}
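/*
 * The completion paths above all reduce to the same mapping: a non-zero
 * storvsc request.status becomes -EIO, and __blk_end_request() tells the
 * caller whether the struct request still has bytes outstanding.  A minimal
 * sketch of that step (queue lock held by the caller, as in
 * blkvsc_request_completion()); example_end_request() is an illustrative
 * name.
 */
#include <linux/blkdev.h>

static bool example_end_request(struct request *req, u32 vsc_status,
				unsigned int bytes_done)
{
	int error = vsc_status ? -EIO : 0;

	/* returns false once the whole request has been accounted for */
	return __blk_end_request(req, error, bytes_done);
}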
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
index 26ebc77f22b7..45a627d77b41 100644
--- a/drivers/staging/hv/channel.c
+++ b/drivers/staging/hv/channel.c
@@ -43,24 +43,24 @@ static void DumpMonitorPage(struct hv_monitor_page *MonitorPage)
int j = 0;
DPRINT_DBG(VMBUS, "monitorPage - %p, trigger state - %d",
- MonitorPage, MonitorPage->TriggerState);
+ MonitorPage, MonitorPage->trigger_state);
for (i = 0; i < 4; i++)
DPRINT_DBG(VMBUS, "trigger group (%d) - %llx", i,
- MonitorPage->TriggerGroup[i].AsUINT64);
+ MonitorPage->trigger_group[i].as_uint64);
for (i = 0; i < 4; i++) {
for (j = 0; j < 32; j++) {
DPRINT_DBG(VMBUS, "latency (%d)(%d) - %llx", i, j,
- MonitorPage->Latency[i][j]);
+ MonitorPage->latency[i][j]);
}
}
for (i = 0; i < 4; i++) {
for (j = 0; j < 32; j++) {
DPRINT_DBG(VMBUS, "param-conn id (%d)(%d) - %d", i, j,
- MonitorPage->Parameter[i][j].ConnectionId.Asu32);
+ MonitorPage->parameter[i][j].connectionid.asu32);
DPRINT_DBG(VMBUS, "param-flag (%d)(%d) - %d", i, j,
- MonitorPage->Parameter[i][j].FlagNumber);
+ MonitorPage->parameter[i][j].flag_number);
}
}
}
@@ -74,21 +74,21 @@ static void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
- if (channel->OfferMsg.MonitorAllocated) {
+ if (channel->offermsg.monitor_allocated) {
/* Each u32 represents 32 channels */
- set_bit(channel->OfferMsg.ChildRelId & 31,
+ set_bit(channel->offermsg.child_relid & 31,
(unsigned long *) gVmbusConnection.SendInterruptPage +
- (channel->OfferMsg.ChildRelId >> 5));
+ (channel->offermsg.child_relid >> 5));
monitorpage = gVmbusConnection.MonitorPages;
monitorpage++; /* Get the child to parent monitor page */
- set_bit(channel->MonitorBit,
- (unsigned long *)&monitorpage->TriggerGroup
- [channel->MonitorGroup].Pending);
+ set_bit(channel->monitor_bit,
+ (unsigned long *)&monitorpage->trigger_group
+ [channel->monitor_grp].pending);
} else {
- VmbusSetEvent(channel->OfferMsg.ChildRelId);
+ VmbusSetEvent(channel->offermsg.child_relid);
}
}
@@ -97,19 +97,19 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorPage;
- if (Channel->OfferMsg.MonitorAllocated) {
+ if (Channel->offermsg.monitor_allocated) {
/* Each u32 represents 32 channels */
- clear_bit(Channel->OfferMsg.ChildRelId & 31,
+ clear_bit(Channel->offermsg.child_relid & 31,
(unsigned long *)gVmbusConnection.SendInterruptPage +
- (Channel->OfferMsg.ChildRelId >> 5));
+ (Channel->offermsg.child_relid >> 5));
monitorPage =
(struct hv_monitor_page *)gVmbusConnection.MonitorPages;
monitorPage++; /* Get the child to parent monitor page */
- clear_bit(Channel->MonitorBit,
- (unsigned long *)&monitorPage->TriggerGroup
- [Channel->MonitorGroup].Pending);
+ clear_bit(Channel->monitor_bit,
+ (unsigned long *)&monitorPage->trigger_group
+ [Channel->monitor_grp].Pending);
}
}
@@ -121,42 +121,42 @@ void vmbus_get_debug_info(struct vmbus_channel *channel,
struct vmbus_channel_debug_info *debuginfo)
{
struct hv_monitor_page *monitorpage;
- u8 monitor_group = (u8)channel->OfferMsg.MonitorId / 32;
- u8 monitor_offset = (u8)channel->OfferMsg.MonitorId % 32;
+ u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
+ u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
/* u32 monitorBit = 1 << monitorOffset; */
- debuginfo->RelId = channel->OfferMsg.ChildRelId;
- debuginfo->State = channel->State;
- memcpy(&debuginfo->InterfaceType,
- &channel->OfferMsg.Offer.InterfaceType, sizeof(struct hv_guid));
- memcpy(&debuginfo->InterfaceInstance,
- &channel->OfferMsg.Offer.InterfaceInstance,
+ debuginfo->relid = channel->offermsg.child_relid;
+ debuginfo->state = channel->state;
+ memcpy(&debuginfo->interfacetype,
+ &channel->offermsg.offer.InterfaceType, sizeof(struct hv_guid));
+ memcpy(&debuginfo->interface_instance,
+ &channel->offermsg.offer.InterfaceInstance,
sizeof(struct hv_guid));
monitorpage = (struct hv_monitor_page *)gVmbusConnection.MonitorPages;
- debuginfo->MonitorId = channel->OfferMsg.MonitorId;
+ debuginfo->monitorid = channel->offermsg.monitorid;
- debuginfo->ServerMonitorPending =
- monitorpage->TriggerGroup[monitor_group].Pending;
- debuginfo->ServerMonitorLatency =
- monitorpage->Latency[monitor_group][monitor_offset];
- debuginfo->ServerMonitorConnectionId =
- monitorpage->Parameter[monitor_group]
- [monitor_offset].ConnectionId.u.Id;
+ debuginfo->servermonitor_pending =
+ monitorpage->trigger_group[monitor_group].pending;
+ debuginfo->servermonitor_latency =
+ monitorpage->latency[monitor_group][monitor_offset];
+ debuginfo->servermonitor_connectionid =
+ monitorpage->parameter[monitor_group]
+ [monitor_offset].connectionid.u.id;
monitorpage++;
- debuginfo->ClientMonitorPending =
- monitorpage->TriggerGroup[monitor_group].Pending;
- debuginfo->ClientMonitorLatency =
- monitorpage->Latency[monitor_group][monitor_offset];
- debuginfo->ClientMonitorConnectionId =
- monitorpage->Parameter[monitor_group]
- [monitor_offset].ConnectionId.u.Id;
+ debuginfo->clientmonitor_pending =
+ monitorpage->trigger_group[monitor_group].pending;
+ debuginfo->clientmonitor_latency =
+ monitorpage->latency[monitor_group][monitor_offset];
+ debuginfo->clientmonitor_connectionid =
+ monitorpage->parameter[monitor_group]
+ [monitor_offset].connectionid.u.id;
- RingBufferGetDebugInfo(&channel->Inbound, &debuginfo->Inbound);
- RingBufferGetDebugInfo(&channel->Outbound, &debuginfo->Outbound);
+ ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
+ ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
}
/*
@@ -176,11 +176,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
/* ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1))); */
/* ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1))); */
- newchannel->OnChannelCallback = onchannelcallback;
- newchannel->ChannelCallbackContext = context;
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
/* Allocate the ring buffer */
- out = osd_PageAlloc((send_ringbuffer_size + recv_ringbuffer_size)
+ out = osd_page_alloc((send_ringbuffer_size + recv_ringbuffer_size)
>> PAGE_SHIFT);
if (!out)
return -ENOMEM;
@@ -189,17 +189,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
in = (void *)((unsigned long)out + send_ringbuffer_size);
- newchannel->RingBufferPages = out;
- newchannel->RingBufferPageCount = (send_ringbuffer_size +
+ newchannel->ringbuffer_pages = out;
+ newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
recv_ringbuffer_size) >> PAGE_SHIFT;
- ret = RingBufferInit(&newchannel->Outbound, out, send_ringbuffer_size);
+ ret = ringbuffer_init(&newchannel->outbound, out, send_ringbuffer_size);
if (ret != 0) {
err = ret;
goto errorout;
}
- ret = RingBufferInit(&newchannel->Inbound, in, recv_ringbuffer_size);
+ ret = ringbuffer_init(&newchannel->inbound, in, recv_ringbuffer_size);
if (ret != 0) {
err = ret;
goto errorout;
@@ -210,13 +210,13 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
DPRINT_DBG(VMBUS, "Establishing ring buffer's gpadl for channel %p...",
newchannel);
- newchannel->RingBufferGpadlHandle = 0;
+ newchannel->ringbuffer_gpadlhandle = 0;
ret = vmbus_establish_gpadl(newchannel,
- newchannel->Outbound.RingBuffer,
+ newchannel->outbound.ring_buffer,
send_ringbuffer_size +
recv_ringbuffer_size,
- &newchannel->RingBufferGpadlHandle);
+ &newchannel->ringbuffer_gpadlhandle);
if (ret != 0) {
err = ret;
@@ -225,12 +225,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
DPRINT_DBG(VMBUS, "channel %p <relid %d gpadl 0x%x send ring %p "
"size %d recv ring %p size %d, downstreamoffset %d>",
- newchannel, newchannel->OfferMsg.ChildRelId,
- newchannel->RingBufferGpadlHandle,
- newchannel->Outbound.RingBuffer,
- newchannel->Outbound.RingSize,
- newchannel->Inbound.RingBuffer,
- newchannel->Inbound.RingSize,
+ newchannel, newchannel->offermsg.child_relid,
+ newchannel->ringbuffer_gpadlhandle,
+ newchannel->outbound.ring_buffer,
+ newchannel->outbound.ring_size,
+ newchannel->inbound.ring_buffer,
+ newchannel->inbound.ring_size,
send_ringbuffer_size);
/* Create and init the channel open message */
@@ -242,20 +242,20 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
goto errorout;
}
- openInfo->WaitEvent = osd_WaitEventCreate();
- if (!openInfo->WaitEvent) {
+ openInfo->waitevent = osd_waitevent_create();
+ if (!openInfo->waitevent) {
err = -ENOMEM;
goto errorout;
}
- openMsg = (struct vmbus_channel_open_channel *)openInfo->Msg;
- openMsg->Header.MessageType = ChannelMessageOpenChannel;
- openMsg->OpenId = newchannel->OfferMsg.ChildRelId; /* FIXME */
- openMsg->ChildRelId = newchannel->OfferMsg.ChildRelId;
- openMsg->RingBufferGpadlHandle = newchannel->RingBufferGpadlHandle;
- openMsg->DownstreamRingBufferPageOffset = send_ringbuffer_size >>
+ openMsg = (struct vmbus_channel_open_channel *)openInfo->msg;
+ openMsg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+ openMsg->openid = newchannel->offermsg.child_relid; /* FIXME */
+ openMsg->child_relid = newchannel->offermsg.child_relid;
+ openMsg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ openMsg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- openMsg->ServerContextAreaGpadlHandle = 0; /* TODO */
+ openMsg->server_contextarea_gpadlhandle = 0; /* TODO */
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -263,10 +263,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
}
if (userdatalen)
- memcpy(openMsg->UserData, userdata, userdatalen);
+ memcpy(openMsg->userdata, userdata, userdatalen);
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_add_tail(&openInfo->MsgListEntry,
+ list_add_tail(&openInfo->msglistentry,
&gVmbusConnection.ChannelMsgList);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
@@ -280,27 +280,27 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
}
/* FIXME: Need to time-out here */
- osd_WaitEventWait(openInfo->WaitEvent);
+ osd_waitevent_wait(openInfo->waitevent);
- if (openInfo->Response.OpenResult.Status == 0)
+ if (openInfo->response.open_result.status == 0)
DPRINT_INFO(VMBUS, "channel <%p> open success!!", newchannel);
else
DPRINT_INFO(VMBUS, "channel <%p> open failed - %d!!",
- newchannel, openInfo->Response.OpenResult.Status);
+ newchannel, openInfo->response.open_result.status);
Cleanup:
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_del(&openInfo->MsgListEntry);
+ list_del(&openInfo->msglistentry);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
- kfree(openInfo->WaitEvent);
+ kfree(openInfo->waitevent);
kfree(openInfo);
return 0;
errorout:
- RingBufferCleanup(&newchannel->Outbound);
- RingBufferCleanup(&newchannel->Inbound);
- osd_PageFree(out, (send_ringbuffer_size + recv_ringbuffer_size)
+ ringbuffer_cleanup(&newchannel->outbound);
+ ringbuffer_cleanup(&newchannel->inbound);
+ osd_page_free(out, (send_ringbuffer_size + recv_ringbuffer_size)
>> PAGE_SHIFT);
kfree(openInfo);
return err;
@@ -322,7 +322,7 @@ static void dump_gpadl_body(struct vmbus_channel_gpadl_body *gpadl, u32 len)
for (i = 0; i < pfncount; i++)
DPRINT_DBG(VMBUS, "gpadl body - %d) pfn %llu",
- i, gpadl->Pfn[i]);
+ i, gpadl->pfn[i]);
}
/*
@@ -336,18 +336,18 @@ static void dump_gpadl_header(struct vmbus_channel_gpadl_header *gpadl)
DPRINT_DBG(VMBUS,
"gpadl header - relid %d, range count %d, range buflen %d",
- gpadl->ChildRelId, gpadl->RangeCount, gpadl->RangeBufLen);
- for (i = 0; i < gpadl->RangeCount; i++) {
- pagecount = gpadl->Range[i].ByteCount >> PAGE_SHIFT;
+ gpadl->child_relid, gpadl->rangecount, gpadl->range_buflen);
+ for (i = 0; i < gpadl->rangecount; i++) {
+ pagecount = gpadl->range[i].ByteCount >> PAGE_SHIFT;
pagecount = (pagecount > 26) ? 26 : pagecount;
DPRINT_DBG(VMBUS, "gpadl range %d - len %d offset %d "
- "page count %d", i, gpadl->Range[i].ByteCount,
- gpadl->Range[i].ByteOffset, pagecount);
+ "page count %d", i, gpadl->range[i].ByteCount,
+ gpadl->range[i].ByteOffset, pagecount);
for (j = 0; j < pagecount; j++)
DPRINT_DBG(VMBUS, "%d) pfn %llu", j,
- gpadl->Range[i].PfnArray[j]);
+ gpadl->range[i].PfnArray[j]);
}
}
@@ -391,18 +391,18 @@ static int create_gpadl_header(void *kbuffer, u32 size,
if (!msgheader)
goto nomem;
- INIT_LIST_HEAD(&msgheader->SubMsgList);
- msgheader->MessageSize = msgsize;
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->Msg;
- gpadl_header->RangeCount = 1;
- gpadl_header->RangeBufLen = sizeof(struct gpa_range) +
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
- gpadl_header->Range[0].ByteOffset = 0;
- gpadl_header->Range[0].ByteCount = size;
+ gpadl_header->range[0].ByteOffset = 0;
+ gpadl_header->range[0].ByteCount = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->Range[0].PfnArray[i] = pfn+i;
+ gpadl_header->range[0].PfnArray[i] = pfn+i;
*msginfo = msgheader;
*messagecount = 1;
@@ -428,10 +428,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
/* FIXME: we probably need to more if this fails */
if (!msgbody)
goto nomem;
- msgbody->MessageSize = msgsize;
+ msgbody->msgsize = msgsize;
(*messagecount)++;
gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->Msg;
+ (struct vmbus_channel_gpadl_body *)msgbody->msg;
/*
* FIXME:
@@ -440,11 +440,11 @@ static int create_gpadl_header(void *kbuffer, u32 size,
*/
/* gpadl_body->Gpadl = kbuffer; */
for (i = 0; i < pfncurr; i++)
- gpadl_body->Pfn[i] = pfn + pfnsum + i;
+ gpadl_body->pfn[i] = pfn + pfnsum + i;
/* add to msg header */
- list_add_tail(&msgbody->MsgListEntry,
- &msgheader->SubMsgList);
+ list_add_tail(&msgbody->msglistentry,
+ &msgheader->submsglist);
pfnsum += pfncurr;
pfnleft -= pfncurr;
}
@@ -456,17 +456,17 @@ static int create_gpadl_header(void *kbuffer, u32 size,
msgheader = kzalloc(msgsize, GFP_KERNEL);
if (msgheader == NULL)
goto nomem;
- msgheader->MessageSize = msgsize;
+ msgheader->msgsize = msgsize;
gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->Msg;
- gpadl_header->RangeCount = 1;
- gpadl_header->RangeBufLen = sizeof(struct gpa_range) +
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
- gpadl_header->Range[0].ByteOffset = 0;
- gpadl_header->Range[0].ByteCount = size;
+ gpadl_header->range[0].ByteOffset = 0;
+ gpadl_header->range[0].ByteCount = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->Range[0].PfnArray[i] = pfn+i;
+ gpadl_header->range[0].PfnArray[i] = pfn+i;
*msginfo = msgheader;
*messagecount = 1;
@@ -508,21 +508,21 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
if (ret)
return ret;
- msginfo->WaitEvent = osd_WaitEventCreate();
- if (!msginfo->WaitEvent) {
+ msginfo->waitevent = osd_waitevent_create();
+ if (!msginfo->waitevent) {
ret = -ENOMEM;
goto Cleanup;
}
- gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->Msg;
- gpadlmsg->Header.MessageType = ChannelMessageGpadlHeader;
- gpadlmsg->ChildRelId = channel->OfferMsg.ChildRelId;
- gpadlmsg->Gpadl = next_gpadl_handle;
+ gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
+ gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
+ gpadlmsg->child_relid = channel->offermsg.child_relid;
+ gpadlmsg->gpadl = next_gpadl_handle;
dump_gpadl_header(gpadlmsg);
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_add_tail(&msginfo->MsgListEntry,
+ list_add_tail(&msginfo->msglistentry,
&gVmbusConnection.ChannelMsgList);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
@@ -530,9 +530,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
kbuffer, size, msgcount);
DPRINT_DBG(VMBUS, "Sending GPADL Header - len %zd",
- msginfo->MessageSize - sizeof(*msginfo));
+ msginfo->msgsize - sizeof(*msginfo));
- ret = VmbusPostMessage(gpadlmsg, msginfo->MessageSize -
+ ret = VmbusPostMessage(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo));
if (ret != 0) {
DPRINT_ERR(VMBUS, "Unable to open channel - %d", ret);
@@ -540,48 +540,48 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
}
if (msgcount > 1) {
- list_for_each(curr, &msginfo->SubMsgList) {
+ list_for_each(curr, &msginfo->submsglist) {
/* FIXME: should this use list_entry() instead ? */
submsginfo = (struct vmbus_channel_msginfo *)curr;
gpadl_body =
- (struct vmbus_channel_gpadl_body *)submsginfo->Msg;
+ (struct vmbus_channel_gpadl_body *)submsginfo->msg;
- gpadl_body->Header.MessageType =
- ChannelMessageGpadlBody;
- gpadl_body->Gpadl = next_gpadl_handle;
+ gpadl_body->header.msgtype =
+ CHANNELMSG_GPADL_BODY;
+ gpadl_body->gpadl = next_gpadl_handle;
DPRINT_DBG(VMBUS, "Sending GPADL Body - len %zd",
- submsginfo->MessageSize -
+ submsginfo->msgsize -
sizeof(*submsginfo));
- dump_gpadl_body(gpadl_body, submsginfo->MessageSize -
+ dump_gpadl_body(gpadl_body, submsginfo->msgsize -
sizeof(*submsginfo));
ret = VmbusPostMessage(gpadl_body,
- submsginfo->MessageSize -
+ submsginfo->msgsize -
sizeof(*submsginfo));
if (ret != 0)
goto Cleanup;
}
}
- osd_WaitEventWait(msginfo->WaitEvent);
+ osd_waitevent_wait(msginfo->waitevent);
/* At this point, we received the gpadl created msg */
DPRINT_DBG(VMBUS, "Received GPADL created "
"(relid %d, status %d handle %x)",
- channel->OfferMsg.ChildRelId,
- msginfo->Response.GpadlCreated.CreationStatus,
- gpadlmsg->Gpadl);
+ channel->offermsg.child_relid,
+ msginfo->response.gpadl_created.creation_status,
+ gpadlmsg->gpadl);
- *gpadl_handle = gpadlmsg->Gpadl;
+ *gpadl_handle = gpadlmsg->gpadl;
Cleanup:
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_del(&msginfo->MsgListEntry);
+ list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
- kfree(msginfo->WaitEvent);
+ kfree(msginfo->waitevent);
kfree(msginfo);
return ret;
}
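
vmbus_establish_gpadl() above follows the driver's usual request/response shape: queue the request on the pending-message list, post it to the host, block until the response handler fills it in and wakes the waiter, then unqueue and free. A minimal sketch of that pattern, with a struct completion standing in for the driver's osd_waitevent and purely illustrative names (demo_request, demo_transact):

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_request {
	struct list_head entry;		/* lives on the pending list */
	struct completion done;		/* signalled by the response handler */
	int status;			/* copied from the response */
};

static int demo_transact(struct demo_request *req, struct list_head *pending,
			 spinlock_t *lock, int (*post)(struct demo_request *))
{
	unsigned long flags;
	int ret;

	init_completion(&req->done);

	/* Queue first, so the response handler can find this request. */
	spin_lock_irqsave(lock, flags);
	list_add_tail(&req->entry, pending);
	spin_unlock_irqrestore(lock, flags);

	ret = post(req);
	if (ret == 0)
		wait_for_completion(&req->done);

	/* Always unqueue, whether the post succeeded or not. */
	spin_lock_irqsave(lock, flags);
	list_del(&req->entry);
	spin_unlock_irqrestore(lock, flags);

	return ret ? ret : req->status;
}
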
@@ -604,20 +604,20 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
if (!info)
return -ENOMEM;
- info->WaitEvent = osd_WaitEventCreate();
- if (!info->WaitEvent) {
+ info->waitevent = osd_waitevent_create();
+ if (!info->waitevent) {
kfree(info);
return -ENOMEM;
}
- msg = (struct vmbus_channel_gpadl_teardown *)info->Msg;
+ msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
- msg->Header.MessageType = ChannelMessageGpadlTeardown;
- msg->ChildRelId = channel->OfferMsg.ChildRelId;
- msg->Gpadl = gpadl_handle;
+ msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
+ msg->child_relid = channel->offermsg.child_relid;
+ msg->gpadl = gpadl_handle;
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_add_tail(&info->MsgListEntry,
+ list_add_tail(&info->msglistentry,
&gVmbusConnection.ChannelMsgList);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
@@ -628,14 +628,14 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
/* something... */
}
- osd_WaitEventWait(info->WaitEvent);
+ osd_waitevent_wait(info->waitevent);
/* Received a torndown response */
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_del(&info->MsgListEntry);
+ list_del(&info->msglistentry);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
- kfree(info->WaitEvent);
+ kfree(info->waitevent);
kfree(info);
return ret;
}
@@ -652,7 +652,7 @@ void vmbus_close(struct vmbus_channel *channel)
int ret;
/* Stop callback and cancel the timer asap */
- channel->OnChannelCallback = NULL;
+ channel->onchannel_callback = NULL;
del_timer_sync(&channel->poll_timer);
/* Send a closing message */
@@ -663,11 +663,11 @@ void vmbus_close(struct vmbus_channel *channel)
if (!info)
return;
- /* info->waitEvent = osd_WaitEventCreate(); */
+ /* info->waitEvent = osd_waitevent_create(); */
- msg = (struct vmbus_channel_close_channel *)info->Msg;
- msg->Header.MessageType = ChannelMessageCloseChannel;
- msg->ChildRelId = channel->OfferMsg.ChildRelId;
+ msg = (struct vmbus_channel_close_channel *)info->msg;
+ msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
+ msg->child_relid = channel->offermsg.child_relid;
ret = VmbusPostMessage(msg, sizeof(struct vmbus_channel_close_channel));
if (ret != 0) {
@@ -676,17 +676,17 @@ void vmbus_close(struct vmbus_channel *channel)
}
/* Tear down the gpadl for the channel's ring buffer */
- if (channel->RingBufferGpadlHandle)
+ if (channel->ringbuffer_gpadlhandle)
vmbus_teardown_gpadl(channel,
- channel->RingBufferGpadlHandle);
+ channel->ringbuffer_gpadlhandle);
/* TODO: Send a msg to release the childRelId */
/* Cleanup the ring buffers for this channel */
- RingBufferCleanup(&channel->Outbound);
- RingBufferCleanup(&channel->Inbound);
+ ringbuffer_cleanup(&channel->outbound);
+ ringbuffer_cleanup(&channel->inbound);
- osd_PageFree(channel->RingBufferPages, channel->RingBufferPageCount);
+ osd_page_free(channel->ringbuffer_pages, channel->ringbuffer_pagecount);
kfree(info);
@@ -696,9 +696,9 @@ void vmbus_close(struct vmbus_channel *channel)
* caller will free the channel
*/
- if (channel->State == CHANNEL_OPEN_STATE) {
+ if (channel->state == CHANNEL_OPEN_STATE) {
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
- list_del(&channel->ListEntry);
+ list_del(&channel->listentry);
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
free_channel(channel);
@@ -752,10 +752,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+ ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
/* TODO: We should determine if this is optional */
- if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+ if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
return ret;
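
Each send path above hands ringbuffer_write() a three-part scatterlist: the packet descriptor, the caller's payload, and padding that rounds the packet out to an 8-byte multiple. A sketch of assembling such a list; the function and parameter names are illustrative, not part of the driver:

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Build the descriptor / payload / padding triple used by the send paths. */
static void demo_build_bufferlist(struct scatterlist bufferlist[3],
				  void *desc, u32 desclen,
				  const void *payload, u32 payloadlen,
				  u64 *pad, u32 padlen)
{
	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], desc, desclen);
	sg_set_buf(&bufferlist[1], payload, payloadlen);
	sg_set_buf(&bufferlist[2], pad, padlen);
}
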
@@ -817,10 +817,10 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+ ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
/* TODO: We should determine if this is optional */
- if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+ if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
return ret;
@@ -886,10 +886,10 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+ ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
/* TODO: We should determine if this is optional */
- if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+ if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
return ret;
@@ -923,7 +923,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
spin_lock_irqsave(&channel->inbound_lock, flags);
- ret = RingBufferPeek(&channel->Inbound, &desc,
+ ret = ringbuffer_peek(&channel->inbound, &desc,
sizeof(struct vmpacket_descriptor));
if (ret != 0) {
spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -940,7 +940,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
DPRINT_DBG(VMBUS, "packet received on channel %p relid %d <type %d "
"flag %d tid %llx pktlen %d datalen %d> ",
- channel, channel->OfferMsg.ChildRelId, desc.Type,
+ channel, channel->offermsg.child_relid, desc.Type,
desc.Flags, desc.TransactionId, packetlen, userlen);
*buffer_actual_len = userlen;
@@ -956,7 +956,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
*requestid = desc.TransactionId;
/* Copy over the packet to the user buffer */
- ret = RingBufferRead(&channel->Inbound, buffer, userlen,
+ ret = ringbuffer_read(&channel->inbound, buffer, userlen,
(desc.DataOffset8 << 3));
spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -983,7 +983,7 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
spin_lock_irqsave(&channel->inbound_lock, flags);
- ret = RingBufferPeek(&channel->Inbound, &desc,
+ ret = ringbuffer_peek(&channel->inbound, &desc,
sizeof(struct vmpacket_descriptor));
if (ret != 0) {
spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -999,7 +999,7 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
DPRINT_DBG(VMBUS, "packet received on channel %p relid %d <type %d "
"flag %d tid %llx pktlen %d datalen %d> ",
- channel, channel->OfferMsg.ChildRelId, desc.Type,
+ channel, channel->offermsg.child_relid, desc.Type,
desc.Flags, desc.TransactionId, packetlen, userlen);
*buffer_actual_len = packetlen;
@@ -1015,7 +1015,7 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
*requestid = desc.TransactionId;
/* Copy over the entire packet to the user buffer */
- ret = RingBufferRead(&channel->Inbound, buffer, packetlen, 0);
+ ret = ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
spin_unlock_irqrestore(&channel->inbound_lock, flags);
return 0;
@@ -1030,7 +1030,7 @@ void vmbus_onchannel_event(struct vmbus_channel *channel)
dump_vmbus_channel(channel);
/* ASSERT(Channel->OnChannelCallback); */
- channel->OnChannelCallback(channel->ChannelCallbackContext);
+ channel->onchannel_callback(channel->channel_callback_context);
mod_timer(&channel->poll_timer, jiffies + usecs_to_jiffies(100));
}
@@ -1042,8 +1042,8 @@ void vmbus_ontimer(unsigned long data)
{
struct vmbus_channel *channel = (struct vmbus_channel *)data;
- if (channel->OnChannelCallback)
- channel->OnChannelCallback(channel->ChannelCallbackContext);
+ if (channel->onchannel_callback)
+ channel->onchannel_callback(channel->channel_callback_context);
}
/*
@@ -1051,7 +1051,7 @@ void vmbus_ontimer(unsigned long data)
*/
static void dump_vmbus_channel(struct vmbus_channel *channel)
{
- DPRINT_DBG(VMBUS, "Channel (%d)", channel->OfferMsg.ChildRelId);
- DumpRingInfo(&channel->Outbound, "Outbound ");
- DumpRingInfo(&channel->Inbound, "Inbound ");
+ DPRINT_DBG(VMBUS, "Channel (%d)", channel->offermsg.child_relid);
+ dump_ring_info(&channel->outbound, "Outbound ");
+ dump_ring_info(&channel->inbound, "Inbound ");
}
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/staging/hv/channel_mgmt.c
index 45dbe305afed..d44d5c39f68b 100644
--- a/drivers/staging/hv/channel_mgmt.c
+++ b/drivers/staging/hv/channel_mgmt.c
@@ -251,8 +251,8 @@ static struct vmbus_channel *alloc_channel(void)
channel->poll_timer.data = (unsigned long)channel;
channel->poll_timer.function = vmbus_ontimer;
- channel->ControlWQ = create_workqueue("hv_vmbus_ctl");
- if (!channel->ControlWQ) {
+ channel->controlwq = create_workqueue("hv_vmbus_ctl");
+ if (!channel->controlwq) {
kfree(channel);
return NULL;
}
@@ -263,12 +263,14 @@ static struct vmbus_channel *alloc_channel(void)
/*
 * release_channel - Release the vmbus channel object itself
*/
-static inline void release_channel(void *context)
+static void release_channel(struct work_struct *work)
{
- struct vmbus_channel *channel = context;
+ struct vmbus_channel *channel = container_of(work,
+ struct vmbus_channel,
+ work);
DPRINT_DBG(VMBUS, "releasing channel (%p)", channel);
- destroy_workqueue(channel->ControlWQ);
+ destroy_workqueue(channel->controlwq);
DPRINT_DBG(VMBUS, "channel released (%p)", channel);
kfree(channel);
@@ -286,8 +288,8 @@ void free_channel(struct vmbus_channel *channel)
* workqueue/thread context
* ie we can't destroy ourselves.
*/
- osd_schedule_callback(gVmbusConnection.WorkQueue, release_channel,
- channel);
+ INIT_WORK(&channel->work, release_channel);
+ queue_work(gVmbusConnection.WorkQueue, &channel->work);
}
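
The hunks above replace the driver-private osd_schedule_callback() with the stock workqueue API: a work_struct is embedded in the object, the handler recovers the object with container_of(), and the work is queued on the chosen workqueue. A minimal sketch under hypothetical names (my_object, my_object_release):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_object {
	int id;
	struct work_struct work;	/* embedded, like vmbus_channel->work */
};

static void my_object_release(struct work_struct *work)
{
	/* Recover the containing object from the embedded work_struct. */
	struct my_object *obj = container_of(work, struct my_object, work);

	kfree(obj);
}

static void my_object_schedule_release(struct my_object *obj,
				       struct workqueue_struct *wq)
{
	/* Defer the free to workqueue context, as free_channel() now does. */
	INIT_WORK(&obj->work, my_object_release);
	queue_work(wq, &obj->work);
}
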
@@ -308,29 +310,46 @@ static void count_hv_channel(void)
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
}
+/*
+ * vmbus_process_rescind_offer -
+ * Rescind the offer by initiating a device removal
+ */
+static void vmbus_process_rescind_offer(struct work_struct *work)
+{
+ struct vmbus_channel *channel = container_of(work,
+ struct vmbus_channel,
+ work);
+
+ vmbus_child_device_unregister(channel->device_obj);
+}
/*
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
*/
-static void vmbus_process_offer(void *context)
+static void vmbus_process_offer(struct work_struct *work)
{
- struct vmbus_channel *newchannel = context;
+ struct vmbus_channel *newchannel = container_of(work,
+ struct vmbus_channel,
+ work);
struct vmbus_channel *channel;
bool fnew = true;
int ret;
int cnt;
unsigned long flags;
+ /* The next possible work is rescind handling */
+ INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
+
/* Make sure this is a new offer */
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
- list_for_each_entry(channel, &gVmbusConnection.ChannelList, ListEntry) {
- if (!memcmp(&channel->OfferMsg.Offer.InterfaceType,
- &newchannel->OfferMsg.Offer.InterfaceType,
+ list_for_each_entry(channel, &gVmbusConnection.ChannelList, listentry) {
+ if (!memcmp(&channel->offermsg.offer.InterfaceType,
+ &newchannel->offermsg.offer.InterfaceType,
sizeof(struct hv_guid)) &&
- !memcmp(&channel->OfferMsg.Offer.InterfaceInstance,
- &newchannel->OfferMsg.Offer.InterfaceInstance,
+ !memcmp(&channel->offermsg.offer.InterfaceInstance,
+ &newchannel->offermsg.offer.InterfaceInstance,
sizeof(struct hv_guid))) {
fnew = false;
break;
@@ -338,14 +357,14 @@ static void vmbus_process_offer(void *context)
}
if (fnew)
- list_add_tail(&newchannel->ListEntry,
+ list_add_tail(&newchannel->listentry,
&gVmbusConnection.ChannelList);
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
if (!fnew) {
DPRINT_DBG(VMBUS, "Ignoring duplicate offer for relid (%d)",
- newchannel->OfferMsg.ChildRelId);
+ newchannel->offermsg.child_relid);
free_channel(newchannel);
return;
}
@@ -355,27 +374,27 @@ static void vmbus_process_offer(void *context)
* We need to set the DeviceObject field before calling
* VmbusChildDeviceAdd()
*/
- newchannel->DeviceObject = VmbusChildDeviceCreate(
- &newchannel->OfferMsg.Offer.InterfaceType,
- &newchannel->OfferMsg.Offer.InterfaceInstance,
+ newchannel->device_obj = vmbus_child_device_create(
+ &newchannel->offermsg.offer.InterfaceType,
+ &newchannel->offermsg.offer.InterfaceInstance,
newchannel);
DPRINT_DBG(VMBUS, "child device object allocated - %p",
- newchannel->DeviceObject);
+ newchannel->device_obj);
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- ret = VmbusChildDeviceAdd(newchannel->DeviceObject);
+ ret = VmbusChildDeviceAdd(newchannel->device_obj);
if (ret != 0) {
DPRINT_ERR(VMBUS,
"unable to add child device object (relid %d)",
- newchannel->OfferMsg.ChildRelId);
+ newchannel->offermsg.child_relid);
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
- list_del(&newchannel->ListEntry);
+ list_del(&newchannel->listentry);
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
free_channel(newchannel);
@@ -385,11 +404,11 @@ static void vmbus_process_offer(void *context)
* so that when we do close the channel normally, we
* can cleanup properly
*/
- newchannel->State = CHANNEL_OPEN_STATE;
+ newchannel->state = CHANNEL_OPEN_STATE;
/* Open IC channels */
for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) {
- if (memcmp(&newchannel->OfferMsg.Offer.InterfaceType,
+ if (memcmp(&newchannel->offermsg.offer.InterfaceType,
&hv_cb_utils[cnt].data,
sizeof(struct hv_guid)) == 0 &&
vmbus_open(newchannel, 2 * PAGE_SIZE,
@@ -406,17 +425,6 @@ static void vmbus_process_offer(void *context)
}
/*
- * vmbus_process_rescind_offer -
- * Rescind the offer by initiating a device removal
- */
-static void vmbus_process_rescind_offer(void *context)
-{
- struct vmbus_channel *channel = context;
-
- VmbusChildDeviceRemove(channel->DeviceObject);
-}
-
-/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
* We ignore all offers except network and storage offers. For each network and
@@ -434,7 +442,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
for (i = 0; i < MAX_NUM_DEVICE_CLASSES_SUPPORTED; i++) {
- if (memcmp(&offer->Offer.InterfaceType,
+ if (memcmp(&offer->offer.InterfaceType,
&gSupportedDeviceClasses[i], sizeof(struct hv_guid)) == 0) {
fsupported = 1;
break;
@@ -443,12 +451,12 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
if (!fsupported) {
DPRINT_DBG(VMBUS, "Ignoring channel offer notification for "
- "child relid %d", offer->ChildRelId);
+ "child relid %d", offer->child_relid);
return;
}
- guidtype = &offer->Offer.InterfaceType;
- guidinstance = &offer->Offer.InterfaceInstance;
+ guidtype = &offer->offer.InterfaceType;
+ guidinstance = &offer->offer.InterfaceInstance;
DPRINT_INFO(VMBUS, "Channel offer notification - "
"child relid %d monitor id %d allocated %d, "
@@ -456,8 +464,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
"%02x%02x%02x%02x%02x%02x%02x%02x} "
"instance {%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x%02x%02x%02x%02x%02x%02x}",
- offer->ChildRelId, offer->MonitorId,
- offer->MonitorAllocated,
+ offer->child_relid, offer->monitorid,
+ offer->monitor_allocated,
guidtype->data[3], guidtype->data[2],
guidtype->data[1], guidtype->data[0],
guidtype->data[5], guidtype->data[4],
@@ -484,14 +492,14 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
DPRINT_DBG(VMBUS, "channel object allocated - %p", newchannel);
- memcpy(&newchannel->OfferMsg, offer,
+ memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
- newchannel->MonitorGroup = (u8)offer->MonitorId / 32;
- newchannel->MonitorBit = (u8)offer->MonitorId % 32;
+ newchannel->monitor_grp = (u8)offer->monitorid / 32;
+ newchannel->monitor_bit = (u8)offer->monitorid % 32;
/* TODO: Make sure the offer comes from our parent partition */
- osd_schedule_callback(newchannel->ControlWQ, vmbus_process_offer,
- newchannel);
+ INIT_WORK(&newchannel->work, vmbus_process_offer);
+ queue_work(newchannel->controlwq, &newchannel->work);
}
/*
@@ -505,16 +513,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
struct vmbus_channel *channel;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
- channel = GetChannelFromRelId(rescind->ChildRelId);
+ channel = GetChannelFromRelId(rescind->child_relid);
if (channel == NULL) {
DPRINT_DBG(VMBUS, "channel not found for relId %d",
- rescind->ChildRelId);
+ rescind->child_relid);
return;
}
- osd_schedule_callback(channel->ControlWQ,
- vmbus_process_rescind_offer,
- channel);
+ /* work is initialized for vmbus_process_rescind_offer() from
+ * vmbus_process_offer() where the channel got created */
+ queue_work(channel->controlwq, &channel->work);
}
/*
@@ -545,7 +553,7 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
unsigned long flags;
result = (struct vmbus_channel_open_result *)hdr;
- DPRINT_DBG(VMBUS, "vmbus open result - %d", result->Status);
+ DPRINT_DBG(VMBUS, "vmbus open result - %d", result->status);
/*
* Find the open msg, copy the result and signal/unblock the wait event
@@ -556,17 +564,17 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
/* FIXME: this should probably use list_entry() instead */
msginfo = (struct vmbus_channel_msginfo *)curr;
requestheader =
- (struct vmbus_channel_message_header *)msginfo->Msg;
+ (struct vmbus_channel_message_header *)msginfo->msg;
- if (requestheader->MessageType == ChannelMessageOpenChannel) {
+ if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
openmsg =
- (struct vmbus_channel_open_channel *)msginfo->Msg;
- if (openmsg->ChildRelId == result->ChildRelId &&
- openmsg->OpenId == result->OpenId) {
- memcpy(&msginfo->Response.OpenResult,
+ (struct vmbus_channel_open_channel *)msginfo->msg;
+ if (openmsg->child_relid == result->child_relid &&
+ openmsg->openid == result->openid) {
+ memcpy(&msginfo->response.open_result,
result,
sizeof(struct vmbus_channel_open_result));
- osd_WaitEventSet(msginfo->WaitEvent);
+ osd_waitevent_set(msginfo->waitevent);
break;
}
}
@@ -592,7 +600,7 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
DPRINT_DBG(VMBUS, "vmbus gpadl created result - %d",
- gpadlcreated->CreationStatus);
+ gpadlcreated->creation_status);
/*
* Find the establish msg, copy the result and signal/unblock the wait
@@ -604,19 +612,19 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
/* FIXME: this should probably use list_entry() instead */
msginfo = (struct vmbus_channel_msginfo *)curr;
requestheader =
- (struct vmbus_channel_message_header *)msginfo->Msg;
+ (struct vmbus_channel_message_header *)msginfo->msg;
- if (requestheader->MessageType == ChannelMessageGpadlHeader) {
+ if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
gpadlheader =
(struct vmbus_channel_gpadl_header *)requestheader;
- if ((gpadlcreated->ChildRelId ==
- gpadlheader->ChildRelId) &&
- (gpadlcreated->Gpadl == gpadlheader->Gpadl)) {
- memcpy(&msginfo->Response.GpadlCreated,
+ if ((gpadlcreated->child_relid ==
+ gpadlheader->child_relid) &&
+ (gpadlcreated->gpadl == gpadlheader->gpadl)) {
+ memcpy(&msginfo->response.gpadl_created,
gpadlcreated,
sizeof(struct vmbus_channel_gpadl_created));
- osd_WaitEventSet(msginfo->WaitEvent);
+ osd_waitevent_set(msginfo->waitevent);
break;
}
}
@@ -652,17 +660,17 @@ static void vmbus_ongpadl_torndown(
/* FIXME: this should probably use list_entry() instead */
msginfo = (struct vmbus_channel_msginfo *)curr;
requestheader =
- (struct vmbus_channel_message_header *)msginfo->Msg;
+ (struct vmbus_channel_message_header *)msginfo->msg;
- if (requestheader->MessageType == ChannelMessageGpadlTeardown) {
+ if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
gpadl_teardown =
(struct vmbus_channel_gpadl_teardown *)requestheader;
- if (gpadl_torndown->Gpadl == gpadl_teardown->Gpadl) {
- memcpy(&msginfo->Response.GpadlTorndown,
+ if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
+ memcpy(&msginfo->response.gpadl_torndown,
gpadl_torndown,
sizeof(struct vmbus_channel_gpadl_torndown));
- osd_WaitEventSet(msginfo->WaitEvent);
+ osd_waitevent_set(msginfo->waitevent);
break;
}
}
@@ -694,16 +702,16 @@ static void vmbus_onversion_response(
/* FIXME: this should probably use list_entry() instead */
msginfo = (struct vmbus_channel_msginfo *)curr;
requestheader =
- (struct vmbus_channel_message_header *)msginfo->Msg;
+ (struct vmbus_channel_message_header *)msginfo->msg;
- if (requestheader->MessageType ==
- ChannelMessageInitiateContact) {
+ if (requestheader->msgtype ==
+ CHANNELMSG_INITIATE_CONTACT) {
initiate =
(struct vmbus_channel_initiate_contact *)requestheader;
- memcpy(&msginfo->Response.VersionResponse,
+ memcpy(&msginfo->response.version_response,
version_response,
sizeof(struct vmbus_channel_version_response));
- osd_WaitEventSet(msginfo->WaitEvent);
+ osd_waitevent_set(msginfo->waitevent);
}
}
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
@@ -711,24 +719,24 @@ static void vmbus_onversion_response(
/* Channel message dispatch table */
static struct vmbus_channel_message_table_entry
- gChannelMessageTable[ChannelMessageCount] = {
- {ChannelMessageInvalid, NULL},
- {ChannelMessageOfferChannel, vmbus_onoffer},
- {ChannelMessageRescindChannelOffer, vmbus_onoffer_rescind},
- {ChannelMessageRequestOffers, NULL},
- {ChannelMessageAllOffersDelivered, vmbus_onoffers_delivered},
- {ChannelMessageOpenChannel, NULL},
- {ChannelMessageOpenChannelResult, vmbus_onopen_result},
- {ChannelMessageCloseChannel, NULL},
- {ChannelMessageGpadlHeader, NULL},
- {ChannelMessageGpadlBody, NULL},
- {ChannelMessageGpadlCreated, vmbus_ongpadl_created},
- {ChannelMessageGpadlTeardown, NULL},
- {ChannelMessageGpadlTorndown, vmbus_ongpadl_torndown},
- {ChannelMessageRelIdReleased, NULL},
- {ChannelMessageInitiateContact, NULL},
- {ChannelMessageVersionResponse, vmbus_onversion_response},
- {ChannelMessageUnload, NULL},
+ gChannelMessageTable[CHANNELMSG_COUNT] = {
+ {CHANNELMSG_INVALID, NULL},
+ {CHANNELMSG_OFFERCHANNEL, vmbus_onoffer},
+ {CHANNELMSG_RESCIND_CHANNELOFFER, vmbus_onoffer_rescind},
+ {CHANNELMSG_REQUESTOFFERS, NULL},
+ {CHANNELMSG_ALLOFFERS_DELIVERED, vmbus_onoffers_delivered},
+ {CHANNELMSG_OPENCHANNEL, NULL},
+ {CHANNELMSG_OPENCHANNEL_RESULT, vmbus_onopen_result},
+ {CHANNELMSG_CLOSECHANNEL, NULL},
+ {CHANNELMSG_GPADL_HEADER, NULL},
+ {CHANNELMSG_GPADL_BODY, NULL},
+ {CHANNELMSG_GPADL_CREATED, vmbus_ongpadl_created},
+ {CHANNELMSG_GPADL_TEARDOWN, NULL},
+ {CHANNELMSG_GPADL_TORNDOWN, vmbus_ongpadl_torndown},
+ {CHANNELMSG_RELID_RELEASED, NULL},
+ {CHANNELMSG_INITIATE_CONTACT, NULL},
+ {CHANNELMSG_VERSION_RESPONSE, vmbus_onversion_response},
+ {CHANNELMSG_UNLOAD, NULL},
};
/*
@@ -742,29 +750,25 @@ void vmbus_onmessage(void *context)
struct vmbus_channel_message_header *hdr;
int size;
- hdr = (struct vmbus_channel_message_header *)msg->u.Payload;
- size = msg->Header.PayloadSize;
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ size = msg->header.payload_size;
- DPRINT_DBG(VMBUS, "message type %d size %d", hdr->MessageType, size);
+ DPRINT_DBG(VMBUS, "message type %d size %d", hdr->msgtype, size);
- if (hdr->MessageType >= ChannelMessageCount) {
+ if (hdr->msgtype >= CHANNELMSG_COUNT) {
DPRINT_ERR(VMBUS,
"Received invalid channel message type %d size %d",
- hdr->MessageType, size);
+ hdr->msgtype, size);
print_hex_dump_bytes("", DUMP_PREFIX_NONE,
- (unsigned char *)msg->u.Payload, size);
- kfree(msg);
+ (unsigned char *)msg->u.payload, size);
return;
}
- if (gChannelMessageTable[hdr->MessageType].messageHandler)
- gChannelMessageTable[hdr->MessageType].messageHandler(hdr);
+ if (gChannelMessageTable[hdr->msgtype].messageHandler)
+ gChannelMessageTable[hdr->msgtype].messageHandler(hdr);
else
DPRINT_ERR(VMBUS, "Unhandled channel message type %d",
- hdr->MessageType);
-
- /* Free the msg that was allocated in VmbusOnMsgDPC() */
- kfree(msg);
+ hdr->msgtype);
}
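
vmbus_onmessage() above is a bounds-checked table dispatch: any msgtype at or beyond CHANNELMSG_COUNT is rejected, and otherwise the handler slot is called if it is non-NULL. A small sketch of the same shape with placeholder names (demo_msgtype, demo_table):

enum demo_msgtype {		/* stand-in for enum vmbus_channel_message_type */
	DEMO_MSG_INVALID = 0,
	DEMO_MSG_OFFER,
	DEMO_MSG_COUNT
};

struct demo_msg_entry {
	enum demo_msgtype msgtype;
	void (*handler)(void *hdr);
};

static void demo_on_offer(void *hdr)
{
	/* ... handle the offer ... */
}

static const struct demo_msg_entry demo_table[DEMO_MSG_COUNT] = {
	{ DEMO_MSG_INVALID, NULL },
	{ DEMO_MSG_OFFER,   demo_on_offer },
};

static void demo_dispatch(enum demo_msgtype type, void *hdr)
{
	/* Reject out-of-range types before indexing the table. */
	if (type >= DEMO_MSG_COUNT)
		return;

	if (demo_table[type].handler)
		demo_table[type].handler(hdr);
}
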
/*
@@ -782,15 +786,15 @@ int vmbus_request_offers(void)
if (!msginfo)
return -ENOMEM;
- msginfo->WaitEvent = osd_WaitEventCreate();
- if (!msginfo->WaitEvent) {
+ msginfo->waitevent = osd_waitevent_create();
+ if (!msginfo->waitevent) {
kfree(msginfo);
return -ENOMEM;
}
- msg = (struct vmbus_channel_message_header *)msginfo->Msg;
+ msg = (struct vmbus_channel_message_header *)msginfo->msg;
- msg->MessageType = ChannelMessageRequestOffers;
+ msg->msgtype = CHANNELMSG_REQUESTOFFERS;
/*SpinlockAcquire(gVmbusConnection.channelMsgLock);
INSERT_TAIL_LIST(&gVmbusConnection.channelMsgList,
@@ -808,7 +812,7 @@ int vmbus_request_offers(void)
goto Cleanup;
}
- /* osd_WaitEventWait(msgInfo->waitEvent); */
+ /* osd_waitevent_wait(msgInfo->waitEvent); */
/*SpinlockAcquire(gVmbusConnection.channelMsgLock);
REMOVE_ENTRY_LIST(&msgInfo->msgListEntry);
@@ -817,7 +821,7 @@ int vmbus_request_offers(void)
Cleanup:
if (msginfo) {
- kfree(msginfo->WaitEvent);
+ kfree(msginfo->waitevent);
kfree(msginfo);
}
@@ -837,17 +841,17 @@ void vmbus_release_unattached_channels(void)
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
list_for_each_entry_safe(channel, pos, &gVmbusConnection.ChannelList,
- ListEntry) {
+ listentry) {
if (channel == start)
break;
- if (!channel->DeviceObject->Driver) {
- list_del(&channel->ListEntry);
+ if (!channel->device_obj->Driver) {
+ list_del(&channel->listentry);
DPRINT_INFO(VMBUS,
"Releasing unattached device object %p",
- channel->DeviceObject);
+ channel->device_obj);
- VmbusChildDeviceRemove(channel->DeviceObject);
+ vmbus_child_device_unregister(channel->device_obj);
free_channel(channel);
} else {
if (!start)
diff --git a/drivers/staging/hv/channel_mgmt.h b/drivers/staging/hv/channel_mgmt.h
index d16cc0811169..de6b2a0ebf70 100644
--- a/drivers/staging/hv/channel_mgmt.h
+++ b/drivers/staging/hv/channel_mgmt.h
@@ -33,60 +33,60 @@
/* Version 1 messages */
enum vmbus_channel_message_type {
- ChannelMessageInvalid = 0,
- ChannelMessageOfferChannel = 1,
- ChannelMessageRescindChannelOffer = 2,
- ChannelMessageRequestOffers = 3,
- ChannelMessageAllOffersDelivered = 4,
- ChannelMessageOpenChannel = 5,
- ChannelMessageOpenChannelResult = 6,
- ChannelMessageCloseChannel = 7,
- ChannelMessageGpadlHeader = 8,
- ChannelMessageGpadlBody = 9,
- ChannelMessageGpadlCreated = 10,
- ChannelMessageGpadlTeardown = 11,
- ChannelMessageGpadlTorndown = 12,
- ChannelMessageRelIdReleased = 13,
- ChannelMessageInitiateContact = 14,
- ChannelMessageVersionResponse = 15,
- ChannelMessageUnload = 16,
+ CHANNELMSG_INVALID = 0,
+ CHANNELMSG_OFFERCHANNEL = 1,
+ CHANNELMSG_RESCIND_CHANNELOFFER = 2,
+ CHANNELMSG_REQUESTOFFERS = 3,
+ CHANNELMSG_ALLOFFERS_DELIVERED = 4,
+ CHANNELMSG_OPENCHANNEL = 5,
+ CHANNELMSG_OPENCHANNEL_RESULT = 6,
+ CHANNELMSG_CLOSECHANNEL = 7,
+ CHANNELMSG_GPADL_HEADER = 8,
+ CHANNELMSG_GPADL_BODY = 9,
+ CHANNELMSG_GPADL_CREATED = 10,
+ CHANNELMSG_GPADL_TEARDOWN = 11,
+ CHANNELMSG_GPADL_TORNDOWN = 12,
+ CHANNELMSG_RELID_RELEASED = 13,
+ CHANNELMSG_INITIATE_CONTACT = 14,
+ CHANNELMSG_VERSION_RESPONSE = 15,
+ CHANNELMSG_UNLOAD = 16,
#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
- ChannelMessageViewRangeAdd = 17,
- ChannelMessageViewRangeRemove = 18,
+ CHANNELMSG_VIEWRANGE_ADD = 17,
+ CHANNELMSG_VIEWRANGE_REMOVE = 18,
#endif
- ChannelMessageCount
+ CHANNELMSG_COUNT
};
struct vmbus_channel_message_header {
- enum vmbus_channel_message_type MessageType;
- u32 Padding;
+ enum vmbus_channel_message_type msgtype;
+ u32 padding;
} __attribute__((packed));
/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
- struct vmbus_channel_message_header Header;
- u32 Version;
+ struct vmbus_channel_message_header header;
+ u32 version;
} __attribute__((packed));
/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
- struct vmbus_channel_message_header Header;
- bool VersionSupported;
+ struct vmbus_channel_message_header header;
+ bool version_supported;
} __attribute__((packed));
/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
- struct vmbus_channel_message_header Header;
- struct vmbus_channel_offer Offer;
- u32 ChildRelId;
- u8 MonitorId;
- bool MonitorAllocated;
+ struct vmbus_channel_message_header header;
+ struct vmbus_channel_offer offer;
+ u32 child_relid;
+ u8 monitorid;
+ bool monitor_allocated;
} __attribute__((packed));
/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
} __attribute__((packed));
/*
@@ -100,43 +100,43 @@ struct vmbus_channel_rescind_offer {
/* Open Channel parameters */
struct vmbus_channel_open_channel {
- struct vmbus_channel_message_header Header;
+ struct vmbus_channel_message_header header;
/* Identifies the specific VMBus channel that is being opened. */
- u32 ChildRelId;
+ u32 child_relid;
/* ID making a particular open request at a channel offer unique. */
- u32 OpenId;
+ u32 openid;
/* GPADL for the channel's ring buffer. */
- u32 RingBufferGpadlHandle;
+ u32 ringbuffer_gpadlhandle;
/* GPADL for the channel's server context save area. */
- u32 ServerContextAreaGpadlHandle;
+ u32 server_contextarea_gpadlhandle;
/*
* The upstream ring buffer begins at offset zero in the memory
* described by RingBufferGpadlHandle. The downstream ring buffer
* follows it at this offset (in pages).
*/
- u32 DownstreamRingBufferPageOffset;
+ u32 downstream_ringbuffer_pageoffset;
/* User-specific data to be passed along to the server endpoint. */
- unsigned char UserData[MAX_USER_DEFINED_BYTES];
+ unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __attribute__((packed));
/* Open Channel Result parameters */
struct vmbus_channel_open_result {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
- u32 OpenId;
- u32 Status;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 openid;
+ u32 status;
} __attribute__((packed));
/* Close channel parameters; */
struct vmbus_channel_close_channel {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
} __attribute__((packed));
/* Channel Message GPADL */
@@ -151,72 +151,72 @@ struct vmbus_channel_close_channel {
* follow-up packet that contains more.
*/
struct vmbus_channel_gpadl_header {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
- u32 Gpadl;
- u16 RangeBufLen;
- u16 RangeCount;
- struct gpa_range Range[0];
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u16 range_buflen;
+ u16 rangecount;
+ struct gpa_range range[0];
} __attribute__((packed));
/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
- struct vmbus_channel_message_header Header;
- u32 MessageNumber;
- u32 Gpadl;
- u64 Pfn[0];
+ struct vmbus_channel_message_header header;
+ u32 msgnumber;
+ u32 gpadl;
+ u64 pfn[0];
} __attribute__((packed));
struct vmbus_channel_gpadl_created {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
- u32 Gpadl;
- u32 CreationStatus;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u32 creation_status;
} __attribute__((packed));
struct vmbus_channel_gpadl_teardown {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
- u32 Gpadl;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
} __attribute__((packed));
struct vmbus_channel_gpadl_torndown {
- struct vmbus_channel_message_header Header;
- u32 Gpadl;
+ struct vmbus_channel_message_header header;
+ u32 gpadl;
} __attribute__((packed));
#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
struct vmbus_channel_view_range_add {
- struct vmbus_channel_message_header Header;
- PHYSICAL_ADDRESS ViewRangeBase;
- u64 ViewRangeLength;
- u32 ChildRelId;
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u64 viewrange_length;
+ u32 child_relid;
} __attribute__((packed));
struct vmbus_channel_view_range_remove {
- struct vmbus_channel_message_header Header;
- PHYSICAL_ADDRESS ViewRangeBase;
- u32 ChildRelId;
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u32 child_relid;
} __attribute__((packed));
#endif
struct vmbus_channel_relid_released {
- struct vmbus_channel_message_header Header;
- u32 ChildRelId;
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
} __attribute__((packed));
struct vmbus_channel_initiate_contact {
- struct vmbus_channel_message_header Header;
- u32 VMBusVersionRequested;
- u32 Padding2;
- u64 InterruptPage;
- u64 MonitorPage1;
- u64 MonitorPage2;
+ struct vmbus_channel_message_header header;
+ u32 vmbus_version_requested;
+ u32 padding2;
+ u64 interrupt_page;
+ u64 monitor_page1;
+ u64 monitor_page2;
} __attribute__((packed));
struct vmbus_channel_version_response {
- struct vmbus_channel_message_header Header;
- bool VersionSupported;
+ struct vmbus_channel_message_header header;
+ bool version_supported;
} __attribute__((packed));
enum vmbus_channel_state {
@@ -226,54 +226,55 @@ enum vmbus_channel_state {
};
struct vmbus_channel {
- struct list_head ListEntry;
+ struct list_head listentry;
- struct hv_device *DeviceObject;
+ struct hv_device *device_obj;
struct timer_list poll_timer; /* SA-111 workaround */
+ struct work_struct work;
- enum vmbus_channel_state State;
+ enum vmbus_channel_state state;
- struct vmbus_channel_offer_channel OfferMsg;
+ struct vmbus_channel_offer_channel offermsg;
/*
* These are based on the OfferMsg.MonitorId.
* Save it here for easy access.
*/
- u8 MonitorGroup;
- u8 MonitorBit;
+ u8 monitor_grp;
+ u8 monitor_bit;
- u32 RingBufferGpadlHandle;
+ u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
- void *RingBufferPages;
- u32 RingBufferPageCount;
- struct hv_ring_buffer_info Outbound; /* send to parent */
- struct hv_ring_buffer_info Inbound; /* receive from parent */
+ void *ringbuffer_pages;
+ u32 ringbuffer_pagecount;
+ struct hv_ring_buffer_info outbound; /* send to parent */
+ struct hv_ring_buffer_info inbound; /* receive from parent */
spinlock_t inbound_lock;
- struct workqueue_struct *ControlWQ;
+ struct workqueue_struct *controlwq;
/* Channel callback are invoked in this workqueue context */
/* HANDLE dataWorkQueue; */
- void (*OnChannelCallback)(void *context);
- void *ChannelCallbackContext;
+ void (*onchannel_callback)(void *context);
+ void *channel_callback_context;
};
struct vmbus_channel_debug_info {
- u32 RelId;
- enum vmbus_channel_state State;
- struct hv_guid InterfaceType;
- struct hv_guid InterfaceInstance;
- u32 MonitorId;
- u32 ServerMonitorPending;
- u32 ServerMonitorLatency;
- u32 ServerMonitorConnectionId;
- u32 ClientMonitorPending;
- u32 ClientMonitorLatency;
- u32 ClientMonitorConnectionId;
-
- struct hv_ring_buffer_debug_info Inbound;
- struct hv_ring_buffer_debug_info Outbound;
+ u32 relid;
+ enum vmbus_channel_state state;
+ struct hv_guid interfacetype;
+ struct hv_guid interface_instance;
+ u32 monitorid;
+ u32 servermonitor_pending;
+ u32 servermonitor_latency;
+ u32 servermonitor_connectionid;
+ u32 clientmonitor_pending;
+ u32 clientmonitor_latency;
+ u32 clientmonitor_connectionid;
+
+ struct hv_ring_buffer_debug_info inbound;
+ struct hv_ring_buffer_debug_info outbound;
};
/*
@@ -282,28 +283,28 @@ struct vmbus_channel_debug_info {
*/
struct vmbus_channel_msginfo {
/* Bookkeeping stuff */
- struct list_head MsgListEntry;
+ struct list_head msglistentry;
/* So far, this is only used to handle gpadl body message */
- struct list_head SubMsgList;
+ struct list_head submsglist;
/* Synchronize the request/response if needed */
- struct osd_waitevent *WaitEvent;
+ struct osd_waitevent *waitevent;
union {
- struct vmbus_channel_version_supported VersionSupported;
- struct vmbus_channel_open_result OpenResult;
- struct vmbus_channel_gpadl_torndown GpadlTorndown;
- struct vmbus_channel_gpadl_created GpadlCreated;
- struct vmbus_channel_version_response VersionResponse;
- } Response;
-
- u32 MessageSize;
+ struct vmbus_channel_version_supported version_supported;
+ struct vmbus_channel_open_result open_result;
+ struct vmbus_channel_gpadl_torndown gpadl_torndown;
+ struct vmbus_channel_gpadl_created gpadl_created;
+ struct vmbus_channel_version_response version_response;
+ } response;
+
+ u32 msgsize;
/*
* The channel message that goes out on the "wire".
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
*/
- unsigned char Msg[0];
+ unsigned char msg[0];
};
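
The struct above keeps the on-the-wire message in a trailing msg[0] array, so the bookkeeping fields and the variable-length payload come out of a single allocation sized at runtime. A sketch of that allocation pattern with a hypothetical reduced struct (demo_msginfo):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_msginfo {
	struct list_head msglistentry;
	u32 msgsize;
	unsigned char msg[0];		/* payload follows the header fields */
};

static struct demo_msginfo *demo_msginfo_alloc(size_t payload_size)
{
	struct demo_msginfo *info;

	/* One allocation covers bookkeeping plus the on-the-wire message. */
	info = kzalloc(sizeof(*info) + payload_size, GFP_KERNEL);
	if (!info)
		return NULL;

	info->msgsize = sizeof(*info) + payload_size;
	return info;
}
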
diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c
index f8477072ace4..c2e298ff4834 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/staging/hv/connection.c
@@ -66,7 +66,7 @@ int VmbusConnect(void)
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
- gVmbusConnection.InterruptPage = osd_PageAlloc(1);
+ gVmbusConnection.InterruptPage = osd_page_alloc(1);
if (gVmbusConnection.InterruptPage == NULL) {
ret = -1;
goto Cleanup;
@@ -81,7 +81,7 @@ int VmbusConnect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- gVmbusConnection.MonitorPages = osd_PageAlloc(2);
+ gVmbusConnection.MonitorPages = osd_page_alloc(2);
if (gVmbusConnection.MonitorPages == NULL) {
ret = -1;
goto Cleanup;
@@ -95,19 +95,19 @@ int VmbusConnect(void)
goto Cleanup;
}
- msgInfo->WaitEvent = osd_WaitEventCreate();
- if (!msgInfo->WaitEvent) {
+ msgInfo->waitevent = osd_waitevent_create();
+ if (!msgInfo->waitevent) {
ret = -ENOMEM;
goto Cleanup;
}
- msg = (struct vmbus_channel_initiate_contact *)msgInfo->Msg;
+ msg = (struct vmbus_channel_initiate_contact *)msgInfo->msg;
- msg->Header.MessageType = ChannelMessageInitiateContact;
- msg->VMBusVersionRequested = VMBUS_REVISION_NUMBER;
- msg->InterruptPage = virt_to_phys(gVmbusConnection.InterruptPage);
- msg->MonitorPage1 = virt_to_phys(gVmbusConnection.MonitorPages);
- msg->MonitorPage2 = virt_to_phys(
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
+ msg->interrupt_page = virt_to_phys(gVmbusConnection.InterruptPage);
+ msg->monitor_page1 = virt_to_phys(gVmbusConnection.MonitorPages);
+ msg->monitor_page2 = virt_to_phys(
(void *)((unsigned long)gVmbusConnection.MonitorPages +
PAGE_SIZE));
@@ -116,30 +116,30 @@ int VmbusConnect(void)
* receive the response before returning from this routine
*/
spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
- list_add_tail(&msgInfo->MsgListEntry,
+ list_add_tail(&msgInfo->msglistentry,
&gVmbusConnection.ChannelMsgList);
spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
DPRINT_DBG(VMBUS, "Vmbus connection - interrupt pfn %llx, "
"monitor1 pfn %llx,, monitor2 pfn %llx",
- msg->InterruptPage, msg->MonitorPage1, msg->MonitorPage2);
+ msg->interrupt_page, msg->monitor_page1, msg->monitor_page2);
DPRINT_DBG(VMBUS, "Sending channel initiate msg...");
ret = VmbusPostMessage(msg,
sizeof(struct vmbus_channel_initiate_contact));
if (ret != 0) {
- list_del(&msgInfo->MsgListEntry);
+ list_del(&msgInfo->msglistentry);
goto Cleanup;
}
/* Wait for the connection response */
- osd_WaitEventWait(msgInfo->WaitEvent);
+ osd_waitevent_wait(msgInfo->waitevent);
- list_del(&msgInfo->MsgListEntry);
+ list_del(&msgInfo->msglistentry);
/* Check if successful */
- if (msgInfo->Response.VersionResponse.VersionSupported) {
+ if (msgInfo->response.version_response.version_supported) {
DPRINT_INFO(VMBUS, "Vmbus connected!!");
gVmbusConnection.ConnectState = Connected;
@@ -151,7 +151,7 @@ int VmbusConnect(void)
goto Cleanup;
}
- kfree(msgInfo->WaitEvent);
+ kfree(msgInfo->waitevent);
kfree(msgInfo);
return 0;
@@ -162,17 +162,17 @@ Cleanup:
destroy_workqueue(gVmbusConnection.WorkQueue);
if (gVmbusConnection.InterruptPage) {
- osd_PageFree(gVmbusConnection.InterruptPage, 1);
+ osd_page_free(gVmbusConnection.InterruptPage, 1);
gVmbusConnection.InterruptPage = NULL;
}
if (gVmbusConnection.MonitorPages) {
- osd_PageFree(gVmbusConnection.MonitorPages, 2);
+ osd_page_free(gVmbusConnection.MonitorPages, 2);
gVmbusConnection.MonitorPages = NULL;
}
if (msgInfo) {
- kfree(msgInfo->WaitEvent);
+ kfree(msgInfo->waitevent);
kfree(msgInfo);
}
@@ -195,14 +195,14 @@ int VmbusDisconnect(void)
if (!msg)
return -ENOMEM;
- msg->MessageType = ChannelMessageUnload;
+ msg->msgtype = CHANNELMSG_UNLOAD;
ret = VmbusPostMessage(msg,
sizeof(struct vmbus_channel_message_header));
if (ret != 0)
goto Cleanup;
- osd_PageFree(gVmbusConnection.InterruptPage, 1);
+ osd_page_free(gVmbusConnection.InterruptPage, 1);
/* TODO: iterate thru the msg list and free up */
destroy_workqueue(gVmbusConnection.WorkQueue);
@@ -226,8 +226,8 @@ struct vmbus_channel *GetChannelFromRelId(u32 relId)
unsigned long flags;
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
- list_for_each_entry(channel, &gVmbusConnection.ChannelList, ListEntry) {
- if (channel->OfferMsg.ChildRelId == relId) {
+ list_for_each_entry(channel, &gVmbusConnection.ChannelList, listentry) {
+ if (channel->offermsg.child_relid == relId) {
foundChannel = channel;
break;
}
@@ -309,9 +309,9 @@ int VmbusPostMessage(void *buffer, size_t bufferLen)
{
union hv_connection_id connId;
- connId.Asu32 = 0;
- connId.u.Id = VMBUS_MESSAGE_CONNECTION_ID;
- return HvPostMessage(connId, 1, buffer, bufferLen);
+ connId.asu32 = 0;
+ connId.u.id = VMBUS_MESSAGE_CONNECTION_ID;
+ return hv_post_message(connId, 1, buffer, bufferLen);
}
/*
@@ -324,5 +324,5 @@ int VmbusSetEvent(u32 childRelId)
(unsigned long *)gVmbusConnection.SendInterruptPage +
(childRelId >> 5));
- return HvSignalEvent();
+ return hv_signal_event();
}
diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
index 86b1ddd90404..a34d713d9c57 100644
--- a/drivers/staging/hv/hv.c
+++ b/drivers/staging/hv/hv.c
@@ -28,17 +28,18 @@
#include "vmbus_private.h"
/* The one and only */
-struct hv_context gHvContext = {
- .SynICInitialized = false,
- .HypercallPage = NULL,
- .SignalEventParam = NULL,
- .SignalEventBuffer = NULL,
+struct hv_context hv_context = {
+ .synic_initialized = false,
+ .hypercall_page = NULL,
+ .signal_event_param = NULL,
+ .signal_event_buffer = NULL,
};
/*
- * HvQueryHypervisorPresence - Query the cpuid for presense of windows hypervisor
+ * query_hypervisor_presence
+ * - Query the cpuid for presence of windows hypervisor
*/
-static int HvQueryHypervisorPresence(void)
+static int query_hypervisor_presence(void)
{
unsigned int eax;
unsigned int ebx;
@@ -50,22 +51,22 @@ static int HvQueryHypervisorPresence(void)
ebx = 0;
ecx = 0;
edx = 0;
- op = HvCpuIdFunctionVersionAndFeatures;
+ op = HVCPUID_VERSION_FEATURES;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ecx & HV_PRESENT_BIT;
}
/*
- * HvQueryHypervisorInfo - Get version info of the windows hypervisor
+ * query_hypervisor_info - Get version info of the windows hypervisor
*/
-static int HvQueryHypervisorInfo(void)
+static int query_hypervisor_info(void)
{
unsigned int eax;
unsigned int ebx;
unsigned int ecx;
unsigned int edx;
- unsigned int maxLeaf;
+ unsigned int max_leaf;
unsigned int op;
/*
@@ -76,7 +77,7 @@ static int HvQueryHypervisorInfo(void)
ebx = 0;
ecx = 0;
edx = 0;
- op = HvCpuIdFunctionHvVendorAndMaxFunction;
+ op = HVCPUID_VENDOR_MAXFUNCTION;
cpuid(op, &eax, &ebx, &ecx, &edx);
DPRINT_INFO(VMBUS, "Vendor ID: %c%c%c%c%c%c%c%c%c%c%c%c",
@@ -93,12 +94,12 @@ static int HvQueryHypervisorInfo(void)
((edx >> 16) & 0xFF),
((edx >> 24) & 0xFF));
- maxLeaf = eax;
+ max_leaf = eax;
eax = 0;
ebx = 0;
ecx = 0;
edx = 0;
- op = HvCpuIdFunctionHvInterface;
+ op = HVCPUID_INTERFACE;
cpuid(op, &eax, &ebx, &ecx, &edx);
DPRINT_INFO(VMBUS, "Interface ID: %c%c%c%c",
@@ -107,12 +108,12 @@ static int HvQueryHypervisorInfo(void)
((eax >> 16) & 0xFF),
((eax >> 24) & 0xFF));
- if (maxLeaf >= HvCpuIdFunctionMsHvVersion) {
+ if (max_leaf >= HVCPUID_VERSION) {
eax = 0;
ebx = 0;
ecx = 0;
edx = 0;
- op = HvCpuIdFunctionMsHvVersion;
+ op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
DPRINT_INFO(VMBUS, "OS Build:%d-%d.%d-%d-%d.%d",\
eax,
@@ -122,80 +123,81 @@ static int HvQueryHypervisorInfo(void)
edx >> 24,
edx & 0xFFFFFF);
}
- return maxLeaf;
+ return max_leaf;
}
/*
- * HvDoHypercall - Invoke the specified hypercall
+ * do_hypercall - Invoke the specified hypercall
*/
-static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
+static u64 do_hypercall(u64 control, void *input, void *output)
{
#ifdef CONFIG_X86_64
- u64 hvStatus = 0;
- u64 inputAddress = (Input) ? virt_to_phys(Input) : 0;
- u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
- volatile void *hypercallPage = gHvContext.HypercallPage;
+ u64 hv_status = 0;
+ u64 input_address = (input) ? virt_to_phys(input) : 0;
+ u64 output_address = (output) ? virt_to_phys(output) : 0;
+ volatile void *hypercall_page = hv_context.hypercall_page;
DPRINT_DBG(VMBUS, "Hypercall <control %llx input phys %llx virt %p "
"output phys %llx virt %p hypercall %p>",
- Control, inputAddress, Input,
- outputAddress, Output, hypercallPage);
+ control, input_address, input,
+ output_address, output, hypercall_page);
- __asm__ __volatile__("mov %0, %%r8" : : "r" (outputAddress) : "r8");
- __asm__ __volatile__("call *%3" : "=a" (hvStatus) :
- "c" (Control), "d" (inputAddress),
- "m" (hypercallPage));
+ __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
+ __asm__ __volatile__("call *%3" : "=a" (hv_status) :
+ "c" (control), "d" (input_address),
+ "m" (hypercall_page));
- DPRINT_DBG(VMBUS, "Hypercall <return %llx>", hvStatus);
+ DPRINT_DBG(VMBUS, "Hypercall <return %llx>", hv_status);
- return hvStatus;
+ return hv_status;
#else
- u32 controlHi = Control >> 32;
- u32 controlLo = Control & 0xFFFFFFFF;
- u32 hvStatusHi = 1;
- u32 hvStatusLo = 1;
- u64 inputAddress = (Input) ? virt_to_phys(Input) : 0;
- u32 inputAddressHi = inputAddress >> 32;
- u32 inputAddressLo = inputAddress & 0xFFFFFFFF;
- u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
- u32 outputAddressHi = outputAddress >> 32;
- u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
- volatile void *hypercallPage = gHvContext.HypercallPage;
+ u32 control_hi = control >> 32;
+ u32 control_lo = control & 0xFFFFFFFF;
+ u32 hv_status_hi = 1;
+ u32 hv_status_lo = 1;
+ u64 input_address = (input) ? virt_to_phys(input) : 0;
+ u32 input_address_hi = input_address >> 32;
+ u32 input_address_lo = input_address & 0xFFFFFFFF;
+ u64 output_address = (output) ? virt_to_phys(output) : 0;
+ u32 output_address_hi = output_address >> 32;
+ u32 output_address_lo = output_address & 0xFFFFFFFF;
+ volatile void *hypercall_page = hv_context.hypercall_page;
DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
- Control, Input, Output);
+ control, input, output);
- __asm__ __volatile__ ("call *%8" : "=d"(hvStatusHi),
- "=a"(hvStatusLo) : "d" (controlHi),
- "a" (controlLo), "b" (inputAddressHi),
- "c" (inputAddressLo), "D"(outputAddressHi),
- "S"(outputAddressLo), "m" (hypercallPage));
+ __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+ "=a"(hv_status_lo) : "d" (control_hi),
+ "a" (control_lo), "b" (input_address_hi),
+ "c" (input_address_lo), "D"(output_address_hi),
+ "S"(output_address_lo), "m" (hypercall_page));
DPRINT_DBG(VMBUS, "Hypercall <return %llx>",
- hvStatusLo | ((u64)hvStatusHi << 32));
+ hv_status_lo | ((u64)hv_status_hi << 32));
- return hvStatusLo | ((u64)hvStatusHi << 32);
+ return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
/*
- * HvInit - Main initialization routine.
+ * hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
*/
-int HvInit(void)
+int hv_init(void)
{
int ret = 0;
- int maxLeaf;
- union hv_x64_msr_hypercall_contents hypercallMsr;
- void *virtAddr = NULL;
+ int max_leaf;
+ union hv_x64_msr_hypercall_contents hypercall_msr;
+ void *virtaddr = NULL;
- memset(gHvContext.synICEventPage, 0, sizeof(void *) * MAX_NUM_CPUS);
- memset(gHvContext.synICMessagePage, 0, sizeof(void *) * MAX_NUM_CPUS);
+ memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
+ memset(hv_context.synic_message_page, 0,
+ sizeof(void *) * MAX_NUM_CPUS);
- if (!HvQueryHypervisorPresence()) {
+ if (!query_hypervisor_presence()) {
DPRINT_ERR(VMBUS, "No Windows hypervisor detected!!");
goto Cleanup;
}
@@ -203,146 +205,148 @@ int HvInit(void)
DPRINT_INFO(VMBUS,
"Windows hypervisor detected! Retrieving more info...");
- maxLeaf = HvQueryHypervisorInfo();
+ max_leaf = query_hypervisor_info();
/* HvQueryHypervisorFeatures(maxLeaf); */
/*
* We only support running on top of Hyper-V
*/
- rdmsrl(HV_X64_MSR_GUEST_OS_ID, gHvContext.GuestId);
+ rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
- if (gHvContext.GuestId != 0) {
+ if (hv_context.guestid != 0) {
DPRINT_ERR(VMBUS, "Unknown guest id (0x%llx)!!",
- gHvContext.GuestId);
+ hv_context.guestid);
goto Cleanup;
}
/* Write our OS info */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
- gHvContext.GuestId = HV_LINUX_GUEST_ID;
+ hv_context.guestid = HV_LINUX_GUEST_ID;
/* See if the hypercall page is already set */
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+ rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/*
* Allocate the hypercall page memory
- * virtAddr = osd_PageAlloc(1);
+ * virtaddr = osd_page_alloc(1);
*/
- virtAddr = osd_VirtualAllocExec(PAGE_SIZE);
+ virtaddr = osd_virtual_alloc_exec(PAGE_SIZE);
- if (!virtAddr) {
+ if (!virtaddr) {
DPRINT_ERR(VMBUS,
"unable to allocate hypercall page!!");
goto Cleanup;
}
- hypercallMsr.Enable = 1;
+ hypercall_msr.enable = 1;
- hypercallMsr.GuestPhysicalAddress = vmalloc_to_pfn(virtAddr);
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+ hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
+ wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/* Confirm that hypercall page did get setup. */
- hypercallMsr.AsUINT64 = 0;
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+ hypercall_msr.as_uint64 = 0;
+ rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- if (!hypercallMsr.Enable) {
+ if (!hypercall_msr.enable) {
DPRINT_ERR(VMBUS, "unable to set hypercall page!!");
goto Cleanup;
}
- gHvContext.HypercallPage = virtAddr;
+ hv_context.hypercall_page = virtaddr;
DPRINT_INFO(VMBUS, "Hypercall page VA=%p, PA=0x%0llx",
- gHvContext.HypercallPage,
- (u64)hypercallMsr.GuestPhysicalAddress << PAGE_SHIFT);
+ hv_context.hypercall_page,
+ (u64)hypercall_msr.guest_physical_address << PAGE_SHIFT);
/* Setup the global signal event param for the signal event hypercall */
- gHvContext.SignalEventBuffer =
+ hv_context.signal_event_buffer =
kmalloc(sizeof(struct hv_input_signal_event_buffer),
GFP_KERNEL);
- if (!gHvContext.SignalEventBuffer)
+ if (!hv_context.signal_event_buffer)
goto Cleanup;
- gHvContext.SignalEventParam =
+ hv_context.signal_event_param =
(struct hv_input_signal_event *)
- (ALIGN_UP((unsigned long)gHvContext.SignalEventBuffer,
+ (ALIGN_UP((unsigned long)
+ hv_context.signal_event_buffer,
HV_HYPERCALL_PARAM_ALIGN));
- gHvContext.SignalEventParam->ConnectionId.Asu32 = 0;
- gHvContext.SignalEventParam->ConnectionId.u.Id =
+ hv_context.signal_event_param->connectionid.asu32 = 0;
+ hv_context.signal_event_param->connectionid.u.id =
VMBUS_EVENT_CONNECTION_ID;
- gHvContext.SignalEventParam->FlagNumber = 0;
- gHvContext.SignalEventParam->RsvdZ = 0;
+ hv_context.signal_event_param->flag_number = 0;
+ hv_context.signal_event_param->rsvdz = 0;
return ret;
Cleanup:
- if (virtAddr) {
- if (hypercallMsr.Enable) {
- hypercallMsr.AsUINT64 = 0;
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+ if (virtaddr) {
+ if (hypercall_msr.enable) {
+ hypercall_msr.as_uint64 = 0;
+ wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
}
- vfree(virtAddr);
+ vfree(virtaddr);
}
ret = -1;
return ret;
}
/*
- * HvCleanup - Cleanup routine.
+ * hv_cleanup - Cleanup routine.
*
* This routine is called normally during driver unloading or exiting.
*/
-void HvCleanup(void)
+void hv_cleanup(void)
{
- union hv_x64_msr_hypercall_contents hypercallMsr;
+ union hv_x64_msr_hypercall_contents hypercall_msr;
- kfree(gHvContext.SignalEventBuffer);
- gHvContext.SignalEventBuffer = NULL;
- gHvContext.SignalEventParam = NULL;
+ kfree(hv_context.signal_event_buffer);
+ hv_context.signal_event_buffer = NULL;
+ hv_context.signal_event_param = NULL;
- if (gHvContext.HypercallPage) {
- hypercallMsr.AsUINT64 = 0;
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
- vfree(gHvContext.HypercallPage);
- gHvContext.HypercallPage = NULL;
+ if (hv_context.hypercall_page) {
+ hypercall_msr.as_uint64 = 0;
+ wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ vfree(hv_context.hypercall_page);
+ hv_context.hypercall_page = NULL;
}
}
/*
- * HvPostMessage - Post a message using the hypervisor message IPC.
+ * hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
*/
-u16 HvPostMessage(union hv_connection_id connectionId,
- enum hv_message_type messageType,
- void *payload, size_t payloadSize)
+u16 hv_post_message(union hv_connection_id connection_id,
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
{
- struct alignedInput {
+ struct aligned_input {
u64 alignment8;
struct hv_input_post_message msg;
};
- struct hv_input_post_message *alignedMsg;
+ struct hv_input_post_message *aligned_msg;
u16 status;
unsigned long addr;
- if (payloadSize > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
+ if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
return -1;
- addr = (unsigned long)kmalloc(sizeof(struct alignedInput), GFP_ATOMIC);
+ addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
if (!addr)
return -1;
- alignedMsg = (struct hv_input_post_message *)
+ aligned_msg = (struct hv_input_post_message *)
(ALIGN_UP(addr, HV_HYPERCALL_PARAM_ALIGN));
- alignedMsg->ConnectionId = connectionId;
- alignedMsg->MessageType = messageType;
- alignedMsg->PayloadSize = payloadSize;
- memcpy((void *)alignedMsg->Payload, payload, payloadSize);
+ aligned_msg->connectionid = connection_id;
+ aligned_msg->message_type = message_type;
+ aligned_msg->payload_size = payload_size;
+ memcpy((void *)aligned_msg->payload, payload, payload_size);
- status = HvDoHypercall(HvCallPostMessage, alignedMsg, NULL) & 0xFFFF;
+ status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
+ & 0xFFFF;
kfree((void *)addr);
@@ -351,38 +355,40 @@ u16 HvPostMessage(union hv_connection_id connectionId,
/*
- * HvSignalEvent - Signal an event on the specified connection using the hypervisor event IPC.
+ * hv_signal_event -
+ * Signal an event on the specified connection using the hypervisor event IPC.
*
* This involves a hypercall.
*/
-u16 HvSignalEvent(void)
+u16 hv_signal_event(void)
{
u16 status;
- status = HvDoHypercall(HvCallSignalEvent, gHvContext.SignalEventParam,
+ status = do_hypercall(HVCALL_SIGNAL_EVENT,
+ hv_context.signal_event_param,
NULL) & 0xFFFF;
return status;
}
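
A minimal caller-side sketch of the two wrappers above (not from this patch): hv_post_message() hides the hypercall ABI details, so a caller only fills in a connection id and hands over the raw payload. The connection-id and message-type values below are assumptions used purely for illustration.

	u16 example_post_channel_message(void *msg, size_t msglen)
	{
		union hv_connection_id conn_id;

		conn_id.asu32 = 0;
		conn_id.u.id = 1;	/* illustrative connection id */

		/* message type 1 is likewise only an example value */
		return hv_post_message(conn_id, 1, msg, msglen);
	}
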
/*
- * HvSynicInit - Initialize the Synthethic Interrupt Controller.
+ * hv_synic_init - Initialize the Synthetic Interrupt Controller.
*
* If it is already initialized by another entity (ie x2v shim), we need to
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-void HvSynicInit(void *irqarg)
+void hv_synic_init(void *irqarg)
{
u64 version;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
- union hv_synic_sint sharedSint;
+ union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
- u32 irqVector = *((u32 *)(irqarg));
+ u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
- if (!gHvContext.HypercallPage)
+ if (!hv_context.hypercall_page)
return;
/* Check the version */
@@ -390,110 +396,112 @@ void HvSynicInit(void *irqarg)
DPRINT_INFO(VMBUS, "SynIC version: %llx", version);
- gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+ hv_context.synic_message_page[cpu] =
+ (void *)get_zeroed_page(GFP_ATOMIC);
- if (gHvContext.synICMessagePage[cpu] == NULL) {
+ if (hv_context.synic_message_page[cpu] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC message page!!");
goto Cleanup;
}
- gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+ hv_context.synic_event_page[cpu] =
+ (void *)get_zeroed_page(GFP_ATOMIC);
- if (gHvContext.synICEventPage[cpu] == NULL) {
+ if (hv_context.synic_event_page[cpu] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC event page!!");
goto Cleanup;
}
/* Setup the Synic's message page */
- rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
- simp.SimpEnabled = 1;
- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
+ rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ simp.simp_enabled = 1;
+ simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
>> PAGE_SHIFT;
- DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", simp.AsUINT64);
+ DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", simp.as_uint64);
- wrmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
+ wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
- rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
- siefp.SiefpEnabled = 1;
- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
+ rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ siefp.siefp_enabled = 1;
+ siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
>> PAGE_SHIFT;
- DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", siefp.AsUINT64);
+ DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", siefp.as_uint64);
- wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+ wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
/* Setup the interception SINT. */
/* wrmsrl((HV_X64_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX), */
- /* interceptionSint.AsUINT64); */
+ /* interceptionSint.as_uint64); */
/* Setup the shared SINT. */
- rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+ rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- sharedSint.AsUINT64 = 0;
- sharedSint.Vector = irqVector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
- sharedSint.Masked = false;
- sharedSint.AutoEoi = true;
+ shared_sint.as_uint64 = 0;
+ shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
+ shared_sint.masked = false;
+ shared_sint.auto_eoi = true;
DPRINT_DBG(VMBUS, "HV_X64_MSR_SINT1 msr set to: %llx",
- sharedSint.AsUINT64);
+ shared_sint.as_uint64);
- wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+ wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
- rdmsrl(HV_X64_MSR_SCONTROL, sctrl.AsUINT64);
- sctrl.Enable = 1;
+ rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+ sctrl.enable = 1;
- wrmsrl(HV_X64_MSR_SCONTROL, sctrl.AsUINT64);
+ wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
- gHvContext.SynICInitialized = true;
+ hv_context.synic_initialized = true;
return;
Cleanup:
- if (gHvContext.synICEventPage[cpu])
- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+ if (hv_context.synic_event_page[cpu])
+ osd_page_free(hv_context.synic_event_page[cpu], 1);
- if (gHvContext.synICMessagePage[cpu])
- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
+ if (hv_context.synic_message_page[cpu])
+ osd_page_free(hv_context.synic_message_page[cpu], 1);
return;
}
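
Once the message page is programmed above, incoming messages land in the slot of that page matching the SINT. A consumer sketch, not part of this patch, modelled on the usual SynIC drain pattern; HV_X64_MSR_EOM and the field names are taken from hv_api.h, everything else is illustrative.

	void example_drain_message_slot(int cpu)
	{
		struct hv_message *msg = (struct hv_message *)
			hv_context.synic_message_page[cpu] + VMBUS_MESSAGE_SINT;

		if (msg->header.message_type == HVMSG_NONE)
			return;				/* slot is empty */

		/* ... hand msg->u.payload to the channel message handler ... */

		msg->header.message_type = HVMSG_NONE;	/* release the slot */
		mb();
		if (msg->header.message_flags.msg_pending)
			wrmsrl(HV_X64_MSR_EOM, 0);	/* request redelivery */
	}
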
/*
- * HvSynicCleanup - Cleanup routine for HvSynicInit().
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
-void HvSynicCleanup(void *arg)
+void hv_synic_cleanup(void *arg)
{
- union hv_synic_sint sharedSint;
+ union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
int cpu = smp_processor_id();
- if (!gHvContext.SynICInitialized)
+ if (!hv_context.synic_initialized)
return;
- rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+ rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- sharedSint.Masked = 1;
+ shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+ wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
- simp.SimpEnabled = 0;
- simp.BaseSimpGpa = 0;
+ rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+ simp.simp_enabled = 0;
+ simp.base_simp_gpa = 0;
- wrmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
+ wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
- rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
- siefp.SiefpEnabled = 0;
- siefp.BaseSiefpGpa = 0;
+ rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+ siefp.siefp_enabled = 0;
+ siefp.base_siefp_gpa = 0;
- wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+ wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
- osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
- osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+ osd_page_free(hv_context.synic_message_page[cpu], 1);
+ osd_page_free(hv_context.synic_event_page[cpu], 1);
}
diff --git a/drivers/staging/hv/hv.h b/drivers/staging/hv/hv.h
index 41f5ebb86e17..829aff81bb30 100644
--- a/drivers/staging/hv/hv.h
+++ b/drivers/staging/hv/hv.h
@@ -92,49 +92,49 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
struct hv_input_signal_event_buffer {
- u64 Align8;
- struct hv_input_signal_event Event;
+ u64 align8;
+ struct hv_input_signal_event event;
};
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
*/
- u64 GuestId;
+ u64 guestid;
- void *HypercallPage;
+ void *hypercall_page;
- bool SynICInitialized;
+ bool synic_initialized;
/*
* This is used as an input param to HvCallSignalEvent hypercall. The
* input param is immutable in our usage and must be dynamic mem (vs
* stack or global). */
- struct hv_input_signal_event_buffer *SignalEventBuffer;
+ struct hv_input_signal_event_buffer *signal_event_buffer;
/* 8-bytes aligned of the buffer above */
- struct hv_input_signal_event *SignalEventParam;
+ struct hv_input_signal_event *signal_event_param;
- void *synICMessagePage[MAX_NUM_CPUS];
- void *synICEventPage[MAX_NUM_CPUS];
+ void *synic_message_page[MAX_NUM_CPUS];
+ void *synic_event_page[MAX_NUM_CPUS];
};
-extern struct hv_context gHvContext;
+extern struct hv_context hv_context;
/* Hv Interface */
-extern int HvInit(void);
+extern int hv_init(void);
-extern void HvCleanup(void);
+extern void hv_cleanup(void);
-extern u16 HvPostMessage(union hv_connection_id connectionId,
- enum hv_message_type messageType,
- void *payload, size_t payloadSize);
+extern u16 hv_post_message(union hv_connection_id connection_id,
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size);
-extern u16 HvSignalEvent(void);
+extern u16 hv_signal_event(void);
-extern void HvSynicInit(void *irqarg);
+extern void hv_synic_init(void *irqarg);
-extern void HvSynicCleanup(void *arg);
+extern void hv_synic_cleanup(void *arg);
#endif /* __HV_H__ */
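
Taken together, the interface above is consumed roughly as follows. This is a sketch, not part of the patch, assuming the usual 0-on-success convention for hv_init() and that the per-CPU routines are dispatched with on_each_cpu().

	static int example_vmbus_bringup(u32 irq_vector)
	{
		if (hv_init() != 0)
			return -ENODEV;		/* no hypercall page, no vmbus */

		on_each_cpu(hv_synic_init, &irq_vector, 1);
		return 0;
	}

	static void example_vmbus_teardown(void)
	{
		on_each_cpu(hv_synic_cleanup, NULL, 1);
		hv_cleanup();		/* disable hypercall page, free buffers */
	}
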
diff --git a/drivers/staging/hv/hv_api.h b/drivers/staging/hv/hv_api.h
index 9eb818ee07ba..70e863ad0464 100644
--- a/drivers/staging/hv/hv_api.h
+++ b/drivers/staging/hv/hv_api.h
@@ -510,21 +510,21 @@
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
+ * is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
enum hv_cpuid_function {
- HvCpuIdFunctionVersionAndFeatures = 0x00000001,
- HvCpuIdFunctionHvVendorAndMaxFunction = 0x40000000,
- HvCpuIdFunctionHvInterface = 0x40000001,
+ HVCPUID_VERSION_FEATURES = 0x00000001,
+ HVCPUID_VENDOR_MAXFUNCTION = 0x40000000,
+ HVCPUID_INTERFACE = 0x40000001,
/*
* The remaining functions depend on the value of
- * HvCpuIdFunctionInterface
+ * HVCPUID_INTERFACE
*/
- HvCpuIdFunctionMsHvVersion = 0x40000002,
- HvCpuIdFunctionMsHvFeatures = 0x40000003,
- HvCpuIdFunctionMsHvEnlightenmentInformation = 0x40000004,
- HvCpuIdFunctionMsHvImplementationLimits = 0x40000005,
+ HVCPUID_VERSION = 0x40000002,
+ HVCPUID_FEATURES = 0x40000003,
+ HVCPUID_ENLIGHTENMENT_INFO = 0x40000004,
+ HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
};
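
A sketch (not part of the patch) of how a guest would use the renamed leaves to confirm it is running under Hyper-V before touching anything else in this header; the "Micr"/"osof"/"t Hv" signature constants come from the public hypervisor interface specification, not from this file.

	static bool example_hyperv_present(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 1, ECX bit 31: some hypervisor is present. */
		cpuid(HVCPUID_VERSION_FEATURES, &eax, &ebx, &ecx, &edx);
		if (!(ecx & (1u << 31)))
			return false;

		/* Vendor leaf must report "Micr" "osof" "t Hv" in EBX/ECX/EDX. */
		cpuid(HVCPUID_VENDOR_MAXFUNCTION, &eax, &ebx, &ecx, &edx);
		return ebx == 0x7263694d &&
		       ecx == 0x666f736f &&
		       edx == 0x76482074;
	}
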
/* Define the virtual APIC registers */
@@ -575,30 +575,30 @@ enum hv_cpuid_function {
/* Define hypervisor message types. */
enum hv_message_type {
- HvMessageTypeNone = 0x00000000,
+ HVMSG_NONE = 0x00000000,
/* Memory access messages. */
- HvMessageTypeUnmappedGpa = 0x80000000,
- HvMessageTypeGpaIntercept = 0x80000001,
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
/* Timer notification messages. */
- HvMessageTimerExpired = 0x80000010,
+ HVMSG_TIMER_EXPIRED = 0x80000010,
/* Error messages. */
- HvMessageTypeInvalidVpRegisterValue = 0x80000020,
- HvMessageTypeUnrecoverableException = 0x80000021,
- HvMessageTypeUnsupportedFeature = 0x80000022,
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
/* Trace buffer complete messages. */
- HvMessageTypeEventLogBufferComplete = 0x80000040,
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
/* Platform-specific processor intercept messages. */
- HvMessageTypeX64IoPortIntercept = 0x80010000,
- HvMessageTypeX64MsrIntercept = 0x80010001,
- HvMessageTypeX64CpuidIntercept = 0x80010002,
- HvMessageTypeX64ExceptionIntercept = 0x80010003,
- HvMessageTypeX64ApicEoi = 0x80010004,
- HvMessageTypeX64LegacyFpError = 0x80010005
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
};
/* Define the number of synthetic interrupt sources. */
@@ -610,103 +610,103 @@ enum hv_message_type {
/* Define connection identifier type. */
union hv_connection_id {
- u32 Asu32;
+ u32 asu32;
struct {
- u32 Id:24;
- u32 Reserved:8;
+ u32 id:24;
+ u32 reserved:8;
} u;
};
/* Define port identifier type. */
union hv_port_id {
- u32 Asu32;
+ u32 asu32;
struct {
- u32 Id:24;
- u32 Reserved:8;
+ u32 id:24;
+ u32 reserved:8;
} u ;
};
/* Define port type. */
enum hv_port_type {
- HvPortTypeMessage = 1,
- HvPortTypeEvent = 2,
- HvPortTypeMonitor = 3
+ HVPORT_MSG = 1,
+ HVPORT_EVENT = 2,
+ HVPORT_MONITOR = 3
};
/* Define port information structure. */
struct hv_port_info {
- enum hv_port_type PortType;
- u32 Padding;
+ enum hv_port_type port_type;
+ u32 padding;
union {
struct {
- u32 TargetSint;
- u32 TargetVp;
- u64 RsvdZ;
- } MessagePortInfo;
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } message_port_info;
struct {
- u32 TargetSint;
- u32 TargetVp;
- u16 BaseFlagNumber;
- u16 FlagCount;
- u32 RsvdZ;
- } EventPortInfo;
+ u32 target_sint;
+ u32 target_vp;
+ u16 base_flag_number;
+ u16 flag_count;
+ u32 rsvdz;
+ } event_port_info;
struct {
- u64 MonitorAddress;
- u64 RsvdZ;
- } MonitorPortInfo;
+ u64 monitor_address;
+ u64 rsvdz;
+ } monitor_port_info;
};
};
struct hv_connection_info {
- enum hv_port_type PortType;
- u32 Padding;
+ enum hv_port_type port_type;
+ u32 padding;
union {
struct {
- u64 RsvdZ;
- } MessageConnectionInfo;
+ u64 rsvdz;
+ } message_connection_info;
struct {
- u64 RsvdZ;
- } EventConnectionInfo;
+ u64 rsvdz;
+ } event_connection_info;
struct {
- u64 MonitorAddress;
- } MonitorConnectionInfo;
+ u64 monitor_address;
+ } monitor_connection_info;
};
};
/* Define synthetic interrupt controller message flags. */
union hv_message_flags {
- u8 Asu8;
+ u8 asu8;
struct {
- u8 MessagePending:1;
- u8 Reserved:7;
+ u8 msg_pending:1;
+ u8 reserved:7;
};
};
/* Define synthetic interrupt controller message header. */
struct hv_message_header {
- enum hv_message_type MessageType;
- u8 PayloadSize;
- union hv_message_flags MessageFlags;
- u8 Reserved[2];
+ enum hv_message_type message_type;
+ u8 payload_size;
+ union hv_message_flags message_flags;
+ u8 reserved[2];
union {
- u64 Sender;
- union hv_port_id Port;
+ u64 sender;
+ union hv_port_id port;
};
};
/* Define timer message payload structure. */
struct hv_timer_message_payload {
- u32 TimerIndex;
- u32 Reserved;
- u64 ExpirationTime; /* When the timer expired */
- u64 DeliveryTime; /* When the message was delivered */
+ u32 timer_index;
+ u32 reserved;
+ u64 expiration_time; /* When the timer expired */
+ u64 delivery_time; /* When the message was delivered */
};
/* Define synthetic interrupt controller message format. */
struct hv_message {
- struct hv_message_header Header;
+ struct hv_message_header header;
union {
- u64 Payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
} u ;
};
@@ -715,82 +715,82 @@ struct hv_message {
/* Define the synthetic interrupt message page layout. */
struct hv_message_page {
- struct hv_message SintMessage[HV_SYNIC_SINT_COUNT];
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
};
/* Define the synthetic interrupt controller event flags format. */
union hv_synic_event_flags {
- u8 Flags8[HV_EVENT_FLAGS_BYTE_COUNT];
- u32 Flags32[HV_EVENT_FLAGS_DWORD_COUNT];
+ u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
+ u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
};
/* Define the synthetic interrupt flags page layout. */
struct hv_synic_event_flags_page {
- union hv_synic_event_flags SintEventFlags[HV_SYNIC_SINT_COUNT];
+ union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
};
/* Define SynIC control register. */
union hv_synic_scontrol {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 Enable:1;
- u64 Reserved:63;
+ u64 enable:1;
+ u64 reserved:63;
};
};
/* Define synthetic interrupt source. */
union hv_synic_sint {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 Vector:8;
- u64 Reserved1:8;
- u64 Masked:1;
- u64 AutoEoi:1;
- u64 Reserved2:46;
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 masked:1;
+ u64 auto_eoi:1;
+ u64 reserved2:46;
};
};
/* Define the format of the SIMP register */
union hv_synic_simp {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 SimpEnabled:1;
- u64 Preserved:11;
- u64 BaseSimpGpa:52;
+ u64 simp_enabled:1;
+ u64 preserved:11;
+ u64 base_simp_gpa:52;
};
};
/* Define the format of the SIEFP register */
union hv_synic_siefp {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 SiefpEnabled:1;
- u64 Preserved:11;
- u64 BaseSiefpGpa:52;
+ u64 siefp_enabled:1;
+ u64 preserved:11;
+ u64 base_siefp_gpa:52;
};
};
/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u32 Pending;
- u32 Armed;
+ u32 pending;
+ u32 armed;
};
};
struct hv_monitor_parameter {
- union hv_connection_id ConnectionId;
- u16 FlagNumber;
- u16 RsvdZ;
+ union hv_connection_id connectionid;
+ u16 flagnumber;
+ u16 rsvdz;
};
union hv_monitor_trigger_state {
- u32 Asu32;
+ u32 asu32;
struct {
- u32 GroupEnable:4;
- u32 RsvdZ:28;
+ u32 group_enable:4;
+ u32 rsvdz:28;
};
};
@@ -814,42 +814,42 @@ union hv_monitor_trigger_state {
/* | 840 | Rsvd4[0] | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
- union hv_monitor_trigger_state TriggerState;
- u32 RsvdZ1;
+ union hv_monitor_trigger_state trigger_state;
+ u32 rsvdz1;
- union hv_monitor_trigger_group TriggerGroup[4];
- u64 RsvdZ2[3];
+ union hv_monitor_trigger_group trigger_group[4];
+ u64 rsvdz2[3];
- s32 NextCheckTime[4][32];
+ s32 next_checktime[4][32];
- u16 Latency[4][32];
- u64 RsvdZ3[32];
+ u16 latency[4][32];
+ u64 rsvdz3[32];
- struct hv_monitor_parameter Parameter[4][32];
+ struct hv_monitor_parameter parameter[4][32];
- u8 RsvdZ4[1984];
+ u8 rsvdz4[1984];
};
/* Declare the various hypercall operations. */
enum hv_call_code {
- HvCallPostMessage = 0x005c,
- HvCallSignalEvent = 0x005d,
+ HVCALL_POST_MESSAGE = 0x005c,
+ HVCALL_SIGNAL_EVENT = 0x005d,
};
-/* Definition of the HvPostMessage hypercall input structure. */
+/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
- union hv_connection_id ConnectionId;
- u32 Reserved;
- enum hv_message_type MessageType;
- u32 PayloadSize;
- u64 Payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ union hv_connection_id connectionid;
+ u32 reserved;
+ enum hv_message_type message_type;
+ u32 payload_size;
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/* Definition of the HvSignalEvent hypercall input structure. */
+/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
- union hv_connection_id ConnectionId;
- u16 FlagNumber;
- u16 RsvdZ;
+ union hv_connection_id connectionid;
+ u16 flag_number;
+ u16 rsvdz;
};
/*
@@ -859,16 +859,16 @@ struct hv_input_signal_event {
/* Version info reported by guest OS's */
enum hv_guest_os_vendor {
- HvGuestOsVendorMicrosoft = 0x0001
+ HVGUESTOS_VENDOR_MICROSOFT = 0x0001
};
enum hv_guest_os_microsoft_ids {
- HvGuestOsMicrosoftUndefined = 0x00,
- HvGuestOsMicrosoftMSDOS = 0x01,
- HvGuestOsMicrosoftWindows3x = 0x02,
- HvGuestOsMicrosoftWindows9x = 0x03,
- HvGuestOsMicrosoftWindowsNT = 0x04,
- HvGuestOsMicrosoftWindowsCE = 0x05
+ HVGUESTOS_MICROSOFT_UNDEFINED = 0x00,
+ HVGUESTOS_MICROSOFT_MSDOS = 0x01,
+ HVGUESTOS_MICROSOFT_WINDOWS3X = 0x02,
+ HVGUESTOS_MICROSOFT_WINDOWS9X = 0x03,
+ HVGUESTOS_MICROSOFT_WINDOWSNT = 0x04,
+ HVGUESTOS_MICROSOFT_WINDOWSCE = 0x05
};
/*
@@ -877,14 +877,14 @@ enum hv_guest_os_microsoft_ids {
#define HV_X64_MSR_GUEST_OS_ID 0x40000000
union hv_x64_msr_guest_os_id_contents {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 BuildNumber:16;
- u64 ServiceVersion:8; /* Service Pack, etc. */
- u64 MinorVersion:8;
- u64 MajorVersion:8;
- u64 OsId:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
- u64 VendorId:16; /* enum hv_guest_os_vendor */
+ u64 build_number:16;
+ u64 service_version:8; /* Service Pack, etc. */
+ u64 minor_version:8;
+ u64 major_version:8;
+ u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
+ u64 vendor_id:16; /* enum hv_guest_os_vendor */
};
};
@@ -894,11 +894,11 @@ union hv_x64_msr_guest_os_id_contents {
#define HV_X64_MSR_HYPERCALL 0x40000001
union hv_x64_msr_hypercall_contents {
- u64 AsUINT64;
+ u64 as_uint64;
struct {
- u64 Enable:1;
- u64 Reserved:11;
- u64 GuestPhysicalAddress:52;
+ u64 enable:1;
+ u64 reserved:11;
+ u64 guest_physical_address:52;
};
};
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index a99e900ec4c9..0074581f20e8 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -38,12 +38,14 @@
#include "vmbus_api.h"
#include "utils.h"
+static u8 *shut_txf_buf;
+static u8 *time_txf_buf;
+static u8 *hbeat_txf_buf;
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
- u8 *buf;
- u32 buflen, recvlen;
+ u32 recvlen;
u64 requestid;
u8 execute_shutdown = false;
@@ -52,24 +54,23 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
- buflen = PAGE_SIZE;
- buf = kmalloc(buflen, GFP_ATOMIC);
-
- vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+ vmbus_recvpacket(channel, shut_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
DPRINT_DBG(VMBUS, "shutdown packet: len=%d, requestid=%lld",
recvlen, requestid);
- icmsghdrp = (struct icmsg_hdr *)&buf[
+ icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, negop, buf);
+ prep_negotiate_resp(icmsghdrp, negop, shut_txf_buf);
} else {
- shutdown_msg = (struct shutdown_msg_data *)&buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ shutdown_msg =
+ (struct shutdown_msg_data *)&shut_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
switch (shutdown_msg->flags) {
case 0:
@@ -93,13 +94,11 @@ static void shutdown_onchannelcallback(void *context)
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
- vmbus_sendpacket(channel, buf,
+ vmbus_sendpacket(channel, shut_txf_buf,
recvlen, requestid,
VmbusPacketTypeDataInBand, 0);
}
- kfree(buf);
-
if (execute_shutdown == true)
orderly_poweroff(false);
}
@@ -150,28 +149,25 @@ static inline void adj_guesttime(u64 hosttime, u8 flags)
static void timesync_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
- u8 *buf;
- u32 buflen, recvlen;
+ u32 recvlen;
u64 requestid;
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
- buflen = PAGE_SIZE;
- buf = kmalloc(buflen, GFP_ATOMIC);
-
- vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+ vmbus_recvpacket(channel, time_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
DPRINT_DBG(VMBUS, "timesync packet: recvlen=%d, requestid=%lld",
recvlen, requestid);
- icmsghdrp = (struct icmsg_hdr *)&buf[
+ icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, NULL, buf);
+ prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf);
} else {
- timedatap = (struct ictimesync_data *)&buf[
+ timedatap = (struct ictimesync_data *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
adj_guesttime(timedatap->parenttime, timedatap->flags);
@@ -180,12 +176,10 @@ static void timesync_onchannelcallback(void *context)
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
- vmbus_sendpacket(channel, buf,
+ vmbus_sendpacket(channel, time_txf_buf,
recvlen, requestid,
VmbusPacketTypeDataInBand, 0);
}
-
- kfree(buf);
}
/*
@@ -196,30 +190,28 @@ static void timesync_onchannelcallback(void *context)
static void heartbeat_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
- u8 *buf;
- u32 buflen, recvlen;
+ u32 recvlen;
u64 requestid;
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
- buflen = PAGE_SIZE;
- buf = kmalloc(buflen, GFP_ATOMIC);
-
- vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
DPRINT_DBG(VMBUS, "heartbeat packet: len=%d, requestid=%lld",
recvlen, requestid);
- icmsghdrp = (struct icmsg_hdr *)&buf[
+ icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- prep_negotiate_resp(icmsghdrp, NULL, buf);
+ prep_negotiate_resp(icmsghdrp, NULL, hbeat_txf_buf);
} else {
- heartbeat_msg = (struct heartbeat_msg_data *)&buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ heartbeat_msg =
+ (struct heartbeat_msg_data *)&hbeat_txf_buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
DPRINT_DBG(VMBUS, "heartbeat seq = %lld",
heartbeat_msg->seq_num);
@@ -230,12 +222,10 @@ static void heartbeat_onchannelcallback(void *context)
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
- vmbus_sendpacket(channel, buf,
+ vmbus_sendpacket(channel, hbeat_txf_buf,
recvlen, requestid,
VmbusPacketTypeDataInBand, 0);
}
-
- kfree(buf);
}
static const struct pci_device_id __initconst
@@ -268,15 +258,28 @@ static int __init init_hyperv_utils(void)
if (!dmi_check_system(hv_utils_dmi_table))
return -ENODEV;
- hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+ shut_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ time_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ hbeat_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!shut_txf_buf || !time_txf_buf || !hbeat_txf_buf) {
+ printk(KERN_INFO
+ "Unable to allocate memory for receive buffer\n");
+ kfree(shut_txf_buf);
+ kfree(time_txf_buf);
+ kfree(hbeat_txf_buf);
+ return -ENOMEM;
+ }
+
+ hv_cb_utils[HV_SHUTDOWN_MSG].channel->onchannel_callback =
&shutdown_onchannelcallback;
hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback;
- hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+ hv_cb_utils[HV_TIMESYNC_MSG].channel->onchannel_callback =
&timesync_onchannelcallback;
hv_cb_utils[HV_TIMESYNC_MSG].callback = &timesync_onchannelcallback;
- hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+ hv_cb_utils[HV_HEARTBEAT_MSG].channel->onchannel_callback =
&heartbeat_onchannelcallback;
hv_cb_utils[HV_HEARTBEAT_MSG].callback = &heartbeat_onchannelcallback;
@@ -287,17 +290,21 @@ static void exit_hyperv_utils(void)
{
printk(KERN_INFO "De-Registered HyperV Utility Driver\n");
- hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+ hv_cb_utils[HV_SHUTDOWN_MSG].channel->onchannel_callback =
&chn_cb_negotiate;
hv_cb_utils[HV_SHUTDOWN_MSG].callback = &chn_cb_negotiate;
- hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+ hv_cb_utils[HV_TIMESYNC_MSG].channel->onchannel_callback =
&chn_cb_negotiate;
hv_cb_utils[HV_TIMESYNC_MSG].callback = &chn_cb_negotiate;
- hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+ hv_cb_utils[HV_HEARTBEAT_MSG].channel->onchannel_callback =
&chn_cb_negotiate;
hv_cb_utils[HV_HEARTBEAT_MSG].callback = &chn_cb_negotiate;
+
+ kfree(shut_txf_buf);
+ kfree(time_txf_buf);
+ kfree(hbeat_txf_buf);
}
module_init(init_hyperv_utils);
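
The recurring change in hv_utils.c above is that the channel callbacks no longer do a kmalloc(PAGE_SIZE, GFP_ATOMIC)/kfree() pair per packet; three buffers are allocated once with GFP_KERNEL in init_hyperv_utils() and freed in exit_hyperv_utils(), since atomic allocations in the callback path can fail under memory pressure. The pattern, reduced to a single channel with illustrative names:

	static u8 *example_txf_buf;	/* one page, lives as long as the module */

	static void example_onchannelcallback(void *context)
	{
		struct vmbus_channel *channel = context;
		u32 recvlen;
		u64 requestid;

		/* reuse the preallocated page instead of allocating per packet */
		vmbus_recvpacket(channel, example_txf_buf, PAGE_SIZE,
				 &recvlen, &requestid);
		/* ... parse and answer exactly as the callbacks above do ... */
	}

	static int __init example_init(void)
	{
		example_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		return example_txf_buf ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		kfree(example_txf_buf);
	}
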
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index 4c2632cb19e9..0edbe7483a4c 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -31,147 +31,149 @@
/* Globals */
-static const char *gDriverName = "netvsc";
+static const char *driver_name = "netvsc";
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
-static const struct hv_guid gNetVscDeviceType = {
+static const struct hv_guid netvsc_device_type = {
.data = {
0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
}
};
-static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo);
+static int netvsc_device_add(struct hv_device *device, void *additional_info);
-static int NetVscOnDeviceRemove(struct hv_device *Device);
+static int netvsc_device_remove(struct hv_device *device);
-static void NetVscOnCleanup(struct hv_driver *Driver);
+static void netvsc_cleanup(struct hv_driver *driver);
-static void NetVscOnChannelCallback(void *context);
+static void netvsc_channel_cb(void *context);
-static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device);
+static int netvsc_init_send_buf(struct hv_device *device);
-static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device);
+static int netvsc_init_recv_buf(struct hv_device *device);
-static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice);
+static int netvsc_destroy_send_buf(struct netvsc_device *net_device);
-static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice);
+static int netvsc_destroy_recv_buf(struct netvsc_device *net_device);
-static int NetVscConnectToVsp(struct hv_device *Device);
+static int netvsc_connect_vsp(struct hv_device *device);
-static void NetVscOnSendCompletion(struct hv_device *Device,
- struct vmpacket_descriptor *Packet);
+static void netvsc_send_completion(struct hv_device *device,
+ struct vmpacket_descriptor *packet);
-static int NetVscOnSend(struct hv_device *Device,
- struct hv_netvsc_packet *Packet);
+static int netvsc_send(struct hv_device *device,
+ struct hv_netvsc_packet *packet);
-static void NetVscOnReceive(struct hv_device *Device,
- struct vmpacket_descriptor *Packet);
+static void netvsc_receive(struct hv_device *device,
+ struct vmpacket_descriptor *packet);
-static void NetVscOnReceiveCompletion(void *Context);
+static void netvsc_receive_completion(void *context);
-static void NetVscSendReceiveCompletion(struct hv_device *Device,
- u64 TransactionId);
+static void netvsc_send_recv_completion(struct hv_device *device,
+ u64 transaction_id);
-static struct netvsc_device *AllocNetDevice(struct hv_device *Device)
+static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
- if (!netDevice)
+ net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
+ if (!net_device)
return NULL;
/* Set to 2 to allow both inbound and outbound traffic */
- atomic_cmpxchg(&netDevice->RefCount, 0, 2);
+ atomic_cmpxchg(&net_device->refcnt, 0, 2);
- netDevice->Device = Device;
- Device->Extension = netDevice;
+ net_device->dev = device;
+ device->Extension = net_device;
- return netDevice;
+ return net_device;
}
-static void FreeNetDevice(struct netvsc_device *Device)
+static void free_net_device(struct netvsc_device *device)
{
- WARN_ON(atomic_read(&Device->RefCount) == 0);
- Device->Device->Extension = NULL;
- kfree(Device);
+ WARN_ON(atomic_read(&device->refcnt) == 0);
+ device->dev->Extension = NULL;
+ kfree(device);
}
/* Get the net device object iff exists and its refcount > 1 */
-static struct netvsc_device *GetOutboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = Device->Extension;
- if (netDevice && atomic_read(&netDevice->RefCount) > 1)
- atomic_inc(&netDevice->RefCount);
+ net_device = device->Extension;
+ if (net_device && atomic_read(&net_device->refcnt) > 1)
+ atomic_inc(&net_device->refcnt);
else
- netDevice = NULL;
+ net_device = NULL;
- return netDevice;
+ return net_device;
}
/* Get the net device object iff exists and its refcount > 0 */
-static struct netvsc_device *GetInboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = Device->Extension;
- if (netDevice && atomic_read(&netDevice->RefCount))
- atomic_inc(&netDevice->RefCount);
+ net_device = device->Extension;
+ if (net_device && atomic_read(&net_device->refcnt))
+ atomic_inc(&net_device->refcnt);
else
- netDevice = NULL;
+ net_device = NULL;
- return netDevice;
+ return net_device;
}
-static void PutNetDevice(struct hv_device *Device)
+static void put_net_device(struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = Device->Extension;
+ net_device = device->Extension;
/* ASSERT(netDevice); */
- atomic_dec(&netDevice->RefCount);
+ atomic_dec(&net_device->refcnt);
}
-static struct netvsc_device *ReleaseOutboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *release_outbound_net_device(
+ struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = Device->Extension;
- if (netDevice == NULL)
+ net_device = device->Extension;
+ if (net_device == NULL)
return NULL;
/* Busy wait until the ref drop to 2, then set it to 1 */
- while (atomic_cmpxchg(&netDevice->RefCount, 2, 1) != 2)
+ while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
udelay(100);
- return netDevice;
+ return net_device;
}
-static struct netvsc_device *ReleaseInboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *release_inbound_net_device(
+ struct hv_device *device)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
- netDevice = Device->Extension;
- if (netDevice == NULL)
+ net_device = device->Extension;
+ if (net_device == NULL)
return NULL;
/* Busy wait until the ref drop to 1, then set it to 0 */
- while (atomic_cmpxchg(&netDevice->RefCount, 1, 0) != 1)
+ while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
udelay(100);
- Device->Extension = NULL;
- return netDevice;
+ device->Extension = NULL;
+ return net_device;
}
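
The helpers above implement a small hand-rolled refcount protocol: alloc_net_device() starts refcnt at 2 (outbound and inbound traffic enabled), get_outbound_net_device() succeeds only while refcnt > 1, get_inbound_net_device() while refcnt > 0, and the release functions busy-wait the counter down in two stages so that sends drain before receives. A caller-side sketch, illustrative only:

	static int example_send_path(struct hv_device *device)
	{
		struct netvsc_device *net_device;

		net_device = get_outbound_net_device(device);
		if (!net_device)
			return -ENODEV;		/* device is being torn down */

		/* ... post the packet on device->channel ... */

		put_net_device(device);		/* drop the temporary reference */
		return 0;
	}

	/*
	 * Teardown order: release_outbound_net_device() (2 -> 1) stops new
	 * sends, release_inbound_net_device() (1 -> 0) stops receives, and
	 * only then is free_net_device() safe.
	 */
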
/*
- * NetVscInitialize - Main entry point
+ * netvsc_initialize - Main entry point
*/
-int NetVscInitialize(struct hv_driver *drv)
+int netvsc_initialize(struct hv_driver *drv)
{
struct netvsc_driver *driver = (struct netvsc_driver *)drv;
@@ -185,8 +187,8 @@ int NetVscInitialize(struct hv_driver *drv)
/* Make sure we are at least 2 pages since 1 page is used for control */
/* ASSERT(driver->RingBufferSize >= (PAGE_SIZE << 1)); */
- drv->name = gDriverName;
- memcpy(&drv->deviceType, &gNetVscDeviceType, sizeof(struct hv_guid));
+ drv->name = driver_name;
+ memcpy(&drv->deviceType, &netvsc_device_type, sizeof(struct hv_guid));
/* Make sure it is set by the caller */
/* FIXME: These probably should still be tested in some way */
@@ -194,24 +196,24 @@ int NetVscInitialize(struct hv_driver *drv)
/* ASSERT(driver->OnLinkStatusChanged); */
/* Setup the dispatch table */
- driver->Base.OnDeviceAdd = NetVscOnDeviceAdd;
- driver->Base.OnDeviceRemove = NetVscOnDeviceRemove;
- driver->Base.OnCleanup = NetVscOnCleanup;
+ driver->base.OnDeviceAdd = netvsc_device_add;
+ driver->base.OnDeviceRemove = netvsc_device_remove;
+ driver->base.OnCleanup = netvsc_cleanup;
- driver->OnSend = NetVscOnSend;
+ driver->send = netvsc_send;
- RndisFilterInit(driver);
+ rndis_filter_init(driver);
return 0;
}
-static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
+static int netvsc_init_recv_buf(struct hv_device *device)
{
int ret = 0;
- struct netvsc_device *netDevice;
- struct nvsp_message *initPacket;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
- netDevice = GetOutboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_outbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return -1;
@@ -220,12 +222,12 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
/* page-size grandularity */
/* ASSERT((netDevice->ReceiveBufferSize & (PAGE_SIZE - 1)) == 0); */
- netDevice->ReceiveBuffer =
- osd_PageAlloc(netDevice->ReceiveBufferSize >> PAGE_SHIFT);
- if (!netDevice->ReceiveBuffer) {
+ net_device->recv_buf =
+ osd_page_alloc(net_device->recv_buf_size >> PAGE_SHIFT);
+ if (!net_device->recv_buf) {
DPRINT_ERR(NETVSC,
"unable to allocate receive buffer of size %d",
- netDevice->ReceiveBufferSize);
+ net_device->recv_buf_size);
ret = -1;
goto Cleanup;
}
@@ -240,32 +242,34 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
* channel. Note: This call uses the vmbus connection rather
* than the channel to establish the gpadl handle.
*/
- ret = vmbus_establish_gpadl(Device->channel, netDevice->ReceiveBuffer,
- netDevice->ReceiveBufferSize,
- &netDevice->ReceiveBufferGpadlHandle);
+ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
+ net_device->recv_buf_size,
+ &net_device->recv_buf_gpadl_handle);
if (ret != 0) {
DPRINT_ERR(NETVSC,
"unable to establish receive buffer's gpadl");
goto Cleanup;
}
- /* osd_WaitEventWait(ext->ChannelInitEvent); */
+ /* osd_waitevent_wait(ext->ChannelInitEvent); */
/* Notify the NetVsp of the gpadl handle */
DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendReceiveBuffer...");
- initPacket = &netDevice->ChannelInitPacket;
+ init_packet = &net_device->channel_init_pkt;
- memset(initPacket, 0, sizeof(struct nvsp_message));
+ memset(init_packet, 0, sizeof(struct nvsp_message));
- initPacket->Header.MessageType = NvspMessage1TypeSendReceiveBuffer;
- initPacket->Messages.Version1Messages.SendReceiveBuffer.GpadlHandle = netDevice->ReceiveBufferGpadlHandle;
- initPacket->Messages.Version1Messages.SendReceiveBuffer.Id = NETVSC_RECEIVE_BUFFER_ID;
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
+ init_packet->msg.v1_msg.send_recv_buf.
+ gpadl_handle = net_device->recv_buf_gpadl_handle;
+ init_packet->msg.v1_msg.
+ send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
/* Send the gpadl notification request */
- ret = vmbus_sendpacket(Device->channel, initPacket,
+ ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)initPacket,
+ (unsigned long)init_packet,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
@@ -274,13 +278,15 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(netDevice->ChannelInitEvent);
+ osd_waitevent_wait(net_device->channel_init_event);
/* Check the response */
- if (initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Status != NvspStatusSuccess) {
+ if (init_packet->msg.v1_msg.
+ send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
DPRINT_ERR(NETVSC, "Unable to complete receive buffer "
"initialzation with NetVsp - status %d",
- initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Status);
+ init_packet->msg.v1_msg.
+ send_recv_buf_complete.status);
ret = -1;
goto Cleanup;
}
@@ -289,32 +295,36 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
/* ASSERT(netDevice->ReceiveSectionCount == 0); */
/* ASSERT(netDevice->ReceiveSections == NULL); */
- netDevice->ReceiveSectionCount = initPacket->Messages.Version1Messages.SendReceiveBufferComplete.NumSections;
+ net_device->recv_section_cnt = init_packet->msg.
+ v1_msg.send_recv_buf_complete.num_sections;
- netDevice->ReceiveSections = kmalloc(netDevice->ReceiveSectionCount * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
- if (netDevice->ReceiveSections == NULL) {
+ net_device->recv_section = kmalloc(net_device->recv_section_cnt
+ * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
+ if (net_device->recv_section == NULL) {
ret = -1;
goto Cleanup;
}
- memcpy(netDevice->ReceiveSections,
- initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Sections,
- netDevice->ReceiveSectionCount * sizeof(struct nvsp_1_receive_buffer_section));
+ memcpy(net_device->recv_section,
+ init_packet->msg.v1_msg.
+ send_recv_buf_complete.sections,
+ net_device->recv_section_cnt *
+ sizeof(struct nvsp_1_receive_buffer_section));
DPRINT_INFO(NETVSC, "Receive sections info (count %d, offset %d, "
"endoffset %d, suballoc size %d, num suballocs %d)",
- netDevice->ReceiveSectionCount,
- netDevice->ReceiveSections[0].Offset,
- netDevice->ReceiveSections[0].EndOffset,
- netDevice->ReceiveSections[0].SubAllocationSize,
- netDevice->ReceiveSections[0].NumSubAllocations);
+ net_device->recv_section_cnt,
+ net_device->recv_section[0].offset,
+ net_device->recv_section[0].end_offset,
+ net_device->recv_section[0].sub_alloc_size,
+ net_device->recv_section[0].num_sub_allocs);
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
*/
- if (netDevice->ReceiveSectionCount != 1 ||
- netDevice->ReceiveSections->Offset != 0) {
+ if (net_device->recv_section_cnt != 1 ||
+ net_device->recv_section->offset != 0) {
ret = -1;
goto Cleanup;
}
@@ -322,26 +332,26 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
goto Exit;
Cleanup:
- NetVscDestroyReceiveBuffer(netDevice);
+ netvsc_destroy_recv_buf(net_device);
Exit:
- PutNetDevice(Device);
+ put_net_device(device);
return ret;
}
-static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
+static int netvsc_init_send_buf(struct hv_device *device)
{
int ret = 0;
- struct netvsc_device *netDevice;
- struct nvsp_message *initPacket;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
- netDevice = GetOutboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_outbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return -1;
}
- if (netDevice->SendBufferSize <= 0) {
+ if (net_device->send_buf_size <= 0) {
ret = -EINVAL;
goto Cleanup;
}
@@ -349,11 +359,11 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
/* page-size grandularity */
/* ASSERT((netDevice->SendBufferSize & (PAGE_SIZE - 1)) == 0); */
- netDevice->SendBuffer =
- osd_PageAlloc(netDevice->SendBufferSize >> PAGE_SHIFT);
- if (!netDevice->SendBuffer) {
+ net_device->send_buf =
+ osd_page_alloc(net_device->send_buf_size >> PAGE_SHIFT);
+ if (!net_device->send_buf) {
DPRINT_ERR(NETVSC, "unable to allocate send buffer of size %d",
- netDevice->SendBufferSize);
+ net_device->send_buf_size);
ret = -1;
goto Cleanup;
}
@@ -367,31 +377,33 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
* channel. Note: This call uses the vmbus connection rather
* than the channel to establish the gpadl handle.
*/
- ret = vmbus_establish_gpadl(Device->channel, netDevice->SendBuffer,
- netDevice->SendBufferSize,
- &netDevice->SendBufferGpadlHandle);
+ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+ net_device->send_buf_size,
+ &net_device->send_buf_gpadl_handle);
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to establish send buffer's gpadl");
goto Cleanup;
}
- /* osd_WaitEventWait(ext->ChannelInitEvent); */
+ /* osd_waitevent_wait(ext->ChannelInitEvent); */
/* Notify the NetVsp of the gpadl handle */
DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendSendBuffer...");
- initPacket = &netDevice->ChannelInitPacket;
+ init_packet = &net_device->channel_init_pkt;
- memset(initPacket, 0, sizeof(struct nvsp_message));
+ memset(init_packet, 0, sizeof(struct nvsp_message));
- initPacket->Header.MessageType = NvspMessage1TypeSendSendBuffer;
- initPacket->Messages.Version1Messages.SendReceiveBuffer.GpadlHandle = netDevice->SendBufferGpadlHandle;
- initPacket->Messages.Version1Messages.SendReceiveBuffer.Id = NETVSC_SEND_BUFFER_ID;
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+ init_packet->msg.v1_msg.send_recv_buf.
+ gpadl_handle = net_device->send_buf_gpadl_handle;
+ init_packet->msg.v1_msg.send_recv_buf.id =
+ NETVSC_SEND_BUFFER_ID;
/* Send the gpadl notification request */
- ret = vmbus_sendpacket(Device->channel, initPacket,
+ ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)initPacket,
+ (unsigned long)init_packet,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
@@ -400,32 +412,35 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(netDevice->ChannelInitEvent);
+ osd_waitevent_wait(net_device->channel_init_event);
/* Check the response */
- if (initPacket->Messages.Version1Messages.SendSendBufferComplete.Status != NvspStatusSuccess) {
+ if (init_packet->msg.v1_msg.
+ send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
DPRINT_ERR(NETVSC, "Unable to complete send buffer "
"initialzation with NetVsp - status %d",
- initPacket->Messages.Version1Messages.SendSendBufferComplete.Status);
+ init_packet->msg.v1_msg.
+ send_send_buf_complete.status);
ret = -1;
goto Cleanup;
}
- netDevice->SendSectionSize = initPacket->Messages.Version1Messages.SendSendBufferComplete.SectionSize;
+ net_device->send_section_size = init_packet->
+ msg.v1_msg.send_send_buf_complete.section_size;
goto Exit;
Cleanup:
- NetVscDestroySendBuffer(netDevice);
+ netvsc_destroy_send_buf(net_device);
Exit:
- PutNetDevice(Device);
+ put_net_device(device);
return ret;
}
-static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
+static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
- struct nvsp_message *revokePacket;
+ struct nvsp_message *revoke_packet;
int ret = 0;
/*
@@ -434,20 +449,23 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
* NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
* to send a revoke msg here
*/
- if (NetDevice->ReceiveSectionCount) {
+ if (net_device->recv_section_cnt) {
DPRINT_INFO(NETVSC,
"Sending NvspMessage1TypeRevokeReceiveBuffer...");
/* Send the revoke receive buffer */
- revokePacket = &NetDevice->RevokePacket;
- memset(revokePacket, 0, sizeof(struct nvsp_message));
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
- revokePacket->Header.MessageType = NvspMessage1TypeRevokeReceiveBuffer;
- revokePacket->Messages.Version1Messages.RevokeReceiveBuffer.Id = NETVSC_RECEIVE_BUFFER_ID;
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
+ revoke_packet->msg.v1_msg.
+ revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
- ret = vmbus_sendpacket(NetDevice->Device->channel, revokePacket,
+ ret = vmbus_sendpacket(net_device->dev->channel,
+ revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revokePacket,
+ (unsigned long)revoke_packet,
VmbusPacketTypeDataInBand, 0);
/*
* If we failed here, we might as well return and
@@ -461,11 +479,11 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
}
/* Teardown the gpadl on the vsp end */
- if (NetDevice->ReceiveBufferGpadlHandle) {
+ if (net_device->recv_buf_gpadl_handle) {
DPRINT_INFO(NETVSC, "Tearing down receive buffer's GPADL...");
- ret = vmbus_teardown_gpadl(NetDevice->Device->channel,
- NetDevice->ReceiveBufferGpadlHandle);
+ ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak rather than continue and a bugchk */
if (ret != 0) {
@@ -473,30 +491,30 @@ static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
"unable to teardown receive buffer's gpadl");
return -1;
}
- NetDevice->ReceiveBufferGpadlHandle = 0;
+ net_device->recv_buf_gpadl_handle = 0;
}
- if (NetDevice->ReceiveBuffer) {
+ if (net_device->recv_buf) {
DPRINT_INFO(NETVSC, "Freeing up receive buffer...");
/* Free up the receive buffer */
- osd_PageFree(NetDevice->ReceiveBuffer,
- NetDevice->ReceiveBufferSize >> PAGE_SHIFT);
- NetDevice->ReceiveBuffer = NULL;
+ osd_page_free(net_device->recv_buf,
+ net_device->recv_buf_size >> PAGE_SHIFT);
+ net_device->recv_buf = NULL;
}
- if (NetDevice->ReceiveSections) {
- NetDevice->ReceiveSectionCount = 0;
- kfree(NetDevice->ReceiveSections);
- NetDevice->ReceiveSections = NULL;
+ if (net_device->recv_section) {
+ net_device->recv_section_cnt = 0;
+ kfree(net_device->recv_section);
+ net_device->recv_section = NULL;
}
return ret;
}
-static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
+static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
{
- struct nvsp_message *revokePacket;
+ struct nvsp_message *revoke_packet;
int ret = 0;
/*
@@ -505,20 +523,23 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
* NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
* to send a revoke msg here
*/
- if (NetDevice->SendSectionSize) {
+ if (net_device->send_section_size) {
DPRINT_INFO(NETVSC,
"Sending NvspMessage1TypeRevokeSendBuffer...");
/* Send the revoke send buffer */
- revokePacket = &NetDevice->RevokePacket;
- memset(revokePacket, 0, sizeof(struct nvsp_message));
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
- revokePacket->Header.MessageType = NvspMessage1TypeRevokeSendBuffer;
- revokePacket->Messages.Version1Messages.RevokeSendBuffer.Id = NETVSC_SEND_BUFFER_ID;
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+ revoke_packet->msg.v1_msg.
+ revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;
- ret = vmbus_sendpacket(NetDevice->Device->channel, revokePacket,
+ ret = vmbus_sendpacket(net_device->dev->channel,
+ revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revokePacket,
+ (unsigned long)revoke_packet,
VmbusPacketTypeDataInBand, 0);
/*
* If we failed here, we might as well return and have a leak
@@ -532,10 +553,10 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
}
/* Teardown the gpadl on the vsp end */
- if (NetDevice->SendBufferGpadlHandle) {
+ if (net_device->send_buf_gpadl_handle) {
DPRINT_INFO(NETVSC, "Tearing down send buffer's GPADL...");
- ret = vmbus_teardown_gpadl(NetDevice->Device->channel,
- NetDevice->SendBufferGpadlHandle);
+ ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ net_device->send_buf_gpadl_handle);
/*
* If we failed here, we might as well return and have a leak
@@ -546,49 +567,51 @@ static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
"gpadl");
return -1;
}
- NetDevice->SendBufferGpadlHandle = 0;
+ net_device->send_buf_gpadl_handle = 0;
}
- if (NetDevice->SendBuffer) {
+ if (net_device->send_buf) {
DPRINT_INFO(NETVSC, "Freeing up send buffer...");
/* Free up the receive buffer */
- osd_PageFree(NetDevice->SendBuffer,
- NetDevice->SendBufferSize >> PAGE_SHIFT);
- NetDevice->SendBuffer = NULL;
+ osd_page_free(net_device->send_buf,
+ net_device->send_buf_size >> PAGE_SHIFT);
+ net_device->send_buf = NULL;
}
return ret;
}
-static int NetVscConnectToVsp(struct hv_device *Device)
+static int netvsc_connect_vsp(struct hv_device *device)
{
int ret;
- struct netvsc_device *netDevice;
- struct nvsp_message *initPacket;
- int ndisVersion;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
+ int ndis_version;
- netDevice = GetOutboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_outbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return -1;
}
- initPacket = &netDevice->ChannelInitPacket;
+ init_packet = &net_device->channel_init_pkt;
- memset(initPacket, 0, sizeof(struct nvsp_message));
- initPacket->Header.MessageType = NvspMessageTypeInit;
- initPacket->Messages.InitMessages.Init.MinProtocolVersion = NVSP_MIN_PROTOCOL_VERSION;
- initPacket->Messages.InitMessages.Init.MaxProtocolVersion = NVSP_MAX_PROTOCOL_VERSION;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
+ init_packet->msg.init_msg.init.min_protocol_ver =
+ NVSP_MIN_PROTOCOL_VERSION;
+ init_packet->msg.init_msg.init.max_protocol_ver =
+ NVSP_MAX_PROTOCOL_VERSION;
DPRINT_INFO(NETVSC, "Sending NvspMessageTypeInit...");
/* Send the init request */
- ret = vmbus_sendpacket(Device->channel, initPacket,
+ ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)initPacket,
+ (unsigned long)init_packet,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -597,47 +620,52 @@ static int NetVscConnectToVsp(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(netDevice->ChannelInitEvent);
+ osd_waitevent_wait(net_device->channel_init_event);
/* Now, check the response */
/* ASSERT(initPacket->Messages.InitMessages.InitComplete.MaximumMdlChainLength <= MAX_MULTIPAGE_BUFFER_COUNT); */
DPRINT_INFO(NETVSC, "NvspMessageTypeInit status(%d) max mdl chain (%d)",
- initPacket->Messages.InitMessages.InitComplete.Status,
- initPacket->Messages.InitMessages.InitComplete.MaximumMdlChainLength);
+ init_packet->msg.init_msg.init_complete.status,
+ init_packet->msg.init_msg.
+ init_complete.max_mdl_chain_len);
- if (initPacket->Messages.InitMessages.InitComplete.Status !=
- NvspStatusSuccess) {
+ if (init_packet->msg.init_msg.init_complete.status !=
+ NVSP_STAT_SUCCESS) {
DPRINT_ERR(NETVSC,
"unable to initialize with netvsp (status 0x%x)",
- initPacket->Messages.InitMessages.InitComplete.Status);
+ init_packet->msg.init_msg.init_complete.status);
ret = -1;
goto Cleanup;
}
- if (initPacket->Messages.InitMessages.InitComplete.NegotiatedProtocolVersion != NVSP_PROTOCOL_VERSION_1) {
+ if (init_packet->msg.init_msg.init_complete.
+ negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
DPRINT_ERR(NETVSC, "unable to initialize with netvsp "
"(version expected 1 got %d)",
- initPacket->Messages.InitMessages.InitComplete.NegotiatedProtocolVersion);
+ init_packet->msg.init_msg.
+ init_complete.negotiated_protocol_ver);
ret = -1;
goto Cleanup;
}
DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendNdisVersion...");
/* Send the ndis version */
- memset(initPacket, 0, sizeof(struct nvsp_message));
+ memset(init_packet, 0, sizeof(struct nvsp_message));
- ndisVersion = 0x00050000;
+ ndis_version = 0x00050000;
- initPacket->Header.MessageType = NvspMessage1TypeSendNdisVersion;
- initPacket->Messages.Version1Messages.SendNdisVersion.NdisMajorVersion =
- (ndisVersion & 0xFFFF0000) >> 16;
- initPacket->Messages.Version1Messages.SendNdisVersion.NdisMinorVersion =
- ndisVersion & 0xFFFF;
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
+ init_packet->msg.v1_msg.
+ send_ndis_ver.ndis_major_ver =
+ (ndis_version & 0xFFFF0000) >> 16;
+ init_packet->msg.v1_msg.
+ send_ndis_ver.ndis_minor_ver =
+ ndis_version & 0xFFFF;
/* Send the init request */
- ret = vmbus_sendpacket(Device->channel, initPacket,
+ ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)initPacket,
+ (unsigned long)init_packet,
VmbusPacketTypeDataInBand, 0);
if (ret != 0) {
DPRINT_ERR(NETVSC,
@@ -651,51 +679,52 @@ static int NetVscConnectToVsp(struct hv_device *Device)
* packet) since our Vmbus always set the
* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED flag
*/
- /* osd_WaitEventWait(NetVscChannel->ChannelInitEvent); */
+ /* osd_waitevent_wait(NetVscChannel->ChannelInitEvent); */
/* Post the big receive buffer to NetVSP */
- ret = NetVscInitializeReceiveBufferWithNetVsp(Device);
+ ret = netvsc_init_recv_buf(device);
if (ret == 0)
- ret = NetVscInitializeSendBufferWithNetVsp(Device);
+ ret = netvsc_init_send_buf(device);
Cleanup:
- PutNetDevice(Device);
+ put_net_device(device);
return ret;
}
-static void NetVscDisconnectFromVsp(struct netvsc_device *NetDevice)
+static void NetVscDisconnectFromVsp(struct netvsc_device *net_device)
{
- NetVscDestroyReceiveBuffer(NetDevice);
- NetVscDestroySendBuffer(NetDevice);
+ netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_send_buf(net_device);
}
/*
- * NetVscOnDeviceAdd - Callback when the device belonging to this driver is added
+ * netvsc_device_add - Callback when the device belonging to this
+ * driver is added
*/
-static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int netvsc_device_add(struct hv_device *device, void *additional_info)
{
int ret = 0;
int i;
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
struct hv_netvsc_packet *packet, *pos;
- struct netvsc_driver *netDriver =
- (struct netvsc_driver *)Device->Driver;
+ struct netvsc_driver *net_driver =
+ (struct netvsc_driver *)device->Driver;
- netDevice = AllocNetDevice(Device);
- if (!netDevice) {
+ net_device = alloc_net_device(device);
+ if (!net_device) {
ret = -1;
goto Cleanup;
}
- DPRINT_DBG(NETVSC, "netvsc channel object allocated - %p", netDevice);
+ DPRINT_DBG(NETVSC, "netvsc channel object allocated - %p", net_device);
/* Initialize the NetVSC channel extension */
- netDevice->ReceiveBufferSize = NETVSC_RECEIVE_BUFFER_SIZE;
- spin_lock_init(&netDevice->receive_packet_list_lock);
+ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+ spin_lock_init(&net_device->recv_pkt_list_lock);
- netDevice->SendBufferSize = NETVSC_SEND_BUFFER_SIZE;
+ net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
- INIT_LIST_HEAD(&netDevice->ReceivePacketList);
+ INIT_LIST_HEAD(&net_device->recv_pkt_list);
for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -707,19 +736,19 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
NETVSC_RECEIVE_PACKETLIST_COUNT, i);
break;
}
- list_add_tail(&packet->ListEntry,
- &netDevice->ReceivePacketList);
+ list_add_tail(&packet->list_ent,
+ &net_device->recv_pkt_list);
}
- netDevice->ChannelInitEvent = osd_WaitEventCreate();
- if (!netDevice->ChannelInitEvent) {
+ net_device->channel_init_event = osd_waitevent_create();
+ if (!net_device->channel_init_event) {
ret = -ENOMEM;
goto Cleanup;
}
/* Open the channel */
- ret = vmbus_open(Device->channel, netDriver->RingBufferSize,
- netDriver->RingBufferSize, NULL, 0,
- NetVscOnChannelCallback, Device);
+ ret = vmbus_open(device->channel, net_driver->ring_buf_size,
+ net_driver->ring_buf_size, NULL, 0,
+ netvsc_channel_cb, device);
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to open channel: %d", ret);
@@ -731,7 +760,7 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
DPRINT_INFO(NETVSC, "*** NetVSC channel opened successfully! ***");
/* Connect with the NetVsp */
- ret = NetVscConnectToVsp(Device);
+ ret = netvsc_connect_vsp(device);
if (ret != 0) {
DPRINT_ERR(NETVSC, "unable to connect to NetVSP - %d", ret);
ret = -1;
@@ -745,174 +774,178 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
close:
/* Now, we can close the channel safely */
- vmbus_close(Device->channel);
+ vmbus_close(device->channel);
Cleanup:
- if (netDevice) {
- kfree(netDevice->ChannelInitEvent);
+ if (net_device) {
+ kfree(net_device->channel_init_event);
list_for_each_entry_safe(packet, pos,
- &netDevice->ReceivePacketList,
- ListEntry) {
- list_del(&packet->ListEntry);
+ &net_device->recv_pkt_list,
+ list_ent) {
+ list_del(&packet->list_ent);
kfree(packet);
}
- ReleaseOutboundNetDevice(Device);
- ReleaseInboundNetDevice(Device);
+ release_outbound_net_device(device);
+ release_inbound_net_device(device);
- FreeNetDevice(netDevice);
+ free_net_device(net_device);
}
return ret;
}
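The receive path depends on the pool of hv_netvsc_packet entries preallocated above; a failure partway through still leaves a usable, smaller pool. A condensed sketch of that pool setup under the same field names; the real code also reserves trailing room for page buffers, whose size constant is not visible in this hunk:

/* Sketch only: mirrors the receive-packet pool built in netvsc_device_add(). */
static int netvsc_prealloc_recv_pool(struct netvsc_device *net_device)
{
	struct hv_netvsc_packet *packet;
	int i;

	INIT_LIST_HEAD(&net_device->recv_pkt_list);
	spin_lock_init(&net_device->recv_pkt_list_lock);

	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			break;		/* a shorter pool still works */
		list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	}
	return i ? 0 : -ENOMEM;
}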
/*
- * NetVscOnDeviceRemove - Callback when the root bus device is removed
+ * netvsc_device_remove - Callback when the root bus device is removed
*/
-static int NetVscOnDeviceRemove(struct hv_device *Device)
+static int netvsc_device_remove(struct hv_device *device)
{
- struct netvsc_device *netDevice;
- struct hv_netvsc_packet *netvscPacket, *pos;
+ struct netvsc_device *net_device;
+ struct hv_netvsc_packet *netvsc_packet, *pos;
DPRINT_INFO(NETVSC, "Disabling outbound traffic on net device (%p)...",
- Device->Extension);
+ device->Extension);
/* Stop outbound traffic, i.e. sends and receive completions */
- netDevice = ReleaseOutboundNetDevice(Device);
- if (!netDevice) {
+ net_device = release_outbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "No net device present!!");
return -1;
}
/* Wait for all send completions */
- while (atomic_read(&netDevice->NumOutstandingSends)) {
+ while (atomic_read(&net_device->num_outstanding_sends)) {
DPRINT_INFO(NETVSC, "waiting for %d requests to complete...",
- atomic_read(&netDevice->NumOutstandingSends));
+ atomic_read(&net_device->num_outstanding_sends));
udelay(100);
}
DPRINT_INFO(NETVSC, "Disconnecting from netvsp...");
- NetVscDisconnectFromVsp(netDevice);
+ NetVscDisconnectFromVsp(net_device);
DPRINT_INFO(NETVSC, "Disabling inbound traffic on net device (%p)...",
- Device->Extension);
+ device->Extension);
/* Stop inbound traffic, i.e. receives and send completions */
- netDevice = ReleaseInboundNetDevice(Device);
+ net_device = release_inbound_net_device(device);
/* At this point, no one should be accessing netDevice except in here */
- DPRINT_INFO(NETVSC, "net device (%p) safe to remove", netDevice);
+ DPRINT_INFO(NETVSC, "net device (%p) safe to remove", net_device);
/* Now, we can close the channel safely */
- vmbus_close(Device->channel);
+ vmbus_close(device->channel);
/* Release all resources */
- list_for_each_entry_safe(netvscPacket, pos,
- &netDevice->ReceivePacketList, ListEntry) {
- list_del(&netvscPacket->ListEntry);
- kfree(netvscPacket);
+ list_for_each_entry_safe(netvsc_packet, pos,
+ &net_device->recv_pkt_list, list_ent) {
+ list_del(&netvsc_packet->list_ent);
+ kfree(netvsc_packet);
}
- kfree(netDevice->ChannelInitEvent);
- FreeNetDevice(netDevice);
+ kfree(net_device->channel_init_event);
+ free_net_device(net_device);
return 0;
}
/*
- * NetVscOnCleanup - Perform any cleanup when the driver is removed
+ * netvsc_cleanup - Perform any cleanup when the driver is removed
*/
-static void NetVscOnCleanup(struct hv_driver *drv)
+static void netvsc_cleanup(struct hv_driver *drv)
{
}
-static void NetVscOnSendCompletion(struct hv_device *Device,
- struct vmpacket_descriptor *Packet)
+static void netvsc_send_completion(struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *netDevice;
- struct nvsp_message *nvspPacket;
- struct hv_netvsc_packet *nvscPacket;
+ struct netvsc_device *net_device;
+ struct nvsp_message *nvsp_packet;
+ struct hv_netvsc_packet *nvsc_packet;
- netDevice = GetInboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_inbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return;
}
- nvspPacket = (struct nvsp_message *)((unsigned long)Packet + (Packet->DataOffset8 << 3));
+ nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
+ (packet->DataOffset8 << 3));
DPRINT_DBG(NETVSC, "send completion packet - type %d",
- nvspPacket->Header.MessageType);
+ nvsp_packet->hdr.msg_type);
- if ((nvspPacket->Header.MessageType == NvspMessageTypeInitComplete) ||
- (nvspPacket->Header.MessageType ==
- NvspMessage1TypeSendReceiveBufferComplete) ||
- (nvspPacket->Header.MessageType ==
- NvspMessage1TypeSendSendBufferComplete)) {
+ if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
+ (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
+ (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
/* Copy the response back */
- memcpy(&netDevice->ChannelInitPacket, nvspPacket,
+ memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
- osd_WaitEventSet(netDevice->ChannelInitEvent);
- } else if (nvspPacket->Header.MessageType ==
- NvspMessage1TypeSendRNDISPacketComplete) {
+ osd_waitevent_set(net_device->channel_init_event);
+ } else if (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
/* Get the send context */
- nvscPacket = (struct hv_netvsc_packet *)(unsigned long)Packet->TransactionId;
+ nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
+ packet->TransactionId;
/* ASSERT(nvscPacket); */
/* Notify the layer above us */
- nvscPacket->Completion.Send.OnSendCompletion(nvscPacket->Completion.Send.SendCompletionContext);
+ nvsc_packet->completion.send.send_completion(
+ nvsc_packet->completion.send.send_completion_ctx);
- atomic_dec(&netDevice->NumOutstandingSends);
+ atomic_dec(&net_device->num_outstanding_sends);
} else {
DPRINT_ERR(NETVSC, "Unknown send completion packet type - "
- "%d received!!", nvspPacket->Header.MessageType);
+ "%d received!!", nvsp_packet->hdr.msg_type);
}
- PutNetDevice(Device);
+ put_net_device(device);
}
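The init-style messages handled above complete synchronously: the sender parks on channel_init_event and this handler copies the reply into channel_init_pkt before signalling it. A hedged sketch of that request/reply pairing for the INIT exchange (the helper name is hypothetical; buffer-complete replies keep their status in a different union member):

/* Sketch: synchronous NVSP init request using the wait event signalled above. */
static int nvsp_request_and_wait(struct hv_device *device,
				 struct netvsc_device *net_device,
				 struct nvsp_message *msg)
{
	int ret;

	ret = vmbus_sendpacket(device->channel, msg,
			       sizeof(struct nvsp_message),
			       (unsigned long)msg,
			       VmbusPacketTypeDataInBand,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	/* netvsc_send_completion() copies the reply and wakes us up */
	osd_waitevent_wait(net_device->channel_init_event);

	return net_device->channel_init_pkt.msg.init_msg.init_complete.status ==
	       NVSP_STAT_SUCCESS ? 0 : -1;
}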
-static int NetVscOnSend(struct hv_device *Device,
- struct hv_netvsc_packet *Packet)
+static int netvsc_send(struct hv_device *device,
+ struct hv_netvsc_packet *packet)
{
- struct netvsc_device *netDevice;
+ struct netvsc_device *net_device;
int ret = 0;
struct nvsp_message sendMessage;
- netDevice = GetOutboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_outbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
- "ignoring outbound packets", netDevice);
+ "ignoring outbound packets", net_device);
return -2;
}
- sendMessage.Header.MessageType = NvspMessage1TypeSendRNDISPacket;
- if (Packet->IsDataPacket) {
+ sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+ if (packet->is_data_pkt) {
/* 0 is RMC_DATA; */
- sendMessage.Messages.Version1Messages.SendRNDISPacket.ChannelType = 0;
+ sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
} else {
/* 1 is RMC_CONTROL; */
- sendMessage.Messages.Version1Messages.SendRNDISPacket.ChannelType = 1;
+ sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
}
/* Not using send buffer section */
- sendMessage.Messages.Version1Messages.SendRNDISPacket.SendBufferSectionIndex = 0xFFFFFFFF;
- sendMessage.Messages.Version1Messages.SendRNDISPacket.SendBufferSectionSize = 0;
-
- if (Packet->PageBufferCount) {
- ret = vmbus_sendpacket_pagebuffer(Device->channel,
- Packet->PageBuffers,
- Packet->PageBufferCount,
+ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
+ 0xFFFFFFFF;
+ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+
+ if (packet->page_buf_cnt) {
+ ret = vmbus_sendpacket_pagebuffer(device->channel,
+ packet->page_buf,
+ packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
- (unsigned long)Packet);
+ (unsigned long)packet);
} else {
- ret = vmbus_sendpacket(Device->channel, &sendMessage,
+ ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
- (unsigned long)Packet,
+ (unsigned long)packet,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -920,31 +953,31 @@ static int NetVscOnSend(struct hv_device *Device,
if (ret != 0)
DPRINT_ERR(NETVSC, "Unable to send packet %p ret %d",
- Packet, ret);
+ packet, ret);
- atomic_inc(&netDevice->NumOutstandingSends);
- PutNetDevice(Device);
+ atomic_inc(&net_device->num_outstanding_sends);
+ put_net_device(device);
return ret;
}
-static void NetVscOnReceive(struct hv_device *Device,
- struct vmpacket_descriptor *Packet)
+static void netvsc_receive(struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *netDevice;
- struct vmtransfer_page_packet_header *vmxferpagePacket;
- struct nvsp_message *nvspPacket;
- struct hv_netvsc_packet *netvscPacket = NULL;
+ struct netvsc_device *net_device;
+ struct vmtransfer_page_packet_header *vmxferpage_packet;
+ struct nvsp_message *nvsp_packet;
+ struct hv_netvsc_packet *netvsc_packet = NULL;
unsigned long start;
- unsigned long end, endVirtual;
+ unsigned long end, end_virtual;
/* struct netvsc_driver *netvscDriver; */
- struct xferpage_packet *xferpagePacket = NULL;
+ struct xferpage_packet *xferpage_packet = NULL;
int i, j;
- int count = 0, bytesRemain = 0;
+ int count = 0, bytes_remain = 0;
unsigned long flags;
LIST_HEAD(listHead);
- netDevice = GetInboundNetDevice(Device);
- if (!netDevice) {
+ net_device = get_inbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return;
@@ -954,39 +987,40 @@ static void NetVscOnReceive(struct hv_device *Device,
* All inbound packets other than send completion should be xfer page
* packet
*/
- if (Packet->Type != VmbusPacketTypeDataUsingTransferPages) {
+ if (packet->Type != VmbusPacketTypeDataUsingTransferPages) {
DPRINT_ERR(NETVSC, "Unknown packet type received - %d",
- Packet->Type);
- PutNetDevice(Device);
+ packet->Type);
+ put_net_device(device);
return;
}
- nvspPacket = (struct nvsp_message *)((unsigned long)Packet +
- (Packet->DataOffset8 << 3));
+ nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
+ (packet->DataOffset8 << 3));
/* Make sure this is a valid nvsp packet */
- if (nvspPacket->Header.MessageType != NvspMessage1TypeSendRNDISPacket) {
+ if (nvsp_packet->hdr.msg_type !=
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
DPRINT_ERR(NETVSC, "Unknown nvsp packet type received - %d",
- nvspPacket->Header.MessageType);
- PutNetDevice(Device);
+ nvsp_packet->hdr.msg_type);
+ put_net_device(device);
return;
}
DPRINT_DBG(NETVSC, "NVSP packet received - type %d",
- nvspPacket->Header.MessageType);
+ nvsp_packet->hdr.msg_type);
- vmxferpagePacket = (struct vmtransfer_page_packet_header *)Packet;
+ vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
- if (vmxferpagePacket->TransferPageSetId != NETVSC_RECEIVE_BUFFER_ID) {
+ if (vmxferpage_packet->TransferPageSetId != NETVSC_RECEIVE_BUFFER_ID) {
DPRINT_ERR(NETVSC, "Invalid xfer page set id - "
"expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
- vmxferpagePacket->TransferPageSetId);
- PutNetDevice(Device);
+ vmxferpage_packet->TransferPageSetId);
+ put_net_device(device);
return;
}
DPRINT_DBG(NETVSC, "xfer page - range count %d",
- vmxferpagePacket->RangeCount);
+ vmxferpage_packet->RangeCount);
/*
* Grab free packets (range count + 1) to represent this xfer
@@ -994,13 +1028,13 @@ static void NetVscOnReceive(struct hv_device *Device,
* We grab it here so that we know exactly how many we can
* fulfil
*/
- spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
- while (!list_empty(&netDevice->ReceivePacketList)) {
- list_move_tail(netDevice->ReceivePacketList.next, &listHead);
- if (++count == vmxferpagePacket->RangeCount + 1)
+ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
+ while (!list_empty(&net_device->recv_pkt_list)) {
+ list_move_tail(net_device->recv_pkt_list.next, &listHead);
+ if (++count == vmxferpage_packet->RangeCount + 1)
break;
}
- spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, flags);
+ spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
/*
* We need at least 2 netvsc pkts (1 to represent the xfer
@@ -1010,140 +1044,149 @@ static void NetVscOnReceive(struct hv_device *Device,
if (count < 2) {
DPRINT_ERR(NETVSC, "Got only %d netvsc pkt...needed %d pkts. "
"Dropping this xfer page packet completely!",
- count, vmxferpagePacket->RangeCount + 1);
+ count, vmxferpage_packet->RangeCount + 1);
/* Return it to the freelist */
- spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
+ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
for (i = count; i != 0; i--) {
list_move_tail(listHead.next,
- &netDevice->ReceivePacketList);
+ &net_device->recv_pkt_list);
}
- spin_unlock_irqrestore(&netDevice->receive_packet_list_lock,
+ spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
flags);
- NetVscSendReceiveCompletion(Device,
- vmxferpagePacket->d.TransactionId);
+ netvsc_send_recv_completion(device,
+ vmxferpage_packet->d.TransactionId);
- PutNetDevice(Device);
+ put_net_device(device);
return;
}
/* Remove the 1st packet to represent the xfer page packet itself */
- xferpagePacket = (struct xferpage_packet *)listHead.next;
- list_del(&xferpagePacket->ListEntry);
+ xferpage_packet = (struct xferpage_packet *)listHead.next;
+ list_del(&xferpage_packet->list_ent);
/* This is how much we can satisfy */
- xferpagePacket->Count = count - 1;
+ xferpage_packet->count = count - 1;
/* ASSERT(xferpagePacket->Count > 0 && xferpagePacket->Count <= */
/* vmxferpagePacket->RangeCount); */
- if (xferpagePacket->Count != vmxferpagePacket->RangeCount) {
+ if (xferpage_packet->count != vmxferpage_packet->RangeCount) {
DPRINT_INFO(NETVSC, "Needed %d netvsc pkts to satisy this xfer "
- "page...got %d", vmxferpagePacket->RangeCount,
- xferpagePacket->Count);
+ "page...got %d", vmxferpage_packet->RangeCount,
+ xferpage_packet->count);
}
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < (count - 1); i++) {
- netvscPacket = (struct hv_netvsc_packet *)listHead.next;
- list_del(&netvscPacket->ListEntry);
+ netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
+ list_del(&netvsc_packet->list_ent);
/* Initialize the netvsc packet */
- netvscPacket->XferPagePacket = xferpagePacket;
- netvscPacket->Completion.Recv.OnReceiveCompletion =
- NetVscOnReceiveCompletion;
- netvscPacket->Completion.Recv.ReceiveCompletionContext =
- netvscPacket;
- netvscPacket->Device = Device;
+ netvsc_packet->xfer_page_pkt = xferpage_packet;
+ netvsc_packet->completion.recv.recv_completion =
+ netvsc_receive_completion;
+ netvsc_packet->completion.recv.recv_completion_ctx =
+ netvsc_packet;
+ netvsc_packet->device = device;
/* Save this so that we can send it back */
- netvscPacket->Completion.Recv.ReceiveCompletionTid =
- vmxferpagePacket->d.TransactionId;
+ netvsc_packet->completion.recv.recv_completion_tid =
+ vmxferpage_packet->d.TransactionId;
- netvscPacket->TotalDataBufferLength =
- vmxferpagePacket->Ranges[i].ByteCount;
- netvscPacket->PageBufferCount = 1;
+ netvsc_packet->total_data_buflen =
+ vmxferpage_packet->Ranges[i].ByteCount;
+ netvsc_packet->page_buf_cnt = 1;
/* ASSERT(vmxferpagePacket->Ranges[i].ByteOffset + */
/* vmxferpagePacket->Ranges[i].ByteCount < */
/* netDevice->ReceiveBufferSize); */
- netvscPacket->PageBuffers[0].Length =
- vmxferpagePacket->Ranges[i].ByteCount;
+ netvsc_packet->page_buf[0].Length =
+ vmxferpage_packet->Ranges[i].ByteCount;
- start = virt_to_phys((void *)((unsigned long)netDevice->ReceiveBuffer + vmxferpagePacket->Ranges[i].ByteOffset));
+ start = virt_to_phys((void *)((unsigned long)net_device->
+ recv_buf + vmxferpage_packet->Ranges[i].ByteOffset));
- netvscPacket->PageBuffers[0].Pfn = start >> PAGE_SHIFT;
- endVirtual = (unsigned long)netDevice->ReceiveBuffer
- + vmxferpagePacket->Ranges[i].ByteOffset
- + vmxferpagePacket->Ranges[i].ByteCount - 1;
- end = virt_to_phys((void *)endVirtual);
+ netvsc_packet->page_buf[0].Pfn = start >> PAGE_SHIFT;
+ end_virtual = (unsigned long)net_device->recv_buf
+ + vmxferpage_packet->Ranges[i].ByteOffset
+ + vmxferpage_packet->Ranges[i].ByteCount - 1;
+ end = virt_to_phys((void *)end_virtual);
/* Calculate the page relative offset */
- netvscPacket->PageBuffers[0].Offset =
- vmxferpagePacket->Ranges[i].ByteOffset & (PAGE_SIZE - 1);
+ netvsc_packet->page_buf[0].Offset =
+ vmxferpage_packet->Ranges[i].ByteOffset &
+ (PAGE_SIZE - 1);
if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
/* Handle frame across multiple pages: */
- netvscPacket->PageBuffers[0].Length =
- (netvscPacket->PageBuffers[0].Pfn << PAGE_SHIFT)
+ netvsc_packet->page_buf[0].Length =
+ (netvsc_packet->page_buf[0].Pfn <<
+ PAGE_SHIFT)
+ PAGE_SIZE - start;
- bytesRemain = netvscPacket->TotalDataBufferLength -
- netvscPacket->PageBuffers[0].Length;
+ bytes_remain = netvsc_packet->total_data_buflen -
+ netvsc_packet->page_buf[0].Length;
for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
- netvscPacket->PageBuffers[j].Offset = 0;
- if (bytesRemain <= PAGE_SIZE) {
- netvscPacket->PageBuffers[j].Length = bytesRemain;
- bytesRemain = 0;
+ netvsc_packet->page_buf[j].Offset = 0;
+ if (bytes_remain <= PAGE_SIZE) {
+ netvsc_packet->page_buf[j].Length =
+ bytes_remain;
+ bytes_remain = 0;
} else {
- netvscPacket->PageBuffers[j].Length = PAGE_SIZE;
- bytesRemain -= PAGE_SIZE;
+ netvsc_packet->page_buf[j].Length =
+ PAGE_SIZE;
+ bytes_remain -= PAGE_SIZE;
}
- netvscPacket->PageBuffers[j].Pfn =
- virt_to_phys((void *)(endVirtual - bytesRemain)) >> PAGE_SHIFT;
- netvscPacket->PageBufferCount++;
- if (bytesRemain == 0)
+ netvsc_packet->page_buf[j].Pfn =
+ virt_to_phys((void *)(end_virtual -
+ bytes_remain)) >> PAGE_SHIFT;
+ netvsc_packet->page_buf_cnt++;
+ if (bytes_remain == 0)
break;
}
/* ASSERT(bytesRemain == 0); */
}
DPRINT_DBG(NETVSC, "[%d] - (abs offset %u len %u) => "
"(pfn %llx, offset %u, len %u)", i,
- vmxferpagePacket->Ranges[i].ByteOffset,
- vmxferpagePacket->Ranges[i].ByteCount,
- netvscPacket->PageBuffers[0].Pfn,
- netvscPacket->PageBuffers[0].Offset,
- netvscPacket->PageBuffers[0].Length);
+ vmxferpage_packet->Ranges[i].ByteOffset,
+ vmxferpage_packet->Ranges[i].ByteCount,
+ netvsc_packet->page_buf[0].Pfn,
+ netvsc_packet->page_buf[0].Offset,
+ netvsc_packet->page_buf[0].Length);
/* Pass it to the upper layer */
- ((struct netvsc_driver *)Device->Driver)->OnReceiveCallback(Device, netvscPacket);
+ ((struct netvsc_driver *)device->Driver)->
+ recv_cb(device, netvsc_packet);
- NetVscOnReceiveCompletion(netvscPacket->Completion.Recv.ReceiveCompletionContext);
+ netvsc_receive_completion(netvsc_packet->
+ completion.recv.recv_completion_ctx);
}
/* ASSERT(list_empty(&listHead)); */
- PutNetDevice(Device);
+ put_net_device(device);
}
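The per-page split above is easier to follow in isolation: the first chunk runs from the range's offset to the end of its page, and the remainder is handed out in PAGE_SIZE pieces. A standalone illustration of the same arithmetic (plain user-space C; PAGE_SIZE assumed to be 4096 and the offset/length values purely illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long offset = 4000, len = 1500;	/* receive range inside recv_buf */
	unsigned long first = PAGE_SIZE - (offset & (PAGE_SIZE - 1));
	int i = 1;

	if (first > len)
		first = len;
	printf("chunk 0: page offset %lu, len %lu\n", offset & (PAGE_SIZE - 1), first);

	for (len -= first; len; i++) {
		unsigned long chunk = len < PAGE_SIZE ? len : PAGE_SIZE;

		printf("chunk %d: page offset 0, len %lu\n", i, chunk);
		len -= chunk;
	}
	return 0;
}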
-static void NetVscSendReceiveCompletion(struct hv_device *Device,
- u64 TransactionId)
+static void netvsc_send_recv_completion(struct hv_device *device,
+ u64 transaction_id)
{
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
DPRINT_DBG(NETVSC, "Sending receive completion pkt - %llx",
- TransactionId);
+ transaction_id);
- recvcompMessage.Header.MessageType =
- NvspMessage1TypeSendRNDISPacketComplete;
+ recvcompMessage.hdr.msg_type =
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
/* FIXME: Pass in the status */
- recvcompMessage.Messages.Version1Messages.SendRNDISPacketComplete.Status = NvspStatusSuccess;
+ recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
+ NVSP_STAT_SUCCESS;
retry_send_cmplt:
/* Send the completion */
- ret = vmbus_sendpacket(Device->channel, &recvcompMessage,
- sizeof(struct nvsp_message), TransactionId,
+ ret = vmbus_sendpacket(device->channel, &recvcompMessage,
+ sizeof(struct nvsp_message), transaction_id,
VmbusPacketTypeCompletion, 0);
if (ret == 0) {
/* success */
@@ -1152,7 +1195,7 @@ retry_send_cmplt:
/* no more room...wait a bit and attempt to retry 3 times */
retries++;
DPRINT_ERR(NETVSC, "unable to send receive completion pkt "
- "(tid %llx)...retrying %d", TransactionId, retries);
+ "(tid %llx)...retrying %d", transaction_id, retries);
if (retries < 4) {
udelay(100);
@@ -1160,22 +1203,22 @@ retry_send_cmplt:
} else {
DPRINT_ERR(NETVSC, "unable to send receive completion "
"pkt (tid %llx)...give up retrying",
- TransactionId);
+ transaction_id);
}
} else {
DPRINT_ERR(NETVSC, "unable to send receive completion pkt - "
- "%llx", TransactionId);
+ "%llx", transaction_id);
}
}
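When the outbound ring has no room, the completion above is retried a small, fixed number of times with a short delay before giving up. A generic sketch of that bounded-retry idiom, treating a -1 return as "ring full" exactly as the code above does (the helper name and retry constant are illustrative):

/* Sketch: bounded retries for a ring-buffer write that may find the ring full. */
#define NETVSC_CMPLT_RETRIES	3

static int netvsc_send_with_retry(struct hv_device *device,
				  struct nvsp_message *msg, u64 tid)
{
	int retries = 0;
	int ret;

	do {
		ret = vmbus_sendpacket(device->channel, msg, sizeof(*msg),
				       tid, VmbusPacketTypeCompletion, 0);
		if (ret != -1)		/* 0 = sent; anything but -1 = hard error */
			break;
		udelay(100);		/* ring full: wait a little and retry */
	} while (++retries < NETVSC_CMPLT_RETRIES);

	return ret;
}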
/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void NetVscOnReceiveCompletion(void *Context)
+static void netvsc_receive_completion(void *context)
{
- struct hv_netvsc_packet *packet = Context;
- struct hv_device *device = (struct hv_device *)packet->Device;
- struct netvsc_device *netDevice;
- u64 transactionId = 0;
- bool fSendReceiveComp = false;
+ struct hv_netvsc_packet *packet = context;
+ struct hv_device *device = (struct hv_device *)packet->device;
+ struct netvsc_device *net_device;
+ u64 transaction_id = 0;
+ bool fsend_receive_comp = false;
unsigned long flags;
/* ASSERT(packet->XferPagePacket); */
@@ -1185,49 +1228,49 @@ static void NetVscOnReceiveCompletion(void *Context)
* send out receive completion, we are using GetInboundNetDevice()
* since we may have disabled outbound traffic already.
*/
- netDevice = GetInboundNetDevice(device);
- if (!netDevice) {
+ net_device = get_inbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "unable to get net device..."
"device being destroyed?");
return;
}
/* Overloading use of the lock. */
- spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
+ spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
/* ASSERT(packet->XferPagePacket->Count > 0); */
- packet->XferPagePacket->Count--;
+ packet->xfer_page_pkt->count--;
/*
* Last one in the line that represent 1 xfer page packet.
* Return the xfer page packet itself to the freelist
*/
- if (packet->XferPagePacket->Count == 0) {
- fSendReceiveComp = true;
- transactionId = packet->Completion.Recv.ReceiveCompletionTid;
- list_add_tail(&packet->XferPagePacket->ListEntry,
- &netDevice->ReceivePacketList);
+ if (packet->xfer_page_pkt->count == 0) {
+ fsend_receive_comp = true;
+ transaction_id = packet->completion.recv.recv_completion_tid;
+ list_add_tail(&packet->xfer_page_pkt->list_ent,
+ &net_device->recv_pkt_list);
}
/* Put the packet back */
- list_add_tail(&packet->ListEntry, &netDevice->ReceivePacketList);
- spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, flags);
+ list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
+ spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
/* Send a receive completion for the xfer page packet */
- if (fSendReceiveComp)
- NetVscSendReceiveCompletion(device, transactionId);
+ if (fsend_receive_comp)
+ netvsc_send_recv_completion(device, transaction_id);
- PutNetDevice(device);
+ put_net_device(device);
}
-static void NetVscOnChannelCallback(void *Context)
+static void netvsc_channel_cb(void *context)
{
int ret;
- struct hv_device *device = Context;
- struct netvsc_device *netDevice;
- u32 bytesRecvd;
- u64 requestId;
+ struct hv_device *device = context;
+ struct netvsc_device *net_device;
+ u32 bytes_recvd;
+ u64 request_id;
unsigned char *packet;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
@@ -1236,42 +1279,42 @@ static void NetVscOnChannelCallback(void *Context)
/* ASSERT(device); */
packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!packet)
return;
buffer = packet;
- netDevice = GetInboundNetDevice(device);
- if (!netDevice) {
+ net_device = get_inbound_net_device(device);
+ if (!net_device) {
DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
- "ignoring inbound packets", netDevice);
+ "ignoring inbound packets", net_device);
goto out;
}
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
- &bytesRecvd, &requestId);
+ &bytes_recvd, &request_id);
if (ret == 0) {
- if (bytesRecvd > 0) {
+ if (bytes_recvd > 0) {
DPRINT_DBG(NETVSC, "receive %d bytes, tid %llx",
- bytesRecvd, requestId);
+ bytes_recvd, request_id);
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->Type) {
case VmbusPacketTypeCompletion:
- NetVscOnSendCompletion(device, desc);
+ netvsc_send_completion(device, desc);
break;
case VmbusPacketTypeDataUsingTransferPages:
- NetVscOnReceive(device, desc);
+ netvsc_receive(device, desc);
break;
default:
DPRINT_ERR(NETVSC,
"unhandled packet type %d, "
"tid %llx len %d\n",
- desc->Type, requestId,
- bytesRecvd);
+ desc->Type, request_id,
+ bytes_recvd);
break;
}
@@ -1293,20 +1336,20 @@ static void NetVscOnChannelCallback(void *Context)
}
} else if (ret == -2) {
/* Handle large packet */
- buffer = kmalloc(bytesRecvd, GFP_ATOMIC);
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
/* Try again next time around */
DPRINT_ERR(NETVSC,
"unable to allocate buffer of size "
- "(%d)!!", bytesRecvd);
+ "(%d)!!", bytes_recvd);
break;
}
- bufferlen = bytesRecvd;
+ bufferlen = bytes_recvd;
}
} while (1);
- PutNetDevice(device);
+ put_net_device(device);
out:
kfree(buffer);
return;
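The callback above starts with a fixed NETVSC_PACKET_SIZE scratch buffer and, when vmbus_recvpacket_raw() reports a larger packet (a -2 return in this code), allocates a buffer of the reported size with GFP_ATOMIC and retries. A hedged sketch of that grow-on-demand read loop, with the packet dispatch elided:

/* Sketch: re-read with a larger buffer whenever the fixed one is too small. */
static void netvsc_drain_channel(struct hv_device *device,
				 unsigned char *fixed_buf, u32 fixed_len)
{
	unsigned char *buf = fixed_buf;
	u32 buflen = fixed_len, bytes_recvd;
	u64 request_id;
	int ret;

	do {
		ret = vmbus_recvpacket_raw(device->channel, buf, buflen,
					   &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd == 0)
			break;				/* ring is empty */
		if (ret == -2) {			/* packet larger than buflen */
			if (buf != fixed_buf)
				kfree(buf);
			buf = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (!buf)
				break;			/* try again on the next callback */
			buflen = bytes_recvd;
			continue;
		}
		/* ...dispatch on desc->Type as netvsc_channel_cb() does... */
	} while (1);

	if (buf != fixed_buf)
		kfree(buf);
}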
diff --git a/drivers/staging/hv/netvsc.h b/drivers/staging/hv/netvsc.h
index c71dce5b3f7c..932a77ccdc04 100644
--- a/drivers/staging/hv/netvsc.h
+++ b/drivers/staging/hv/netvsc.h
@@ -38,48 +38,48 @@
#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
enum {
- NvspMessageTypeNone = 0,
+ NVSP_MSG_TYPE_NONE = 0,
/* Init Messages */
- NvspMessageTypeInit = 1,
- NvspMessageTypeInitComplete = 2,
+ NVSP_MSG_TYPE_INIT = 1,
+ NVSP_MSG_TYPE_INIT_COMPLETE = 2,
- NvspVersionMessageStart = 100,
+ NVSP_VERSION_MSG_START = 100,
/* Version 1 Messages */
- NvspMessage1TypeSendNdisVersion = NvspVersionMessageStart,
+ NVSP_MSG1_TYPE_SEND_NDIS_VER = NVSP_VERSION_MSG_START,
- NvspMessage1TypeSendReceiveBuffer,
- NvspMessage1TypeSendReceiveBufferComplete,
- NvspMessage1TypeRevokeReceiveBuffer,
+ NVSP_MSG1_TYPE_SEND_RECV_BUF,
+ NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
+ NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
- NvspMessage1TypeSendSendBuffer,
- NvspMessage1TypeSendSendBufferComplete,
- NvspMessage1TypeRevokeSendBuffer,
+ NVSP_MSG1_TYPE_SEND_SEND_BUF,
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
- NvspMessage1TypeSendRNDISPacket,
- NvspMessage1TypeSendRNDISPacketComplete,
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
+ NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
/*
* This should be set to the number of messages for the version with
* the maximum number of messages.
*/
- NvspNumMessagePerVersion = 9,
+ NVSP_NUM_MSG_PER_VERSION = 9,
};
enum {
- NvspStatusNone = 0,
- NvspStatusSuccess,
- NvspStatusFailure,
- NvspStatusProtocolVersionRangeTooNew,
- NvspStatusProtocolVersionRangeTooOld,
- NvspStatusInvalidRndisPacket,
- NvspStatusBusy,
- NvspStatusMax,
+ NVSP_STAT_NONE = 0,
+ NVSP_STAT_SUCCESS,
+ NVSP_STAT_FAIL,
+ NVSP_STAT_PROTOCOL_TOO_NEW,
+ NVSP_STAT_PROTOCOL_TOO_OLD,
+ NVSP_STAT_INVALID_RNDIS_PKT,
+ NVSP_STAT_BUSY,
+ NVSP_STAT_MAX,
};
struct nvsp_message_header {
- u32 MessageType;
+ u32 msg_type;
};
/* Init Messages */
@@ -90,8 +90,8 @@ struct nvsp_message_header {
* versioning (i.e. this message will be the same for ever).
*/
struct nvsp_message_init {
- u32 MinProtocolVersion;
- u32 MaxProtocolVersion;
+ u32 min_protocol_ver;
+ u32 max_protocol_ver;
} __attribute__((packed));
/*
@@ -100,14 +100,14 @@ struct nvsp_message_init {
* (i.e. this message will be the same for ever).
*/
struct nvsp_message_init_complete {
- u32 NegotiatedProtocolVersion;
- u32 MaximumMdlChainLength;
- u32 Status;
+ u32 negotiated_protocol_ver;
+ u32 max_mdl_chain_len;
+ u32 status;
} __attribute__((packed));
union nvsp_message_init_uber {
- struct nvsp_message_init Init;
- struct nvsp_message_init_complete InitComplete;
+ struct nvsp_message_init init;
+ struct nvsp_message_init_complete init_complete;
} __attribute__((packed));
/* Version 1 Messages */
@@ -117,8 +117,8 @@ union nvsp_message_init_uber {
* can use this information when handling OIDs sent by the VSC.
*/
struct nvsp_1_message_send_ndis_version {
- u32 NdisMajorVersion;
- u32 NdisMinorVersion;
+ u32 ndis_major_ver;
+ u32 ndis_minor_ver;
} __attribute__((packed));
/*
@@ -126,15 +126,15 @@ struct nvsp_1_message_send_ndis_version {
* can then use the receive buffer to send data to the VSC.
*/
struct nvsp_1_message_send_receive_buffer {
- u32 GpadlHandle;
- u16 Id;
+ u32 gpadl_handle;
+ u16 id;
} __attribute__((packed));
struct nvsp_1_receive_buffer_section {
- u32 Offset;
- u32 SubAllocationSize;
- u32 NumSubAllocations;
- u32 EndOffset;
+ u32 offset;
+ u32 sub_alloc_size;
+ u32 num_sub_allocs;
+ u32 end_offset;
} __attribute__((packed));
/*
@@ -143,8 +143,8 @@ struct nvsp_1_receive_buffer_section {
* buffer.
*/
struct nvsp_1_message_send_receive_buffer_complete {
- u32 Status;
- u32 NumSections;
+ u32 status;
+ u32 num_sections;
/*
* The receive buffer is split into two parts, a large suballocation
@@ -165,7 +165,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* LargeOffset SmallOffset
*/
- struct nvsp_1_receive_buffer_section Sections[1];
+ struct nvsp_1_receive_buffer_section sections[1];
} __attribute__((packed));
/*
@@ -174,7 +174,7 @@ struct nvsp_1_message_send_receive_buffer_complete {
* again.
*/
struct nvsp_1_message_revoke_receive_buffer {
- u16 Id;
+ u16 id;
};
/*
@@ -182,8 +182,8 @@ struct nvsp_1_message_revoke_receive_buffer {
* can then use the send buffer to send data to the VSP.
*/
struct nvsp_1_message_send_send_buffer {
- u32 GpadlHandle;
- u16 Id;
+ u32 gpadl_handle;
+ u16 id;
} __attribute__((packed));
/*
@@ -192,7 +192,7 @@ struct nvsp_1_message_send_send_buffer {
* buffer.
*/
struct nvsp_1_message_send_send_buffer_complete {
- u32 Status;
+ u32 status;
/*
* The VSC gets to choose the size of the send buffer and the VSP gets
@@ -200,7 +200,7 @@ struct nvsp_1_message_send_send_buffer_complete {
* dynamic reconfigurations when the cost of GPA-direct buffers
* decreases.
*/
- u32 SectionSize;
+ u32 section_size;
} __attribute__((packed));
/*
@@ -208,7 +208,7 @@ struct nvsp_1_message_send_send_buffer_complete {
* completes this transaction, the vsp should never use the send buffer again.
*/
struct nvsp_1_message_revoke_send_buffer {
- u16 Id;
+ u16 id;
};
/*
@@ -221,7 +221,7 @@ struct nvsp_1_message_send_rndis_packet {
* channels of communication. However, the Network VSP only has one.
* Therefore, the channel travels with the RNDIS packet.
*/
- u32 ChannelType;
+ u32 channel_type;
/*
* This field is used to send part or all of the data through a send
@@ -229,8 +229,8 @@ struct nvsp_1_message_send_rndis_packet {
* index is 0xFFFFFFFF, then the send buffer is not being used and all
* of the data was sent through other VMBus mechanisms.
*/
- u32 SendBufferSectionIndex;
- u32 SendBufferSectionSize;
+ u32 send_buf_section_index;
+ u32 send_buf_section_size;
} __attribute__((packed));
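As the comment above explains, a send_buf_section_index of 0xFFFFFFFF marks an RNDIS packet whose data travels outside the send buffer (for example as VMBus page buffers). A minimal sketch of filling the message for that case, matching what netvsc_send() does earlier in this patch:

struct nvsp_message msg;

msg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
msg.msg.v1_msg.send_rndis_pkt.channel_type = 0;	/* 0 = data, 1 = control */
/* payload is carried as page buffers, so the send buffer is unused */
msg.msg.v1_msg.send_rndis_pkt.send_buf_section_index = 0xFFFFFFFF;
msg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;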
/*
@@ -239,35 +239,35 @@ struct nvsp_1_message_send_rndis_packet {
* message cannot use any resources associated with the original RNDIS packet.
*/
struct nvsp_1_message_send_rndis_packet_complete {
- u32 Status;
+ u32 status;
};
union nvsp_1_message_uber {
- struct nvsp_1_message_send_ndis_version SendNdisVersion;
+ struct nvsp_1_message_send_ndis_version send_ndis_ver;
- struct nvsp_1_message_send_receive_buffer SendReceiveBuffer;
+ struct nvsp_1_message_send_receive_buffer send_recv_buf;
struct nvsp_1_message_send_receive_buffer_complete
- SendReceiveBufferComplete;
- struct nvsp_1_message_revoke_receive_buffer RevokeReceiveBuffer;
+ send_recv_buf_complete;
+ struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
- struct nvsp_1_message_send_send_buffer SendSendBuffer;
- struct nvsp_1_message_send_send_buffer_complete SendSendBufferComplete;
- struct nvsp_1_message_revoke_send_buffer RevokeSendBuffer;
+ struct nvsp_1_message_send_send_buffer send_send_buf;
+ struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
+ struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
- struct nvsp_1_message_send_rndis_packet SendRNDISPacket;
+ struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
struct nvsp_1_message_send_rndis_packet_complete
- SendRNDISPacketComplete;
+ send_rndis_pkt_complete;
} __attribute__((packed));
union nvsp_all_messages {
- union nvsp_message_init_uber InitMessages;
- union nvsp_1_message_uber Version1Messages;
+ union nvsp_message_init_uber init_msg;
+ union nvsp_1_message_uber v1_msg;
} __attribute__((packed));
/* ALL Messages */
struct nvsp_message {
- struct nvsp_message_header Header;
- union nvsp_all_messages Messages;
+ struct nvsp_message_header hdr;
+ union nvsp_all_messages msg;
} __attribute__((packed));
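Putting the header and union together: a message is built by setting hdr.msg_type and filling only the matching union member. A minimal sketch of the version-negotiation request that netvsc_connect_vsp() sends, using the constants defined in this header:

struct nvsp_message init_pkt;

memset(&init_pkt, 0, sizeof(init_pkt));
init_pkt.hdr.msg_type = NVSP_MSG_TYPE_INIT;
init_pkt.msg.init_msg.init.min_protocol_ver = NVSP_PROTOCOL_VERSION_1;
init_pkt.msg.init_msg.init.max_protocol_ver = NVSP_MAX_PROTOCOL_VERSION;
/* the reply arrives as NVSP_MSG_TYPE_INIT_COMPLETE; check
 * msg.init_msg.init_complete.status == NVSP_STAT_SUCCESS and that
 * negotiated_protocol_ver is a version this driver understands */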
@@ -293,39 +293,39 @@ struct nvsp_message {
/* Per netvsc channel-specific */
struct netvsc_device {
- struct hv_device *Device;
+ struct hv_device *dev;
- atomic_t RefCount;
- atomic_t NumOutstandingSends;
+ atomic_t refcnt;
+ atomic_t num_outstanding_sends;
/*
* List of free preallocated hv_netvsc_packet to represent receive
* packet
*/
- struct list_head ReceivePacketList;
- spinlock_t receive_packet_list_lock;
+ struct list_head recv_pkt_list;
+ spinlock_t recv_pkt_list_lock;
/* Send buffer allocated by us but managed by NetVSP */
- void *SendBuffer;
- u32 SendBufferSize;
- u32 SendBufferGpadlHandle;
- u32 SendSectionSize;
+ void *send_buf;
+ u32 send_buf_size;
+ u32 send_buf_gpadl_handle;
+ u32 send_section_size;
/* Receive buffer allocated by us but managed by NetVSP */
- void *ReceiveBuffer;
- u32 ReceiveBufferSize;
- u32 ReceiveBufferGpadlHandle;
- u32 ReceiveSectionCount;
- struct nvsp_1_receive_buffer_section *ReceiveSections;
+ void *recv_buf;
+ u32 recv_buf_size;
+ u32 recv_buf_gpadl_handle;
+ u32 recv_section_cnt;
+ struct nvsp_1_receive_buffer_section *recv_section;
/* Used for NetVSP initialization protocol */
- struct osd_waitevent *ChannelInitEvent;
- struct nvsp_message ChannelInitPacket;
+ struct osd_waitevent *channel_init_event;
+ struct nvsp_message channel_init_pkt;
- struct nvsp_message RevokePacket;
+ struct nvsp_message revoke_packet;
/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
/* Holds rndis device info */
- void *Extension;
+ void *extension;
};
#endif /* _NETVSC_H_ */
diff --git a/drivers/staging/hv/netvsc_api.h b/drivers/staging/hv/netvsc_api.h
index 4b5b3ac458c8..b4bed3636594 100644
--- a/drivers/staging/hv/netvsc_api.h
+++ b/drivers/staging/hv/netvsc_api.h
@@ -32,10 +32,10 @@ struct hv_netvsc_packet;
/* Represents the xfer page packet which contains 1 or more netvsc packets */
struct xferpage_packet {
- struct list_head ListEntry;
+ struct list_head list_ent;
/* # of netvsc packets this xfer packet contains */
- u32 Count;
+ u32 count;
};
/* The number of pages that are enough to cover a jumbo frame buffer. */
@@ -47,70 +47,70 @@ struct xferpage_packet {
*/
struct hv_netvsc_packet {
/* Bookkeeping stuff */
- struct list_head ListEntry;
+ struct list_head list_ent;
- struct hv_device *Device;
- bool IsDataPacket;
+ struct hv_device *device;
+ bool is_data_pkt;
/*
* Valid only for receives when we break a xfer page packet
* into multiple netvsc packets
*/
- struct xferpage_packet *XferPagePacket;
+ struct xferpage_packet *xfer_page_pkt;
union {
struct{
- u64 ReceiveCompletionTid;
- void *ReceiveCompletionContext;
- void (*OnReceiveCompletion)(void *context);
- } Recv;
+ u64 recv_completion_tid;
+ void *recv_completion_ctx;
+ void (*recv_completion)(void *context);
+ } recv;
struct{
- u64 SendCompletionTid;
- void *SendCompletionContext;
- void (*OnSendCompletion)(void *context);
- } Send;
- } Completion;
+ u64 send_completion_tid;
+ void *send_completion_ctx;
+ void (*send_completion)(void *context);
+ } send;
+ } completion;
- /* This points to the memory after PageBuffers */
- void *Extension;
+ /* This points to the memory after page_buf */
+ void *extension;
- u32 TotalDataBufferLength;
+ u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
- u32 PageBufferCount;
- struct hv_page_buffer PageBuffers[NETVSC_PACKET_MAXPAGE];
+ u32 page_buf_cnt;
+ struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
};
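Senders allocate this structure with trailing storage, and extension points just past the page-buffer area, as the comment above says. A hedged sketch of the sizing done by the transmit path in netvsc_drv.c later in this patch (num_pages and net_drv_obj come from that caller):

/* Sketch: how netvsc_start_xmit() sizes one outgoing packet. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
		 (num_pages * sizeof(struct hv_page_buffer)) +
		 net_drv_obj->req_ext_size, GFP_ATOMIC);
if (!packet)
	return NETDEV_TX_OK;		/* out of memory: silently drop the frame */

packet->page_buf_cnt = num_pages;
/* the RNDIS request extension lives immediately after the page buffers */
packet->extension = (void *)(unsigned long)packet +
		    sizeof(struct hv_netvsc_packet) +
		    (num_pages * sizeof(struct hv_page_buffer));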
/* Represents the net vsc driver */
struct netvsc_driver {
/* Must be the first field */
/* Which is a bug FIXME! */
- struct hv_driver Base;
+ struct hv_driver base;
- u32 RingBufferSize;
- u32 RequestExtSize;
+ u32 ring_buf_size;
+ u32 req_ext_size;
/*
* This is set by the caller to allow us to callback when we
* receive a packet from the "wire"
*/
- int (*OnReceiveCallback)(struct hv_device *dev,
+ int (*recv_cb)(struct hv_device *dev,
struct hv_netvsc_packet *packet);
- void (*OnLinkStatusChanged)(struct hv_device *dev, u32 Status);
+ void (*link_status_change)(struct hv_device *dev, u32 status);
/* Specific to this driver */
- int (*OnSend)(struct hv_device *dev, struct hv_netvsc_packet *packet);
+ int (*send)(struct hv_device *dev, struct hv_netvsc_packet *packet);
- void *Context;
+ void *ctx;
};
struct netvsc_device_info {
- unsigned char MacAddr[6];
- bool LinkState; /* 0 - link up, 1 - link down */
+ unsigned char mac_adr[6];
+ bool link_state; /* 0 - link up, 1 - link down */
};
/* Interface */
-int NetVscInitialize(struct hv_driver *drv);
-int RndisFilterOnOpen(struct hv_device *Device);
-int RndisFilterOnClose(struct hv_device *Device);
+int netvsc_initialize(struct hv_driver *drv);
+int rndis_filter_open(struct hv_device *dev);
+int rndis_filter_close(struct hv_device *dev);
#endif /* _NETVSC_API_H_ */
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 141535295a41..b41c9640b72d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -66,6 +66,9 @@ MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
/* The one and only one */
static struct netvsc_driver_context g_netvsc_drv;
+/* no-op so the netdev core doesn't return -EINVAL when modifying the
+ * multicast address list in SIOCADDMULTI. hv is set up to get all multicast
+ * when it calls RndisFilterOnOpen() */
static void netvsc_set_multicast_list(struct net_device *net)
{
}
@@ -78,7 +81,7 @@ static int netvsc_open(struct net_device *net)
if (netif_carrier_ok(net)) {
/* Open up the device */
- ret = RndisFilterOnOpen(device_obj);
+ ret = rndis_filter_open(device_obj);
if (ret != 0) {
DPRINT_ERR(NETVSC_DRV,
"unable to open device (ret %d).", ret);
@@ -101,7 +104,7 @@ static int netvsc_close(struct net_device *net)
netif_stop_queue(net);
- ret = RndisFilterOnClose(device_obj);
+ ret = rndis_filter_close(device_obj);
if (ret != 0)
DPRINT_ERR(NETVSC_DRV, "unable to close device (ret %d).", ret);
@@ -112,7 +115,7 @@ static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
- (unsigned long)packet->Completion.Send.SendCompletionTid;
+ (unsigned long)packet->completion.send.send_completion_tid;
kfree(packet);
@@ -151,7 +154,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer)) +
- net_drv_obj->RequestExtSize, GFP_ATOMIC);
+ net_drv_obj->req_ext_size, GFP_ATOMIC);
if (!packet) {
/* out of memory, silently drop packet */
DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");
@@ -161,40 +164,40 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
- packet->Extension = (void *)(unsigned long)packet +
+ packet->extension = (void *)(unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
/* Setup the rndis header */
- packet->PageBufferCount = num_pages;
+ packet->page_buf_cnt = num_pages;
/* TODO: Flush all write buffers/ memory fence ??? */
/* wmb(); */
/* Initialize it from the skb */
- packet->TotalDataBufferLength = skb->len;
+ packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
- packet->PageBuffers[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
- packet->PageBuffers[1].Offset
+ packet->page_buf[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
+ packet->page_buf[1].Offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->PageBuffers[1].Length = skb_headlen(skb);
+ packet->page_buf[1].Length = skb_headlen(skb);
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->PageBuffers[i+2].Pfn = page_to_pfn(f->page);
- packet->PageBuffers[i+2].Offset = f->page_offset;
- packet->PageBuffers[i+2].Length = f->size;
+ packet->page_buf[i+2].Pfn = page_to_pfn(f->page);
+ packet->page_buf[i+2].Offset = f->page_offset;
+ packet->page_buf[i+2].Length = f->size;
}
/* Set the completion routine */
- packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
- packet->Completion.Send.SendCompletionContext = packet;
- packet->Completion.Send.SendCompletionTid = (unsigned long)skb;
+ packet->completion.send.send_completion = netvsc_xmit_completion;
+ packet->completion.send.send_completion_ctx = packet;
+ packet->completion.send.send_completion_tid = (unsigned long)skb;
- ret = net_drv_obj->OnSend(&net_device_ctx->device_ctx->device_obj,
+ ret = net_drv_obj->send(&net_device_ctx->device_ctx->device_obj,
packet);
if (ret == 0) {
net->stats.tx_bytes += skb->len;
@@ -233,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
netif_wake_queue(net);
+ netif_notify_peers(net);
} else {
netif_carrier_off(net);
netif_stop_queue(net);
@@ -260,7 +264,7 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
}
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netdev_alloc_skb_ip_align(net, packet->TotalDataBufferLength);
+ skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
return 0;
@@ -273,17 +277,17 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- for (i = 0; i < packet->PageBufferCount; i++) {
- data = kmap_atomic(pfn_to_page(packet->PageBuffers[i].Pfn),
+ for (i = 0; i < packet->page_buf_cnt; i++) {
+ data = kmap_atomic(pfn_to_page(packet->page_buf[i].Pfn),
KM_IRQ1);
data = (void *)(unsigned long)data +
- packet->PageBuffers[i].Offset;
+ packet->page_buf[i].Offset;
- memcpy(skb_put(skb, packet->PageBuffers[i].Length), data,
- packet->PageBuffers[i].Length);
+ memcpy(skb_put(skb, packet->page_buf[i].Length), data,
+ packet->page_buf[i].Length);
kunmap_atomic((void *)((unsigned long)data -
- packet->PageBuffers[i].Offset), KM_IRQ1);
+ packet->page_buf[i].Offset), KM_IRQ1);
}
local_irq_restore(flags);
@@ -346,7 +350,7 @@ static int netvsc_probe(struct device *device)
struct netvsc_device_info device_info;
int ret;
- if (!net_drv_obj->Base.OnDeviceAdd)
+ if (!net_drv_obj->base.OnDeviceAdd)
return -1;
net = alloc_etherdev(sizeof(struct net_device_context));
@@ -355,7 +359,6 @@ static int netvsc_probe(struct device *device)
/* Set initial state */
netif_carrier_off(net);
- netif_stop_queue(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = device_ctx;
@@ -363,7 +366,7 @@ static int netvsc_probe(struct device *device)
dev_set_drvdata(device, net);
/* Notify the netvsc driver of the new device */
- ret = net_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
+ ret = net_drv_obj->base.OnDeviceAdd(device_obj, &device_info);
if (ret != 0) {
free_netdev(net);
dev_set_drvdata(device, NULL);
@@ -382,10 +385,10 @@ static int netvsc_probe(struct device *device)
* out of sync with the device's link status
*/
if (!netif_carrier_ok(net))
- if (!device_info.LinkState)
+ if (!device_info.link_state)
netif_carrier_on(net);
- memcpy(net->dev_addr, device_info.MacAddr, ETH_ALEN);
+ memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
net->netdev_ops = &device_ops;
@@ -398,7 +401,7 @@ static int netvsc_probe(struct device *device)
ret = register_netdev(net);
if (ret != 0) {
/* Remove the device and release the resource */
- net_drv_obj->Base.OnDeviceRemove(device_obj);
+ net_drv_obj->base.OnDeviceRemove(device_obj);
free_netdev(net);
}
@@ -422,7 +425,7 @@ static int netvsc_remove(struct device *device)
return 0;
}
- if (!net_drv_obj->Base.OnDeviceRemove)
+ if (!net_drv_obj->base.OnDeviceRemove)
return -1;
/* Stop outbound asap */
@@ -435,7 +438,7 @@ static int netvsc_remove(struct device *device)
* Call to the vsc driver to let it know that the device is being
* removed
*/
- ret = net_drv_obj->Base.OnDeviceRemove(device_obj);
+ ret = net_drv_obj->base.OnDeviceRemove(device_obj);
if (ret != 0) {
/* TODO: */
DPRINT_ERR(NETVSC, "unable to remove vsc device (ret %d)", ret);
@@ -481,8 +484,8 @@ static void netvsc_drv_exit(void)
device_unregister(current_dev);
}
- if (netvsc_drv_obj->Base.OnCleanup)
- netvsc_drv_obj->Base.OnCleanup(&netvsc_drv_obj->Base);
+ if (netvsc_drv_obj->base.OnCleanup)
+ netvsc_drv_obj->base.OnCleanup(&netvsc_drv_obj->base);
vmbus_child_driver_unregister(drv_ctx);
@@ -495,15 +498,15 @@ static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct driver_context *drv_ctx = &g_netvsc_drv.drv_ctx;
int ret;
- net_drv_obj->RingBufferSize = ring_size * PAGE_SIZE;
- net_drv_obj->OnReceiveCallback = netvsc_recv_callback;
- net_drv_obj->OnLinkStatusChanged = netvsc_linkstatus_callback;
+ net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
+ net_drv_obj->recv_cb = netvsc_recv_callback;
+ net_drv_obj->link_status_change = netvsc_linkstatus_callback;
/* Callback to client driver to complete the initialization */
- drv_init(&net_drv_obj->Base);
+ drv_init(&net_drv_obj->base);
- drv_ctx->driver.name = net_drv_obj->Base.name;
- memcpy(&drv_ctx->class_id, &net_drv_obj->Base.deviceType,
+ drv_ctx->driver.name = net_drv_obj->base.name;
+ memcpy(&drv_ctx->class_id, &net_drv_obj->base.deviceType,
sizeof(struct hv_guid));
drv_ctx->probe = netvsc_probe;
@@ -536,7 +539,7 @@ static int __init netvsc_init(void)
if (!dmi_check_system(hv_netvsc_dmi_table))
return -ENODEV;
- return netvsc_drv_init(NetVscInitialize);
+ return netvsc_drv_init(netvsc_initialize);
}
static void __exit netvsc_exit(void)
diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c
index 8c3eb278a81f..b5a3940331b3 100644
--- a/drivers/staging/hv/osd.c
+++ b/drivers/staging/hv/osd.c
@@ -43,13 +43,7 @@
#include <linux/slab.h>
#include "osd.h"
-struct osd_callback_struct {
- struct work_struct work;
- void (*callback)(void *);
- void *data;
-};
-
-void *osd_VirtualAllocExec(unsigned int size)
+void *osd_virtual_alloc_exec(unsigned int size)
{
#ifdef __x86_64__
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
@@ -60,7 +54,7 @@ void *osd_VirtualAllocExec(unsigned int size)
}
/**
- * osd_PageAlloc() - Allocate pages
+ * osd_page_alloc() - Allocate pages
* @count: Total number of Kernel pages you want to allocate
*
* Tries to allocate @count number of consecutive free kernel pages.
@@ -68,7 +62,7 @@ void *osd_VirtualAllocExec(unsigned int size)
* If successful, it will return a pointer to the @count pages.
* Mainly used by Hyper-V drivers.
*/
-void *osd_PageAlloc(unsigned int count)
+void *osd_page_alloc(unsigned int count)
{
void *p;
@@ -85,26 +79,26 @@ void *osd_PageAlloc(unsigned int count)
/* if (p) memset(p, 0, PAGE_SIZE); */
/* return p; */
}
-EXPORT_SYMBOL_GPL(osd_PageAlloc);
+EXPORT_SYMBOL_GPL(osd_page_alloc);
/**
- * osd_PageFree() - Free pages
+ * osd_page_free() - Free pages
* @page: Pointer to the first page to be freed
* @count: Total number of Kernel pages you free
*
- * Frees the pages allocated by osd_PageAlloc()
+ * Frees the pages allocated by osd_page_alloc()
* Mainly used by Hyper-V drivers.
*/
-void osd_PageFree(void *page, unsigned int count)
+void osd_page_free(void *page, unsigned int count)
{
free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
/*struct page* p = virt_to_page(page);
__free_page(p);*/
}
-EXPORT_SYMBOL_GPL(osd_PageFree);
+EXPORT_SYMBOL_GPL(osd_page_free);
/**
- * osd_WaitEventCreate() - Create the event queue
+ * osd_waitevent_create() - Create the event queue
*
* Allocates memory for a &struct osd_waitevent. And then calls
* init_waitqueue_head to set up the wait queue for the event.
@@ -114,7 +108,7 @@ EXPORT_SYMBOL_GPL(osd_PageFree);
* Returns pointer to &struct osd_waitevent
* Mainly used by Hyper-V drivers.
*/
-struct osd_waitevent *osd_WaitEventCreate(void)
+struct osd_waitevent *osd_waitevent_create(void)
{
struct osd_waitevent *wait = kmalloc(sizeof(struct osd_waitevent),
GFP_KERNEL);
@@ -125,14 +119,14 @@ struct osd_waitevent *osd_WaitEventCreate(void)
init_waitqueue_head(&wait->event);
return wait;
}
-EXPORT_SYMBOL_GPL(osd_WaitEventCreate);
+EXPORT_SYMBOL_GPL(osd_waitevent_create);
/**
- * osd_WaitEventSet() - Wake up the process
- * @waitEvent: Structure to event to be woken up
+ * osd_waitevent_set() - Wake up the process
+ * @wait_event: Pointer to the event to be woken up
*
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
*
* Wake up the sleeping process so it can do some work.
* And set condition indicator in &struct osd_waitevent to indicate
@@ -140,18 +134,18 @@ EXPORT_SYMBOL_GPL(osd_WaitEventCreate);
*
* Only used by Network and Storage Hyper-V drivers.
*/
-void osd_WaitEventSet(struct osd_waitevent *waitEvent)
+void osd_waitevent_set(struct osd_waitevent *wait_event)
{
- waitEvent->condition = 1;
- wake_up_interruptible(&waitEvent->event);
+ wait_event->condition = 1;
+ wake_up_interruptible(&wait_event->event);
}
-EXPORT_SYMBOL_GPL(osd_WaitEventSet);
+EXPORT_SYMBOL_GPL(osd_waitevent_set);
/**
- * osd_WaitEventWait() - Wait for event till condition is true
- * @waitEvent: Structure to event to be put to sleep
+ * osd_waitevent_wait() - Wait for event till condition is true
+ * @wait_event: Pointer to the event to be put to sleep
*
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
*
* Set up the process to sleep until waitEvent->condition gets true.
* And set condition indicator in &struct osd_waitevent to indicate
@@ -161,25 +155,25 @@ EXPORT_SYMBOL_GPL(osd_WaitEventSet);
*
* Mainly used by Hyper-V drivers.
*/
-int osd_WaitEventWait(struct osd_waitevent *waitEvent)
+int osd_waitevent_wait(struct osd_waitevent *wait_event)
{
int ret = 0;
- ret = wait_event_interruptible(waitEvent->event,
- waitEvent->condition);
- waitEvent->condition = 0;
+ ret = wait_event_interruptible(wait_event->event,
+ wait_event->condition);
+ wait_event->condition = 0;
return ret;
}
-EXPORT_SYMBOL_GPL(osd_WaitEventWait);
+EXPORT_SYMBOL_GPL(osd_waitevent_wait);
/**
- * osd_WaitEventWaitEx() - Wait for event or timeout for process wakeup
- * @waitEvent: Structure to event to be put to sleep
- * @TimeoutInMs: Total number of Milliseconds to wait before waking up
+ * osd_waitevent_waitex() - Wait for event or timeout for process wakeup
+ * @wait_event: Pointer to the event to be put to sleep
+ * @timeout_in_ms: Total number of Milliseconds to wait before waking up
*
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
* Set up the process to sleep until @waitEvent->condition gets true or
- * @TimeoutInMs (Time out in Milliseconds) has been reached.
+ * @timeout_in_ms (Time out in Milliseconds) has been reached.
* And set condition indicator in &struct osd_waitevent to indicate
* the process is in a sleeping state.
*
@@ -187,42 +181,14 @@ EXPORT_SYMBOL_GPL(osd_WaitEventWait);
*
* Mainly used by Hyper-V drivers.
*/
-int osd_WaitEventWaitEx(struct osd_waitevent *waitEvent, u32 TimeoutInMs)
+int osd_waitevent_waitex(struct osd_waitevent *wait_event, u32 timeout_in_ms)
{
int ret = 0;
- ret = wait_event_interruptible_timeout(waitEvent->event,
- waitEvent->condition,
- msecs_to_jiffies(TimeoutInMs));
- waitEvent->condition = 0;
+ ret = wait_event_interruptible_timeout(wait_event->event,
+ wait_event->condition,
+ msecs_to_jiffies(timeout_in_ms));
+ wait_event->condition = 0;
return ret;
}
-EXPORT_SYMBOL_GPL(osd_WaitEventWaitEx);
-
-static void osd_callback_work(struct work_struct *work)
-{
- struct osd_callback_struct *cb = container_of(work,
- struct osd_callback_struct,
- work);
- (cb->callback)(cb->data);
- kfree(cb);
-}
-
-int osd_schedule_callback(struct workqueue_struct *wq,
- void (*func)(void *),
- void *data)
-{
- struct osd_callback_struct *cb;
-
- cb = kmalloc(sizeof(*cb), GFP_KERNEL);
- if (!cb) {
- printk(KERN_ERR "unable to allocate memory in osd_schedule_callback\n");
- return -1;
- }
-
- cb->callback = func;
- cb->data = data;
- INIT_WORK(&cb->work, osd_callback_work);
- return queue_work(wq, &cb->work);
-}
-
+EXPORT_SYMBOL_GPL(osd_waitevent_waitex);
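Typical use of these helpers pairs a waiter with a completion path that calls osd_waitevent_set(). A hedged sketch with a one-second timeout; submit_request() is a hypothetical stand-in for whatever actually issues the asynchronous work:

/* Sketch: wait for an asynchronous reply, giving up after 1000 ms. */
static int wait_for_reply(void)
{
	struct osd_waitevent *event;
	int ret;

	event = osd_waitevent_create();
	if (!event)
		return -ENOMEM;

	/* the completion handler is expected to call osd_waitevent_set(event) */
	submit_request(event);

	ret = osd_waitevent_waitex(event, 1000); /* >0 signalled, ==0 timeout, <0 error */
	kfree(event);
	return ret > 0 ? 0 : (ret == 0 ? -ETIMEDOUT : ret);
}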
diff --git a/drivers/staging/hv/osd.h b/drivers/staging/hv/osd.h
index ce064e8ea644..870ef0768833 100644
--- a/drivers/staging/hv/osd.h
+++ b/drivers/staging/hv/osd.h
@@ -50,21 +50,17 @@ struct osd_waitevent {
/* Osd routines */
-extern void *osd_VirtualAllocExec(unsigned int size);
+extern void *osd_virtual_alloc_exec(unsigned int size);
-extern void *osd_PageAlloc(unsigned int count);
-extern void osd_PageFree(void *page, unsigned int count);
+extern void *osd_page_alloc(unsigned int count);
+extern void osd_page_free(void *page, unsigned int count);
-extern struct osd_waitevent *osd_WaitEventCreate(void);
-extern void osd_WaitEventSet(struct osd_waitevent *waitEvent);
-extern int osd_WaitEventWait(struct osd_waitevent *waitEvent);
+extern struct osd_waitevent *osd_waitevent_create(void);
+extern void osd_waitevent_set(struct osd_waitevent *wait_event);
+extern int osd_waitevent_wait(struct osd_waitevent *wait_event);
-/* If >0, waitEvent got signaled. If ==0, timeout. If < 0, error */
-extern int osd_WaitEventWaitEx(struct osd_waitevent *waitEvent,
- u32 TimeoutInMs);
-
-int osd_schedule_callback(struct workqueue_struct *wq,
- void (*func)(void *),
- void *data);
+/* If >0, wait_event got signaled. If ==0, timeout. If < 0, error */
+extern int osd_waitevent_waitex(struct osd_waitevent *wait_event,
+ u32 timeout_in_ms);
#endif /* _OSD_H_ */
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index d78c569ac94a..4d53392f1e60 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -38,7 +38,7 @@
/*++
Name:
- GetRingBufferAvailBytes()
+ get_ringbuffer_availbytes()
Description:
Get number of bytes available to read and to write to
@@ -46,33 +46,34 @@ Description:
--*/
static inline void
-GetRingBufferAvailBytes(struct hv_ring_buffer_info *rbi, u32 *read, u32 *write)
+get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
{
u32 read_loc, write_loc;
/* Capture the read/write indices before they changed */
- read_loc = rbi->RingBuffer->ReadIndex;
- write_loc = rbi->RingBuffer->WriteIndex;
+ read_loc = rbi->ring_buffer->read_index;
+ write_loc = rbi->ring_buffer->write_index;
- *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->RingDataSize);
- *read = rbi->RingDataSize - *write;
+ *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
+ *read = rbi->ring_datasize - *write;
}
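The available-bytes computation is just modular distance between the two indices: the writer may produce everything except the bytes the reader has not yet consumed. A standalone worked example of the same arithmetic (the BYTES_AVAIL_TO_WRITE macro itself is not shown in this hunk, so its usual definition is assumed):

#include <stdio.h>

int main(void)
{
	unsigned int datasize = 4096, read_loc = 1000, write_loc = 3000;
	/* bytes the writer may add without overtaking the reader */
	unsigned int write_avail = (write_loc >= read_loc) ?
		datasize - (write_loc - read_loc) : read_loc - write_loc;
	unsigned int read_avail = datasize - write_avail;

	printf("writable %u, readable %u\n", write_avail, read_avail);	/* 2096, 2000 */
	return 0;
}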
/*++
Name:
- GetNextWriteLocation()
+ get_next_write_location()
Description:
Get the next write location for the specified ring buffer
--*/
static inline u32
-GetNextWriteLocation(struct hv_ring_buffer_info *RingInfo)
+get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
- u32 next = RingInfo->RingBuffer->WriteIndex;
+ u32 next = ring_info->ring_buffer->write_index;
- /* ASSERT(next < RingInfo->RingDataSize); */
+	/* ASSERT(next < ring_info->ring_datasize); */
return next;
}
@@ -80,34 +81,34 @@ GetNextWriteLocation(struct hv_ring_buffer_info *RingInfo)
/*++
Name:
- SetNextWriteLocation()
+ set_next_write_location()
Description:
Set the next write location for the specified ring buffer
--*/
static inline void
-SetNextWriteLocation(struct hv_ring_buffer_info *RingInfo,
- u32 NextWriteLocation)
+set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ u32 next_write_location)
{
- RingInfo->RingBuffer->WriteIndex = NextWriteLocation;
+ ring_info->ring_buffer->write_index = next_write_location;
}
/*++
Name:
- GetNextReadLocation()
+ get_next_read_location()
Description:
Get the next read location for the specified ring buffer
--*/
static inline u32
-GetNextReadLocation(struct hv_ring_buffer_info *RingInfo)
+get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
- u32 next = RingInfo->RingBuffer->ReadIndex;
+ u32 next = ring_info->ring_buffer->read_index;
- /* ASSERT(next < RingInfo->RingDataSize); */
+	/* ASSERT(next < ring_info->ring_datasize); */
return next;
}
@@ -115,7 +116,7 @@ GetNextReadLocation(struct hv_ring_buffer_info *RingInfo)
/*++
Name:
- GetNextReadLocationWithOffset()
+ get_next_readlocation_withoffset()
Description:
Get the next read location + offset for the specified ring buffer.
@@ -123,13 +124,14 @@ Description:
--*/
static inline u32
-GetNextReadLocationWithOffset(struct hv_ring_buffer_info *RingInfo, u32 Offset)
+get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+ u32 offset)
{
- u32 next = RingInfo->RingBuffer->ReadIndex;
+ u32 next = ring_info->ring_buffer->read_index;
- /* ASSERT(next < RingInfo->RingDataSize); */
- next += Offset;
- next %= RingInfo->RingDataSize;
+	/* ASSERT(next < ring_info->ring_datasize); */
+ next += offset;
+ next %= ring_info->ring_datasize;
return next;
}
@@ -137,141 +139,145 @@ GetNextReadLocationWithOffset(struct hv_ring_buffer_info *RingInfo, u32 Offset)
/*++
Name:
- SetNextReadLocation()
+ set_next_read_location()
Description:
Set the next read location for the specified ring buffer
--*/
static inline void
-SetNextReadLocation(struct hv_ring_buffer_info *RingInfo, u32 NextReadLocation)
+set_next_read_location(struct hv_ring_buffer_info *ring_info,
+ u32 next_read_location)
{
- RingInfo->RingBuffer->ReadIndex = NextReadLocation;
+ ring_info->ring_buffer->read_index = next_read_location;
}
/*++
Name:
- GetRingBuffer()
+ get_ring_buffer()
Description:
Get the start of the ring buffer
--*/
static inline void *
-GetRingBuffer(struct hv_ring_buffer_info *RingInfo)
+get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
- return (void *)RingInfo->RingBuffer->Buffer;
+ return (void *)ring_info->ring_buffer->buffer;
}
/*++
Name:
- GetRingBufferSize()
+ get_ring_buffersize()
Description:
Get the size of the ring buffer
--*/
static inline u32
-GetRingBufferSize(struct hv_ring_buffer_info *RingInfo)
+get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
- return RingInfo->RingDataSize;
+ return ring_info->ring_datasize;
}
/*++
Name:
- GetRingBufferIndices()
+ get_ring_bufferindices()
Description:
Get the read and write indices as u64 of the specified ring buffer
--*/
static inline u64
-GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
+get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
- return (u64)RingInfo->RingBuffer->WriteIndex << 32;
+ return (u64)ring_info->ring_buffer->write_index << 32;
}
/*++
Name:
- DumpRingInfo()
+ dump_ring_info()
Description:
Dump out to console the ring buffer info
--*/
-void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix)
+void dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
{
- u32 bytesAvailToWrite;
- u32 bytesAvailToRead;
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
- GetRingBufferAvailBytes(RingInfo,
- &bytesAvailToRead,
- &bytesAvailToWrite);
+ get_ringbuffer_availbytes(ring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
DPRINT(VMBUS,
DEBUG_RING_LVL,
"%s <<ringinfo %p buffer %p avail write %u "
"avail read %u read idx %u write idx %u>>",
- Prefix,
- RingInfo,
- RingInfo->RingBuffer->Buffer,
- bytesAvailToWrite,
- bytesAvailToRead,
- RingInfo->RingBuffer->ReadIndex,
- RingInfo->RingBuffer->WriteIndex);
+ prefix,
+ ring_info,
+ ring_info->ring_buffer->buffer,
+ bytes_avail_towrite,
+ bytes_avail_toread,
+ ring_info->ring_buffer->read_index,
+ ring_info->ring_buffer->write_index);
}
/* Internal routines */
static u32
-CopyToRingBuffer(
- struct hv_ring_buffer_info *RingInfo,
- u32 StartWriteOffset,
- void *Src,
- u32 SrcLen);
+copyto_ringbuffer(
+ struct hv_ring_buffer_info *ring_info,
+ u32 start_write_offset,
+ void *src,
+ u32 srclen);
static u32
-CopyFromRingBuffer(
- struct hv_ring_buffer_info *RingInfo,
- void *Dest,
- u32 DestLen,
- u32 StartReadOffset);
+copyfrom_ringbuffer(
+ struct hv_ring_buffer_info *ring_info,
+ void *dest,
+ u32 destlen,
+ u32 start_read_offset);
/*++
Name:
- RingBufferGetDebugInfo()
+ ringbuffer_get_debuginfo()
Description:
Get various debug metrics for the specified ring buffer
--*/
-void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+void ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
- u32 bytesAvailToWrite;
- u32 bytesAvailToRead;
-
- if (RingInfo->RingBuffer) {
- GetRingBufferAvailBytes(RingInfo,
- &bytesAvailToRead,
- &bytesAvailToWrite);
-
- debug_info->BytesAvailToRead = bytesAvailToRead;
- debug_info->BytesAvailToWrite = bytesAvailToWrite;
- debug_info->CurrentReadIndex = RingInfo->RingBuffer->ReadIndex;
- debug_info->CurrentWriteIndex = RingInfo->RingBuffer->WriteIndex;
- debug_info->CurrentInterruptMask = RingInfo->RingBuffer->InterruptMask;
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+
+ if (ring_info->ring_buffer) {
+ get_ringbuffer_availbytes(ring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
+
+ debug_info->bytes_avail_toread = bytes_avail_toread;
+ debug_info->bytes_avail_towrite = bytes_avail_towrite;
+ debug_info->current_read_index =
+ ring_info->ring_buffer->read_index;
+ debug_info->current_write_index =
+ ring_info->ring_buffer->write_index;
+ debug_info->current_interrupt_mask =
+ ring_info->ring_buffer->interrupt_mask;
}
}
@@ -279,40 +285,42 @@ void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
/*++
Name:
- GetRingBufferInterruptMask()
+ get_ringbuffer_interrupt_mask()
Description:
Get the interrupt mask for the specified ring buffer
--*/
-u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *rbi)
+u32 get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
- return rbi->RingBuffer->InterruptMask;
+ return rbi->ring_buffer->interrupt_mask;
}
/*++
Name:
- RingBufferInit()
+ ringbuffer_init()
Description:
Initialize the ring buffer
--*/
-int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer, u32 BufferLen)
+int ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ void *buffer, u32 buflen)
{
if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
return -EINVAL;
- memset(RingInfo, 0, sizeof(struct hv_ring_buffer_info));
+ memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
- RingInfo->RingBuffer = (struct hv_ring_buffer *)Buffer;
- RingInfo->RingBuffer->ReadIndex = RingInfo->RingBuffer->WriteIndex = 0;
+ ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
+ ring_info->ring_buffer->read_index =
+ ring_info->ring_buffer->write_index = 0;
- RingInfo->RingSize = BufferLen;
- RingInfo->RingDataSize = BufferLen - sizeof(struct hv_ring_buffer);
+ ring_info->ring_size = buflen;
+ ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
- spin_lock_init(&RingInfo->ring_lock);
+ spin_lock_init(&ring_info->ring_lock);
return 0;
}
@@ -320,97 +328,97 @@ int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer, u32 Buffe
/*++
Name:
- RingBufferCleanup()
+ ringbuffer_cleanup()
Description:
Cleanup the ring buffer
--*/
-void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo)
+void ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*++
Name:
- RingBufferWrite()
+ ringbuffer_write()
Description:
Write to the ring buffer
--*/
-int RingBufferWrite(struct hv_ring_buffer_info *OutRingInfo,
+int ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sglist, u32 sgcount)
{
int i = 0;
- u32 byteAvailToWrite;
- u32 byteAvailToRead;
- u32 totalBytesToWrite = 0;
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+ u32 totalbytes_towrite = 0;
struct scatterlist *sg;
- volatile u32 nextWriteLocation;
- u64 prevIndices = 0;
+ volatile u32 next_write_location;
+ u64 prev_indices = 0;
unsigned long flags;
for_each_sg(sglist, sg, sgcount, i)
{
- totalBytesToWrite += sg->length;
+ totalbytes_towrite += sg->length;
}
- totalBytesToWrite += sizeof(u64);
+ totalbytes_towrite += sizeof(u64);
- spin_lock_irqsave(&OutRingInfo->ring_lock, flags);
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
- GetRingBufferAvailBytes(OutRingInfo,
- &byteAvailToRead,
- &byteAvailToWrite);
+ get_ringbuffer_availbytes(outring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
- DPRINT_DBG(VMBUS, "Writing %u bytes...", totalBytesToWrite);
+ DPRINT_DBG(VMBUS, "Writing %u bytes...", totalbytes_towrite);
- /* DumpRingInfo(OutRingInfo, "BEFORE "); */
+	/* dump_ring_info(outring_info, "BEFORE "); */
/* If there is only room for the packet, assume it is full. */
/* Otherwise, the next time around, we think the ring buffer */
/* is empty since the read index == write index */
- if (byteAvailToWrite <= totalBytesToWrite) {
+ if (bytes_avail_towrite <= totalbytes_towrite) {
DPRINT_DBG(VMBUS,
"No more space left on outbound ring buffer "
"(needed %u, avail %u)",
- totalBytesToWrite,
- byteAvailToWrite);
+ totalbytes_towrite,
+ bytes_avail_towrite);
- spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -1;
}
/* Write to the ring buffer */
- nextWriteLocation = GetNextWriteLocation(OutRingInfo);
+ next_write_location = get_next_write_location(outring_info);
for_each_sg(sglist, sg, sgcount, i)
{
- nextWriteLocation = CopyToRingBuffer(OutRingInfo,
- nextWriteLocation,
+ next_write_location = copyto_ringbuffer(outring_info,
+ next_write_location,
sg_virt(sg),
sg->length);
}
/* Set previous packet start */
- prevIndices = GetRingBufferIndices(OutRingInfo);
+ prev_indices = get_ring_bufferindices(outring_info);
- nextWriteLocation = CopyToRingBuffer(OutRingInfo,
- nextWriteLocation,
- &prevIndices,
+ next_write_location = copyto_ringbuffer(outring_info,
+ next_write_location,
+ &prev_indices,
sizeof(u64));
/* Make sure we flush all writes before updating the writeIndex */
mb();
/* Now, update the write location */
- SetNextWriteLocation(OutRingInfo, nextWriteLocation);
+ set_next_write_location(outring_info, next_write_location);
- /* DumpRingInfo(OutRingInfo, "AFTER "); */
+	/* dump_ring_info(outring_info, "AFTER "); */
- spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return 0;
}
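The bytes_avail_towrite <= totalbytes_towrite test above uses <= rather than < on purpose: if a write ever filled the ring exactly, write_index would come back around to read_index and the next pass would mistake a full ring for an empty one. A stand-alone sketch of that arithmetic in plain user-space C (the helper name is invented, and the formula assumes the conventional BYTES_AVAIL_TO_WRITE() definition):

#include <stdint.h>
#include <stdio.h>

/* Free bytes between write index 'wr' and read index 'rd' in a ring
 * of 'size' data bytes. */
static uint32_t avail_to_write(uint32_t rd, uint32_t wr, uint32_t size)
{
	return (wr >= rd) ? size - (wr - rd) : rd - wr;
}

int main(void)
{
	uint32_t size = 4096, rd = 100, wr = 100;

	/* rd == wr reports all 4096 bytes free, i.e. "empty". Writing
	 * exactly 4096 bytes would bring wr back to rd, and that state
	 * would again read as empty; the driver therefore refuses any
	 * write that would leave zero bytes free. */
	printf("free: %u\n", avail_to_write(rd, wr, size));
	return 0;
}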
@@ -418,47 +426,48 @@ int RingBufferWrite(struct hv_ring_buffer_info *OutRingInfo,
/*++
Name:
- RingBufferPeek()
+ ringbuffer_peek()
Description:
Read without advancing the read index
--*/
-int RingBufferPeek(struct hv_ring_buffer_info *InRingInfo, void *Buffer, u32 BufferLen)
+int ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+ void *Buffer, u32 buflen)
{
- u32 bytesAvailToWrite;
- u32 bytesAvailToRead;
- u32 nextReadLocation = 0;
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+ u32 next_read_location = 0;
unsigned long flags;
- spin_lock_irqsave(&InRingInfo->ring_lock, flags);
+ spin_lock_irqsave(&Inring_info->ring_lock, flags);
- GetRingBufferAvailBytes(InRingInfo,
- &bytesAvailToRead,
- &bytesAvailToWrite);
+ get_ringbuffer_availbytes(Inring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
/* Make sure there is something to read */
- if (bytesAvailToRead < BufferLen) {
+ if (bytes_avail_toread < buflen) {
/* DPRINT_DBG(VMBUS,
"got callback but not enough to read "
"<avail to read %d read size %d>!!",
- bytesAvailToRead,
+ bytes_avail_toread,
BufferLen); */
- spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
return -1;
}
/* Convert to byte offset */
- nextReadLocation = GetNextReadLocation(InRingInfo);
+ next_read_location = get_next_read_location(Inring_info);
- nextReadLocation = CopyFromRingBuffer(InRingInfo,
+ next_read_location = copyfrom_ringbuffer(Inring_info,
Buffer,
- BufferLen,
- nextReadLocation);
+ buflen,
+ next_read_location);
- spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
return 0;
}
@@ -467,58 +476,59 @@ int RingBufferPeek(struct hv_ring_buffer_info *InRingInfo, void *Buffer, u32 Buf
/*++
Name:
- RingBufferRead()
+ ringbuffer_read()
Description:
Read and advance the read index
--*/
-int RingBufferRead(struct hv_ring_buffer_info *InRingInfo, void *Buffer,
- u32 BufferLen, u32 Offset)
+int ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ u32 buflen, u32 offset)
{
- u32 bytesAvailToWrite;
- u32 bytesAvailToRead;
- u32 nextReadLocation = 0;
- u64 prevIndices = 0;
+ u32 bytes_avail_towrite;
+ u32 bytes_avail_toread;
+ u32 next_read_location = 0;
+ u64 prev_indices = 0;
unsigned long flags;
- if (BufferLen <= 0)
+ if (buflen <= 0)
return -EINVAL;
- spin_lock_irqsave(&InRingInfo->ring_lock, flags);
+ spin_lock_irqsave(&inring_info->ring_lock, flags);
- GetRingBufferAvailBytes(InRingInfo,
- &bytesAvailToRead,
- &bytesAvailToWrite);
+ get_ringbuffer_availbytes(inring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
- DPRINT_DBG(VMBUS, "Reading %u bytes...", BufferLen);
+ DPRINT_DBG(VMBUS, "Reading %u bytes...", buflen);
- /* DumpRingInfo(InRingInfo, "BEFORE "); */
+	/* dump_ring_info(inring_info, "BEFORE "); */
/* Make sure there is something to read */
- if (bytesAvailToRead < BufferLen) {
+ if (bytes_avail_toread < buflen) {
DPRINT_DBG(VMBUS,
"got callback but not enough to read "
"<avail to read %d read size %d>!!",
- bytesAvailToRead,
- BufferLen);
+ bytes_avail_toread,
+ buflen);
- spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return -1;
}
- nextReadLocation = GetNextReadLocationWithOffset(InRingInfo, Offset);
+ next_read_location =
+ get_next_readlocation_withoffset(inring_info, offset);
- nextReadLocation = CopyFromRingBuffer(InRingInfo,
- Buffer,
- BufferLen,
- nextReadLocation);
+ next_read_location = copyfrom_ringbuffer(inring_info,
+ buffer,
+ buflen,
+ next_read_location);
- nextReadLocation = CopyFromRingBuffer(InRingInfo,
- &prevIndices,
+ next_read_location = copyfrom_ringbuffer(inring_info,
+ &prev_indices,
sizeof(u64),
- nextReadLocation);
+ next_read_location);
/* Make sure all reads are done before we update the read index since */
/* the writer may start writing to the read area once the read index */
@@ -526,11 +536,11 @@ int RingBufferRead(struct hv_ring_buffer_info *InRingInfo, void *Buffer,
mb();
/* Update the read index */
- SetNextReadLocation(InRingInfo, nextReadLocation);
+ set_next_read_location(inring_info, next_read_location);
- /* DumpRingInfo(InRingInfo, "AFTER "); */
+	/* dump_ring_info(inring_info, "AFTER "); */
- spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+ spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return 0;
}
@@ -539,7 +549,7 @@ int RingBufferRead(struct hv_ring_buffer_info *InRingInfo, void *Buffer,
/*++
Name:
- CopyToRingBuffer()
+ copyto_ringbuffer()
Description:
Helper routine to copy from source to ring buffer.
@@ -547,37 +557,37 @@ Description:
--*/
static u32
-CopyToRingBuffer(
- struct hv_ring_buffer_info *RingInfo,
- u32 StartWriteOffset,
- void *Src,
- u32 SrcLen)
+copyto_ringbuffer(
+ struct hv_ring_buffer_info *ring_info,
+ u32 start_write_offset,
+ void *src,
+ u32 srclen)
{
- void *ringBuffer = GetRingBuffer(RingInfo);
- u32 ringBufferSize = GetRingBufferSize(RingInfo);
- u32 fragLen;
+ void *ring_buffer = get_ring_buffer(ring_info);
+ u32 ring_buffer_size = get_ring_buffersize(ring_info);
+ u32 frag_len;
/* wrap-around detected! */
- if (SrcLen > ringBufferSize - StartWriteOffset) {
+ if (srclen > ring_buffer_size - start_write_offset) {
DPRINT_DBG(VMBUS, "wrap-around detected!");
- fragLen = ringBufferSize - StartWriteOffset;
- memcpy(ringBuffer + StartWriteOffset, Src, fragLen);
- memcpy(ringBuffer, Src + fragLen, SrcLen - fragLen);
+ frag_len = ring_buffer_size - start_write_offset;
+ memcpy(ring_buffer + start_write_offset, src, frag_len);
+ memcpy(ring_buffer, src + frag_len, srclen - frag_len);
} else
- memcpy(ringBuffer + StartWriteOffset, Src, SrcLen);
+ memcpy(ring_buffer + start_write_offset, src, srclen);
- StartWriteOffset += SrcLen;
- StartWriteOffset %= ringBufferSize;
+ start_write_offset += srclen;
+ start_write_offset %= ring_buffer_size;
- return StartWriteOffset;
+ return start_write_offset;
}
/*++
Name:
- CopyFromRingBuffer()
+ copyfrom_ringbuffer()
Description:
Helper routine to copy to source from ring buffer.
@@ -585,34 +595,34 @@ Description:
--*/
static u32
-CopyFromRingBuffer(
- struct hv_ring_buffer_info *RingInfo,
- void *Dest,
- u32 DestLen,
- u32 StartReadOffset)
+copyfrom_ringbuffer(
+ struct hv_ring_buffer_info *ring_info,
+ void *dest,
+ u32 destlen,
+ u32 start_read_offset)
{
- void *ringBuffer = GetRingBuffer(RingInfo);
- u32 ringBufferSize = GetRingBufferSize(RingInfo);
+ void *ring_buffer = get_ring_buffer(ring_info);
+ u32 ring_buffer_size = get_ring_buffersize(ring_info);
- u32 fragLen;
+ u32 frag_len;
/* wrap-around detected at the src */
- if (DestLen > ringBufferSize - StartReadOffset) {
+ if (destlen > ring_buffer_size - start_read_offset) {
DPRINT_DBG(VMBUS, "src wrap-around detected!");
- fragLen = ringBufferSize - StartReadOffset;
+ frag_len = ring_buffer_size - start_read_offset;
- memcpy(Dest, ringBuffer + StartReadOffset, fragLen);
- memcpy(Dest + fragLen, ringBuffer, DestLen - fragLen);
+ memcpy(dest, ring_buffer + start_read_offset, frag_len);
+ memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
} else
- memcpy(Dest, ringBuffer + StartReadOffset, DestLen);
+ memcpy(dest, ring_buffer + start_read_offset, destlen);
- StartReadOffset += DestLen;
- StartReadOffset %= ringBufferSize;
+ start_read_offset += destlen;
+ start_read_offset %= ring_buffer_size;
- return StartReadOffset;
+ return start_read_offset;
}
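copyto_ringbuffer() and copyfrom_ringbuffer() above split a transfer into at most two memcpy() calls when it crosses the end of the ring and return the new offset modulo the ring size. The same wrap-around arithmetic, lifted into a self-contained user-space sketch (function and variable names are illustrative, not part of the driver):

#include <stdint.h>
#include <string.h>

/* Copy srclen bytes into ring[] starting at offset, wrapping at size;
 * returns the new offset, mirroring copyto_ringbuffer(). */
static uint32_t ring_copy_in(uint8_t *ring, uint32_t size, uint32_t offset,
			     const void *src, uint32_t srclen)
{
	if (srclen > size - offset) {
		/* Wrap-around: fill the tail of the ring, then the start. */
		uint32_t frag = size - offset;

		memcpy(ring + offset, src, frag);
		memcpy(ring, (const uint8_t *)src + frag, srclen - frag);
	} else {
		memcpy(ring + offset, src, srclen);
	}

	return (offset + srclen) % size;
}

Reading back out is symmetric: the same two-fragment split with ring and destination swapped, which is what copyfrom_ringbuffer() does.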
diff --git a/drivers/staging/hv/ring_buffer.h b/drivers/staging/hv/ring_buffer.h
index a7f1717c6a56..7bd6ecf2f015 100644
--- a/drivers/staging/hv/ring_buffer.h
+++ b/drivers/staging/hv/ring_buffer.h
@@ -29,18 +29,18 @@
struct hv_ring_buffer {
/* Offset in bytes from the start of ring data below */
- volatile u32 WriteIndex;
+ volatile u32 write_index;
/* Offset in bytes from the start of ring data below */
- volatile u32 ReadIndex;
+ volatile u32 read_index;
- volatile u32 InterruptMask;
+ volatile u32 interrupt_mask;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 Reserved[4084];
+ u8 reserved[4084];
/* NOTE:
- * The InterruptMask field is used only for channels but since our
+ * The interrupt_mask field is used only for channels but since our
* vmbus connection also uses this data structure and its data starts
* here, we commented out this field.
*/
@@ -50,24 +50,24 @@ struct hv_ring_buffer {
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- u8 Buffer[0];
+ u8 buffer[0];
} __attribute__((packed));
struct hv_ring_buffer_info {
- struct hv_ring_buffer *RingBuffer;
- u32 RingSize; /* Include the shared header */
+ struct hv_ring_buffer *ring_buffer;
+ u32 ring_size; /* Include the shared header */
spinlock_t ring_lock;
- u32 RingDataSize; /* < ringSize */
- u32 RingDataStartOffset;
+ u32 ring_datasize; /* < ring_size */
+ u32 ring_data_startoffset;
};
struct hv_ring_buffer_debug_info {
- u32 CurrentInterruptMask;
- u32 CurrentReadIndex;
- u32 CurrentWriteIndex;
- u32 BytesAvailToRead;
- u32 BytesAvailToWrite;
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
};
@@ -75,28 +75,28 @@ struct hv_ring_buffer_debug_info {
/* Interface */
-int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer,
- u32 BufferLen);
+int ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
+ u32 buflen);
-void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo);
+void ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int RingBufferWrite(struct hv_ring_buffer_info *RingInfo,
+int ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
u32 sgcount);
-int RingBufferPeek(struct hv_ring_buffer_info *RingInfo, void *Buffer,
- u32 BufferLen);
+int ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
+ u32 buflen);
-int RingBufferRead(struct hv_ring_buffer_info *RingInfo,
- void *Buffer,
- u32 BufferLen,
- u32 Offset);
+int ringbuffer_read(struct hv_ring_buffer_info *ring_info,
+ void *buffer,
+ u32 buflen,
+ u32 offset);
-u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *RingInfo);
+u32 get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
-void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix);
+void dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix);
-void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+void ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
#endif /* _RING_BUFFER_H_ */
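Taken together, the renamed interface is used roughly as in the following kernel-context sketch (illustrative only; it assumes the ring_buffer.h declarations above, <linux/scatterlist.h>, and a page-aligned buffer whose length includes the page-sized hv_ring_buffer header):

/* Illustrative sketch, not code from this patch. */
static int example_ring_send(struct hv_ring_buffer_info *ring_info,
			     void *ring_pages, u32 ring_len,
			     void *payload, u32 payload_len)
{
	struct scatterlist sg;
	int ret;

	ret = ringbuffer_init(ring_info, ring_pages, ring_len);
	if (ret)
		return ret;

	sg_init_one(&sg, payload, payload_len);

	/* ringbuffer_write() appends a u64 index trailer and returns -1
	 * if the ring cannot take the whole packet. */
	return ringbuffer_write(ring_info, &sg, 1);
}

In the driver itself the channel code performs the init once when a channel is opened and then calls ringbuffer_write()/ringbuffer_read() per packet.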
diff --git a/drivers/staging/hv/rndis.h b/drivers/staging/hv/rndis.h
index 723e1f15b90d..014de047b86d 100644
--- a/drivers/staging/hv/rndis.h
+++ b/drivers/staging/hv/rndis.h
@@ -288,24 +288,24 @@
#define RNDIS_DF_RAW_DATA 0x00000004
/* Remote NDIS medium types. */
-#define RNdisMedium802_3 0x00000000
-#define RNdisMedium802_5 0x00000001
-#define RNdisMediumFddi 0x00000002
-#define RNdisMediumWan 0x00000003
-#define RNdisMediumLocalTalk 0x00000004
-#define RNdisMediumArcnetRaw 0x00000006
-#define RNdisMediumArcnet878_2 0x00000007
-#define RNdisMediumAtm 0x00000008
-#define RNdisMediumWirelessWan 0x00000009
-#define RNdisMediumIrda 0x0000000a
-#define RNdisMediumCoWan 0x0000000b
+#define RNDIS_MEDIUM_802_3 0x00000000
+#define RNDIS_MEDIUM_802_5 0x00000001
+#define RNDIS_MEDIUM_FDDI 0x00000002
+#define RNDIS_MEDIUM_WAN 0x00000003
+#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
+#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
+#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
+#define RNDIS_MEDIUM_ATM 0x00000008
+#define RNDIS_MEDIUM_WIRELESS_WAN 0x00000009
+#define RNDIS_MEDIUM_IRDA 0x0000000a
+#define RNDIS_MEDIUM_CO_WAN 0x0000000b
/* Not a real medium, defined as an upper-bound */
-#define RNdisMediumMax 0x0000000d
+#define RNDIS_MEDIUM_MAX 0x0000000d
/* Remote NDIS medium connection states. */
-#define RNdisMediaStateConnected 0x00000000
-#define RNdisMediaStateDisconnected 0x00000001
+#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
+#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
/* Remote NDIS version numbers */
#define RNDIS_MAJOR_VERSION 0x00000001
@@ -314,106 +314,106 @@
/* NdisInitialize message */
struct rndis_initialize_request {
- u32 RequestId;
- u32 MajorVersion;
- u32 MinorVersion;
- u32 MaxTransferSize;
+ u32 req_id;
+ u32 major_ver;
+ u32 minor_ver;
+ u32 max_xfer_size;
};
/* Response to NdisInitialize */
struct rndis_initialize_complete {
- u32 RequestId;
- u32 Status;
- u32 MajorVersion;
- u32 MinorVersion;
- u32 DeviceFlags;
- u32 Medium;
- u32 MaxPacketsPerMessage;
- u32 MaxTransferSize;
- u32 PacketAlignmentFactor;
- u32 AFListOffset;
- u32 AFListSize;
+ u32 req_id;
+ u32 status;
+ u32 major_ver;
+ u32 minor_ver;
+ u32 dev_flags;
+ u32 medium;
+ u32 max_pkt_per_msg;
+ u32 max_xfer_size;
+ u32 pkt_alignment_factor;
+ u32 af_list_offset;
+ u32 af_list_size;
};
/* Call manager devices only: Information about an address family */
/* supported by the device is appended to the response to NdisInitialize. */
struct rndis_co_address_family {
- u32 AddressFamily;
- u32 MajorVersion;
- u32 MinorVersion;
+ u32 address_family;
+ u32 major_ver;
+ u32 minor_ver;
};
/* NdisHalt message */
struct rndis_halt_request {
- u32 RequestId;
+ u32 req_id;
};
/* NdisQueryRequest message */
struct rndis_query_request {
- u32 RequestId;
- u32 Oid;
- u32 InformationBufferLength;
- u32 InformationBufferOffset;
- u32 DeviceVcHandle;
+ u32 req_id;
+ u32 oid;
+ u32 info_buflen;
+ u32 info_buf_offset;
+ u32 dev_vc_handle;
};
/* Response to NdisQueryRequest */
struct rndis_query_complete {
- u32 RequestId;
- u32 Status;
- u32 InformationBufferLength;
- u32 InformationBufferOffset;
+ u32 req_id;
+ u32 status;
+ u32 info_buflen;
+ u32 info_buf_offset;
};
/* NdisSetRequest message */
struct rndis_set_request {
- u32 RequestId;
- u32 Oid;
- u32 InformationBufferLength;
- u32 InformationBufferOffset;
- u32 DeviceVcHandle;
+ u32 req_id;
+ u32 oid;
+ u32 info_buflen;
+ u32 info_buf_offset;
+ u32 dev_vc_handle;
};
/* Response to NdisSetRequest */
struct rndis_set_complete {
- u32 RequestId;
- u32 Status;
+ u32 req_id;
+ u32 status;
};
/* NdisReset message */
struct rndis_reset_request {
- u32 Reserved;
+ u32 reserved;
};
/* Response to NdisReset */
struct rndis_reset_complete {
- u32 Status;
- u32 AddressingReset;
+ u32 status;
+ u32 addressing_reset;
};
/* NdisMIndicateStatus message */
struct rndis_indicate_status {
- u32 Status;
- u32 StatusBufferLength;
- u32 StatusBufferOffset;
+ u32 status;
+ u32 status_buflen;
+ u32 status_buf_offset;
};
/* Diagnostic information passed as the status buffer in */
/* struct rndis_indicate_status messages signifying error conditions. */
struct rndis_diagnostic_info {
- u32 DiagStatus;
- u32 ErrorOffset;
+ u32 diag_status;
+ u32 error_offset;
};
/* NdisKeepAlive message */
struct rndis_keepalive_request {
- u32 RequestId;
+ u32 req_id;
};
/* Response to NdisKeepAlive */
struct rndis_keepalive_complete {
- u32 RequestId;
- u32 Status;
+ u32 req_id;
+ u32 status;
};
/*
@@ -422,39 +422,39 @@ struct rndis_keepalive_complete {
* to 0 for connectionless data, otherwise it contains the VC handle.
*/
struct rndis_packet {
- u32 DataOffset;
- u32 DataLength;
- u32 OOBDataOffset;
- u32 OOBDataLength;
- u32 NumOOBDataElements;
- u32 PerPacketInfoOffset;
- u32 PerPacketInfoLength;
- u32 VcHandle;
- u32 Reserved;
+ u32 data_offset;
+ u32 data_len;
+ u32 oob_data_offset;
+ u32 oob_data_len;
+ u32 num_oob_data_elements;
+ u32 per_pkt_info_offset;
+ u32 per_pkt_info_len;
+ u32 vc_handle;
+ u32 reserved;
};
/* Optional Out of Band data associated with a Data message. */
struct rndis_oobd {
- u32 Size;
- u32 Type;
- u32 ClassInformationOffset;
+ u32 size;
+ u32 type;
+ u32 class_info_offset;
};
/* Packet extension field contents associated with a Data message. */
struct rndis_per_packet_info {
- u32 Size;
- u32 Type;
- u32 PerPacketInformationOffset;
+ u32 size;
+ u32 type;
+ u32 per_pkt_info_offset;
};
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
- u32 ParameterNameOffset;
- u32 ParameterNameLength;
- u32 ParameterType;
- u32 ParameterValueOffset;
- u32 ParameterValueLength;
+ u32 parameter_name_offset;
+ u32 parameter_name_length;
+ u32 parameter_type;
+ u32 parameter_value_offset;
+ u32 parameter_value_length;
};
/* Values for ParameterType in struct rndis_config_parameter_info */
@@ -466,187 +466,188 @@ struct rndis_config_parameter_info {
/* CoNdisMiniportCreateVc message */
struct rcondis_mp_create_vc {
- u32 RequestId;
- u32 NdisVcHandle;
+ u32 req_id;
+ u32 ndis_vc_handle;
};
/* Response to CoNdisMiniportCreateVc */
struct rcondis_mp_create_vc_complete {
- u32 RequestId;
- u32 DeviceVcHandle;
- u32 Status;
+ u32 req_id;
+ u32 dev_vc_handle;
+ u32 status;
};
/* CoNdisMiniportDeleteVc message */
struct rcondis_mp_delete_vc {
- u32 RequestId;
- u32 DeviceVcHandle;
+ u32 req_id;
+ u32 dev_vc_handle;
};
/* Response to CoNdisMiniportDeleteVc */
struct rcondis_mp_delete_vc_complete {
- u32 RequestId;
- u32 Status;
+ u32 req_id;
+ u32 status;
};
/* CoNdisMiniportQueryRequest message */
struct rcondis_mp_query_request {
- u32 RequestId;
- u32 RequestType;
- u32 Oid;
- u32 DeviceVcHandle;
- u32 InformationBufferLength;
- u32 InformationBufferOffset;
+ u32 req_id;
+ u32 request_type;
+ u32 oid;
+ u32 dev_vc_handle;
+ u32 info_buflen;
+ u32 info_buf_offset;
};
/* CoNdisMiniportSetRequest message */
struct rcondis_mp_set_request {
- u32 RequestId;
- u32 RequestType;
- u32 Oid;
- u32 DeviceVcHandle;
- u32 InformationBufferLength;
- u32 InformationBufferOffset;
+ u32 req_id;
+ u32 request_type;
+ u32 oid;
+ u32 dev_vc_handle;
+ u32 info_buflen;
+ u32 info_buf_offset;
};
/* CoNdisIndicateStatus message */
struct rcondis_indicate_status {
- u32 NdisVcHandle;
- u32 Status;
- u32 StatusBufferLength;
- u32 StatusBufferOffset;
+ u32 ndis_vc_handle;
+ u32 status;
+ u32 status_buflen;
+ u32 status_buf_offset;
};
/* CONDIS Call/VC parameters */
struct rcondis_specific_parameters {
- u32 ParameterType;
- u32 ParameterLength;
- u32 ParameterOffset;
+ u32 parameter_type;
+ u32 parameter_length;
+	u32 parameter_offset;
};
struct rcondis_media_parameters {
- u32 Flags;
- u32 Reserved1;
- u32 Reserved2;
- struct rcondis_specific_parameters MediaSpecific;
+ u32 flags;
+ u32 reserved1;
+ u32 reserved2;
+ struct rcondis_specific_parameters media_specific;
};
struct rndis_flowspec {
- u32 TokenRate;
- u32 TokenBucketSize;
- u32 PeakBandwidth;
- u32 Latency;
- u32 DelayVariation;
- u32 ServiceType;
- u32 MaxSduSize;
- u32 MinimumPolicedSize;
+ u32 token_rate;
+ u32 token_bucket_size;
+ u32 peak_bandwidth;
+ u32 latency;
+ u32 delay_variation;
+ u32 service_type;
+ u32 max_sdu_size;
+ u32 minimum_policed_size;
};
struct rcondis_call_manager_parameters {
- struct rndis_flowspec Transmit;
- struct rndis_flowspec Receive;
- struct rcondis_specific_parameters CallMgrSpecific;
+ struct rndis_flowspec transmit;
+ struct rndis_flowspec receive;
+ struct rcondis_specific_parameters call_mgr_specific;
};
/* CoNdisMiniportActivateVc message */
struct rcondis_mp_activate_vc_request {
- u32 RequestId;
- u32 Flags;
- u32 DeviceVcHandle;
- u32 MediaParamsOffset;
- u32 MediaParamsLength;
- u32 CallMgrParamsOffset;
- u32 CallMgrParamsLength;
+ u32 req_id;
+ u32 flags;
+ u32 dev_vc_handle;
+ u32 media_params_offset;
+ u32 media_params_length;
+ u32 call_mgr_params_offset;
+ u32 call_mgr_params_length;
};
/* Response to CoNdisMiniportActivateVc */
struct rcondis_mp_activate_vc_complete {
- u32 RequestId;
- u32 Status;
+ u32 req_id;
+ u32 status;
};
/* CoNdisMiniportDeactivateVc message */
struct rcondis_mp_deactivate_vc_request {
- u32 RequestId;
- u32 Flags;
- u32 DeviceVcHandle;
+ u32 req_id;
+ u32 flags;
+ u32 dev_vc_handle;
};
/* Response to CoNdisMiniportDeactivateVc */
struct rcondis_mp_deactivate_vc_complete {
- u32 RequestId;
- u32 Status;
+ u32 req_id;
+ u32 status;
};
/* union with all of the RNDIS messages */
union rndis_message_container {
- struct rndis_packet Packet;
- struct rndis_initialize_request InitializeRequest;
- struct rndis_halt_request HaltRequest;
- struct rndis_query_request QueryRequest;
- struct rndis_set_request SetRequest;
- struct rndis_reset_request ResetRequest;
- struct rndis_keepalive_request KeepaliveRequest;
- struct rndis_indicate_status IndicateStatus;
- struct rndis_initialize_complete InitializeComplete;
- struct rndis_query_complete QueryComplete;
- struct rndis_set_complete SetComplete;
- struct rndis_reset_complete ResetComplete;
- struct rndis_keepalive_complete KeepaliveComplete;
- struct rcondis_mp_create_vc CoMiniportCreateVc;
- struct rcondis_mp_delete_vc CoMiniportDeleteVc;
- struct rcondis_indicate_status CoIndicateStatus;
- struct rcondis_mp_activate_vc_request CoMiniportActivateVc;
- struct rcondis_mp_deactivate_vc_request CoMiniportDeactivateVc;
- struct rcondis_mp_create_vc_complete CoMiniportCreateVcComplete;
- struct rcondis_mp_delete_vc_complete CoMiniportDeleteVcComplete;
- struct rcondis_mp_activate_vc_complete CoMiniportActivateVcComplete;
- struct rcondis_mp_deactivate_vc_complete CoMiniportDeactivateVcComplete;
+ struct rndis_packet pkt;
+ struct rndis_initialize_request init_req;
+ struct rndis_halt_request halt_req;
+ struct rndis_query_request query_req;
+ struct rndis_set_request set_req;
+ struct rndis_reset_request reset_req;
+ struct rndis_keepalive_request keep_alive_req;
+ struct rndis_indicate_status indicate_status;
+ struct rndis_initialize_complete init_complete;
+ struct rndis_query_complete query_complete;
+ struct rndis_set_complete set_complete;
+ struct rndis_reset_complete reset_complete;
+ struct rndis_keepalive_complete keep_alive_complete;
+ struct rcondis_mp_create_vc co_miniport_create_vc;
+ struct rcondis_mp_delete_vc co_miniport_delete_vc;
+ struct rcondis_indicate_status co_indicate_status;
+ struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
+ struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
+ struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
+ struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
+ struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
+ struct rcondis_mp_deactivate_vc_complete
+ co_miniport_deactivate_vc_complete;
};
/* Remote NDIS message format */
struct rndis_message {
- u32 NdisMessageType;
+ u32 ndis_msg_type;
/* Total length of this message, from the beginning */
/* of the sruct rndis_message, in bytes. */
- u32 MessageLength;
+ u32 msg_len;
/* Actual message */
- union rndis_message_container Message;
+ union rndis_message_container msg;
};
/* Handy macros */
/* get the size of an RNDIS message. Pass in the message type, */
/* struct rndis_set_request, struct rndis_packet for example */
-#define RNDIS_MESSAGE_SIZE(Message) \
- (sizeof(Message) + (sizeof(struct rndis_message) - \
+#define RNDIS_MESSAGE_SIZE(msg) \
+ (sizeof(msg) + (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container)))
/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(Message) \
- (((unsigned char *)(Message)) + Message->InformationBufferOffset)
+#define MESSAGE_TO_INFO_BUFFER(msg) \
+ (((unsigned char *)(msg)) + msg->info_buf_offset)
/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(Message) \
- (((unsigned char *)(Message)) + Message->StatusBufferOffset)
+#define MESSAGE_TO_STATUS_BUFFER(msg) \
+ (((unsigned char *)(msg)) + msg->status_buf_offset)
/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(Message) \
- (((unsigned char *)(Message)) + Message->OOBDataOffset)
+#define MESSAGE_TO_OOBD_BUFFER(msg) \
+ (((unsigned char *)(msg)) + msg->oob_data_offset)
/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(Message) \
- (((unsigned char *)(Message)) + Message->PerPacketInfoOffset)
+#define MESSAGE_TO_DATA_BUFFER(msg) \
+ (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(RndisMessage) \
- ((void *) &RndisMessage->Message)
+#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
+ ((void *) &rndis_msg->msg)
/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(RndisMessage) \
- ((void *) RndisMessage)
+#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
+ ((void *) rndis_msg)
#endif /* _RNDIS_H_ */
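RNDIS_MESSAGE_SIZE() above yields the wire size of a message carrying one particular body out of the union, not the whole union. A hedged sketch of filling a query request with the renamed fields (the helper name and calling convention are illustrative; the filter code below does the equivalent inside get_rndis_request() and rndis_filter_query_device()):

/* Illustrative sketch against the rndis.h definitions above. */
static void example_fill_query(struct rndis_message *msg, u32 req_id, u32 oid)
{
	struct rndis_query_request *query;

	msg->ndis_msg_type = REMOTE_NDIS_QUERY_MSG;
	msg->msg_len = RNDIS_MESSAGE_SIZE(struct rndis_query_request);

	query = &msg->msg.query_req;
	query->req_id = req_id;
	query->oid = oid;
	query->info_buflen = 0;	/* plain query: no input buffer */
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->dev_vc_handle = 0;
}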
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
index fa2141f454f0..53676dcbf381 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -32,7 +32,7 @@
/* Data types */
struct rndis_filter_driver_object {
/* The original driver */
- struct netvsc_driver InnerDriver;
+ struct netvsc_driver inner_drv;
};
enum rndis_device_state {
@@ -43,63 +43,63 @@ enum rndis_device_state {
};
struct rndis_device {
- struct netvsc_device *NetDevice;
+ struct netvsc_device *net_dev;
- enum rndis_device_state State;
- u32 LinkStatus;
- atomic_t NewRequestId;
+ enum rndis_device_state state;
+ u32 link_stat;
+ atomic_t new_req_id;
spinlock_t request_lock;
- struct list_head RequestList;
+ struct list_head req_list;
- unsigned char HwMacAddr[ETH_ALEN];
+ unsigned char hw_mac_adr[ETH_ALEN];
};
struct rndis_request {
- struct list_head ListEntry;
- struct osd_waitevent *WaitEvent;
+ struct list_head list_ent;
+ struct osd_waitevent *waitevent;
/*
* FIXME: We assumed a fixed size response here. If we do ever need to
* handle a bigger response, we can either define a max response
* message or add a response buffer variable above this field
*/
- struct rndis_message ResponseMessage;
+ struct rndis_message response_msg;
/* Simplify allocation by having a netvsc packet inline */
- struct hv_netvsc_packet Packet;
- struct hv_page_buffer Buffer;
+ struct hv_netvsc_packet pkt;
+ struct hv_page_buffer buf;
/* FIXME: We assumed a fixed size request here. */
- struct rndis_message RequestMessage;
+ struct rndis_message request_msg;
};
struct rndis_filter_packet {
- void *CompletionContext;
- void (*OnCompletion)(void *context);
- struct rndis_message Message;
+ void *completion_ctx;
+ void (*completion)(void *context);
+ struct rndis_message msg;
};
-static int RndisFilterOnDeviceAdd(struct hv_device *Device,
- void *AdditionalInfo);
+static int rndis_filte_device_add(struct hv_device *dev,
+ void *additional_info);
-static int RndisFilterOnDeviceRemove(struct hv_device *Device);
+static int rndis_filter_device_remove(struct hv_device *dev);
-static void RndisFilterOnCleanup(struct hv_driver *Driver);
+static void rndis_filter_cleanup(struct hv_driver *drv);
-static int RndisFilterOnSend(struct hv_device *Device,
- struct hv_netvsc_packet *Packet);
+static int rndis_filter_send(struct hv_device *dev,
+ struct hv_netvsc_packet *pkt);
-static void RndisFilterOnSendCompletion(void *Context);
+static void rndis_filter_send_completion(void *ctx);
-static void RndisFilterOnSendRequestCompletion(void *Context);
+static void rndis_filter_send_request_completion(void *ctx);
/* The one and only */
-static struct rndis_filter_driver_object gRndisFilter;
+static struct rndis_filter_driver_object rndis_filter;
-static struct rndis_device *GetRndisDevice(void)
+static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -109,19 +109,19 @@ static struct rndis_device *GetRndisDevice(void)
spin_lock_init(&device->request_lock);
- INIT_LIST_HEAD(&device->RequestList);
+ INIT_LIST_HEAD(&device->req_list);
- device->State = RNDIS_DEV_UNINITIALIZED;
+ device->state = RNDIS_DEV_UNINITIALIZED;
return device;
}
-static struct rndis_request *GetRndisRequest(struct rndis_device *Device,
- u32 MessageType,
- u32 MessageLength)
+static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+ u32 msg_type,
+ u32 msg_len)
{
struct rndis_request *request;
- struct rndis_message *rndisMessage;
+ struct rndis_message *rndis_msg;
struct rndis_set_request *set;
unsigned long flags;
@@ -129,61 +129,61 @@ static struct rndis_request *GetRndisRequest(struct rndis_device *Device,
if (!request)
return NULL;
- request->WaitEvent = osd_WaitEventCreate();
- if (!request->WaitEvent) {
+ request->waitevent = osd_waitevent_create();
+ if (!request->waitevent) {
kfree(request);
return NULL;
}
- rndisMessage = &request->RequestMessage;
- rndisMessage->NdisMessageType = MessageType;
- rndisMessage->MessageLength = MessageLength;
+ rndis_msg = &request->request_msg;
+ rndis_msg->ndis_msg_type = msg_type;
+ rndis_msg->msg_len = msg_len;
/*
* Set the request id. This field is always after the rndis header for
* request/response packet types so we just used the SetRequest as a
* template
*/
- set = &rndisMessage->Message.SetRequest;
- set->RequestId = atomic_inc_return(&Device->NewRequestId);
+ set = &rndis_msg->msg.set_req;
+ set->req_id = atomic_inc_return(&dev->new_req_id);
/* Add to the request list */
- spin_lock_irqsave(&Device->request_lock, flags);
- list_add_tail(&request->ListEntry, &Device->RequestList);
- spin_unlock_irqrestore(&Device->request_lock, flags);
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_add_tail(&request->list_ent, &dev->req_list);
+ spin_unlock_irqrestore(&dev->request_lock, flags);
return request;
}
-static void PutRndisRequest(struct rndis_device *Device,
- struct rndis_request *Request)
+static void put_rndis_request(struct rndis_device *dev,
+ struct rndis_request *req)
{
unsigned long flags;
- spin_lock_irqsave(&Device->request_lock, flags);
- list_del(&Request->ListEntry);
- spin_unlock_irqrestore(&Device->request_lock, flags);
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_del(&req->list_ent);
+ spin_unlock_irqrestore(&dev->request_lock, flags);
- kfree(Request->WaitEvent);
- kfree(Request);
+ kfree(req->waitevent);
+ kfree(req);
}
-static void DumpRndisMessage(struct rndis_message *RndisMessage)
+static void dump_rndis_message(struct rndis_message *rndis_msg)
{
- switch (RndisMessage->NdisMessageType) {
+ switch (rndis_msg->ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
DPRINT_DBG(NETVSC, "REMOTE_NDIS_PACKET_MSG (len %u, "
"data offset %u data len %u, # oob %u, "
"oob offset %u, oob len %u, pkt offset %u, "
"pkt len %u",
- RndisMessage->MessageLength,
- RndisMessage->Message.Packet.DataOffset,
- RndisMessage->Message.Packet.DataLength,
- RndisMessage->Message.Packet.NumOOBDataElements,
- RndisMessage->Message.Packet.OOBDataOffset,
- RndisMessage->Message.Packet.OOBDataLength,
- RndisMessage->Message.Packet.PerPacketInfoOffset,
- RndisMessage->Message.Packet.PerPacketInfoLength);
+ rndis_msg->msg_len,
+ rndis_msg->msg.pkt.data_offset,
+ rndis_msg->msg.pkt.data_len,
+ rndis_msg->msg.pkt.num_oob_data_elements,
+ rndis_msg->msg.pkt.oob_data_offset,
+ rndis_msg->msg.pkt.oob_data_len,
+ rndis_msg->msg.pkt.per_pkt_info_offset,
+ rndis_msg->msg.pkt.per_pkt_info_len);
break;
case REMOTE_NDIS_INITIALIZE_CMPLT:
@@ -191,147 +191,157 @@ static void DumpRndisMessage(struct rndis_message *RndisMessage)
"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
"device flags %d, max xfer size 0x%x, max pkts %u, "
"pkt aligned %u)",
- RndisMessage->MessageLength,
- RndisMessage->Message.InitializeComplete.RequestId,
- RndisMessage->Message.InitializeComplete.Status,
- RndisMessage->Message.InitializeComplete.MajorVersion,
- RndisMessage->Message.InitializeComplete.MinorVersion,
- RndisMessage->Message.InitializeComplete.DeviceFlags,
- RndisMessage->Message.InitializeComplete.MaxTransferSize,
- RndisMessage->Message.InitializeComplete.MaxPacketsPerMessage,
- RndisMessage->Message.InitializeComplete.PacketAlignmentFactor);
+ rndis_msg->msg_len,
+ rndis_msg->msg.init_complete.req_id,
+ rndis_msg->msg.init_complete.status,
+ rndis_msg->msg.init_complete.major_ver,
+ rndis_msg->msg.init_complete.minor_ver,
+ rndis_msg->msg.init_complete.dev_flags,
+ rndis_msg->msg.init_complete.max_xfer_size,
+ rndis_msg->msg.init_complete.
+ max_pkt_per_msg,
+ rndis_msg->msg.init_complete.
+ pkt_alignment_factor);
break;
case REMOTE_NDIS_QUERY_CMPLT:
DPRINT_DBG(NETVSC, "REMOTE_NDIS_QUERY_CMPLT "
"(len %u, id 0x%x, status 0x%x, buf len %u, "
"buf offset %u)",
- RndisMessage->MessageLength,
- RndisMessage->Message.QueryComplete.RequestId,
- RndisMessage->Message.QueryComplete.Status,
- RndisMessage->Message.QueryComplete.InformationBufferLength,
- RndisMessage->Message.QueryComplete.InformationBufferOffset);
+ rndis_msg->msg_len,
+ rndis_msg->msg.query_complete.req_id,
+ rndis_msg->msg.query_complete.status,
+ rndis_msg->msg.query_complete.
+ info_buflen,
+ rndis_msg->msg.query_complete.
+ info_buf_offset);
break;
case REMOTE_NDIS_SET_CMPLT:
DPRINT_DBG(NETVSC,
"REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)",
- RndisMessage->MessageLength,
- RndisMessage->Message.SetComplete.RequestId,
- RndisMessage->Message.SetComplete.Status);
+ rndis_msg->msg_len,
+ rndis_msg->msg.set_complete.req_id,
+ rndis_msg->msg.set_complete.status);
break;
case REMOTE_NDIS_INDICATE_STATUS_MSG:
DPRINT_DBG(NETVSC, "REMOTE_NDIS_INDICATE_STATUS_MSG "
"(len %u, status 0x%x, buf len %u, buf offset %u)",
- RndisMessage->MessageLength,
- RndisMessage->Message.IndicateStatus.Status,
- RndisMessage->Message.IndicateStatus.StatusBufferLength,
- RndisMessage->Message.IndicateStatus.StatusBufferOffset);
+ rndis_msg->msg_len,
+ rndis_msg->msg.indicate_status.status,
+ rndis_msg->msg.indicate_status.status_buflen,
+ rndis_msg->msg.indicate_status.status_buf_offset);
break;
default:
DPRINT_DBG(NETVSC, "0x%x (len %u)",
- RndisMessage->NdisMessageType,
- RndisMessage->MessageLength);
+ rndis_msg->ndis_msg_type,
+ rndis_msg->msg_len);
break;
}
}
-static int RndisFilterSendRequest(struct rndis_device *Device,
- struct rndis_request *Request)
+static int rndis_filter_send_request(struct rndis_device *dev,
+ struct rndis_request *req)
{
int ret;
struct hv_netvsc_packet *packet;
/* Setup the packet to send it */
- packet = &Request->Packet;
+ packet = &req->pkt;
- packet->IsDataPacket = false;
- packet->TotalDataBufferLength = Request->RequestMessage.MessageLength;
- packet->PageBufferCount = 1;
+ packet->is_data_pkt = false;
+ packet->total_data_buflen = req->request_msg.msg_len;
+ packet->page_buf_cnt = 1;
- packet->PageBuffers[0].Pfn = virt_to_phys(&Request->RequestMessage) >>
+ packet->page_buf[0].Pfn = virt_to_phys(&req->request_msg) >>
PAGE_SHIFT;
- packet->PageBuffers[0].Length = Request->RequestMessage.MessageLength;
- packet->PageBuffers[0].Offset =
- (unsigned long)&Request->RequestMessage & (PAGE_SIZE - 1);
+ packet->page_buf[0].Length = req->request_msg.msg_len;
+ packet->page_buf[0].Offset =
+ (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
- packet->Completion.Send.SendCompletionContext = Request;/* packet; */
- packet->Completion.Send.OnSendCompletion =
- RndisFilterOnSendRequestCompletion;
- packet->Completion.Send.SendCompletionTid = (unsigned long)Device;
+ packet->completion.send.send_completion_ctx = req;/* packet; */
+ packet->completion.send.send_completion =
+ rndis_filter_send_request_completion;
+ packet->completion.send.send_completion_tid = (unsigned long)dev;
- ret = gRndisFilter.InnerDriver.OnSend(Device->NetDevice->Device, packet);
+ ret = rndis_filter.inner_drv.send(dev->net_dev->dev, packet);
return ret;
}
-static void RndisFilterReceiveResponse(struct rndis_device *Device,
- struct rndis_message *Response)
+static void rndis_filter_receive_response(struct rndis_device *dev,
+ struct rndis_message *resp)
{
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
- spin_lock_irqsave(&Device->request_lock, flags);
- list_for_each_entry(request, &Device->RequestList, ListEntry) {
+ spin_lock_irqsave(&dev->request_lock, flags);
+ list_for_each_entry(request, &dev->req_list, list_ent) {
/*
* All request/response message contains RequestId as the 1st
* field
*/
- if (request->RequestMessage.Message.InitializeRequest.RequestId
- == Response->Message.InitializeComplete.RequestId) {
+ if (request->request_msg.msg.init_req.req_id
+ == resp->msg.init_complete.req_id) {
DPRINT_DBG(NETVSC, "found rndis request for "
"this response (id 0x%x req type 0x%x res "
"type 0x%x)",
- request->RequestMessage.Message.InitializeRequest.RequestId,
- request->RequestMessage.NdisMessageType,
- Response->NdisMessageType);
+ request->request_msg.msg.
+ init_req.req_id,
+ request->request_msg.ndis_msg_type,
+ resp->ndis_msg_type);
found = true;
break;
}
}
- spin_unlock_irqrestore(&Device->request_lock, flags);
+ spin_unlock_irqrestore(&dev->request_lock, flags);
if (found) {
- if (Response->MessageLength <= sizeof(struct rndis_message)) {
- memcpy(&request->ResponseMessage, Response,
- Response->MessageLength);
+ if (resp->msg_len <= sizeof(struct rndis_message)) {
+ memcpy(&request->response_msg, resp,
+ resp->msg_len);
} else {
DPRINT_ERR(NETVSC, "rndis response buffer overflow "
"detected (size %u max %zu)",
- Response->MessageLength,
+ resp->msg_len,
sizeof(struct rndis_filter_packet));
- if (Response->NdisMessageType ==
+ if (resp->ndis_msg_type ==
REMOTE_NDIS_RESET_CMPLT) {
/* does not have a request id field */
- request->ResponseMessage.Message.ResetComplete.Status = STATUS_BUFFER_OVERFLOW;
+ request->response_msg.msg.reset_complete.
+ status = STATUS_BUFFER_OVERFLOW;
} else {
- request->ResponseMessage.Message.InitializeComplete.Status = STATUS_BUFFER_OVERFLOW;
+ request->response_msg.msg.
+ init_complete.status =
+ STATUS_BUFFER_OVERFLOW;
}
}
- osd_WaitEventSet(request->WaitEvent);
+ osd_waitevent_set(request->waitevent);
} else {
DPRINT_ERR(NETVSC, "no rndis request found for this response "
"(id 0x%x res type 0x%x)",
- Response->Message.InitializeComplete.RequestId,
- Response->NdisMessageType);
+ resp->msg.init_complete.req_id,
+ resp->ndis_msg_type);
}
}
-static void RndisFilterReceiveIndicateStatus(struct rndis_device *Device,
- struct rndis_message *Response)
+static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
+ struct rndis_message *resp)
{
struct rndis_indicate_status *indicate =
- &Response->Message.IndicateStatus;
-
- if (indicate->Status == RNDIS_STATUS_MEDIA_CONNECT) {
- gRndisFilter.InnerDriver.OnLinkStatusChanged(Device->NetDevice->Device, 1);
- } else if (indicate->Status == RNDIS_STATUS_MEDIA_DISCONNECT) {
- gRndisFilter.InnerDriver.OnLinkStatusChanged(Device->NetDevice->Device, 0);
+ &resp->msg.indicate_status;
+
+ if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
+ rndis_filter.inner_drv.link_status_change(
+ dev->net_dev->dev, 1);
+ } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
+ rndis_filter.inner_drv.link_status_change(
+ dev->net_dev->dev, 0);
} else {
/*
* TODO:
@@ -339,18 +349,18 @@ static void RndisFilterReceiveIndicateStatus(struct rndis_device *Device,
}
}
-static void RndisFilterReceiveData(struct rndis_device *Device,
- struct rndis_message *Message,
- struct hv_netvsc_packet *Packet)
+static void rndis_filter_receive_data(struct rndis_device *dev,
+ struct rndis_message *msg,
+ struct hv_netvsc_packet *pkt)
{
- struct rndis_packet *rndisPacket;
- u32 dataOffset;
+ struct rndis_packet *rndis_pkt;
+ u32 data_offset;
/* empty ethernet frame ?? */
/* ASSERT(Packet->PageBuffers[0].Length > */
/* RNDIS_MESSAGE_SIZE(struct rndis_packet)); */
- rndisPacket = &Message->Message.Packet;
+ rndis_pkt = &msg->msg.pkt;
/*
* FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
@@ -358,48 +368,48 @@ static void RndisFilterReceiveData(struct rndis_device *Device,
*/
/* Remove the rndis header and pass it back up the stack */
- dataOffset = RNDIS_HEADER_SIZE + rndisPacket->DataOffset;
+ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
- Packet->TotalDataBufferLength -= dataOffset;
- Packet->PageBuffers[0].Offset += dataOffset;
- Packet->PageBuffers[0].Length -= dataOffset;
+ pkt->total_data_buflen -= data_offset;
+ pkt->page_buf[0].Offset += data_offset;
+ pkt->page_buf[0].Length -= data_offset;
- Packet->IsDataPacket = true;
+ pkt->is_data_pkt = true;
- gRndisFilter.InnerDriver.OnReceiveCallback(Device->NetDevice->Device,
- Packet);
+ rndis_filter.inner_drv.recv_cb(dev->net_dev->dev,
+ pkt);
}
-static int RndisFilterOnReceive(struct hv_device *Device,
- struct hv_netvsc_packet *Packet)
+static int rndis_filter_receive(struct hv_device *dev,
+ struct hv_netvsc_packet *pkt)
{
- struct netvsc_device *netDevice = Device->Extension;
- struct rndis_device *rndisDevice;
- struct rndis_message rndisMessage;
- struct rndis_message *rndisHeader;
+ struct netvsc_device *net_dev = dev->Extension;
+ struct rndis_device *rndis_dev;
+ struct rndis_message rndis_msg;
+ struct rndis_message *rndis_hdr;
- if (!netDevice)
+ if (!net_dev)
return -EINVAL;
/* Make sure the rndis device state is initialized */
- if (!netDevice->Extension) {
+ if (!net_dev->extension) {
DPRINT_ERR(NETVSC, "got rndis message but no rndis device..."
"dropping this message!");
return -1;
}
- rndisDevice = (struct rndis_device *)netDevice->Extension;
- if (rndisDevice->State == RNDIS_DEV_UNINITIALIZED) {
+ rndis_dev = (struct rndis_device *)net_dev->extension;
+ if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
DPRINT_ERR(NETVSC, "got rndis message but rndis device "
"uninitialized...dropping this message!");
return -1;
}
- rndisHeader = (struct rndis_message *)kmap_atomic(
- pfn_to_page(Packet->PageBuffers[0].Pfn), KM_IRQ0);
+ rndis_hdr = (struct rndis_message *)kmap_atomic(
+ pfn_to_page(pkt->page_buf[0].Pfn), KM_IRQ0);
- rndisHeader = (void *)((unsigned long)rndisHeader +
- Packet->PageBuffers[0].Offset);
+ rndis_hdr = (void *)((unsigned long)rndis_hdr +
+ pkt->page_buf[0].Offset);
/* Make sure we got a valid rndis message */
/*
@@ -408,39 +418,39 @@ static int RndisFilterOnReceive(struct hv_device *Device,
* range shows 52 bytes
* */
#if 0
- if (Packet->TotalDataBufferLength != rndisHeader->MessageLength) {
- kunmap_atomic(rndisHeader - Packet->PageBuffers[0].Offset,
+ if (pkt->total_data_buflen != rndis_hdr->msg_len) {
+ kunmap_atomic(rndis_hdr - pkt->page_buf[0].Offset,
KM_IRQ0);
DPRINT_ERR(NETVSC, "invalid rndis message? (expected %u "
"bytes got %u)...dropping this message!",
- rndisHeader->MessageLength,
- Packet->TotalDataBufferLength);
+ rndis_hdr->msg_len,
+ pkt->total_data_buflen);
return -1;
}
#endif
- if ((rndisHeader->NdisMessageType != REMOTE_NDIS_PACKET_MSG) &&
- (rndisHeader->MessageLength > sizeof(struct rndis_message))) {
+ if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
+ (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
DPRINT_ERR(NETVSC, "incoming rndis message buffer overflow "
"detected (got %u, max %zu)...marking it an error!",
- rndisHeader->MessageLength,
+ rndis_hdr->msg_len,
sizeof(struct rndis_message));
}
- memcpy(&rndisMessage, rndisHeader,
- (rndisHeader->MessageLength > sizeof(struct rndis_message)) ?
+ memcpy(&rndis_msg, rndis_hdr,
+ (rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
sizeof(struct rndis_message) :
- rndisHeader->MessageLength);
+ rndis_hdr->msg_len);
- kunmap_atomic(rndisHeader - Packet->PageBuffers[0].Offset, KM_IRQ0);
+ kunmap_atomic(rndis_hdr - pkt->page_buf[0].Offset, KM_IRQ0);
- DumpRndisMessage(&rndisMessage);
+ dump_rndis_message(&rndis_msg);
- switch (rndisMessage.NdisMessageType) {
+ switch (rndis_msg.ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
/* data msg */
- RndisFilterReceiveData(rndisDevice, &rndisMessage, Packet);
+ rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
break;
case REMOTE_NDIS_INITIALIZE_CMPLT:
@@ -449,37 +459,37 @@ static int RndisFilterOnReceive(struct hv_device *Device,
/* case REMOTE_NDIS_RESET_CMPLT: */
/* case REMOTE_NDIS_KEEPALIVE_CMPLT: */
/* completion msgs */
- RndisFilterReceiveResponse(rndisDevice, &rndisMessage);
+ rndis_filter_receive_response(rndis_dev, &rndis_msg);
break;
case REMOTE_NDIS_INDICATE_STATUS_MSG:
/* notification msgs */
- RndisFilterReceiveIndicateStatus(rndisDevice, &rndisMessage);
+ rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
break;
default:
DPRINT_ERR(NETVSC, "unhandled rndis message (type %u len %u)",
- rndisMessage.NdisMessageType,
- rndisMessage.MessageLength);
+ rndis_msg.ndis_msg_type,
+ rndis_msg.msg_len);
break;
}
return 0;
}
-static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
- void *Result, u32 *ResultSize)
+static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
+ void *result, u32 *result_size)
{
struct rndis_request *request;
- u32 inresultSize = *ResultSize;
+ u32 inresult_size = *result_size;
struct rndis_query_request *query;
- struct rndis_query_complete *queryComplete;
+ struct rndis_query_complete *query_complete;
int ret = 0;
- if (!Result)
+ if (!result)
return -EINVAL;
- *ResultSize = 0;
- request = GetRndisRequest(Device, REMOTE_NDIS_QUERY_MSG,
+ *result_size = 0;
+ request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_query_request));
if (!request) {
ret = -1;
@@ -487,71 +497,71 @@ static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
}
/* Setup the rndis query */
- query = &request->RequestMessage.Message.QueryRequest;
- query->Oid = Oid;
- query->InformationBufferOffset = sizeof(struct rndis_query_request);
- query->InformationBufferLength = 0;
- query->DeviceVcHandle = 0;
+ query = &request->request_msg.msg.query_req;
+ query->oid = oid;
+ query->info_buf_offset = sizeof(struct rndis_query_request);
+ query->info_buflen = 0;
+ query->dev_vc_handle = 0;
- ret = RndisFilterSendRequest(Device, request);
+ ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto Cleanup;
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->waitevent);
/* Copy the response back */
- queryComplete = &request->ResponseMessage.Message.QueryComplete;
+ query_complete = &request->response_msg.msg.query_complete;
- if (queryComplete->InformationBufferLength > inresultSize) {
+ if (query_complete->info_buflen > inresult_size) {
ret = -1;
goto Cleanup;
}
- memcpy(Result,
- (void *)((unsigned long)queryComplete +
- queryComplete->InformationBufferOffset),
- queryComplete->InformationBufferLength);
+ memcpy(result,
+ (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset),
+ query_complete->info_buflen);
- *ResultSize = queryComplete->InformationBufferLength;
+ *result_size = query_complete->info_buflen;
Cleanup:
if (request)
- PutRndisRequest(Device, request);
+ put_rndis_request(dev, request);
return ret;
}
-static int RndisFilterQueryDeviceMac(struct rndis_device *Device)
+static int rndis_filter_query_device_mac(struct rndis_device *dev)
{
u32 size = ETH_ALEN;
- return RndisFilterQueryDevice(Device,
+ return rndis_filter_query_device(dev,
RNDIS_OID_802_3_PERMANENT_ADDRESS,
- Device->HwMacAddr, &size);
+ dev->hw_mac_adr, &size);
}
-static int RndisFilterQueryDeviceLinkStatus(struct rndis_device *Device)
+static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
- return RndisFilterQueryDevice(Device,
+ return rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
- &Device->LinkStatus, &size);
+ &dev->link_stat, &size);
}
-static int RndisFilterSetPacketFilter(struct rndis_device *Device,
- u32 NewFilter)
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+ u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
- struct rndis_set_complete *setComplete;
+ struct rndis_set_complete *set_complete;
u32 status;
int ret;
/* ASSERT(RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32) <= */
/* sizeof(struct rndis_message)); */
- request = GetRndisRequest(Device, REMOTE_NDIS_SET_MSG,
+ request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
sizeof(u32));
if (!request) {
@@ -560,19 +570,19 @@ static int RndisFilterSetPacketFilter(struct rndis_device *Device,
}
/* Setup the rndis set */
- set = &request->RequestMessage.Message.SetRequest;
- set->Oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
- set->InformationBufferLength = sizeof(u32);
- set->InformationBufferOffset = sizeof(struct rndis_set_request);
+ set = &request->request_msg.msg.set_req;
+ set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
+ set->info_buflen = sizeof(u32);
+ set->info_buf_offset = sizeof(struct rndis_set_request);
memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
- &NewFilter, sizeof(u32));
+ &new_filter, sizeof(u32));
- ret = RndisFilterSendRequest(Device, request);
+ ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto Cleanup;
- ret = osd_WaitEventWaitEx(request->WaitEvent, 2000/*2sec*/);
+ ret = osd_waitevent_waitex(request->waitevent, 2000/*2sec*/);
if (!ret) {
ret = -1;
DPRINT_ERR(NETVSC, "timeout before we got a set response...");
@@ -584,27 +594,27 @@ static int RndisFilterSetPacketFilter(struct rndis_device *Device,
} else {
if (ret > 0)
ret = 0;
- setComplete = &request->ResponseMessage.Message.SetComplete;
- status = setComplete->Status;
+ set_complete = &request->response_msg.msg.set_complete;
+ status = set_complete->status;
}
Cleanup:
if (request)
- PutRndisRequest(Device, request);
+ put_rndis_request(dev, request);
Exit:
return ret;
}
-int RndisFilterInit(struct netvsc_driver *Driver)
+int rndis_filter_init(struct netvsc_driver *drv)
{
DPRINT_DBG(NETVSC, "sizeof(struct rndis_filter_packet) == %zd",
sizeof(struct rndis_filter_packet));
- Driver->RequestExtSize = sizeof(struct rndis_filter_packet);
+ drv->req_ext_size = sizeof(struct rndis_filter_packet);
/* Driver->Context = rndisDriver; */
- memset(&gRndisFilter, 0, sizeof(struct rndis_filter_driver_object));
+ memset(&rndis_filter, 0, sizeof(struct rndis_filter_driver_object));
/*rndisDriver->Driver = Driver;
@@ -612,38 +622,38 @@ int RndisFilterInit(struct netvsc_driver *Driver)
rndisDriver->OnLinkStatusChanged = Driver->OnLinkStatusChanged;*/
/* Save the original dispatch handlers before we override it */
- gRndisFilter.InnerDriver.Base.OnDeviceAdd = Driver->Base.OnDeviceAdd;
- gRndisFilter.InnerDriver.Base.OnDeviceRemove =
- Driver->Base.OnDeviceRemove;
- gRndisFilter.InnerDriver.Base.OnCleanup = Driver->Base.OnCleanup;
+ rndis_filter.inner_drv.base.OnDeviceAdd = drv->base.OnDeviceAdd;
+ rndis_filter.inner_drv.base.OnDeviceRemove =
+ drv->base.OnDeviceRemove;
+ rndis_filter.inner_drv.base.OnCleanup = drv->base.OnCleanup;
/* ASSERT(Driver->OnSend); */
/* ASSERT(Driver->OnReceiveCallback); */
- gRndisFilter.InnerDriver.OnSend = Driver->OnSend;
- gRndisFilter.InnerDriver.OnReceiveCallback = Driver->OnReceiveCallback;
- gRndisFilter.InnerDriver.OnLinkStatusChanged =
- Driver->OnLinkStatusChanged;
+ rndis_filter.inner_drv.send = drv->send;
+ rndis_filter.inner_drv.recv_cb = drv->recv_cb;
+ rndis_filter.inner_drv.link_status_change =
+ drv->link_status_change;
/* Override */
- Driver->Base.OnDeviceAdd = RndisFilterOnDeviceAdd;
- Driver->Base.OnDeviceRemove = RndisFilterOnDeviceRemove;
- Driver->Base.OnCleanup = RndisFilterOnCleanup;
- Driver->OnSend = RndisFilterOnSend;
+ drv->base.OnDeviceAdd = rndis_filte_device_add;
+ drv->base.OnDeviceRemove = rndis_filter_device_remove;
+ drv->base.OnCleanup = rndis_filter_cleanup;
+ drv->send = rndis_filter_send;
/* Driver->QueryLinkStatus = RndisFilterQueryDeviceLinkStatus; */
- Driver->OnReceiveCallback = RndisFilterOnReceive;
+ drv->recv_cb = rndis_filter_receive;
return 0;
}
-static int RndisFilterInitDevice(struct rndis_device *Device)
+static int rndis_filter_init_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_initialize_request *init;
- struct rndis_initialize_complete *initComplete;
+ struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
- request = GetRndisRequest(Device, REMOTE_NDIS_INITIALIZE_MSG,
+ request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
if (!request) {
ret = -1;
@@ -651,105 +661,105 @@ static int RndisFilterInitDevice(struct rndis_device *Device)
}
/* Setup the rndis set */
- init = &request->RequestMessage.Message.InitializeRequest;
- init->MajorVersion = RNDIS_MAJOR_VERSION;
- init->MinorVersion = RNDIS_MINOR_VERSION;
+ init = &request->request_msg.msg.init_req;
+ init->major_ver = RNDIS_MAJOR_VERSION;
+ init->minor_ver = RNDIS_MINOR_VERSION;
/* FIXME: Use 1536 - rounded ethernet frame size */
- init->MaxTransferSize = 2048;
+ init->max_xfer_size = 2048;
- Device->State = RNDIS_DEV_INITIALIZING;
+ dev->state = RNDIS_DEV_INITIALIZING;
- ret = RndisFilterSendRequest(Device, request);
+ ret = rndis_filter_send_request(dev, request);
if (ret != 0) {
- Device->State = RNDIS_DEV_UNINITIALIZED;
+ dev->state = RNDIS_DEV_UNINITIALIZED;
goto Cleanup;
}
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->waitevent);
- initComplete = &request->ResponseMessage.Message.InitializeComplete;
- status = initComplete->Status;
+ init_complete = &request->response_msg.msg.init_complete;
+ status = init_complete->status;
if (status == RNDIS_STATUS_SUCCESS) {
- Device->State = RNDIS_DEV_INITIALIZED;
+ dev->state = RNDIS_DEV_INITIALIZED;
ret = 0;
} else {
- Device->State = RNDIS_DEV_UNINITIALIZED;
+ dev->state = RNDIS_DEV_UNINITIALIZED;
ret = -1;
}
Cleanup:
if (request)
- PutRndisRequest(Device, request);
+ put_rndis_request(dev, request);
return ret;
}
-static void RndisFilterHaltDevice(struct rndis_device *Device)
+static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
/* Attempt to do a rndis device halt */
- request = GetRndisRequest(Device, REMOTE_NDIS_HALT_MSG,
+ request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
if (!request)
goto Cleanup;
/* Setup the rndis set */
- halt = &request->RequestMessage.Message.HaltRequest;
- halt->RequestId = atomic_inc_return(&Device->NewRequestId);
+ halt = &request->request_msg.msg.halt_req;
+ halt->req_id = atomic_inc_return(&dev->new_req_id);
/* Ignore return since this msg is optional. */
- RndisFilterSendRequest(Device, request);
+ rndis_filter_send_request(dev, request);
- Device->State = RNDIS_DEV_UNINITIALIZED;
+ dev->state = RNDIS_DEV_UNINITIALIZED;
Cleanup:
if (request)
- PutRndisRequest(Device, request);
+ put_rndis_request(dev, request);
return;
}
-static int RndisFilterOpenDevice(struct rndis_device *Device)
+static int rndis_filter_open_device(struct rndis_device *dev)
{
int ret;
- if (Device->State != RNDIS_DEV_INITIALIZED)
+ if (dev->state != RNDIS_DEV_INITIALIZED)
return 0;
- ret = RndisFilterSetPacketFilter(Device,
+ ret = rndis_filter_set_packet_filter(dev,
NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
if (ret == 0)
- Device->State = RNDIS_DEV_DATAINITIALIZED;
+ dev->state = RNDIS_DEV_DATAINITIALIZED;
return ret;
}
-static int RndisFilterCloseDevice(struct rndis_device *Device)
+static int rndis_filter_close_device(struct rndis_device *dev)
{
int ret;
- if (Device->State != RNDIS_DEV_DATAINITIALIZED)
+ if (dev->state != RNDIS_DEV_DATAINITIALIZED)
return 0;
- ret = RndisFilterSetPacketFilter(Device, 0);
+ ret = rndis_filter_set_packet_filter(dev, 0);
if (ret == 0)
- Device->State = RNDIS_DEV_INITIALIZED;
+ dev->state = RNDIS_DEV_INITIALIZED;
return ret;
}
-static int RndisFilterOnDeviceAdd(struct hv_device *Device,
- void *AdditionalInfo)
+static int rndis_filte_device_add(struct hv_device *dev,
+ void *additional_info)
{
int ret;
struct netvsc_device *netDevice;
struct rndis_device *rndisDevice;
- struct netvsc_device_info *deviceInfo = AdditionalInfo;
+ struct netvsc_device_info *deviceInfo = additional_info;
- rndisDevice = GetRndisDevice();
+ rndisDevice = get_rndis_device();
if (!rndisDevice)
return -1;
@@ -760,7 +770,7 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
- ret = gRndisFilter.InnerDriver.Base.OnDeviceAdd(Device, AdditionalInfo);
+ ret = rndis_filter.inner_drv.base.OnDeviceAdd(dev, additional_info);
if (ret != 0) {
kfree(rndisDevice);
return ret;
@@ -768,15 +778,15 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
/* Initialize the rndis device */
- netDevice = Device->Extension;
+ netDevice = dev->Extension;
/* ASSERT(netDevice); */
/* ASSERT(netDevice->Device); */
- netDevice->Extension = rndisDevice;
- rndisDevice->NetDevice = netDevice;
+ netDevice->extension = rndisDevice;
+ rndisDevice->net_dev = netDevice;
/* Send the rndis initialization message */
- ret = RndisFilterInitDevice(rndisDevice);
+ ret = rndis_filter_init_device(rndisDevice);
if (ret != 0) {
/*
* TODO: If rndis init failed, we will need to shut down the
@@ -785,7 +795,7 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
}
/* Get the mac address */
- ret = RndisFilterQueryDeviceMac(rndisDevice);
+ ret = rndis_filter_query_device_mac(rndisDevice);
if (ret != 0) {
/*
* TODO: shutdown rndis device and the channel
@@ -793,62 +803,62 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
}
DPRINT_INFO(NETVSC, "Device 0x%p mac addr %pM",
- rndisDevice, rndisDevice->HwMacAddr);
+ rndisDevice, rndisDevice->hw_mac_adr);
- memcpy(deviceInfo->MacAddr, rndisDevice->HwMacAddr, ETH_ALEN);
+ memcpy(deviceInfo->mac_adr, rndisDevice->hw_mac_adr, ETH_ALEN);
- RndisFilterQueryDeviceLinkStatus(rndisDevice);
+ rndis_filter_query_device_link_status(rndisDevice);
- deviceInfo->LinkState = rndisDevice->LinkStatus;
+ deviceInfo->link_state = rndisDevice->link_stat;
DPRINT_INFO(NETVSC, "Device 0x%p link state %s", rndisDevice,
- ((deviceInfo->LinkState) ? ("down") : ("up")));
+ ((deviceInfo->link_state) ? ("down") : ("up")));
return ret;
}
-static int RndisFilterOnDeviceRemove(struct hv_device *Device)
+static int rndis_filter_device_remove(struct hv_device *dev)
{
- struct netvsc_device *netDevice = Device->Extension;
- struct rndis_device *rndisDevice = netDevice->Extension;
+ struct netvsc_device *net_dev = dev->Extension;
+ struct rndis_device *rndis_dev = net_dev->extension;
/* Halt and release the rndis device */
- RndisFilterHaltDevice(rndisDevice);
+ rndis_filter_halt_device(rndis_dev);
- kfree(rndisDevice);
- netDevice->Extension = NULL;
+ kfree(rndis_dev);
+ net_dev->extension = NULL;
/* Pass control to inner driver to remove the device */
- gRndisFilter.InnerDriver.Base.OnDeviceRemove(Device);
+ rndis_filter.inner_drv.base.OnDeviceRemove(dev);
return 0;
}
-static void RndisFilterOnCleanup(struct hv_driver *Driver)
+static void rndis_filter_cleanup(struct hv_driver *drv)
{
}
-int RndisFilterOnOpen(struct hv_device *Device)
+int rndis_filter_open(struct hv_device *dev)
{
- struct netvsc_device *netDevice = Device->Extension;
+ struct netvsc_device *netDevice = dev->Extension;
if (!netDevice)
return -EINVAL;
- return RndisFilterOpenDevice(netDevice->Extension);
+ return rndis_filter_open_device(netDevice->extension);
}
-int RndisFilterOnClose(struct hv_device *Device)
+int rndis_filter_close(struct hv_device *dev)
{
- struct netvsc_device *netDevice = Device->Extension;
+ struct netvsc_device *netDevice = dev->Extension;
if (!netDevice)
return -EINVAL;
- return RndisFilterCloseDevice(netDevice->Extension);
+ return rndis_filter_close_device(netDevice->extension);
}
-static int RndisFilterOnSend(struct hv_device *Device,
- struct hv_netvsc_packet *Packet)
+static int rndis_filter_send(struct hv_device *dev,
+ struct hv_netvsc_packet *pkt)
{
int ret;
struct rndis_filter_packet *filterPacket;
@@ -857,62 +867,62 @@ static int RndisFilterOnSend(struct hv_device *Device,
u32 rndisMessageSize;
/* Add the rndis header */
- filterPacket = (struct rndis_filter_packet *)Packet->Extension;
+ filterPacket = (struct rndis_filter_packet *)pkt->extension;
/* ASSERT(filterPacket); */
memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
- rndisMessage = &filterPacket->Message;
+ rndisMessage = &filterPacket->msg;
rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
- rndisMessage->NdisMessageType = REMOTE_NDIS_PACKET_MSG;
- rndisMessage->MessageLength = Packet->TotalDataBufferLength +
+ rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
+ rndisMessage->msg_len = pkt->total_data_buflen +
rndisMessageSize;
- rndisPacket = &rndisMessage->Message.Packet;
- rndisPacket->DataOffset = sizeof(struct rndis_packet);
- rndisPacket->DataLength = Packet->TotalDataBufferLength;
+ rndisPacket = &rndisMessage->msg.pkt;
+ rndisPacket->data_offset = sizeof(struct rndis_packet);
+ rndisPacket->data_len = pkt->total_data_buflen;
- Packet->IsDataPacket = true;
- Packet->PageBuffers[0].Pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
- Packet->PageBuffers[0].Offset =
+ pkt->is_data_pkt = true;
+ pkt->page_buf[0].Pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
+ pkt->page_buf[0].Offset =
(unsigned long)rndisMessage & (PAGE_SIZE-1);
- Packet->PageBuffers[0].Length = rndisMessageSize;
+ pkt->page_buf[0].Length = rndisMessageSize;
/* Save the packet send completion and context */
- filterPacket->OnCompletion = Packet->Completion.Send.OnSendCompletion;
- filterPacket->CompletionContext =
- Packet->Completion.Send.SendCompletionContext;
+ filterPacket->completion = pkt->completion.send.send_completion;
+ filterPacket->completion_ctx =
+ pkt->completion.send.send_completion_ctx;
/* Use ours */
- Packet->Completion.Send.OnSendCompletion = RndisFilterOnSendCompletion;
- Packet->Completion.Send.SendCompletionContext = filterPacket;
+ pkt->completion.send.send_completion = rndis_filter_send_completion;
+ pkt->completion.send.send_completion_ctx = filterPacket;
- ret = gRndisFilter.InnerDriver.OnSend(Device, Packet);
+ ret = rndis_filter.inner_drv.send(dev, pkt);
if (ret != 0) {
/*
* Reset the completion to originals to allow retries from
* above
*/
- Packet->Completion.Send.OnSendCompletion =
- filterPacket->OnCompletion;
- Packet->Completion.Send.SendCompletionContext =
- filterPacket->CompletionContext;
+ pkt->completion.send.send_completion =
+ filterPacket->completion;
+ pkt->completion.send.send_completion_ctx =
+ filterPacket->completion_ctx;
}
return ret;
}
-static void RndisFilterOnSendCompletion(void *Context)
+static void rndis_filter_send_completion(void *ctx)
{
- struct rndis_filter_packet *filterPacket = Context;
+ struct rndis_filter_packet *filterPacket = ctx;
/* Pass it back to the original handler */
- filterPacket->OnCompletion(filterPacket->CompletionContext);
+ filterPacket->completion(filterPacket->completion_ctx);
}
-static void RndisFilterOnSendRequestCompletion(void *Context)
+static void rndis_filter_send_request_completion(void *ctx)
{
/* Noop */
}
diff --git a/drivers/staging/hv/rndis_filter.h b/drivers/staging/hv/rndis_filter.h
index 764b9bf3e5dc..4da18f3cbade 100644
--- a/drivers/staging/hv/rndis_filter.h
+++ b/drivers/staging/hv/rndis_filter.h
@@ -50,6 +50,6 @@
/* Interface */
-extern int RndisFilterInit(struct netvsc_driver *driver);
+extern int rndis_filter_init(struct netvsc_driver *driver);
#endif /* _RNDISFILTER_H_ */
diff --git a/drivers/staging/hv/storvsc.c b/drivers/staging/hv/storvsc.c
index 19e87f689fa0..9295113e09b9 100644
--- a/drivers/staging/hv/storvsc.c
+++ b/drivers/staging/hv/storvsc.c
@@ -34,43 +34,43 @@
struct storvsc_request_extension {
/* LIST_ENTRY ListEntry; */
- struct hv_storvsc_request *Request;
- struct hv_device *Device;
+ struct hv_storvsc_request *request;
+ struct hv_device *device;
/* Synchronize the request/response if needed */
- struct osd_waitevent *WaitEvent;
+ struct osd_waitevent *wait_event;
- struct vstor_packet VStorPacket;
+ struct vstor_packet vstor_packet;
};
/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
- struct hv_device *Device;
+ struct hv_device *device;
/* 0 indicates the device is being destroyed */
- atomic_t RefCount;
+ atomic_t ref_count;
- atomic_t NumOutstandingRequests;
+ atomic_t num_outstanding_req;
/*
* Each unique Port/Path/Target represents 1 channel ie scsi
* controller. In reality, the pathid, targetid is always 0
* and the port is set by us
*/
- unsigned int PortNumber;
- unsigned char PathId;
- unsigned char TargetId;
+ unsigned int port_number;
+ unsigned char path_id;
+ unsigned char target_id;
/* LIST_ENTRY OutstandingRequestList; */
/* HANDLE OutstandingRequestLock; */
/* Used for vsc/vsp channel reset process */
- struct storvsc_request_extension InitRequest;
- struct storvsc_request_extension ResetRequest;
+ struct storvsc_request_extension init_request;
+ struct storvsc_request_extension reset_request;
};
-static const char *gDriverName = "storvsc";
+static const char *g_driver_name = "storvsc";
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const struct hv_guid gStorVscDeviceType = {
@@ -81,131 +81,133 @@ static const struct hv_guid gStorVscDeviceType = {
};
-static inline struct storvsc_device *AllocStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
- if (!storDevice)
+ stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
+ if (!stor_device)
return NULL;
/* Set to 2 to allow both inbound and outbound traffics */
- /* (ie GetStorDevice() and MustGetStorDevice()) to proceed. */
- atomic_cmpxchg(&storDevice->RefCount, 0, 2);
+ /* (i.e. get_stor_device() and must_get_stor_device()) to proceed. */
+ atomic_cmpxchg(&stor_device->ref_count, 0, 2);
- storDevice->Device = Device;
- Device->Extension = storDevice;
+ stor_device->device = device;
+ device->Extension = stor_device;
- return storDevice;
+ return stor_device;
}
-static inline void FreeStorDevice(struct storvsc_device *Device)
+static inline void free_stor_device(struct storvsc_device *device)
{
- /* ASSERT(atomic_read(&Device->RefCount) == 0); */
- kfree(Device);
+ /* ASSERT(atomic_read(&device->ref_count) == 0); */
+ kfree(device);
}
/* Get the stordevice object iff exists and its refcount > 1 */
-static inline struct storvsc_device *GetStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *get_stor_device(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = (struct storvsc_device *)Device->Extension;
- if (storDevice && atomic_read(&storDevice->RefCount) > 1)
- atomic_inc(&storDevice->RefCount);
+ stor_device = (struct storvsc_device *)device->Extension;
+ if (stor_device && atomic_read(&stor_device->ref_count) > 1)
+ atomic_inc(&stor_device->ref_count);
else
- storDevice = NULL;
+ stor_device = NULL;
- return storDevice;
+ return stor_device;
}
/* Get the stordevice object iff exists and its refcount > 0 */
-static inline struct storvsc_device *MustGetStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *must_get_stor_device(
+ struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = (struct storvsc_device *)Device->Extension;
- if (storDevice && atomic_read(&storDevice->RefCount))
- atomic_inc(&storDevice->RefCount);
+ stor_device = (struct storvsc_device *)device->Extension;
+ if (stor_device && atomic_read(&stor_device->ref_count))
+ atomic_inc(&stor_device->ref_count);
else
- storDevice = NULL;
+ stor_device = NULL;
- return storDevice;
+ return stor_device;
}
-static inline void PutStorDevice(struct hv_device *Device)
+static inline void put_stor_device(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = (struct storvsc_device *)Device->Extension;
- /* ASSERT(storDevice); */
+ stor_device = (struct storvsc_device *)device->Extension;
+ /* ASSERT(stor_device); */
- atomic_dec(&storDevice->RefCount);
- /* ASSERT(atomic_read(&storDevice->RefCount)); */
+ atomic_dec(&stor_device->ref_count);
+ /* ASSERT(atomic_read(&stor_device->ref_count)); */
}
-/* Drop ref count to 1 to effectively disable GetStorDevice() */
-static inline struct storvsc_device *ReleaseStorDevice(struct hv_device *Device)
+/* Drop ref count to 1 to effectively disable get_stor_device() */
+static inline struct storvsc_device *release_stor_device(
+ struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = (struct storvsc_device *)Device->Extension;
- /* ASSERT(storDevice); */
+ stor_device = (struct storvsc_device *)device->Extension;
+ /* ASSERT(stor_device); */
/* Busy wait until the ref drop to 2, then set it to 1 */
- while (atomic_cmpxchg(&storDevice->RefCount, 2, 1) != 2)
+ while (atomic_cmpxchg(&stor_device->ref_count, 2, 1) != 2)
udelay(100);
- return storDevice;
+ return stor_device;
}
-/* Drop ref count to 0. No one can use StorDevice object. */
-static inline struct storvsc_device *FinalReleaseStorDevice(
- struct hv_device *Device)
+/* Drop ref count to 0. No one can use stor_device object. */
+static inline struct storvsc_device *final_release_stor_device(
+ struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = (struct storvsc_device *)Device->Extension;
- /* ASSERT(storDevice); */
+ stor_device = (struct storvsc_device *)device->Extension;
+ /* ASSERT(stor_device); */
/* Busy wait until the ref drop to 1, then set it to 0 */
- while (atomic_cmpxchg(&storDevice->RefCount, 1, 0) != 1)
+ while (atomic_cmpxchg(&stor_device->ref_count, 1, 0) != 1)
udelay(100);
- Device->Extension = NULL;
- return storDevice;
+ device->Extension = NULL;
+ return stor_device;
}
-static int StorVscChannelInit(struct hv_device *Device)
+static int stor_vsc_channel_init(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
struct storvsc_request_extension *request;
- struct vstor_packet *vstorPacket;
+ struct vstor_packet *vstor_packet;
int ret;
- storDevice = GetStorDevice(Device);
- if (!storDevice) {
+ stor_device = get_stor_device(device);
+ if (!stor_device) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
return -1;
}
- request = &storDevice->InitRequest;
- vstorPacket = &request->VStorPacket;
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
/*
* Now, initiate the vsc/vsp initialization protocol on the open
* channel
*/
memset(request, 0, sizeof(struct storvsc_request_extension));
- request->WaitEvent = osd_WaitEventCreate();
- if (!request->WaitEvent) {
+ request->wait_event = osd_waitevent_create();
+ if (!request->wait_event) {
ret = -ENOMEM;
goto nomem;
}
- vstorPacket->Operation = VStorOperationBeginInitialization;
- vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
/*SpinlockAcquire(gDriverExt.packetListLock);
INSERT_TAIL_LIST(&gDriverExt.packetList, &packet->listEntry.entry);
@@ -213,7 +215,7 @@ static int StorVscChannelInit(struct hv_device *Device)
DPRINT_INFO(STORVSC, "BEGIN_INITIALIZATION_OPERATION...");
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VmbusPacketTypeDataInBand,
@@ -224,27 +226,27 @@ static int StorVscChannelInit(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->wait_event);
- if (vstorPacket->Operation != VStorOperationCompleteIo ||
- vstorPacket->Status != 0) {
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0) {
DPRINT_ERR(STORVSC, "BEGIN_INITIALIZATION_OPERATION failed "
"(op %d status 0x%x)",
- vstorPacket->Operation, vstorPacket->Status);
+ vstor_packet->operation, vstor_packet->status);
goto Cleanup;
}
DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION...");
/* reuse the packet for version range supported */
- memset(vstorPacket, 0, sizeof(struct vstor_packet));
- vstorPacket->Operation = VStorOperationQueryProtocolVersion;
- vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- vstorPacket->Version.MajorMinor = VMSTOR_PROTOCOL_VERSION_CURRENT;
- FILL_VMSTOR_REVISION(vstorPacket->Version.Revision);
+ vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
+ FILL_VMSTOR_REVISION(vstor_packet->version.revision);
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VmbusPacketTypeDataInBand,
@@ -255,27 +257,27 @@ static int StorVscChannelInit(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->wait_event);
/* TODO: Check returned version */
- if (vstorPacket->Operation != VStorOperationCompleteIo ||
- vstorPacket->Status != 0) {
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0) {
DPRINT_ERR(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION failed "
"(op %d status 0x%x)",
- vstorPacket->Operation, vstorPacket->Status);
+ vstor_packet->operation, vstor_packet->status);
goto Cleanup;
}
/* Query channel properties */
DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION...");
- memset(vstorPacket, 0, sizeof(struct vstor_packet));
- vstorPacket->Operation = VStorOperationQueryProperties;
- vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
- vstorPacket->StorageChannelProperties.PortNumber =
- storDevice->PortNumber;
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->storage_channel_properties.port_number =
+ stor_device->port_number;
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VmbusPacketTypeDataInBand,
@@ -287,31 +289,32 @@ static int StorVscChannelInit(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->wait_event);
/* TODO: Check returned version */
- if (vstorPacket->Operation != VStorOperationCompleteIo ||
- vstorPacket->Status != 0) {
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0) {
DPRINT_ERR(STORVSC, "QUERY_PROPERTIES_OPERATION failed "
"(op %d status 0x%x)",
- vstorPacket->Operation, vstorPacket->Status);
+ vstor_packet->operation, vstor_packet->status);
goto Cleanup;
}
- storDevice->PathId = vstorPacket->StorageChannelProperties.PathId;
- storDevice->TargetId = vstorPacket->StorageChannelProperties.TargetId;
+ stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
+ stor_device->target_id
+ = vstor_packet->storage_channel_properties.target_id;
DPRINT_DBG(STORVSC, "channel flag 0x%x, max xfer len 0x%x",
- vstorPacket->StorageChannelProperties.Flags,
- vstorPacket->StorageChannelProperties.MaxTransferBytes);
+ vstor_packet->storage_channel_properties.flags,
+ vstor_packet->storage_channel_properties.max_transfer_bytes);
DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION...");
- memset(vstorPacket, 0, sizeof(struct vstor_packet));
- vstorPacket->Operation = VStorOperationEndInitialization;
- vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VmbusPacketTypeDataInBand,
@@ -323,125 +326,125 @@ static int StorVscChannelInit(struct hv_device *Device)
goto Cleanup;
}
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->wait_event);
- if (vstorPacket->Operation != VStorOperationCompleteIo ||
- vstorPacket->Status != 0) {
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0) {
DPRINT_ERR(STORVSC, "END_INITIALIZATION_OPERATION failed "
"(op %d status 0x%x)",
- vstorPacket->Operation, vstorPacket->Status);
+ vstor_packet->operation, vstor_packet->status);
goto Cleanup;
}
DPRINT_INFO(STORVSC, "**** storage channel up and running!! ****");
Cleanup:
- kfree(request->WaitEvent);
- request->WaitEvent = NULL;
+ kfree(request->wait_event);
+ request->wait_event = NULL;
nomem:
- PutStorDevice(Device);
+ put_stor_device(device);
return ret;
}
-static void StorVscOnIOCompletion(struct hv_device *Device,
- struct vstor_packet *VStorPacket,
- struct storvsc_request_extension *RequestExt)
+static void stor_vsc_on_io_completion(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_request_extension *request_ext)
{
struct hv_storvsc_request *request;
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
- storDevice = MustGetStorDevice(Device);
- if (!storDevice) {
+ stor_device = must_get_stor_device(device);
+ if (!stor_device) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
return;
}
DPRINT_DBG(STORVSC, "IO_COMPLETE_OPERATION - request extension %p "
- "completed bytes xfer %u", RequestExt,
- VStorPacket->VmSrb.DataTransferLength);
+ "completed bytes xfer %u", request_ext,
+ vstor_packet->vm_srb.data_transfer_length);
- /* ASSERT(RequestExt != NULL); */
- /* ASSERT(RequestExt->Request != NULL); */
+ /* ASSERT(request_ext != NULL); */
+ /* ASSERT(request_ext->request != NULL); */
- request = RequestExt->Request;
+ request = request_ext->request;
/* ASSERT(request->OnIOCompletion != NULL); */
/* Copy over the status...etc */
- request->Status = VStorPacket->VmSrb.ScsiStatus;
+ request->status = vstor_packet->vm_srb.scsi_status;
- if (request->Status != 0 || VStorPacket->VmSrb.SrbStatus != 1) {
+ if (request->status != 0 || vstor_packet->vm_srb.srb_status != 1) {
DPRINT_WARN(STORVSC,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
- request->Cdb[0], VStorPacket->VmSrb.ScsiStatus,
- VStorPacket->VmSrb.SrbStatus);
+ request->cdb[0], vstor_packet->vm_srb.scsi_status,
+ vstor_packet->vm_srb.srb_status);
}
- if ((request->Status & 0xFF) == 0x02) {
+ if ((request->status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
- if (VStorPacket->VmSrb.SrbStatus & 0x80) {
+ if (vstor_packet->vm_srb.srb_status & 0x80) {
/* autosense data available */
DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
- "valid - len %d\n", RequestExt,
- VStorPacket->VmSrb.SenseInfoLength);
+ "valid - len %d\n", request_ext,
+ vstor_packet->vm_srb.sense_info_length);
- /* ASSERT(VStorPacket->VmSrb.SenseInfoLength <= */
+ /* ASSERT(vstor_packet->vm_srb.sense_info_length <= */
/* request->SenseBufferSize); */
- memcpy(request->SenseBuffer,
- VStorPacket->VmSrb.SenseData,
- VStorPacket->VmSrb.SenseInfoLength);
+ memcpy(request->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ vstor_packet->vm_srb.sense_info_length);
- request->SenseBufferSize =
- VStorPacket->VmSrb.SenseInfoLength;
+ request->sense_buffer_size =
+ vstor_packet->vm_srb.sense_info_length;
}
}
/* TODO: */
- request->BytesXfer = VStorPacket->VmSrb.DataTransferLength;
+ request->bytes_xfer = vstor_packet->vm_srb.data_transfer_length;
- request->OnIOCompletion(request);
+ request->on_io_completion(request);
- atomic_dec(&storDevice->NumOutstandingRequests);
+ atomic_dec(&stor_device->num_outstanding_req);
- PutStorDevice(Device);
+ put_stor_device(device);
}
-static void StorVscOnReceive(struct hv_device *Device,
- struct vstor_packet *VStorPacket,
- struct storvsc_request_extension *RequestExt)
+static void stor_vsc_on_receive(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_request_extension *request_ext)
{
- switch (VStorPacket->Operation) {
- case VStorOperationCompleteIo:
+ switch (vstor_packet->operation) {
+ case VSTOR_OPERATION_COMPLETE_IO:
DPRINT_DBG(STORVSC, "IO_COMPLETE_OPERATION");
- StorVscOnIOCompletion(Device, VStorPacket, RequestExt);
+ stor_vsc_on_io_completion(device, vstor_packet, request_ext);
break;
- case VStorOperationRemoveDevice:
+ case VSTOR_OPERATION_REMOVE_DEVICE:
DPRINT_INFO(STORVSC, "REMOVE_DEVICE_OPERATION");
/* TODO: */
break;
default:
DPRINT_INFO(STORVSC, "Unknown operation received - %d",
- VStorPacket->Operation);
+ vstor_packet->operation);
break;
}
}
-static void StorVscOnChannelCallback(void *context)
+static void stor_vsc_on_channel_callback(void *context)
{
struct hv_device *device = (struct hv_device *)context;
- struct storvsc_device *storDevice;
- u32 bytesRecvd;
- u64 requestId;
+ struct storvsc_device *stor_device;
+ u32 bytes_recvd;
+ u64 request_id;
unsigned char packet[ALIGN_UP(sizeof(struct vstor_packet), 8)];
struct storvsc_request_extension *request;
int ret;
/* ASSERT(device); */
- storDevice = MustGetStorDevice(device);
- if (!storDevice) {
+ stor_device = must_get_stor_device(device);
+ if (!stor_device) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
return;
@@ -450,32 +453,33 @@ static void StorVscOnChannelCallback(void *context)
do {
ret = vmbus_recvpacket(device->channel, packet,
ALIGN_UP(sizeof(struct vstor_packet), 8),
- &bytesRecvd, &requestId);
- if (ret == 0 && bytesRecvd > 0) {
+ &bytes_recvd, &request_id);
+ if (ret == 0 && bytes_recvd > 0) {
DPRINT_DBG(STORVSC, "receive %d bytes - tid %llx",
- bytesRecvd, requestId);
+ bytes_recvd, request_id);
- /* ASSERT(bytesRecvd == sizeof(struct vstor_packet)); */
+ /* ASSERT(bytes_recvd ==
+ sizeof(struct vstor_packet)); */
request = (struct storvsc_request_extension *)
- (unsigned long)requestId;
+ (unsigned long)request_id;
/* ASSERT(request);c */
- /* if (vstorPacket.Flags & SYNTHETIC_FLAG) */
- if ((request == &storDevice->InitRequest) ||
- (request == &storDevice->ResetRequest)) {
+ /* if (vstor_packet.flags & SYNTHETIC_FLAG) */
+ if ((request == &stor_device->init_request) ||
+ (request == &stor_device->reset_request)) {
/* DPRINT_INFO(STORVSC,
* "reset completion - operation "
* "%u status %u",
- * vstorPacket.Operation,
- * vstorPacket.Status); */
+ * vstor_packet.operation,
+ * vstor_packet.status); */
- memcpy(&request->VStorPacket, packet,
+ memcpy(&request->vstor_packet, packet,
sizeof(struct vstor_packet));
- osd_WaitEventSet(request->WaitEvent);
+ osd_waitevent_set(request->wait_event);
} else {
- StorVscOnReceive(device,
+ stor_vsc_on_receive(device,
(struct vstor_packet *)packet,
request);
}
@@ -485,52 +489,55 @@ static void StorVscOnChannelCallback(void *context)
}
} while (1);
- PutStorDevice(device);
+ put_stor_device(device);
return;
}
-static int StorVscConnectToVsp(struct hv_device *Device)
+static int stor_vsc_connect_to_vsp(struct hv_device *device)
{
struct vmstorage_channel_properties props;
- struct storvsc_driver_object *storDriver;
+ struct storvsc_driver_object *stor_driver;
int ret;
- storDriver = (struct storvsc_driver_object *)Device->Driver;
+ stor_driver = (struct storvsc_driver_object *)device->Driver;
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
/* Open the channel */
- ret = vmbus_open(Device->channel,
- storDriver->RingBufferSize, storDriver->RingBufferSize,
+ ret = vmbus_open(device->channel,
+ stor_driver->ring_buffer_size,
+ stor_driver->ring_buffer_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
- StorVscOnChannelCallback, Device);
+ stor_vsc_on_channel_callback, device);
DPRINT_DBG(STORVSC, "storage props: path id %d, tgt id %d, max xfer %d",
- props.PathId, props.TargetId, props.MaxTransferBytes);
+ props.path_id, props.target_id, props.max_transfer_bytes);
if (ret != 0) {
DPRINT_ERR(STORVSC, "unable to open channel: %d", ret);
return -1;
}
- ret = StorVscChannelInit(Device);
+ ret = stor_vsc_channel_init(device);
return ret;
}
/*
- * StorVscOnDeviceAdd - Callback when the device belonging to this driver is added
+ * stor_vsc_on_device_add - Callback when the device belonging to this driver
+ * is added
*/
-static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int stor_vsc_on_device_add(struct hv_device *device,
+ void *additional_info)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
/* struct vmstorage_channel_properties *props; */
- struct storvsc_device_info *deviceInfo;
+ struct storvsc_device_info *device_info;
int ret = 0;
- deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
- storDevice = AllocStorDevice(Device);
- if (!storDevice) {
+ device_info = (struct storvsc_device_info *)additional_info;
+ stor_device = alloc_stor_device(device);
+ if (!stor_device) {
ret = -1;
goto Cleanup;
}
@@ -550,103 +557,103 @@ static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
storChannel->PathId = props->PathId;
storChannel->TargetId = props->TargetId; */
- storDevice->PortNumber = deviceInfo->PortNumber;
+ stor_device->port_number = device_info->port_number;
/* Send it back up */
- ret = StorVscConnectToVsp(Device);
+ ret = stor_vsc_connect_to_vsp(device);
- /* deviceInfo->PortNumber = storDevice->PortNumber; */
- deviceInfo->PathId = storDevice->PathId;
- deviceInfo->TargetId = storDevice->TargetId;
+ /* device_info->port_number = stor_device->port_number; */
+ device_info->path_id = stor_device->path_id;
+ device_info->target_id = stor_device->target_id;
DPRINT_DBG(STORVSC, "assigned port %u, path %u target %u\n",
- storDevice->PortNumber, storDevice->PathId,
- storDevice->TargetId);
+ stor_device->port_number, stor_device->path_id,
+ stor_device->target_id);
Cleanup:
return ret;
}
/*
- * StorVscOnDeviceRemove - Callback when the our device is being removed
+ * stor_vsc_on_device_remove - Callback when our device is being removed
*/
-static int StorVscOnDeviceRemove(struct hv_device *Device)
+static int stor_vsc_on_device_remove(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
DPRINT_INFO(STORVSC, "disabling storage device (%p)...",
- Device->Extension);
+ device->Extension);
- storDevice = ReleaseStorDevice(Device);
+ stor_device = release_stor_device(device);
/*
* At this point, all outbound traffic should be disable. We
* only allow inbound traffic (responses) to proceed so that
* outstanding requests can be completed.
*/
- while (atomic_read(&storDevice->NumOutstandingRequests)) {
+ while (atomic_read(&stor_device->num_outstanding_req)) {
DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
- atomic_read(&storDevice->NumOutstandingRequests));
+ atomic_read(&stor_device->num_outstanding_req));
udelay(100);
}
DPRINT_INFO(STORVSC, "removing storage device (%p)...",
- Device->Extension);
+ device->Extension);
- storDevice = FinalReleaseStorDevice(Device);
+ stor_device = final_release_stor_device(device);
- DPRINT_INFO(STORVSC, "storage device (%p) safe to remove", storDevice);
+ DPRINT_INFO(STORVSC, "storage device (%p) safe to remove", stor_device);
/* Close the channel */
- vmbus_close(Device->channel);
+ vmbus_close(device->channel);
- FreeStorDevice(storDevice);
+ free_stor_device(stor_device);
return 0;
}
-int StorVscOnHostReset(struct hv_device *Device)
+int stor_vsc_on_host_reset(struct hv_device *device)
{
- struct storvsc_device *storDevice;
+ struct storvsc_device *stor_device;
struct storvsc_request_extension *request;
- struct vstor_packet *vstorPacket;
+ struct vstor_packet *vstor_packet;
int ret;
DPRINT_INFO(STORVSC, "resetting host adapter...");
- storDevice = GetStorDevice(Device);
- if (!storDevice) {
+ stor_device = get_stor_device(device);
+ if (!stor_device) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
return -1;
}
- request = &storDevice->ResetRequest;
- vstorPacket = &request->VStorPacket;
+ request = &stor_device->reset_request;
+ vstor_packet = &request->vstor_packet;
- request->WaitEvent = osd_WaitEventCreate();
- if (!request->WaitEvent) {
+ request->wait_event = osd_waitevent_create();
+ if (!request->wait_event) {
ret = -ENOMEM;
goto Cleanup;
}
- vstorPacket->Operation = VStorOperationResetBus;
- vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
- vstorPacket->VmSrb.PathId = storDevice->PathId;
+ vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->vm_srb.path_id = stor_device->path_id;
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
- (unsigned long)&storDevice->ResetRequest,
+ (unsigned long)&stor_device->reset_request,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0) {
DPRINT_ERR(STORVSC, "Unable to send reset packet %p ret %d",
- vstorPacket, ret);
+ vstor_packet, ret);
goto Cleanup;
}
/* FIXME: Add a timeout */
- osd_WaitEventWait(request->WaitEvent);
+ osd_waitevent_wait(request->wait_event);
- kfree(request->WaitEvent);
+ kfree(request->wait_event);
DPRINT_INFO(STORVSC, "host adapter reset completed");
/*
@@ -655,118 +662,118 @@ int StorVscOnHostReset(struct hv_device *Device)
*/
Cleanup:
- PutStorDevice(Device);
+ put_stor_device(device);
return ret;
}
/*
- * StorVscOnIORequest - Callback to initiate an I/O request
+ * stor_vsc_on_io_request - Callback to initiate an I/O request
*/
-static int StorVscOnIORequest(struct hv_device *Device,
- struct hv_storvsc_request *Request)
+static int stor_vsc_on_io_request(struct hv_device *device,
+ struct hv_storvsc_request *request)
{
- struct storvsc_device *storDevice;
- struct storvsc_request_extension *requestExtension;
- struct vstor_packet *vstorPacket;
+ struct storvsc_device *stor_device;
+ struct storvsc_request_extension *request_extension;
+ struct vstor_packet *vstor_packet;
int ret = 0;
- requestExtension =
- (struct storvsc_request_extension *)Request->Extension;
- vstorPacket = &requestExtension->VStorPacket;
- storDevice = GetStorDevice(Device);
+ request_extension =
+ (struct storvsc_request_extension *)request->extension;
+ vstor_packet = &request_extension->vstor_packet;
+ stor_device = get_stor_device(device);
DPRINT_DBG(STORVSC, "enter - Device %p, DeviceExt %p, Request %p, "
- "Extension %p", Device, storDevice, Request,
- requestExtension);
+ "Extension %p", device, stor_device, request,
+ request_extension);
DPRINT_DBG(STORVSC, "req %p len %d bus %d, target %d, lun %d cdblen %d",
- Request, Request->DataBuffer.Length, Request->Bus,
- Request->TargetId, Request->LunId, Request->CdbLen);
+ request, request->data_buffer.Length, request->bus,
+ request->target_id, request->lun_id, request->cdb_len);
- if (!storDevice) {
+ if (!stor_device) {
DPRINT_ERR(STORVSC, "unable to get stor device..."
"device being destroyed?");
return -2;
}
- /* print_hex_dump_bytes("", DUMP_PREFIX_NONE, Request->Cdb,
- * Request->CdbLen); */
+ /* print_hex_dump_bytes("", DUMP_PREFIX_NONE, request->cdb,
+ * request->cdb_len); */
- requestExtension->Request = Request;
- requestExtension->Device = Device;
+ request_extension->request = request;
+ request_extension->device = device;
- memset(vstorPacket, 0 , sizeof(struct vstor_packet));
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
- vstorPacket->Flags |= REQUEST_COMPLETION_FLAG;
+ vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
- vstorPacket->VmSrb.Length = sizeof(struct vmscsi_request);
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstorPacket->VmSrb.PortNumber = Request->Host;
- vstorPacket->VmSrb.PathId = Request->Bus;
- vstorPacket->VmSrb.TargetId = Request->TargetId;
- vstorPacket->VmSrb.Lun = Request->LunId;
+ vstor_packet->vm_srb.port_number = request->host;
+ vstor_packet->vm_srb.path_id = request->bus;
+ vstor_packet->vm_srb.target_id = request->target_id;
+ vstor_packet->vm_srb.lun = request->lun_id;
- vstorPacket->VmSrb.SenseInfoLength = SENSE_BUFFER_SIZE;
+ vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
/* Copy over the scsi command descriptor block */
- vstorPacket->VmSrb.CdbLength = Request->CdbLen;
- memcpy(&vstorPacket->VmSrb.Cdb, Request->Cdb, Request->CdbLen);
+ vstor_packet->vm_srb.cdb_length = request->cdb_len;
+ memcpy(&vstor_packet->vm_srb.cdb, request->cdb, request->cdb_len);
- vstorPacket->VmSrb.DataIn = Request->Type;
- vstorPacket->VmSrb.DataTransferLength = Request->DataBuffer.Length;
+ vstor_packet->vm_srb.data_in = request->type;
+ vstor_packet->vm_srb.data_transfer_length = request->data_buffer.Length;
- vstorPacket->Operation = VStorOperationExecuteSRB;
+ vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
DPRINT_DBG(STORVSC, "srb - len %d port %d, path %d, target %d, "
"lun %d senselen %d cdblen %d",
- vstorPacket->VmSrb.Length,
- vstorPacket->VmSrb.PortNumber,
- vstorPacket->VmSrb.PathId,
- vstorPacket->VmSrb.TargetId,
- vstorPacket->VmSrb.Lun,
- vstorPacket->VmSrb.SenseInfoLength,
- vstorPacket->VmSrb.CdbLength);
-
- if (requestExtension->Request->DataBuffer.Length) {
- ret = vmbus_sendpacket_multipagebuffer(Device->channel,
- &requestExtension->Request->DataBuffer,
- vstorPacket,
+ vstor_packet->vm_srb.length,
+ vstor_packet->vm_srb.port_number,
+ vstor_packet->vm_srb.path_id,
+ vstor_packet->vm_srb.target_id,
+ vstor_packet->vm_srb.lun,
+ vstor_packet->vm_srb.sense_info_length,
+ vstor_packet->vm_srb.cdb_length);
+
+ if (request_extension->request->data_buffer.Length) {
+ ret = vmbus_sendpacket_multipagebuffer(device->channel,
+ &request_extension->request->data_buffer,
+ vstor_packet,
sizeof(struct vstor_packet),
- (unsigned long)requestExtension);
+ (unsigned long)request_extension);
} else {
- ret = vmbus_sendpacket(Device->channel, vstorPacket,
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
- (unsigned long)requestExtension,
+ (unsigned long)request_extension,
VmbusPacketTypeDataInBand,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
if (ret != 0) {
DPRINT_DBG(STORVSC, "Unable to send packet %p ret %d",
- vstorPacket, ret);
+ vstor_packet, ret);
}
- atomic_inc(&storDevice->NumOutstandingRequests);
+ atomic_inc(&stor_device->num_outstanding_req);
- PutStorDevice(Device);
+ put_stor_device(device);
return ret;
}
/*
- * StorVscOnCleanup - Perform any cleanup when the driver is removed
+ * stor_vsc_on_cleanup - Perform any cleanup when the driver is removed
*/
-static void StorVscOnCleanup(struct hv_driver *Driver)
+static void stor_vsc_on_cleanup(struct hv_driver *driver)
{
}
/*
- * StorVscInitialize - Main entry point
+ * stor_vsc_initialize - Main entry point
*/
-int StorVscInitialize(struct hv_driver *Driver)
+int stor_vsc_initialize(struct hv_driver *driver)
{
- struct storvsc_driver_object *storDriver;
+ struct storvsc_driver_object *stor_driver;
- storDriver = (struct storvsc_driver_object *)Driver;
+ stor_driver = (struct storvsc_driver_object *)driver;
DPRINT_DBG(STORVSC, "sizeof(STORVSC_REQUEST)=%zd "
"sizeof(struct storvsc_request_extension)=%zd "
@@ -778,13 +785,14 @@ int StorVscInitialize(struct hv_driver *Driver)
sizeof(struct vmscsi_request));
/* Make sure we are at least 2 pages since 1 page is used for control */
- /* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
+ /* ASSERT(stor_driver->ring_buffer_size >= (PAGE_SIZE << 1)); */
- Driver->name = gDriverName;
- memcpy(&Driver->deviceType, &gStorVscDeviceType,
+ driver->name = g_driver_name;
+ memcpy(&driver->deviceType, &gStorVscDeviceType,
sizeof(struct hv_guid));
- storDriver->RequestExtSize = sizeof(struct storvsc_request_extension);
+ stor_driver->request_ext_size =
+ sizeof(struct storvsc_request_extension);
/*
* Divide the ring buffer data size (which is 1 page less
@@ -792,22 +800,22 @@ int StorVscInitialize(struct hv_driver *Driver)
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
- storDriver->MaxOutstandingRequestsPerChannel =
- ((storDriver->RingBufferSize - PAGE_SIZE) /
+ stor_driver->max_outstanding_req_per_channel =
+ ((stor_driver->ring_buffer_size - PAGE_SIZE) /
ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET +
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
DPRINT_INFO(STORVSC, "max io %u, currently %u\n",
- storDriver->MaxOutstandingRequestsPerChannel,
+ stor_driver->max_outstanding_req_per_channel,
STORVSC_MAX_IO_REQUESTS);
/* Setup the dispatch table */
- storDriver->Base.OnDeviceAdd = StorVscOnDeviceAdd;
- storDriver->Base.OnDeviceRemove = StorVscOnDeviceRemove;
- storDriver->Base.OnCleanup = StorVscOnCleanup;
+ stor_driver->base.OnDeviceAdd = stor_vsc_on_device_add;
+ stor_driver->base.OnDeviceRemove = stor_vsc_on_device_remove;
+ stor_driver->base.OnCleanup = stor_vsc_on_cleanup;
- storDriver->OnIORequest = StorVscOnIORequest;
+ stor_driver->on_io_request = stor_vsc_on_io_request;
return 0;
}
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 8505a1c5f9ee..fbf57556d890 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -53,58 +53,58 @@ enum storvsc_request_type{
};
struct hv_storvsc_request {
- enum storvsc_request_type Type;
- u32 Host;
- u32 Bus;
- u32 TargetId;
- u32 LunId;
- u8 *Cdb;
- u32 CdbLen;
- u32 Status;
- u32 BytesXfer;
+ enum storvsc_request_type type;
+ u32 host;
+ u32 bus;
+ u32 target_id;
+ u32 lun_id;
+ u8 *cdb;
+ u32 cdb_len;
+ u32 status;
+ u32 bytes_xfer;
- unsigned char *SenseBuffer;
- u32 SenseBufferSize;
+ unsigned char *sense_buffer;
+ u32 sense_buffer_size;
- void *Context;
+ void *context;
- void (*OnIOCompletion)(struct hv_storvsc_request *Request);
+ void (*on_io_completion)(struct hv_storvsc_request *request);
/* This points to the memory after DataBuffer */
- void *Extension;
+ void *extension;
- struct hv_multipage_buffer DataBuffer;
+ struct hv_multipage_buffer data_buffer;
};
/* Represents the block vsc driver */
struct storvsc_driver_object {
/* Must be the first field */
/* Which is a bug FIXME! */
- struct hv_driver Base;
+ struct hv_driver base;
/* Set by caller (in bytes) */
- u32 RingBufferSize;
+ u32 ring_buffer_size;
/* Allocate this much private extension for each I/O request */
- u32 RequestExtSize;
+ u32 request_ext_size;
/* Maximum # of requests in flight per channel/device */
- u32 MaxOutstandingRequestsPerChannel;
+ u32 max_outstanding_req_per_channel;
/* Specific to this driver */
- int (*OnIORequest)(struct hv_device *Device,
- struct hv_storvsc_request *Request);
+ int (*on_io_request)(struct hv_device *device,
+ struct hv_storvsc_request *request);
};
struct storvsc_device_info {
- unsigned int PortNumber;
- unsigned char PathId;
- unsigned char TargetId;
+ unsigned int port_number;
+ unsigned char path_id;
+ unsigned char target_id;
};
/* Interface */
-int StorVscInitialize(struct hv_driver *driver);
-int StorVscOnHostReset(struct hv_device *Device);
-int BlkVscInitialize(struct hv_driver *driver);
+int stor_vsc_initialize(struct hv_driver *driver);
+int stor_vsc_on_host_reset(struct hv_device *device);
+int blk_vsc_initialize(struct hv_driver *driver);
#endif /* _STORVSC_API_H_ */
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 6f8d67d0d64f..17f1b344fbc5 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -140,28 +140,28 @@ static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
- storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
+ storvsc_drv_obj->ring_buffer_size = storvsc_ringbuffer_size;
/* Callback to client driver to complete the initialization */
- drv_init(&storvsc_drv_obj->Base);
+ drv_init(&storvsc_drv_obj->base);
DPRINT_INFO(STORVSC_DRV,
"request extension size %u, max outstanding reqs %u",
- storvsc_drv_obj->RequestExtSize,
- storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
+ storvsc_drv_obj->request_ext_size,
+ storvsc_drv_obj->max_outstanding_req_per_channel);
- if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel <
+ if (storvsc_drv_obj->max_outstanding_req_per_channel <
STORVSC_MAX_IO_REQUESTS) {
DPRINT_ERR(STORVSC_DRV,
"The number of outstanding io requests (%d) "
"is larger than that supported (%d) internally.",
STORVSC_MAX_IO_REQUESTS,
- storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
+ storvsc_drv_obj->max_outstanding_req_per_channel);
return -1;
}
- drv_ctx->driver.name = storvsc_drv_obj->Base.name;
- memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
+ drv_ctx->driver.name = storvsc_drv_obj->base.name;
+ memcpy(&drv_ctx->class_id, &storvsc_drv_obj->base.deviceType,
sizeof(struct hv_guid));
drv_ctx->probe = storvsc_probe;
@@ -206,8 +206,8 @@ static void storvsc_drv_exit(void)
device_unregister(current_dev);
}
- if (storvsc_drv_obj->Base.OnCleanup)
- storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
+ if (storvsc_drv_obj->base.OnCleanup)
+ storvsc_drv_obj->base.OnCleanup(&storvsc_drv_obj->base);
vmbus_child_driver_unregister(drv_ctx);
return;
@@ -231,7 +231,7 @@ static int storvsc_probe(struct device *device)
struct host_device_context *host_device_ctx;
struct storvsc_device_info device_info;
- if (!storvsc_drv_obj->Base.OnDeviceAdd)
+ if (!storvsc_drv_obj->base.OnDeviceAdd)
return -1;
host = scsi_host_alloc(&scsi_driver,
@@ -252,7 +252,7 @@ static int storvsc_probe(struct device *device)
host_device_ctx->request_pool =
kmem_cache_create(dev_name(&device_ctx->device),
sizeof(struct storvsc_cmd_request) +
- storvsc_drv_obj->RequestExtSize, 0,
+ storvsc_drv_obj->request_ext_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!host_device_ctx->request_pool) {
@@ -260,9 +260,9 @@ static int storvsc_probe(struct device *device)
return -ENOMEM;
}
- device_info.PortNumber = host->host_no;
+ device_info.port_number = host->host_no;
/* Call to the vsc driver to add the device */
- ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj,
+ ret = storvsc_drv_obj->base.OnDeviceAdd(device_obj,
(void *)&device_info);
if (ret != 0) {
DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
@@ -272,8 +272,8 @@ static int storvsc_probe(struct device *device)
}
/* host_device_ctx->port = device_info.PortNumber; */
- host_device_ctx->path = device_info.PathId;
- host_device_ctx->target = device_info.TargetId;
+ host_device_ctx->path = device_info.path_id;
+ host_device_ctx->target = device_info.target_id;
/* max # of devices per target */
host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
@@ -287,7 +287,7 @@ static int storvsc_probe(struct device *device)
if (ret != 0) {
DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");
- storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+ storvsc_drv_obj->base.OnDeviceRemove(device_obj);
kmem_cache_destroy(host_device_ctx->request_pool);
scsi_host_put(host);
@@ -317,14 +317,14 @@ static int storvsc_remove(struct device *device)
(struct host_device_context *)host->hostdata;
- if (!storvsc_drv_obj->Base.OnDeviceRemove)
+ if (!storvsc_drv_obj->base.OnDeviceRemove)
return -1;
/*
* Call to the vsc driver to let it know that the device is being
* removed
*/
- ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+ ret = storvsc_drv_obj->base.OnDeviceRemove(device_obj);
if (ret != 0) {
/* TODO: */
DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)",
@@ -350,7 +350,7 @@ static int storvsc_remove(struct device *device)
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
struct storvsc_cmd_request *cmd_request =
- (struct storvsc_cmd_request *)request->Context;
+ (struct storvsc_cmd_request *)request->context;
struct scsi_cmnd *scmnd = cmd_request->cmd;
struct host_device_context *host_device_ctx =
(struct host_device_context *)scmnd->device->host->hostdata;
@@ -375,16 +375,17 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
cmd_request->bounce_sgl_count);
}
- scmnd->result = request->Status;
+ scmnd->result = request->status;
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
- request->SenseBufferSize, &sense_hdr))
+ request->sense_buffer_size, &sense_hdr))
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
- /* ASSERT(request->BytesXfer <= request->DataBuffer.Length); */
- scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
+ /* ASSERT(request->BytesXfer <= request->data_buffer.Length); */
+ scsi_set_resid(scmnd,
+ request->data_buffer.Length - request->bytes_xfer);
scsi_done_fn = scmnd->scsi_done;
@@ -657,42 +658,42 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
request = &cmd_request->request;
- request->Extension =
+ request->extension =
(void *)((unsigned long)cmd_request + request_size);
DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size,
- storvsc_drv_obj->RequestExtSize);
+ storvsc_drv_obj->request_ext_size);
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
- request->Type = WRITE_TYPE;
+ request->type = WRITE_TYPE;
break;
case DMA_FROM_DEVICE:
- request->Type = READ_TYPE;
+ request->type = READ_TYPE;
break;
default:
- request->Type = UNKNOWN_TYPE;
+ request->type = UNKNOWN_TYPE;
break;
}
- request->OnIOCompletion = storvsc_commmand_completion;
- request->Context = cmd_request;/* scmnd; */
+ request->on_io_completion = storvsc_commmand_completion;
+ request->context = cmd_request;/* scmnd; */
/* request->PortId = scmnd->device->channel; */
- request->Host = host_device_ctx->port;
- request->Bus = scmnd->device->channel;
- request->TargetId = scmnd->device->id;
- request->LunId = scmnd->device->lun;
+ request->host = host_device_ctx->port;
+ request->bus = scmnd->device->channel;
+ request->target_id = scmnd->device->id;
+ request->lun_id = scmnd->device->lun;
/* ASSERT(scmnd->cmd_len <= 16); */
- request->CdbLen = scmnd->cmd_len;
- request->Cdb = scmnd->cmnd;
+ request->cdb_len = scmnd->cmd_len;
+ request->cdb = scmnd->cmnd;
- request->SenseBuffer = scmnd->sense_buffer;
- request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
+ request->sense_buffer = scmnd->sense_buffer;
+ request->sense_buffer_size = SCSI_SENSE_BUFFERSIZE;
- request->DataBuffer.Length = scsi_bufflen(scmnd);
+ request->data_buffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
sg_count = scsi_sg_count(scmnd);
@@ -733,25 +734,25 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
sg_count = cmd_request->bounce_sgl_count;
}
- request->DataBuffer.Offset = sgl[0].offset;
+ request->data_buffer.Offset = sgl[0].offset;
for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
i, sgl[i].length, sgl[i].offset);
- request->DataBuffer.PfnArray[i] =
+ request->data_buffer.PfnArray[i] =
page_to_pfn(sg_page((&sgl[i])));
}
} else if (scsi_sglist(scmnd)) {
/* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
- request->DataBuffer.Offset =
+ request->data_buffer.Offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
- request->DataBuffer.PfnArray[0] =
+ request->data_buffer.PfnArray[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
}
retry_request:
/* Invokes the vsc to start an IO */
- ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj,
+ ret = storvsc_drv_obj->on_io_request(&device_ctx->device_obj,
&cmd_request->request);
if (ret == -1) {
/* no more space */
@@ -844,7 +845,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
scmnd->device, &device_ctx->device_obj);
/* Invokes the vsc to reset the host/bus */
- ret = StorVscOnHostReset(&device_ctx->device_obj);
+ ret = stor_vsc_on_host_reset(&device_ctx->device_obj);
if (ret != 0)
return ret;
@@ -939,7 +940,7 @@ static int __init storvsc_init(void)
int ret;
DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
- ret = storvsc_drv_init(StorVscInitialize);
+ ret = storvsc_drv_init(stor_vsc_initialize);
return ret;
}
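
The queuecommand path above describes each data buffer to the VSC as a byte offset into the first page plus an array of page frame numbers, using virt_to_phys() and page_to_pfn(). The arithmetic is only a mask and a shift; the following is a minimal user-space sketch, assuming 4 KiB pages (a PAGE_SHIFT of 12) and an arbitrary address standing in for the kernel helpers, which are not available outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12                      /* assumed: 4 KiB pages */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

int main(void)
{
	/* Arbitrary address standing in for virt_to_phys(scsi_sglist(scmnd)). */
	uintptr_t addr = 0x12345678;

	unsigned long offset = addr & (DEMO_PAGE_SIZE - 1); /* offset into first page */
	unsigned long pfn = addr >> DEMO_PAGE_SHIFT;        /* page frame number */

	printf("offset=%#lx pfn=%#lx\n", offset, pfn);
	return 0;
}
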
diff --git a/drivers/staging/hv/vmbus.c b/drivers/staging/hv/vmbus.c
deleted file mode 100644
index d449daf81976..000000000000
--- a/drivers/staging/hv/vmbus.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- * Haiyang Zhang <haiyangz@microsoft.com>
- * Hank Janssen <hjanssen@microsoft.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include "osd.h"
-#include "logging.h"
-#include "version_info.h"
-#include "vmbus_private.h"
-
-static const char *gDriverName = "vmbus";
-
-/*
- * Windows vmbus does not defined this.
- * We defined this to be consistent with other devices
- */
-/* {c5295816-f63a-4d5f-8d1a-4daf999ca185} */
-static const struct hv_guid gVmbusDeviceType = {
- .data = {
- 0x16, 0x58, 0x29, 0xc5, 0x3a, 0xf6, 0x5f, 0x4d,
- 0x8d, 0x1a, 0x4d, 0xaf, 0x99, 0x9c, 0xa1, 0x85
- }
-};
-
-/* {ac3760fc-9adf-40aa-9427-a70ed6de95c5} */
-static const struct hv_guid gVmbusDeviceId = {
- .data = {
- 0xfc, 0x60, 0x37, 0xac, 0xdf, 0x9a, 0xaa, 0x40,
- 0x94, 0x27, 0xa7, 0x0e, 0xd6, 0xde, 0x95, 0xc5
- }
-};
-
-static struct hv_driver *gDriver; /* vmbus driver object */
-static struct hv_device *gDevice; /* vmbus root device */
-
-/*
- * VmbusGetChannelOffers - Retrieve the channel offers from the parent partition
- */
-static void VmbusGetChannelOffers(void)
-{
- vmbus_request_offers();
-}
-
-/*
- * VmbusCreateChildDevice - Creates the child device on the bus that represents the channel offer
- */
-struct hv_device *VmbusChildDeviceCreate(struct hv_guid *DeviceType,
- struct hv_guid *DeviceInstance,
- struct vmbus_channel *channel)
-{
- struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
- return vmbusDriver->OnChildDeviceCreate(DeviceType, DeviceInstance,
- channel);
-}
-
-/*
- * VmbusChildDeviceAdd - Registers the child device with the vmbus
- */
-int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
-{
- struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
- return vmbusDriver->OnChildDeviceAdd(gDevice, ChildDevice);
-}
-
-/*
- * VmbusChildDeviceRemove Unregisters the child device from the vmbus
- */
-void VmbusChildDeviceRemove(struct hv_device *ChildDevice)
-{
- struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
- vmbusDriver->OnChildDeviceRemove(ChildDevice);
-}
-
-/*
- * VmbusOnDeviceAdd - Callback when the root bus device is added
- */
-static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
-{
- u32 *irqvector = AdditionalInfo;
- int ret;
-
- gDevice = dev;
-
- memcpy(&gDevice->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
- memcpy(&gDevice->deviceInstance, &gVmbusDeviceId,
- sizeof(struct hv_guid));
-
- /* strcpy(dev->name, "vmbus"); */
- /* SynIC setup... */
- on_each_cpu(HvSynicInit, (void *)irqvector, 1);
-
- /* Connect to VMBus in the root partition */
- ret = VmbusConnect();
-
- /* VmbusSendEvent(device->localPortId+1); */
- return ret;
-}
-
-/*
- * VmbusOnDeviceRemove - Callback when the root bus device is removed
- */
-static int VmbusOnDeviceRemove(struct hv_device *dev)
-{
- int ret = 0;
-
- vmbus_release_unattached_channels();
- VmbusDisconnect();
- on_each_cpu(HvSynicCleanup, NULL, 1);
- return ret;
-}
-
-/*
- * VmbusOnCleanup - Perform any cleanup when the driver is removed
- */
-static void VmbusOnCleanup(struct hv_driver *drv)
-{
- /* struct vmbus_driver *driver = (struct vmbus_driver *)drv; */
-
- HvCleanup();
-}
-
-/*
- * VmbusOnMsgDPC - DPC routine to handle messages from the hypervisior
- */
-static void VmbusOnMsgDPC(struct hv_driver *drv)
-{
- int cpu = smp_processor_id();
- void *page_addr = gHvContext.synICMessagePage[cpu];
- struct hv_message *msg = (struct hv_message *)page_addr +
- VMBUS_MESSAGE_SINT;
- struct hv_message *copied;
-
- while (1) {
- if (msg->Header.MessageType == HvMessageTypeNone) {
- /* no msg */
- break;
- } else {
- copied = kmemdup(msg, sizeof(*copied), GFP_ATOMIC);
- if (copied == NULL)
- continue;
-
- osd_schedule_callback(gVmbusConnection.WorkQueue,
- vmbus_onmessage,
- (void *)copied);
- }
-
- msg->Header.MessageType = HvMessageTypeNone;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HvMessageTypeNone) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->Header.MessageFlags.MessagePending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
- }
-}
-
-/*
- * VmbusOnEventDPC - DPC routine to handle events from the hypervisior
- */
-static void VmbusOnEventDPC(struct hv_driver *drv)
-{
- /* TODO: Process any events */
- VmbusOnEvents();
-}
-
-/*
- * VmbusOnISR - ISR routine
- */
-static int VmbusOnISR(struct hv_driver *drv)
-{
- int ret = 0;
- int cpu = smp_processor_id();
- void *page_addr;
- struct hv_message *msg;
- union hv_synic_event_flags *event;
-
- page_addr = gHvContext.synICMessagePage[cpu];
- msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
-
- /* Check if there are actual msgs to be process */
- if (msg->Header.MessageType != HvMessageTypeNone) {
- DPRINT_DBG(VMBUS, "received msg type %d size %d",
- msg->Header.MessageType,
- msg->Header.PayloadSize);
- ret |= 0x1;
- }
-
- /* TODO: Check if there are events to be process */
- page_addr = gHvContext.synICEventPage[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
-
- /* Since we are a child, we only need to check bit 0 */
- if (test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) {
- DPRINT_DBG(VMBUS, "received event %d", event->Flags32[0]);
- ret |= 0x2;
- }
-
- return ret;
-}
-
-/*
- * VmbusInitialize - Main entry point
- */
-int VmbusInitialize(struct hv_driver *drv)
-{
- struct vmbus_driver *driver = (struct vmbus_driver *)drv;
- int ret;
-
- DPRINT_INFO(VMBUS, "+++++++ HV Driver version = %s +++++++",
- HV_DRV_VERSION);
- DPRINT_INFO(VMBUS, "+++++++ Vmbus supported version = %d +++++++",
- VMBUS_REVISION_NUMBER);
- DPRINT_INFO(VMBUS, "+++++++ Vmbus using SINT %d +++++++",
- VMBUS_MESSAGE_SINT);
- DPRINT_DBG(VMBUS, "sizeof(vmbus_channel_packet_page_buffer)=%zd, "
- "sizeof(VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER)=%zd",
- sizeof(struct vmbus_channel_packet_page_buffer),
- sizeof(struct vmbus_channel_packet_multipage_buffer));
-
- drv->name = gDriverName;
- memcpy(&drv->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
-
- /* Setup dispatch table */
- driver->Base.OnDeviceAdd = VmbusOnDeviceAdd;
- driver->Base.OnDeviceRemove = VmbusOnDeviceRemove;
- driver->Base.OnCleanup = VmbusOnCleanup;
- driver->OnIsr = VmbusOnISR;
- driver->OnMsgDpc = VmbusOnMsgDPC;
- driver->OnEventDpc = VmbusOnEventDPC;
- driver->GetChannelOffers = VmbusGetChannelOffers;
-
- /* Hypervisor initialization...setup hypercall page..etc */
- ret = HvInit();
- if (ret != 0)
- DPRINT_ERR(VMBUS, "Unable to initialize the hypervisor - 0x%x",
- ret);
- gDriver = drv;
-
- return ret;
-}
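
The GUID tables in the deleted file (and in the copy added to vmbus_drv.c further down) store {c5295816-f63a-4d5f-8d1a-4daf999ca185} with the first three fields in little-endian byte order and the remaining eight bytes as written. The stand-alone sketch below only illustrates that byte layout and is not part of the driver:

#include <stdio.h>

int main(void)
{
	unsigned int d1, b[8];
	unsigned short d2, d3;
	unsigned char out[16];
	int i;

	/* Textual GUID from the comment above. */
	if (sscanf("c5295816-f63a-4d5f-8d1a-4daf999ca185",
		   "%8x-%4hx-%4hx-%2x%2x-%2x%2x%2x%2x%2x%2x",
		   &d1, &d2, &d3, &b[0], &b[1], &b[2], &b[3],
		   &b[4], &b[5], &b[6], &b[7]) != 11)
		return 1;

	/* First three fields little-endian, final eight bytes as written. */
	out[0] = d1; out[1] = d1 >> 8; out[2] = d1 >> 16; out[3] = d1 >> 24;
	out[4] = d2; out[5] = d2 >> 8;
	out[6] = d3; out[7] = d3 >> 8;
	for (i = 0; i < 8; i++)
		out[8 + i] = b[i];

	for (i = 0; i < 16; i++)            /* matches the .data initializer */
		printf("0x%02x%s", out[i], i == 15 ? "\n" : ", ");
	return 0;
}
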
diff --git a/drivers/staging/hv/vmbus_api.h b/drivers/staging/hv/vmbus_api.h
index 2af42e550076..2da3f52610b3 100644
--- a/drivers/staging/hv/vmbus_api.h
+++ b/drivers/staging/hv/vmbus_api.h
@@ -115,28 +115,4 @@ struct hv_device {
void *Extension;
};
-/* Vmbus driver object */
-struct vmbus_driver {
- /* !! Must be the 1st field !! */
- /* FIXME if ^, then someone is doing somthing stupid */
- struct hv_driver Base;
-
- /* Set by the caller */
- struct hv_device * (*OnChildDeviceCreate)(struct hv_guid *DeviceType,
- struct hv_guid *DeviceInstance,
- struct vmbus_channel *channel);
- void (*OnChildDeviceDestroy)(struct hv_device *device);
- int (*OnChildDeviceAdd)(struct hv_device *RootDevice,
- struct hv_device *ChildDevice);
- void (*OnChildDeviceRemove)(struct hv_device *device);
-
- /* Set by the callee */
- int (*OnIsr)(struct hv_driver *driver);
- void (*OnMsgDpc)(struct hv_driver *driver);
- void (*OnEventDpc)(struct hv_driver *driver);
- void (*GetChannelOffers)(void);
-};
-
-int VmbusInitialize(struct hv_driver *drv);
-
#endif /* _VMBUS_API_H_ */
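
What this hunk removes is a dispatch table: struct vmbus_driver carried function pointers (OnChildDeviceCreate, OnIsr, OnMsgDpc, ...) that the bus code called indirectly, while the rewritten vmbus_drv.c calls its handlers directly. A generic sketch of that difference, using invented demo_* names rather than the hv types:

#include <stdio.h>

/* Generic flavour of the table the patch removes: the bus core calls
 * through function pointers that a lower layer fills in at init time. */
struct demo_driver_ops {
	int  (*on_isr)(void);
	void (*on_msg_dpc)(void);
};

static int demo_isr(void)
{
	return 1;               /* pretend a message is pending */
}

static void demo_msg_dpc(void)
{
	puts("handling message");
}

int main(void)
{
	struct demo_driver_ops ops = {
		.on_isr     = demo_isr,
		.on_msg_dpc = demo_msg_dpc,
	};

	/* Old style: indirect calls through the filled-in table. */
	if (ops.on_isr())
		ops.on_msg_dpc();

	/* New style: the handlers are known at compile time and called directly. */
	if (demo_isr())
		demo_msg_dpc();
	return 0;
}
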
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index 0d9f3a411e72..84fdb64d3ceb 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include "logging.h"
#include "vmbus.h"
#include "channel.h"
+#include "vmbus_private.h"
/* FIXME! We need to do this dynamically for PIC and APIC system */
@@ -46,7 +47,7 @@ struct vmbus_driver_context {
/* The driver field is not used in here. Instead, the bus field is */
/* used to represent the driver */
struct driver_context drv_ctx;
- struct vmbus_driver drv_obj;
+ struct hv_driver drv_obj;
struct bus_type bus;
struct tasklet_struct msg_dpc;
@@ -69,13 +70,6 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id);
static void vmbus_device_release(struct device *device);
static void vmbus_bus_release(struct device *device);
-static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
- struct hv_guid *instance,
- struct vmbus_channel *channel);
-static void vmbus_child_device_destroy(struct hv_device *device_obj);
-static int vmbus_child_device_register(struct hv_device *root_device_obj,
- struct hv_device *child_device_obj);
-static void vmbus_child_device_unregister(struct hv_device *child_device_obj);
static ssize_t vmbus_show_device_attr(struct device *dev,
struct device_attribute *dev_attr,
char *buf);
@@ -129,6 +123,182 @@ static struct vmbus_driver_context g_vmbus_drv = {
.bus.dev_attrs = vmbus_device_attrs,
};
+static const char *gDriverName = "hyperv";
+
+/*
+ * Windows vmbus does not define this.
+ * We defined this to be consistent with other devices
+ */
+/* {c5295816-f63a-4d5f-8d1a-4daf999ca185} */
+static const struct hv_guid gVmbusDeviceType = {
+ .data = {
+ 0x16, 0x58, 0x29, 0xc5, 0x3a, 0xf6, 0x5f, 0x4d,
+ 0x8d, 0x1a, 0x4d, 0xaf, 0x99, 0x9c, 0xa1, 0x85
+ }
+};
+
+/* {ac3760fc-9adf-40aa-9427-a70ed6de95c5} */
+static const struct hv_guid gVmbusDeviceId = {
+ .data = {
+ 0xfc, 0x60, 0x37, 0xac, 0xdf, 0x9a, 0xaa, 0x40,
+ 0x94, 0x27, 0xa7, 0x0e, 0xd6, 0xde, 0x95, 0xc5
+ }
+};
+
+static struct hv_device *gDevice; /* vmbus root device */
+
+/*
+ * VmbusChildDeviceAdd - Registers the child device with the vmbus
+ */
+int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
+{
+ return vmbus_child_device_register(gDevice, ChildDevice);
+}
+
+/*
+ * VmbusOnDeviceAdd - Callback when the root bus device is added
+ */
+static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
+{
+ u32 *irqvector = AdditionalInfo;
+ int ret;
+
+ gDevice = dev;
+
+ memcpy(&gDevice->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
+ memcpy(&gDevice->deviceInstance, &gVmbusDeviceId,
+ sizeof(struct hv_guid));
+
+ /* strcpy(dev->name, "vmbus"); */
+ /* SynIC setup... */
+ on_each_cpu(hv_synic_init, (void *)irqvector, 1);
+
+ /* Connect to VMBus in the root partition */
+ ret = VmbusConnect();
+
+ /* VmbusSendEvent(device->localPortId+1); */
+ return ret;
+}
+
+/*
+ * VmbusOnDeviceRemove - Callback when the root bus device is removed
+ */
+static int VmbusOnDeviceRemove(struct hv_device *dev)
+{
+ int ret = 0;
+
+ vmbus_release_unattached_channels();
+ VmbusDisconnect();
+ on_each_cpu(hv_synic_cleanup, NULL, 1);
+ return ret;
+}
+
+/*
+ * VmbusOnCleanup - Perform any cleanup when the driver is removed
+ */
+static void VmbusOnCleanup(struct hv_driver *drv)
+{
+ /* struct vmbus_driver *driver = (struct vmbus_driver *)drv; */
+
+ hv_cleanup();
+}
+
+struct onmessage_work_context {
+ struct work_struct work;
+ struct hv_message msg;
+};
+
+static void vmbus_onmessage_work(struct work_struct *work)
+{
+ struct onmessage_work_context *ctx;
+
+ ctx = container_of(work, struct onmessage_work_context,
+ work);
+ vmbus_onmessage(&ctx->msg);
+ kfree(ctx);
+}
+
+/*
+ * vmbus_on_msg_dpc - DPC routine to handle messages from the hypervisor
+ */
+static void vmbus_on_msg_dpc(struct hv_driver *drv)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct onmessage_work_context *ctx;
+
+ while (1) {
+ if (msg->header.message_type == HVMSG_NONE) {
+ /* no msg */
+ break;
+ } else {
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx == NULL)
+ continue;
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+ memcpy(&ctx->msg, msg, sizeof(*msg));
+ queue_work(gVmbusConnection.WorkQueue, &ctx->work);
+ }
+
+ msg->header.message_type = HVMSG_NONE;
+
+ /*
+ * Make sure the write to MessageType (ie set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ wrmsrl(HV_X64_MSR_EOM, 0);
+ }
+ }
+}
+
+/*
+ * vmbus_on_isr - ISR routine
+ */
+static int vmbus_on_isr(struct hv_driver *drv)
+{
+ int ret = 0;
+ int cpu = smp_processor_id();
+ void *page_addr;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
+
+ page_addr = hv_context.synic_message_page[cpu];
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Check if there are actual msgs to be processed */
+ if (msg->header.message_type != HVMSG_NONE) {
+ DPRINT_DBG(VMBUS, "received msg type %d size %d",
+ msg->header.message_type,
+ msg->header.payload_size);
+ ret |= 0x1;
+ }
+
+ /* TODO: Check if there are events to be processed */
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+
+ /* Since we are a child, we only need to check bit 0 */
+ if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]);
+ ret |= 0x2;
+ }
+
+ return ret;
+}
+
static void get_channel_info(struct hv_device *device,
struct hv_device_info *info)
{
@@ -139,35 +309,38 @@ static void get_channel_info(struct hv_device *device,
vmbus_get_debug_info(device->channel, &debug_info);
- info->ChannelId = debug_info.RelId;
- info->ChannelState = debug_info.State;
- memcpy(&info->ChannelType, &debug_info.InterfaceType,
+ info->ChannelId = debug_info.relid;
+ info->ChannelState = debug_info.state;
+ memcpy(&info->ChannelType, &debug_info.interfacetype,
sizeof(struct hv_guid));
- memcpy(&info->ChannelInstance, &debug_info.InterfaceInstance,
+ memcpy(&info->ChannelInstance, &debug_info.interface_instance,
sizeof(struct hv_guid));
- info->MonitorId = debug_info.MonitorId;
-
- info->ServerMonitorPending = debug_info.ServerMonitorPending;
- info->ServerMonitorLatency = debug_info.ServerMonitorLatency;
- info->ServerMonitorConnectionId = debug_info.ServerMonitorConnectionId;
-
- info->ClientMonitorPending = debug_info.ClientMonitorPending;
- info->ClientMonitorLatency = debug_info.ClientMonitorLatency;
- info->ClientMonitorConnectionId = debug_info.ClientMonitorConnectionId;
-
- info->Inbound.InterruptMask = debug_info.Inbound.CurrentInterruptMask;
- info->Inbound.ReadIndex = debug_info.Inbound.CurrentReadIndex;
- info->Inbound.WriteIndex = debug_info.Inbound.CurrentWriteIndex;
- info->Inbound.BytesAvailToRead = debug_info.Inbound.BytesAvailToRead;
- info->Inbound.BytesAvailToWrite = debug_info.Inbound.BytesAvailToWrite;
-
- info->Outbound.InterruptMask = debug_info.Outbound.CurrentInterruptMask;
- info->Outbound.ReadIndex = debug_info.Outbound.CurrentReadIndex;
- info->Outbound.WriteIndex = debug_info.Outbound.CurrentWriteIndex;
- info->Outbound.BytesAvailToRead = debug_info.Outbound.BytesAvailToRead;
+ info->MonitorId = debug_info.monitorid;
+
+ info->ServerMonitorPending = debug_info.servermonitor_pending;
+ info->ServerMonitorLatency = debug_info.servermonitor_latency;
+ info->ServerMonitorConnectionId = debug_info.servermonitor_connectionid;
+
+ info->ClientMonitorPending = debug_info.clientmonitor_pending;
+ info->ClientMonitorLatency = debug_info.clientmonitor_latency;
+ info->ClientMonitorConnectionId = debug_info.clientmonitor_connectionid;
+
+ info->Inbound.InterruptMask = debug_info.inbound.current_interrupt_mask;
+ info->Inbound.ReadIndex = debug_info.inbound.current_read_index;
+ info->Inbound.WriteIndex = debug_info.inbound.current_write_index;
+ info->Inbound.BytesAvailToRead = debug_info.inbound.bytes_avail_toread;
+ info->Inbound.BytesAvailToWrite =
+ debug_info.inbound.bytes_avail_towrite;
+
+ info->Outbound.InterruptMask =
+ debug_info.outbound.current_interrupt_mask;
+ info->Outbound.ReadIndex = debug_info.outbound.current_read_index;
+ info->Outbound.WriteIndex = debug_info.outbound.current_write_index;
+ info->Outbound.BytesAvailToRead =
+ debug_info.outbound.bytes_avail_toread;
info->Outbound.BytesAvailToWrite =
- debug_info.Outbound.BytesAvailToWrite;
+ debug_info.outbound.bytes_avail_towrite;
}
/*
@@ -286,44 +459,55 @@ static ssize_t vmbus_show_device_attr(struct device *dev,
* - setup the vmbus root device
* - retrieve the channel offers
*/
-static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
+static int vmbus_bus_init(void)
{
struct vmbus_driver_context *vmbus_drv_ctx = &g_vmbus_drv;
- struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
+ struct hv_driver *driver = &g_vmbus_drv.drv_obj;
struct vm_device *dev_ctx = &g_vmbus_drv.device_ctx;
int ret;
unsigned int vector;
- /*
- * Set this up to allow lower layer to callback to add/remove child
- * devices on the bus
- */
- vmbus_drv_obj->OnChildDeviceCreate = vmbus_child_device_create;
- vmbus_drv_obj->OnChildDeviceDestroy = vmbus_child_device_destroy;
- vmbus_drv_obj->OnChildDeviceAdd = vmbus_child_device_register;
- vmbus_drv_obj->OnChildDeviceRemove = vmbus_child_device_unregister;
-
- /* Call to bus driver to initialize */
- ret = drv_init(&vmbus_drv_obj->Base);
+ DPRINT_INFO(VMBUS, "+++++++ HV Driver version = %s +++++++",
+ HV_DRV_VERSION);
+ DPRINT_INFO(VMBUS, "+++++++ Vmbus supported version = %d +++++++",
+ VMBUS_REVISION_NUMBER);
+ DPRINT_INFO(VMBUS, "+++++++ Vmbus using SINT %d +++++++",
+ VMBUS_MESSAGE_SINT);
+ DPRINT_DBG(VMBUS, "sizeof(vmbus_channel_packet_page_buffer)=%zd, "
+ "sizeof(VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER)=%zd",
+ sizeof(struct vmbus_channel_packet_page_buffer),
+ sizeof(struct vmbus_channel_packet_multipage_buffer));
+
+ driver->name = gDriverName;
+ memcpy(&driver->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
+
+ /* Setup dispatch table */
+ driver->OnDeviceAdd = VmbusOnDeviceAdd;
+ driver->OnDeviceRemove = VmbusOnDeviceRemove;
+ driver->OnCleanup = VmbusOnCleanup;
+
+ /* Hypervisor initialization...setup hypercall page..etc */
+ ret = hv_init();
if (ret != 0) {
- DPRINT_ERR(VMBUS_DRV, "Unable to initialize vmbus (%d)", ret);
+ DPRINT_ERR(VMBUS, "Unable to initialize the hypervisor - 0x%x",
+ ret);
goto cleanup;
}
/* Sanity checks */
- if (!vmbus_drv_obj->Base.OnDeviceAdd) {
+ if (!driver->OnDeviceAdd) {
DPRINT_ERR(VMBUS_DRV, "OnDeviceAdd() routine not set");
ret = -1;
goto cleanup;
}
- vmbus_drv_ctx->bus.name = vmbus_drv_obj->Base.name;
+ vmbus_drv_ctx->bus.name = driver->name;
/* Initialize the bus context */
tasklet_init(&vmbus_drv_ctx->msg_dpc, vmbus_msg_dpc,
- (unsigned long)vmbus_drv_obj);
+ (unsigned long)driver);
tasklet_init(&vmbus_drv_ctx->event_dpc, vmbus_event_dpc,
- (unsigned long)vmbus_drv_obj);
+ (unsigned long)driver);
/* Now, register the bus driver with LDM */
ret = bus_register(&vmbus_drv_ctx->bus);
@@ -334,7 +518,7 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
/* Get the interrupt resource */
ret = request_irq(vmbus_irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
- vmbus_drv_obj->Base.name, NULL);
+ driver->name, NULL);
if (ret != 0) {
DPRINT_ERR(VMBUS_DRV, "ERROR - Unable to request IRQ %d",
@@ -352,7 +536,7 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
/* Call to bus driver to add the root device */
memset(dev_ctx, 0, sizeof(struct vm_device));
- ret = vmbus_drv_obj->Base.OnDeviceAdd(&dev_ctx->device_obj, &vector);
+ ret = driver->OnDeviceAdd(&dev_ctx->device_obj, &vector);
if (ret != 0) {
DPRINT_ERR(VMBUS_DRV,
"ERROR - Unable to add vmbus root device");
@@ -392,9 +576,7 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
goto cleanup;
}
-
- vmbus_drv_obj->GetChannelOffers();
-
+ vmbus_request_offers();
wait_for_completion(&hv_channel_ready);
cleanup:
@@ -408,17 +590,17 @@ cleanup:
*/
static void vmbus_bus_exit(void)
{
- struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
+ struct hv_driver *driver = &g_vmbus_drv.drv_obj;
struct vmbus_driver_context *vmbus_drv_ctx = &g_vmbus_drv;
struct vm_device *dev_ctx = &g_vmbus_drv.device_ctx;
/* Remove the root device */
- if (vmbus_drv_obj->Base.OnDeviceRemove)
- vmbus_drv_obj->Base.OnDeviceRemove(&dev_ctx->device_obj);
+ if (driver->OnDeviceRemove)
+ driver->OnDeviceRemove(&dev_ctx->device_obj);
- if (vmbus_drv_obj->Base.OnCleanup)
- vmbus_drv_obj->Base.OnCleanup(&vmbus_drv_obj->Base);
+ if (driver->OnCleanup)
+ driver->OnCleanup(driver);
/* Unregister the root bus device */
device_unregister(&dev_ctx->device);
@@ -446,7 +628,6 @@ static void vmbus_bus_exit(void)
*/
int vmbus_child_driver_register(struct driver_context *driver_ctx)
{
- struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
int ret;
DPRINT_INFO(VMBUS_DRV, "child driver (%p) registering - name %s",
@@ -457,7 +638,7 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx)
ret = driver_register(&driver_ctx->driver);
- vmbus_drv_obj->GetChannelOffers();
+ vmbus_request_offers();
return ret;
}
@@ -489,9 +670,9 @@ EXPORT_SYMBOL(vmbus_child_driver_unregister);
* vmbus_child_device_create - Creates and registers a new child device
* on the vmbus.
*/
-static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
- struct hv_guid *instance,
- struct vmbus_channel *channel)
+struct hv_device *vmbus_child_device_create(struct hv_guid *type,
+ struct hv_guid *instance,
+ struct vmbus_channel *channel)
{
struct vm_device *child_device_ctx;
struct hv_device *child_device_obj;
@@ -538,8 +719,8 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
/*
* vmbus_child_device_register - Register the child device on the specified bus
*/
-static int vmbus_child_device_register(struct hv_device *root_device_obj,
- struct hv_device *child_device_obj)
+int vmbus_child_device_register(struct hv_device *root_device_obj,
+ struct hv_device *child_device_obj)
{
int ret = 0;
struct vm_device *root_device_ctx =
@@ -583,7 +764,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
* vmbus_child_device_unregister - Remove the specified child device
* from the vmbus.
*/
-static void vmbus_child_device_unregister(struct hv_device *device_obj)
+void vmbus_child_device_unregister(struct hv_device *device_obj)
{
struct vm_device *device_ctx = to_vm_device(device_obj);
@@ -601,13 +782,6 @@ static void vmbus_child_device_unregister(struct hv_device *device_obj)
}
/*
- * vmbus_child_device_destroy - Destroy the specified child device on the vmbus.
- */
-static void vmbus_child_device_destroy(struct hv_device *device_obj)
-{
-}
-
-/*
* vmbus_uevent - add uevent for our device
*
* This routine is invoked when a device is added or removed on the vmbus to
@@ -701,7 +875,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
struct vmbus_driver_context *vmbus_drv_ctx =
(struct vmbus_driver_context *)driver_ctx;
- device_ctx->device_obj.Driver = &vmbus_drv_ctx->drv_obj.Base;
+ device_ctx->device_obj.Driver = &vmbus_drv_ctx->drv_obj;
DPRINT_INFO(VMBUS_DRV,
"device object (%p) set to driver object (%p)",
&device_ctx->device_obj,
@@ -849,7 +1023,6 @@ static void vmbus_device_release(struct device *device)
{
struct vm_device *device_ctx = device_to_vm_device(device);
- /* vmbus_child_device_destroy(&device_ctx->device_obj); */
kfree(device_ctx);
/* !!DO NOT REFERENCE device_ctx anymore at this point!! */
@@ -860,36 +1033,28 @@ static void vmbus_device_release(struct device *device)
*/
static void vmbus_msg_dpc(unsigned long data)
{
- struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
-
- /* ASSERT(vmbus_drv_obj->OnMsgDpc != NULL); */
+ struct hv_driver *driver = (struct hv_driver *)data;
/* Call to bus driver to handle interrupt */
- vmbus_drv_obj->OnMsgDpc(&vmbus_drv_obj->Base);
+ vmbus_on_msg_dpc(driver);
}
/*
- * vmbus_msg_dpc - Tasklet routine to handle hypervisor events
+ * vmbus_event_dpc - Tasklet routine to handle hypervisor events
*/
static void vmbus_event_dpc(unsigned long data)
{
- struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
-
- /* ASSERT(vmbus_drv_obj->OnEventDpc != NULL); */
-
/* Call to bus driver to handle interrupt */
- vmbus_drv_obj->OnEventDpc(&vmbus_drv_obj->Base);
+ VmbusOnEvents();
}
static irqreturn_t vmbus_isr(int irq, void *dev_id)
{
- struct vmbus_driver *vmbus_driver_obj = &g_vmbus_drv.drv_obj;
+ struct hv_driver *driver = &g_vmbus_drv.drv_obj;
int ret;
- /* ASSERT(vmbus_driver_obj->OnIsr != NULL); */
-
/* Call to bus driver to handle interrupt */
- ret = vmbus_driver_obj->OnIsr(&vmbus_driver_obj->Base);
+ ret = vmbus_on_isr(driver);
/* Schedules a dpc if necessary */
if (ret > 0) {
@@ -928,7 +1093,7 @@ static int __init vmbus_init(void)
if (!dmi_check_system(microsoft_hv_dmi_table))
return -ENODEV;
- return vmbus_bus_init(VmbusInitialize);
+ return vmbus_bus_init();
}
static void __exit vmbus_exit(void)
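
The new vmbus_on_msg_dpc() above copies each hypervisor message into a heap-allocated onmessage_work_context, marks the shared slot free (HVMSG_NONE) and defers the real handling to a workqueue, replacing the old kmemdup() plus osd_schedule_callback() pair. A simplified user-space sketch of the copy-then-defer idea follows; struct demo_msg and the direct handler call stand in for struct hv_message, INIT_WORK() and queue_work():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct hv_message. */
struct demo_msg {
	int type;
	char payload[32];
};

/* Plays the role of vmbus_onmessage(); in the driver it runs from a work item. */
static void demo_onmessage(struct demo_msg *msg)
{
	printf("deferred: type=%d payload=%s\n", msg->type, msg->payload);
}

struct demo_work_ctx {
	/* The kernel version also embeds a struct work_struct here. */
	struct demo_msg msg;
};

int main(void)
{
	struct demo_msg slot = { .type = 1, .payload = "hello" };
	struct demo_work_ctx *ctx;

	/* Copy the shared slot so it can be marked empty before handling. */
	ctx = malloc(sizeof(*ctx));
	if (!ctx)
		return 1;
	memcpy(&ctx->msg, &slot, sizeof(slot));
	slot.type = 0;                  /* slot is immediately reusable */

	demo_onmessage(&ctx->msg);      /* a worker thread would do this */
	free(ctx);
	return 0;
}
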
diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h
index 09eaec964b30..07f6d22eeabb 100644
--- a/drivers/staging/hv/vmbus_private.h
+++ b/drivers/staging/hv/vmbus_private.h
@@ -102,13 +102,14 @@ extern struct VMBUS_CONNECTION gVmbusConnection;
/* General vmbus interface */
-struct hv_device *VmbusChildDeviceCreate(struct hv_guid *deviceType,
+struct hv_device *vmbus_child_device_create(struct hv_guid *deviceType,
struct hv_guid *deviceInstance,
struct vmbus_channel *channel);
int VmbusChildDeviceAdd(struct hv_device *Device);
-
-void VmbusChildDeviceRemove(struct hv_device *Device);
+int vmbus_child_device_register(struct hv_device *root_device_obj,
+ struct hv_device *child_device_obj);
+void vmbus_child_device_unregister(struct hv_device *device_obj);
/* static void */
/* VmbusChildDeviceDestroy( */
diff --git a/drivers/staging/hv/vstorage.h b/drivers/staging/hv/vstorage.h
index 4ea597d7a7d7..ae8be84394d5 100644
--- a/drivers/staging/hv/vstorage.h
+++ b/drivers/staging/hv/vstorage.h
@@ -27,15 +27,17 @@
#define REVISION_STRING(REVISION_) #REVISION_
#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
-{ \
- char *revisionString = REVISION_STRING($Revision : 6 $) + 11; \
- RESULT_LVALUE_ = 0; \
- while (*revisionString >= '0' && *revisionString <= '9') { \
- RESULT_LVALUE_ *= 10; \
- RESULT_LVALUE_ += *revisionString - '0'; \
- revisionString++; \
- } \
-}
+ do { \
+ char *revision_string \
+ = REVISION_STRING($Rev : 6 $) + 6; \
+ RESULT_LVALUE_ = 0; \
+ while (*revision_string >= '0' \
+ && *revision_string <= '9') { \
+ RESULT_LVALUE_ *= 10; \
+ RESULT_LVALUE_ += *revision_string - '0'; \
+ revision_string++; \
+ } \
+ } while (0)
/* Major/minor macros. Minor version is in LSB, meaning that earlier flat */
/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
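
The reworked FILL_VMSTOR_REVISION wraps its body in do { ... } while (0) so the macro expands safely even after an unbraced if, and the body simply accumulates decimal digits from a revision keyword string. A small stand-alone sketch of that digit loop, using an illustrative input string rather than the header's $Rev keyword:

#include <stdio.h>

int main(void)
{
	/* Illustrative input; in the header the pointer comes from
	 * stringifying a $Rev$ keyword and skipping its fixed prefix. */
	const char *revision_string = "6 $";
	unsigned int result = 0;

	while (*revision_string >= '0' && *revision_string <= '9') {
		result *= 10;
		result += *revision_string - '0';
		revision_string++;
	}
	printf("parsed revision %u\n", result);     /* prints 6 */
	return 0;
}
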
@@ -65,17 +67,17 @@
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
- VStorOperationCompleteIo = 1,
- VStorOperationRemoveDevice = 2,
- VStorOperationExecuteSRB = 3,
- VStorOperationResetLun = 4,
- VStorOperationResetAdapter = 5,
- VStorOperationResetBus = 6,
- VStorOperationBeginInitialization = 7,
- VStorOperationEndInitialization = 8,
- VStorOperationQueryProtocolVersion = 9,
- VStorOperationQueryProperties = 10,
- VStorOperationMaximum = 10
+ VSTOR_OPERATION_COMPLETE_IO = 1,
+ VSTOR_OPERATION_REMOVE_DEVICE = 2,
+ VSTOR_OPERATION_EXECUTE_SRB = 3,
+ VSTOR_OPERATION_RESET_LUN = 4,
+ VSTOR_OPERATION_RESET_ADAPTER = 5,
+ VSTOR_OPERATION_RESET_BUS = 6,
+ VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
+ VSTOR_OPERATION_END_INITIALIZATION = 8,
+ VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
+ VSTOR_OPERATION_QUERY_PROPERTIES = 10,
+ VSTOR_OPERATION_MAXIMUM = 10
};
/*
@@ -89,31 +91,29 @@ enum vstor_packet_operation {
#define SENSE_BUFFER_SIZE 0x12
#endif
-#define MAX_DATA_BUFFER_LENGTH_WITH_PADDING 0x14
+#define MAX_DATA_BUF_LEN_WITH_PADDING 0x14
struct vmscsi_request {
- unsigned short Length;
- unsigned char SrbStatus;
- unsigned char ScsiStatus;
+ unsigned short length;
+ unsigned char srb_status;
+ unsigned char scsi_status;
- unsigned char PortNumber;
- unsigned char PathId;
- unsigned char TargetId;
- unsigned char Lun;
+ unsigned char port_number;
+ unsigned char path_id;
+ unsigned char target_id;
+ unsigned char lun;
- unsigned char CdbLength;
- unsigned char SenseInfoLength;
- unsigned char DataIn;
- unsigned char Reserved;
+ unsigned char cdb_length;
+ unsigned char sense_info_length;
+ unsigned char data_in;
+ unsigned char reserved;
- unsigned int DataTransferLength;
+ unsigned int data_transfer_length;
union {
- unsigned char Cdb[CDB16GENERIC_LENGTH];
-
- unsigned char SenseData[SENSE_BUFFER_SIZE];
-
- unsigned char ReservedArray[MAX_DATA_BUFFER_LENGTH_WITH_PADDING];
+ unsigned char cdb[CDB16GENERIC_LENGTH];
+ unsigned char sense_data[SENSE_BUFFER_SIZE];
+ unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
};
} __attribute((packed));
@@ -123,24 +123,24 @@ struct vmscsi_request {
* properties of the channel.
*/
struct vmstorage_channel_properties {
- unsigned short ProtocolVersion;
- unsigned char PathId;
- unsigned char TargetId;
+ unsigned short protocol_version;
+ unsigned char path_id;
+ unsigned char target_id;
/* Note: port number is only really known on the client side */
- unsigned int PortNumber;
- unsigned int Flags;
- unsigned int MaxTransferBytes;
+ unsigned int port_number;
+ unsigned int flags;
+ unsigned int max_transfer_bytes;
/* This id is unique for each channel and will correspond with */
/* vendor specific data in the inquirydata */
- unsigned long long UniqueId;
+ unsigned long long unique_id;
} __attribute__((packed));
/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
/* Major (MSW) and minor (LSW) version numbers. */
- unsigned short MajorMinor;
+ unsigned short major_minor;
/*
* Revision number is auto-incremented whenever this file is changed
@@ -148,7 +148,7 @@ struct vmstorage_protocol_version {
* definitely indicate incompatibility--but it does indicate mismatched
* builds.
*/
- unsigned short Revision;
+ unsigned short revision;
} __attribute__((packed));
/* Channel Property Flags */
@@ -157,13 +157,13 @@ struct vmstorage_protocol_version {
struct vstor_packet {
/* Requested operation type */
- enum vstor_packet_operation Operation;
+ enum vstor_packet_operation operation;
/* Flags - see below for values */
- unsigned int Flags;
+ unsigned int flags;
/* Status of the request returned from the server side. */
- unsigned int Status;
+ unsigned int status;
/* Data payload area */
union {
@@ -171,13 +171,13 @@ struct vstor_packet {
* Structure used to forward SCSI commands from the
* client to the server.
*/
- struct vmscsi_request VmSrb;
+ struct vmscsi_request vm_srb;
/* Structure used to query channel properties. */
- struct vmstorage_channel_properties StorageChannelProperties;
+ struct vmstorage_channel_properties storage_channel_properties;
/* Used during version negotiations. */
- struct vmstorage_protocol_version Version;
+ struct vmstorage_protocol_version version;
};
} __attribute__((packed));
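
Taken together, the renamed structures above describe what a storvsc request looks like on the ring: a vstor_packet with an operation code and, for I/O, an embedded vmscsi_request carrying the CDB and addressing fields. Below is a trimmed-down sketch of filling such a packet, with demo_* copies of only a few fields; sizes, packing and the remaining unions are omitted, so this is not the wire layout:

#include <stdio.h>
#include <string.h>

enum demo_vstor_op { DEMO_OP_EXECUTE_SRB = 3 };    /* VSTOR_OPERATION_EXECUTE_SRB */

struct demo_vmscsi_request {
	unsigned char port_number, path_id, target_id, lun;
	unsigned char cdb_length;
	unsigned int  data_transfer_length;
	unsigned char cdb[16];
};

struct demo_vstor_packet {
	enum demo_vstor_op operation;
	unsigned int flags;
	unsigned int status;
	struct demo_vmscsi_request vm_srb;
};

int main(void)
{
	struct demo_vstor_packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.operation = DEMO_OP_EXECUTE_SRB;
	pkt.vm_srb.target_id = 0;
	pkt.vm_srb.lun = 0;
	pkt.vm_srb.cdb_length = 10;
	pkt.vm_srb.data_transfer_length = 512;
	pkt.vm_srb.cdb[0] = 0x28;               /* SCSI READ(10) opcode */

	printf("op=%d cdb[0]=%#x xfer=%u\n",
	       pkt.operation, pkt.vm_srb.cdb[0], pkt.vm_srb.data_transfer_length);
	return 0;
}
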
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index fdb017a1c1a2..2dde97de75f8 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -1,11 +1,12 @@
-What: /sys/bus/iio/devices/device[n]
+What: /sys/bus/iio/devices/deviceX
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Hardware chip or device accessed by on communication port.
- Corresponds to a grouping of sensor channels.
+ Corresponds to a grouping of sensor channels. X is the IIO
+ index of the device.
-What: /sys/bus/iio/devices/trigger[n]
+What: /sys/bus/iio/devices/triggerX
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -13,25 +14,26 @@ Description:
May be provided by a device driver that also has an IIO device
based on hardware generated events (e.g. data ready) or
provided by a separate driver for other hardware (e.g.
- periodic timer, gpio or high resolution timer).
+ periodic timer, GPIO or high resolution timer).
Contains trigger type specific elements. These do not
generalize well and hence are not documented in this file.
+ X is the IIO index of the trigger.
-What: /sys/bus/iio/devices/device[n]:buffer
+What: /sys/bus/iio/devices/deviceX:buffer
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Link to /sys/class/iio/device[n]/device[n]:buffer. n indicates
+ Link to /sys/class/iio/deviceX/deviceX:buffer. X indicates
the device with which this buffer buffer is associated.
-What: /sys/.../device[n]/name
+What: /sys/bus/iio/devices/deviceX/name
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Description of the physical chip / device. Typically a part
- number.
+ Description of the physical chip / device for device X.
+ Typically a part number.
-What: /sys/.../device[n]/sampling_frequency
+What: /sys/bus/iio/devices/deviceX/sampling_frequency
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -44,158 +46,233 @@ Description:
relevant directories. If it effects all of the above
then it is to be found in the base device directory as here.
-What: /sys/.../device[n]/sampling_frequency_available
+What: /sys/bus/iio/devices/deviceX/sampling_frequency_available
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
When the internal sampling clock can only take a small
- discrete set of values, this file lists those availale.
+ discrete set of values, this file lists those available.
-What: /sys/.../device[n]/in[m][_name]_raw
+What: /sys/bus/iio/devices/deviceX/inY_raw
+What: /sys/bus/iio/devices/deviceX/inY_supply_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no bias removal etc) voltage measurement from
- channel m. name is used in special cases where this does
- not correspond to externally available input (e.g. supply
- voltage monitoring in which case the file is in_supply_raw).
- If the device supports events on this channel then m must be
- specified (even on named channels) so as to allow the source
- of event codes to be identified.
-
-What: /sys/.../device[n]/in[m][_name]_offset
+ channel Y. In special cases where the channel does not
+ correspond to externally available input one of the named
+ versions may be used. The number must always be specified and
+ unique to allow association with event codes.
+
+What: /sys/bus/iio/devices/deviceX/inY-inZ_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- If known for a device, offset to be added to in[m]_raw prior
- to scaling by in[_name][m]_scale in order to obtain voltage in
- millivolts. Not present if the offset is always 0 or unknown.
- If m is not present, then voltage offset applies to all in
- channels. May be writable if a variable offset is controlled
- by the device. Note that this is different to calibbias which
- is for devices that apply offsets to compensate for variation
- between different instances of the part, typically adjusted by
- using some hardware supported calibration procedure.
+ Raw (unscaled) differential voltage measurement equivalent to
+ channel Y - channel Z where these channel numbers apply to the
+ physically equivalent inputs when non differential readings are
+ separately available. In differential only parts, then all that
+ is required is a consistent labeling.
-What: /sys/.../device[n]/in[m][_name]_offset_available
+What: /sys/bus/iio/devices/deviceX/temp_raw
+What: /sys/bus/iio/devices/deviceX/temp_x_raw
+What: /sys/bus/iio/devices/deviceX/temp_y_raw
+What: /sys/bus/iio/devices/deviceX/temp_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- If a small number of discrete offset values are available, this
- will be a space separated list. If these are independant (but
- options the same) for individual offsets then m should not be
- present.
+ Raw (unscaled no bias removal etc) temperature measurement.
+ If an axis is specified it generally means that the temperature
+ sensor is associated with one part of a compound device (e.g.
+ a gyroscope axis).
-What: /sys/.../device[n]/in[m][_name]_offset_[min|max]
+What: /sys/bus/iio/devices/deviceX/accel_x_raw
+What: /sys/bus/iio/devices/deviceX/accel_y_raw
+What: /sys/bus/iio/devices/deviceX/accel_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- If a more or less continuous range of voltage offsets are
- supported then these specify the minimum and maximum. If shared
- by all in channels then m is not present.
+ Acceleration in direction x, y or z (may be arbitrarily assigned
+ but should match other such assignments on device)
+ channel m (not present if only one accelerometer channel at
+ this orientation). Has all of the equivalent parameters as per
+ inY. Units after application of scale and offset are m/s^2.
-What: /sys/.../device[n]/in[m][_name]_calibbias
+What: /sys/bus/iio/devices/deviceX/gyro_x_raw
+What: /sys/bus/iio/devices/deviceX/gyro_y_raw
+What: /sys/bus/iio/devices/deviceX/gyro_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Hardware applied calibration offset. (assumed to fix production
- inaccuracies)
+ Angular velocity about axis x, y or z (may be arbitrarily
+ assigned) Data converted by application of offset then scale to
+ radians per second. Has all the equivalent parameters as
+ per inY.
-What /sys/.../device[n]/in[m][_name]_calibscale
+What: /sys/bus/iio/devices/deviceX/incli_x_raw
+What: /sys/bus/iio/devices/deviceX/incli_y_raw
+What: /sys/bus/iio/devices/deviceX/incli_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Hardware applied calibration scale factor. (assumed to fix
- production inaccuracies)
+ Inclination raw reading about axis x, y or z (may be
+ arbitrarily assigned). Data converted by application of offset
+ and scale to Degrees.
-What: /sys/.../device[n]/in[m][_name]_scale
+What: /sys/bus/iio/devices/deviceX/magn_x_raw
+What: /sys/bus/iio/devices/deviceX/magn_y_raw
+What: /sys/bus/iio/devices/deviceX/magn_z_raw
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- If known for a device, scale to be applied to volt[m]_raw post
- addition of in[_name][m]_offset in order to obtain the measured
- voltage in millivolts. If shared across all in channels then
- m is not present.
+ Magnetic field along axis x, y or z (may be arbitrarily
+ assigned) channel m (not present if only one magnetometer
+ at this orientation). Data converted by application of
+ offset then scale to Gauss. Has all the equivalent modifiers
+ as per inY.
-What: /sys/.../device[n]/in[m]-in[o]_raw
-KernelVersion: 2.6.35
+What: /sys/bus/iio/devices/deviceX/accel_x_peak_raw
+What: /sys/bus/iio/devices/deviceX/accel_y_peak_raw
+What: /sys/bus/iio/devices/deviceX/accel_z_peak_raw
+KernelVersion: 2.6.36
Contact: linux-iio@vger.kernel.org
Description:
- Raw (unscaled) differential voltage measurement equivalent to
- channel m - channel o where these channel numbers apply to the
- physically equivalent inputs when non differential readings are
- separately available. In differential only parts, then all that
- is required is a consistent labelling.
+ Some devices provide a store of the highest value seen since
+ some reset condition. These attributes allow access to this
+ and are otherwise the direct equivalent of the
+ <type>Y[_name]_raw attributes.
-What: /sys/.../device[n]/accel[_x|_y|_z][m]_raw
-KernelVersion: 2.6.35
+What: /sys/bus/iio/devices/deviceX/accel_xyz_squared_peak_raw
+KernelVersion: 2.6.36
Contact: linux-iio@vger.kernel.org
Description:
- Acceleration in direction x, y or z (may be arbitrarily assigned
- but should match other such assignments on device)
- channel m (not present if only one accelerometer channel at
- this orientation). Has all of the equivalent parameters as per
- in[m]. Units after application of scale and offset are m/s^2.
+ A computed peak value based on the sum squared magnitude of
+ the underlying value in the specified directions.
-What: /sys/.../device[n]/gyro[_x|_y|_z][m]_raw
+What: /sys/bus/iio/devices/deviceX/accel_offset
+What: /sys/bus/iio/devices/deviceX/temp_offset
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Angular velocity about axis x, y or z (may be arbitrarily
- assigned) channel m (not present if only one gyroscope at
- this orientation).
- Data converted by application of offset then scale to
- radians per second. Has all the equivalent parameters as
- per in[m].
-
-What: /sys/.../device[n]/incli[_x|_y|_z][m]_raw
+ If known for a device, offset to be added to <type>[Y]_raw prior
+ to scaling by <type>[Y]_scale in order to obtain value in the
+ <type> units as specified in <type>[y]_raw documentation.
+ Not present if the offset is always 0 or unknown. If Y is not
+ present, then the offset applies to all in channels of <type>.
+ May be writable if a variable offset can be applied on the
+ device. Note that this is different to calibbias which
+ is for devices (or drivers) that apply offsets to compensate
+ for variation between different instances of the part, typically
+ adjusted by using some hardware supported calibration procedure.
+
+What: /sys/bus/iio/devices/deviceX/inY_scale
+What: /sys/bus/iio/devices/deviceX/inY_supply_scale
+What: /sys/bus/iio/devices/deviceX/in_scale
+What: /sys/bus/iio/devices/deviceX/accel_scale
+What: /sys/bus/iio/devices/deviceX/accel_peak_scale
+What: /sys/bus/iio/devices/deviceX/gyro_scale
+What: /sys/bus/iio/devices/deviceX/magn_scale
+What: /sys/bus/iio/devices/deviceX/magn_x_scale
+What: /sys/bus/iio/devices/deviceX/magn_y_scale
+What: /sys/bus/iio/devices/deviceX/magn_z_scale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Inclination raw reading about axis x, y or z (may be arbitarily
- assigned) channel m (not present if only one inclinometer at
- this orientation). Data converted by application of offset
- and scale to Degrees.
-
-What: /sys/.../device[n]/magn[_x|_y|_z][m]_raw
+ If known for a device, scale to be applied to <type>Y[_name]_raw
+ post addition of <type>[Y][_name]_offset in order to obtain the
+ measured value in <type> units as specified in
+ <type>[Y][_name]_raw documentation. If shared across all in
+ channels then Y is not present and the value is called
+ <type>[Y][_name]_scale. The peak modifier means this value
+ is applied to <type>Y[_name]_peak_raw values.
+
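+
As the offset and scale entries above spell out, a processed reading is obtained as (raw + offset) * scale, with offset treated as 0 when the attribute is absent. A user-space sketch of that read path follows; the device0/in0 paths are hypothetical examples of the naming documented here:

#include <stdio.h>

/* Read a single number from a sysfs attribute, falling back when absent. */
static double read_attr(const char *path, double fallback)
{
	FILE *f = fopen(path, "r");
	double val = fallback;

	if (f) {
		if (fscanf(f, "%lf", &val) != 1)
			val = fallback;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* Hypothetical device0/in0 names following the convention above. */
	double raw    = read_attr("/sys/bus/iio/devices/device0/in0_raw", 0.0);
	double offset = read_attr("/sys/bus/iio/devices/device0/in0_offset", 0.0);
	double scale  = read_attr("/sys/bus/iio/devices/device0/in0_scale", 1.0);

	printf("processed value = %f\n", (raw + offset) * scale);
	return 0;
}
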
+What: /sys/bus/iio/devices/deviceX/accel_x_calibbias
+What: /sys/bus/iio/devices/deviceX/accel_y_calibbias
+What: /sys/bus/iio/devices/deviceX/accel_z_calibbias
+What: /sys/bus/iio/devices/deviceX/gyro_x_calibbias
+What: /sys/bus/iio/devices/deviceX/gyro_y_calibbias
+What: /sys/bus/iio/devices/deviceX/gyro_z_calibbias
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Magnetic field along axis x, y or z (may be arbitrarily
- assigned) channel m (not present if only one magnetometer
- at this orientation). Data converted by application of
- offset then scale to Gauss. Has all the equivalent modifiers
- as per in[m].
-
-What: /sys/.../device[n]/device[n]:event[m]
+ Hardware applied calibration offset. (assumed to fix production
+ inaccuracies). If shared across all channels, <type>_calibbias
+ is used.
+
+What: /sys/bus/iio/devices/deviceX/inY_calibscale
+What: /sys/bus/iio/devices/deviceX/inY_supply_calibscale
+What: /sys/bus/iio/devices/deviceX/in_calibscale
+What: /sys/bus/iio/devices/deviceX/accel_x_calibscale
+What: /sys/bus/iio/devices/deviceX/accel_y_calibscale
+What: /sys/bus/iio/devices/deviceX/accel_z_calibscale
+What: /sys/bus/iio/devices/deviceX/gyro_x_calibscale
+What: /sys/bus/iio/devices/deviceX/gyro_y_calibscale
+What: /sys/bus/iio/devices/deviceX/gyro_z_calibscale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Configuration of which hardware generated events are passed up to
- userspace. Some of these are a bit complex to generalize so this
- section is a work in progress.
+ Hardware applied calibration scale factor. (assumed to fix
+ production inaccuracies). If shared across all channels,
+ <type>_calibscale is used.
-What: /sys/.../device[n]:event[m]/dev
-KernelVersion: 2.6.35
+What: /sys/bus/iio/devices/deviceX/accel_scale_available
+KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- major:minor character device numbers for the event line.
+ If a discrete set of scale values are available, they
+ are listed in this attribute.
-Taking accel_x0 as an example
+What: /sys/bus/iio/devices/deviceX/deviceX:eventY
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configuration of which hardware generated events are passed up
+ to user-space.
-What: /sys/.../device[n]:event[m]/accel_x0_thresh[_rising|_falling]_en
+What: /sys/bus/iio/devices/deviceX:event/dev
+What: /sys/bus/iio/devices/deviceX:eventY/dev
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ major:minor character device numbers for the event line Y of
+ device X.
+
+What: /sys/.../deviceX:eventY/accel_x_thresh_rising_en
+What: /sys/.../deviceX:eventY/accel_x_thresh_falling_en
+What: /sys/.../deviceX:eventY/accel_y_thresh_rising_en
+What: /sys/.../deviceX:eventY/accel_y_thresh_falling_en
+What: /sys/.../deviceX:eventY/accel_z_thresh_rising_en
+What: /sys/.../deviceX:eventY/accel_z_thresh_falling_en
+What: /sys/.../deviceX:eventY/gyro_x_thresh_rising_en
+What: /sys/.../deviceX:eventY/gyro_x_thresh_falling_en
+What: /sys/.../deviceX:eventY/gyro_y_thresh_rising_en
+What: /sys/.../deviceX:eventY/gyro_y_thresh_falling_en
+What: /sys/.../deviceX:eventY/gyro_z_thresh_rising_en
+What: /sys/.../deviceX:eventY/gyro_z_thresh_falling_en
+What: /sys/.../deviceX:eventY/magn_x_thresh_rising_en
+What: /sys/.../deviceX:eventY/magn_x_thresh_falling_en
+What: /sys/.../deviceX:eventY/magn_y_thresh_rising_en
+What: /sys/.../deviceX:eventY/magn_y_thresh_falling_en
+What: /sys/.../deviceX:eventY/magn_z_thresh_rising_en
+What: /sys/.../deviceX:eventY/magn_z_thresh_falling_en
+What: /sys/.../deviceX:eventY/inZ_supply_thresh_rising_en
+What: /sys/.../deviceX:eventY/inZ_supply_thresh_falling_en
+What: /sys/.../deviceX:eventY/inZ_thresh_rising_en
+What: /sys/.../deviceX:eventY/inZ_thresh_falling_en
+What: /sys/.../deviceX:eventY/temp_thresh_rising_en
+What: /sys/.../deviceX:eventY/temp_thresh_falling_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Event generated when accel_x0 passes a threshold in the specfied
+ Event generated when channel passes a threshold in the specified
(_rising|_falling) direction. If the direction is not specified,
then either the device will report an event which ever direction
a single threshold value is called in (e.g.
- accel_x0_<raw|input>_thresh_value) or
- accel_x0_<raw|input>_thresh_rising_value and
- accel_x0_<raw|input>_thresh_falling_value may take different
- values, but the device can only enable both thresholds or
- neither.
+ <type>[Z][_name]_<raw|input>_thresh_value) or
+ <type>[Z][_name]_<raw|input>_thresh_rising_value and
+ <type>[Z][_name]_<raw|input>_thresh_falling_value may take
+ different values, but the device can only enable both thresholds
+ or neither.
Note the driver will assume the last p events requested are
to be enabled where p is however many it supports (which may
vary depending on the exact set requested. So if you want to be
@@ -205,186 +282,338 @@ Description:
a given event type is enabled a future point (and not those for
whatever event was previously enabled).
-What: /sys/.../accel_x0_<raw|input>_thresh[_rising|_falling]_value
+What: /sys/.../deviceX:eventY/accel_x_roc_rising_en
+What: /sys/.../deviceX:eventY/accel_x_roc_falling_en
+What: /sys/.../deviceX:eventY/accel_y_roc_rising_en
+What: /sys/.../deviceX:eventY/accel_y_roc_falling_en
+What: /sys/.../deviceX:eventY/accel_z_roc_rising_en
+What: /sys/.../deviceX:eventY/accel_z_roc_falling_en
+What: /sys/.../deviceX:eventY/gyro_x_roc_rising_en
+What: /sys/.../deviceX:eventY/gyro_x_roc_falling_en
+What: /sys/.../deviceX:eventY/gyro_y_roc_rising_en
+What: /sys/.../deviceX:eventY/gyro_y_roc_falling_en
+What: /sys/.../deviceX:eventY/gyro_z_roc_rising_en
+What: /sys/.../deviceX:eventY/gyro_z_roc_falling_en
+What: /sys/.../deviceX:eventY/magn_x_roc_rising_en
+What: /sys/.../deviceX:eventY/magn_x_roc_falling_en
+What: /sys/.../deviceX:eventY/magn_y_roc_rising_en
+What: /sys/.../deviceX:eventY/magn_y_roc_falling_en
+What: /sys/.../deviceX:eventY/magn_z_roc_rising_en
+What: /sys/.../deviceX:eventY/magn_z_roc_falling_en
+What: /sys/.../deviceX:eventY/inZ_supply_roc_rising_en
+What: /sys/.../deviceX:eventY/inZ_supply_roc_falling_en
+What: /sys/.../deviceX:eventY/inZ_roc_rising_en
+What: /sys/.../deviceX:eventY/inZ_roc_falling_en
+What: /sys/.../deviceX:eventY/temp_roc_rising_en
+What: /sys/.../deviceX:eventY/temp_roc_falling_en
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Event generated when channel passes a threshold on the rate of
+ change (1st differential) in the specified (_rising|_falling)
+ direction. If the direction is not specified, then either the
+ device will report an event which ever direction a single
+ threshold value is called in (e.g.
+ <type>[Z][_name]_<raw|input>_roc_value) or
+ <type>[Z][_name]_<raw|input>_roc_rising_value and
+ <type>[Z][_name]_<raw|input>_roc_falling_value may take
+ different values, but the device can only enable both rate of
+ change thresholds or neither.
+ Note the driver will assume the last p events requested are
+ to be enabled where p is however many it supports (which may
+ vary depending on the exact set requested). So if you want to be
+ sure you have set what you think you have, check the contents of
+ these attributes after everything is configured. Drivers may
+ have to buffer any parameters so that they are consistent when
+ a given event type is enabled at a future point (and not those for
+ whatever event was previously enabled).
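+
+ Illustrative only (not part of the ABI; the attribute path is
+ supplied by the caller): a minimal user-space sketch of the
+ write-then-read-back check described above.
+
+   #include <stdio.h>
+
+   /* Write "1" to an ..._en attribute, then read it back to see
+    * whether the driver actually kept this event enabled. */
+   static int enable_and_verify(const char *attr)
+   {
+           int kept;
+           FILE *f = fopen(attr, "r+");
+
+           if (!f)
+                   return -1;
+           fputs("1\n", f);
+           fflush(f);      /* push the write through to sysfs */
+           rewind(f);      /* re-read the attribute */
+           kept = (fgetc(f) == '1');
+           fclose(f);
+           return kept ? 0 : -1;
+   }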
+
+What: /sys/.../deviceX:eventY/accel_x_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/accel_x_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/accel_y_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/accel_y_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/accel_z_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/accel_z_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/gyro_x_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/gyro_x_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/gyro_y_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/gyro_y_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/gyro_z_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/gyro_z_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/magn_x_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/magn_x_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/magn_y_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/magn_y_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/magn_z_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/magn_z_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/inZ_supply_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/inZ_supply_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/inZ_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/inZ_raw_thresh_falling_value
+What: /sys/.../deviceX:eventY/temp_raw_thresh_rising_value
+What: /sys/.../deviceX:eventY/temp_raw_thresh_falling_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Specifies the value of threshold that the device is comparing
against for the events enabled by
- accel_x0_<raw|input>_thresh[_rising|falling]_en.
- If seperate exist for the two directions, but direction is
- not specified for this attribute, then a single threshold value
- applies to both directions.
+ <type>[Z][_name]_thresh[_rising|_falling]_en.
+ If separate attributes exist for the two directions, but
+ direction is not specified for this attribute, then a single
+ threshold value applies to both directions.
The raw or input element of the name indicates whether the
value is in raw device units or in processed units (as _raw
and _input do on sysfs direct channel read attributes).
-What: /sys/.../accel_x0_thresh[_rising|_falling]_meanperiod
+What: /sys/.../deviceX:eventY/accel_x_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/accel_x_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/accel_y_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/accel_y_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/accel_z_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/accel_z_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/gyro_x_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/gyro_x_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/gyro_y_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/gyro_y_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/gyro_z_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/gyro_z_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/magn_x_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/magn_x_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/magn_y_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/magn_y_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/magn_z_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/magn_z_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/inZ_supply_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/inZ_supply_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/inZ_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/inZ_raw_roc_falling_value
+What: /sys/.../deviceX:eventY/temp_raw_roc_rising_value
+What: /sys/.../deviceX:eventY/temp_raw_roc_falling_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Period of time (in seconds) over which the raw channel value
- is averaged before being compared with the threshold set in
- accel_x0_thresh[_rising|_falling]_meanperiod. If direction is
- not specified then this mean period applies to both directions.
+ Specifies the value of rate of change threshold that the
+ device is comparing against for the events enabled by
+ <type>[Z][_name]_roc[_rising|_falling]_en.
+ If separate attributes exist for the two directions,
+ but direction is not specified for this attribute,
+ then a single threshold value applies to both directions.
+ The raw or input element of the name indicates whether the
+ value is in raw device units or in processed units (as _raw
+ and _input do on sysfs direct channel read attributes).
-What: /sys/.../accel_x0_thresh[_rising|_falling]_period
+What: /sys/.../deviceX:eventY/accel_x_thresh_rising_period
+What: /sys/.../deviceX:eventY/accel_x_thresh_falling_period
+What: /sys/.../deviceX:eventY/accel_x_roc_rising_period
+What: /sys/.../deviceX:eventY/accel_x_roc_falling_period
+What: /sys/.../deviceX:eventY/accel_y_thresh_rising_period
+What: /sys/.../deviceX:eventY/accel_y_thresh_falling_period
+What: /sys/.../deviceX:eventY/accel_y_roc_rising_period
+What: /sys/.../deviceX:eventY/accel_y_roc_falling_period
+What: /sys/.../deviceX:eventY/accel_z_thresh_rising_period
+What: /sys/.../deviceX:eventY/accel_z_thresh_falling_period
+What: /sys/.../deviceX:eventY/accel_z_roc_rising_period
+What: /sys/.../deviceX:eventY/accel_z_roc_falling_period
+What: /sys/.../deviceX:eventY/gyro_x_thresh_rising_period
+What: /sys/.../deviceX:eventY/gyro_x_thresh_falling_period
+What: /sys/.../deviceX:eventY/gyro_x_roc_rising_period
+What: /sys/.../deviceX:eventY/gyro_x_roc_falling_period
+What: /sys/.../deviceX:eventY/gyro_y_thresh_rising_period
+What: /sys/.../deviceX:eventY/gyro_y_thresh_falling_period
+What: /sys/.../deviceX:eventY/gyro_y_roc_rising_period
+What: /sys/.../deviceX:eventY/gyro_y_roc_falling_period
+What: /sys/.../deviceX:eventY/gyro_z_thresh_rising_period
+What: /sys/.../deviceX:eventY/gyro_z_thresh_falling_period
+What: /sys/.../deviceX:eventY/gyro_z_roc_rising_period
+What: /sys/.../deviceX:eventY/gyro_z_roc_falling_period
+What: /sys/.../deviceX:eventY/magn_x_thresh_rising_period
+What: /sys/.../deviceX:eventY/magn_x_thresh_falling_period
+What: /sys/.../deviceX:eventY/magn_x_roc_rising_period
+What: /sys/.../deviceX:eventY/magn_x_roc_falling_period
+What: /sys/.../deviceX:eventY/magn_y_thresh_rising_period
+What: /sys/.../deviceX:eventY/magn_y_thresh_falling_period
+What: /sys/.../deviceX:eventY/magn_y_roc_rising_period
+What: /sys/.../deviceX:eventY/magn_y_roc_falling_period
+What: /sys/.../deviceX:eventY/magn_z_thresh_rising_period
+What: /sys/.../deviceX:eventY/magn_z_thresh_falling_period
+What: /sys/.../deviceX:eventY/magn_z_roc_rising_period
+What: /sys/.../deviceX:eventY/magn_z_roc_falling_period
+What: /sys/.../deviceX:eventY/inZ_supply_thresh_rising_period
+What: /sys/.../deviceX:eventY/inZ_supply_thresh_falling_period
+What: /sys/.../deviceX:eventY/inZ_supply_roc_rising_period
+What: /sys/.../deviceX:eventY/inZ_supply_roc_falling_period
+What: /sys/.../deviceX:eventY/inZ_thresh_rising_period
+What: /sys/.../deviceX:eventY/inZ_thresh_falling_period
+What: /sys/.../deviceX:eventY/inZ_roc_rising_period
+What: /sys/.../deviceX:eventY/inZ_roc_falling_period
+What: /sys/.../deviceX:eventY/temp_thresh_rising_period
+What: /sys/.../deviceX:eventY/temp_thresh_falling_period
+What: /sys/.../deviceX:eventY/temp_roc_rising_period
+What: /sys/.../deviceX:eventY/temp_roc_falling_period
+What: /sys/.../deviceX:eventY/accel_x&y&z_mag_falling_period
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Period of time (in seconds) for which the threshold must be
- passed before an event is generated. If direction is not
+ Period of time (in seconds) for which the condition must be
+ met before an event is generated. If direction is not
specified then this period applies to both directions.
-What: /sys/.../device[n]:event[m]/accel_x0_mag[_rising|_falling]_en
+What: /sys/.../deviceX:eventY/accel_mag_en
+What: /sys/.../deviceX:eventY/accel_mag_rising_en
+What: /sys/.../deviceX:eventY/accel_mag_falling_en
+What: /sys/.../deviceX:eventY/accel_x_mag_en
+What: /sys/.../deviceX:eventY/accel_x_mag_rising_en
+What: /sys/.../deviceX:eventY/accel_x_mag_falling_en
+What: /sys/.../deviceX:eventY/accel_y_mag_en
+What: /sys/.../deviceX:eventY/accel_y_mag_rising_en
+What: /sys/.../deviceX:eventY/accel_y_mag_falling_en
+What: /sys/.../deviceX:eventY/accel_z_mag_en
+What: /sys/.../deviceX:eventY/accel_z_mag_rising_en
+What: /sys/.../deviceX:eventY/accel_z_mag_falling_en
+What: /sys/.../deviceX:eventY/accel_x&y&z_mag_rising_en
+What: /sys/.../deviceX:eventY/accel_x&y&z_mag_falling_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Similar to accel_x0_thresh[_rising|_falling]_en, but here the
+ Similar to accel_x_thresh[_rising|_falling]_en, but here the
magnitude of the channel is compared to the threshold, not its
signed value.
-What: /sys/.../accel_x0_<raw|input>_mag[_rising|_falling]_value
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- The value to which the magnitude of the channel is compared.
-
-What: /sys/.../accel_x0_mag[_rising|_falling]_meanperiod
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Period of time (in seconds) over which the value of the channel
- is averaged before being compared to the threshold
-
-What: /sys/.../accel_x0_mag[_rising|_falling]_period
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Period of time (in seconds) for which the condition must be true
- before an event occurs.
-
-What: /sys/.../device[n]:event[m]/accel_x0_roc[_rising|_falling]_en
+What: /sys/.../accel_raw_mag_value
+What: /sys/.../accel_x_raw_mag_rising_value
+What: /sys/.../accel_y_raw_mag_rising_value
+What: /sys/.../accel_z_raw_mag_rising_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
- Similar to accel_x0_thresh[_rising|_falling]_en, but here the
- first differential is compared with the threshold.
+ The value to which the magnitude of the channel is compared. If
+ number or direction is not specified, applies to all channels of
+ this type.
-What: /sys/.../accel_x0_<raw|input>_roc[_rising|_falling]_value
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- The value to which the first differential of the channel is
- compared.
-
-What: /sys/.../accel_x0_roc[_rising|_falling]_meanperiod
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Period of time (in seconds) over which the value of the channel
- is averaged before being compared to the threshold
-
-What: /sys/.../accel_x0_roc[_rising|_falling]_period
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Period of time (in seconds) for which the condition must be true
- before an event occurs.
-
-What: /sys/.../device[n]/device[n]:buffer:event/dev
+What: /sys/bus/iio/devices/deviceX:buffer:event/dev
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Buffer for device n event character device major:minor numbers.
+ Buffer for device X event character device major:minor numbers.
-What: /sys/.../device[n]/device[n]:buffer:access/dev
+What: /sys/bus/iio/devices/deviceX:buffer:access/dev
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
- Buffer for device n access character device o major:minor numbers.
+ Buffer for device X access character device major:minor numbers.
-What: /sys/.../device[n]:buffer/trigger
+What: /sys/bus/iio/devices/deviceX:buffer/trigger
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
The name of the trigger source being used, as per string given
- in /sys/class/iio/trigger[n]/name.
+ in /sys/class/iio/triggerY/name.
-What: /sys/.../device[n]:buffer/length
+What: /sys/bus/iio/devices/deviceX:buffer/length
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Number of scans contained by the buffer.
-What: /sys/.../device[n]:buffer/bytes_per_datum
+What: /sys/bus/iio/devices/deviceX:buffer/bytes_per_datum
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Bytes per scan. Due to alignment fun, the scan may be larger
than implied directly by the scan_element parameters.
-What: /sys/.../device[n]:buffer/enable
+What: /sys/bus/iio/devices/deviceX:buffer/enable
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
Actually start the buffer capture up. Will start trigger
if first device and appropriate.
-What: /sys/.../device[n]:buffer/alignment
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- Minimum data alignment. Scan elements larger than this are
- aligned to the nearest power of 2 times this. (may not be
- true in weird hardware buffers that pack data well)
-
-What: /sys/.../device[n]/buffer/scan_elements
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Directory containing interfaces for elements that will be
captured for a single triggered sample set in the buffer.
-What: /sys/.../device[n]/buffer/scan_elements/accel_x0_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/timestamp_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_supply_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY-inZ_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_en
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_en
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Scan element control for triggered data capture.
-What: /sys/.../device[n]/buffer/scan_elements/accel[_x0]_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_type
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/in-in_type
+What: /sys/.../deviceX:buffer/scan_elements/inY_supply_type
+What: /sys/.../deviceX:buffer/scan_elements/timestamp_type
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
Description of the scan element data storage within the buffer
- and hence the form in which it is read from userspace.
- Form is [s|u]bits/storagebits. s or u specifies if signed
- (2's complement) or unsigned. bits is the number of bits of
- data and storagebits is the space (after padding) that it
- occupies in the buffer. Note that some devices will have
- additional information in the unused bits so to get a clean
- value, the bits value must be used to mask the buffer output
- value appropriately. The storagebits value also specifies the
- data alignment. So s48/64 will be a signed 48 bit integer
- stored in a 64 bit location aligned to a a64 bit boundary.
+ and hence the form in which it is read from user-space.
+ Form is [s|u]bits/storagebits[>>shift]. s or u specifies if
+ signed (2's complement) or unsigned. bits is the number of bits
+ of data and storagebits is the space (after padding) that it
+ occupies in the buffer. shift, if specified, is the shift that
+ needs to be applied prior to masking out unused bits. Some
+ devices put their data in the middle of the transferred elements
+ with additional information on both sides. Note that some
+ devices will have additional information in the unused bits
+ so to get a clean value, the bits value must be used to mask
+ the buffer output value appropriately. The storagebits value
+ also specifies the data alignment. So s48/64>>2 will be a
+ signed 48 bit integer stored in a 64 bit location aligned to
+ a 64 bit boundary. To obtain the clean value, shift right 2
+ and apply a mask to zero the top 16 bits of the result.
For other storage combinations this attribute will be extended
appropriately.
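+
+ For illustration only (a sketch assuming the
+ [s|u]bits/storagebits[>>shift] form described above; the helper
+ name is an example, not part of the ABI), user-space might
+ recover a clean value like this:
+
+   #include <stdio.h>
+   #include <stdint.h>
+
+   /* Parse e.g. "s48/64>>2" and convert one raw storage word into
+    * a sign-corrected value. */
+   static int64_t extract_value(const char *type, uint64_t raw)
+   {
+           char sign;
+           unsigned int bits, storagebits, shift = 0;
+
+           if (sscanf(type, "%c%u/%u>>%u",
+                      &sign, &bits, &storagebits, &shift) < 3)
+                   return 0;       /* unrecognised format */
+           raw >>= shift;          /* drop any bits below the data */
+           if (bits < 64) {
+                   raw &= (1ULL << bits) - 1;      /* mask unused bits */
+                   if (sign == 's' && (raw & (1ULL << (bits - 1))))
+                           raw |= ~((1ULL << bits) - 1); /* sign extend */
+           }
+           return (int64_t)raw;
+   }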
-What: /sys/.../device[n]/buffer/scan_elements/accel[_x0]_index
+What: /sys/.../deviceX:buffer/scan_elements/accel_type_available
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the type parameter can take one of a small set of values,
+ this attribute lists them.
+
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_index
+What: /sys/.../deviceX:buffer/scan_elements/inY_supply_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_index
+What: /sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_index
+What: /sys/.../deviceX:buffer/scan_elements/timestamp_index
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
A single positive integer specifying the position of this
- scan element in the buffer. Note these are not dependant on
- what is enabled and may not be contiguous. Thus for userspace
+ scan element in the buffer. Note these are not dependent on
+ what is enabled and may not be contiguous. Thus for user-space
to establish the full layout these must be used in conjunction
with all _en attributes to establish which channels are present,
and the relevant _type attributes to establish the data storage
format.
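+
+ A hedged sketch of combining _en, _index and _type (the helper
+ name and the align-to-own-storage-size rule follow the _type
+ description above; they are examples, not part of the ABI):
+ walk the enabled channels in _index order and align each
+ element to its own storage size.
+
+   /* storagebits[] holds the storagebits value (in bits) of each
+    * enabled channel, sorted by _index; offset[] receives the byte
+    * offset of each element within one scan. */
+   static unsigned int layout_scan(const unsigned int *storagebits,
+                                   unsigned int nchan,
+                                   unsigned int *offset)
+   {
+           unsigned int i, pos = 0;
+
+           for (i = 0; i < nchan; i++) {
+                   unsigned int size = storagebits[i] / 8;
+
+                   /* round pos up to a multiple of the element size */
+                   pos = (pos + size - 1) / size * size;
+                   offset[i] = pos;
+                   pos += size;
+           }
+           return pos;     /* bytes consumed by one scan */
+   }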
-
-What: /sys/.../device[n]/buffer/scan_elements/accel[_x0]_shift
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- A bit shift (to right) that must be applied prior to
- extracting the bits specified by accel[_x0]_precision.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-dds b/drivers/staging/iio/Documentation/sysfs-bus-iio-dds
new file mode 100644
index 000000000000..ffdd5478a35d
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-dds
@@ -0,0 +1,93 @@
+
+What: /sys/bus/iio/devices/.../ddsX_freqY
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Stores frequency into tuning word Y.
+ There will be more than one ddsX_freqY file, which allows for
+ pin controlled FSK (Frequency Shift Keying) when
+ ddsX_pincontrol_freq_en is active; otherwise the user can
+ select the desired active tuning word by writing Y to the
+ ddsX_freqsymbol file.
+
+What: /sys/bus/iio/devices/.../ddsX_freqY_scale
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scale to be applied to ddsX_freqY in order to obtain the
+ desired value in Hz. If shared across all frequency registers
+ Y is not present. It is also possible X is not present if
+ shared across all channels.
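+
+ Illustrative only (the helper name and error handling are
+ examples, not part of the ABI): converting a target frequency
+ in Hz into the integer written to ddsX_freqY, using the scale
+ read from this attribute.
+
+   #include <math.h>
+   #include <stdio.h>
+
+   /* scale_attr is the path of the ddsX_freqY_scale attribute;
+    * returns the tuning word to write, or -1 on error. */
+   static long long hz_to_tuning_word(const char *scale_attr,
+                                      double hz)
+   {
+           double scale = 0.0;
+           FILE *f = fopen(scale_attr, "r");
+
+           if (!f)
+                   return -1;
+           if (fscanf(f, "%lf", &scale) != 1 || scale <= 0.0) {
+                   fclose(f);
+                   return -1;
+           }
+           fclose(f);
+           return llround(hz / scale);     /* Hz = word * scale */
+   }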
+
+What: /sys/bus/iio/devices/.../ddsX_freqsymbol
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the active output frequency tuning word. The value
+ corresponds to the Y in ddsX_freqY. To exit this mode the user
+ can write to the ddsX_pincontrol_freq_en or ddsX_out_enable file.
+
+What: /sys/bus/iio/devices/.../ddsX_phaseY
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Stores phase into phase register Y.
+ There will be more than one ddsX_phaseY file, which allows for
+ pin controlled PSK (Phase Shift Keying) when
+ ddsX_pincontrol_phase_en is active; otherwise the user can
+ select the desired phase Y, which is added to the phase
+ accumulator output, by writing Y to the ddsX_phasesymbol file.
+
+What: /sys/bus/iio/devices/.../ddsX_phaseY_scale
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scale to be applied to ddsX_phaseY in order to obtain the
+ desired value in rad. If shared across all phase registers
+ Y is not present. It is also possible X is not present if
+ shared across all channels.
+
+What: /sys/bus/iio/devices/.../ddsX_phasesymbol
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the active phase Y which is added to the phase
+ accumulator output. The value corresponds to the Y in
+ ddsX_phaseY. To exit this mode the user can write to the
+ ddsX_pincontrol_phase_en or ddsX_out_enable file.
+
+What: /sys/bus/iio/devices/.../ddsX_pincontrol_en
+What: /sys/bus/iio/devices/.../ddsX_pincontrol_freq_en
+What: /sys/bus/iio/devices/.../ddsX_pincontrol_phase_en
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ ddsX_pincontrol_en: Both the active frequency and the active
+ phase are controlled by the respective frequency and phase
+ control inputs. If the device in question allows independent
+ control, there are dedicated files (ddsX_pincontrol_freq_en,
+ ddsX_pincontrol_phase_en).
+
+What: /sys/bus/iio/devices/.../ddsX_out_enable
+What: /sys/bus/iio/devices/.../ddsX_outY_enable
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ ddsX_outY_enable controls signal generation on output Y of
+ channel X. Y may be suppressed if all outputs of a channel
+ are controlled together.
+
+What: /sys/bus/iio/devices/.../ddsX_outY_wavetype
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the output waveform
+ (sine, triangle, ramp, square, ...).
+ For a list of the available output waveform options read
+ the ddsX_outY_wavetype_available attribute.
+
+What: /sys/bus/iio/devices/.../ddsX_outY_wavetype_available
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Lists all available output waveform options.
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index ed48815a916b..e2ac07d86110 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -42,11 +42,15 @@ config IIO_TRIGGER
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
+source "drivers/staging/iio/addac/Kconfig"
+source "drivers/staging/iio/dac/Kconfig"
+source "drivers/staging/iio/dds/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/imu/Kconfig"
source "drivers/staging/iio/light/Kconfig"
source "drivers/staging/iio/magnetometer/Kconfig"
-
+source "drivers/staging/iio/meter/Kconfig"
+source "drivers/staging/iio/resolver/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
endif # IIO
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index e909674920fc..f9b5fb2fe8f1 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -11,8 +11,13 @@ obj-$(CONFIG_IIO_SW_RING) += ring_sw.o
obj-y += accel/
obj-y += adc/
+obj-y += addac/
+obj-y += dac/
+obj-y += dds/
obj-y += gyro/
obj-y += imu/
obj-y += light/
-obj-y += trigger/
obj-y += magnetometer/
+obj-y += meter/
+obj-y += resolver/
+obj-y += trigger/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 898cba1c939f..d1ad35e24abb 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -61,6 +61,10 @@ necessitate a header that is also visible from arch board
files. (avoided at the moment to keep the driver set
contained in staging).
+ADI Drivers:
+CC the device-drivers-devel@blackfin.uclinux.org mailing list when
+e-mailing the normal IIO list (see below).
+
Documentation
1) Lots of cleanup and expansion.
2) Some devices require individual docs.
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 5926c03be1a5..a34f1d3e673c 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -3,6 +3,33 @@
#
comment "Accelerometers"
+config ADIS16201
+ tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16201 dual-axis
+ digital inclinometer and accelerometer.
+
+config ADIS16203
+ tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16203 Programmable
+ 360 Degrees Inclinometer.
+
+config ADIS16204
+ tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16204 Programmable
+ High-g Digital Impact Sensor and Recorder.
+
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index ff84703a16f6..1b2a6d3ddafa 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -2,6 +2,18 @@
# Makefile for industrial I/O accelerometer drivers
#
+adis16201-y := adis16201_core.o
+adis16201-$(CONFIG_IIO_RING_BUFFER) += adis16201_ring.o adis16201_trigger.o
+obj-$(CONFIG_ADIS16201) += adis16201.o
+
+adis16203-y := adis16203_core.o
+adis16203-$(CONFIG_IIO_RING_BUFFER) += adis16203_ring.o adis16203_trigger.o
+obj-$(CONFIG_ADIS16203) += adis16203.o
+
+adis16204-y := adis16204_core.o
+adis16204-$(CONFIG_IIO_RING_BUFFER) += adis16204_ring.o adis16204_trigger.o
+obj-$(CONFIG_ADIS16204) += adis16204.o
+
adis16209-y := adis16209_core.o
adis16209-$(CONFIG_IIO_RING_BUFFER) += adis16209_ring.o adis16209_trigger.o
obj-$(CONFIG_ADIS16209) += adis16209.o
diff --git a/drivers/staging/iio/accel/accel.h b/drivers/staging/iio/accel/accel.h
index f5f61b2497aa..50651f835cea 100644
--- a/drivers/staging/iio/accel/accel.h
+++ b/drivers/staging/iio/accel/accel.h
@@ -65,3 +65,23 @@
#define IIO_DEV_ATTR_ACCEL_Z(_show, _addr) \
IIO_DEVICE_ATTR(accel_z_raw, S_IRUGO, _show, NULL, _addr)
+#define IIO_DEV_ATTR_ACCEL_XY(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_xy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_PEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_peak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_xpeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_YPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_ypeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_ZPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_zpeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XYPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_xypeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XYZPEAK(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_xyzpeak, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
new file mode 100644
index 000000000000..c9bf22c13428
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -0,0 +1,150 @@
+#ifndef SPI_ADIS16201_H_
+#define SPI_ADIS16201_H_
+
+#define ADIS16201_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16201_READ_REG(a) a
+#define ADIS16201_WRITE_REG(a) ((a) | 0x80)
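+/* As the macros above encode it, bit 7 of the command/address byte selects a
+ * register write (0x80 set) versus a read (clear); the register address
+ * occupies the remaining bits. */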
+
+#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */
+#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
+#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
+#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */
+#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */
+#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */
+#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */
+#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */
+#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */
+#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */
+#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */
+#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */
+#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
+#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
+#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */
+#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */
+#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */
+#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */
+
+#define ADIS16201_OUTPUTS 7
+
+/* MSC_CTRL */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN (1 << 8) /* Self-test enable */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN (1 << 2) /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH (1 << 1) /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 (1 << 0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* DIAG_STAT */
+#define ADIS16201_DIAG_STAT_ALARM2 (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1 (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_SPI_FAIL (1<<3) /* SPI communications failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT (1<<2) /* Flash update failure */
+#define ADIS16201_DIAG_STAT_POWER_HIGH (1<<1) /* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW (1<<0) /* Power supply below 3.15 V */
+
+/* GLOB_CMD */
+#define ADIS16201_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16201_GLOB_CMD_FACTORY_CAL (1<<1)
+
+#define ADIS16201_MAX_TX 14
+#define ADIS16201_MAX_RX 14
+
+#define ADIS16201_ERROR_ACTIVE (1<<14)
+
+/**
+ * struct adis16201_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16201_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16201_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16201_scan {
+ ADIS16201_SCAN_SUPPLY,
+ ADIS16201_SCAN_ACC_X,
+ ADIS16201_SCAN_ACC_Y,
+ ADIS16201_SCAN_AUX_ADC,
+ ADIS16201_SCAN_TEMP,
+ ADIS16201_SCAN_INCLI_X,
+ ADIS16201_SCAN_INCLI_Y,
+};
+
+void adis16201_remove_trigger(struct iio_dev *indio_dev);
+int adis16201_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16201_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+int adis16201_configure_ring(struct iio_dev *indio_dev);
+void adis16201_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16201_initialize_ring(struct iio_ring_buffer *ring);
+void adis16201_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16201_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16201_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16201_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16201_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16201_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16201_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16201_H_ */
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
new file mode 100644
index 000000000000..79b785a0013a
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -0,0 +1,659 @@
+/*
+ * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16201.h"
+
+#define DRIVER_NAME "adis16201"
+
+static int adis16201_check_status(struct device *dev);
+
+/**
+ * adis16201_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16201_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16201_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16201_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have an address one greater.
+ * @value: value to be written
+ **/
+static int adis16201_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16201_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16201_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16201_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have an address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16201_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16201_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16201_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16201_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16201_ERROR_ACTIVE) {
+ ret = adis16201_check_status(dev);
+ if (ret)
+ return ret;
+ }
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16201_read_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 val;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16201_spi_read_reg_16(dev, ADIS16201_TEMP_OUT, (u16 *)&val);
+ if (ret)
+ goto error_ret;
+
+ if (val & ADIS16201_ERROR_ACTIVE) {
+ ret = adis16201_check_status(dev);
+ if (ret)
+ goto error_ret;
+ }
+
+ val &= 0xFFF;
+ ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static ssize_t adis16201_read_9bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16201_ERROR_ACTIVE) {
+ ret = adis16201_check_status(dev);
+ if (ret)
+ goto error_ret;
+ }
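+ /* Sign extend the 9-bit two's complement result: shift the value up
+  * so bit 8 becomes the s16 sign bit, then arithmetic shift back down. */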
+ val = ((s16)(val << 7) >> 7);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16201_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16201_ERROR_ACTIVE) {
+ ret = adis16201_check_status(dev);
+ if (ret)
+ goto error_ret;
+ }
+
+ val = ((s16)(val << 4) >> 4);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16201_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16201_ERROR_ACTIVE) {
+ ret = adis16201_check_status(dev);
+ if (ret)
+ goto error_ret;
+ }
+
+ val = ((s16)(val << 2) >> 2);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16201_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16201_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16201_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16201_spi_write_reg_8(dev,
+ ADIS16201_GLOB_CMD,
+ ADIS16201_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16201_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16201_reset(dev);
+ }
+ return -EINVAL;
+}
+
+int adis16201_set_irq(struct device *dev, bool enable)
+{
+ int ret = 0;
+ u16 msc;
+
+ ret = adis16201_spi_read_reg_16(dev, ADIS16201_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16201_MSC_CTRL_ACTIVE_HIGH;
+ msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_DIO1;
+ if (enable)
+ msc |= ADIS16201_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16201_spi_write_reg_16(dev, ADIS16201_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+static int adis16201_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16201_spi_read_reg_16(dev, ADIS16201_DIAG_STAT, &status);
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0xF;
+ if (ret)
+ ret = -EFAULT;
+
+ if (status & ADIS16201_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16201_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16201_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 3.625V\n");
+ if (status & ADIS16201_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 3.15V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16201_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16201_spi_write_reg_16(dev,
+ ADIS16201_MSC_CTRL,
+ ADIS16201_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ ret = adis16201_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16201_initial_setup(struct adis16201_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16201_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16201_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16201_check_status(dev);
+ if (ret) {
+ adis16201_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16201_STARTUP_DELAY);
+ ret = adis16201_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16201_read_12bit_unsigned,
+ ADIS16201_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16201_read_12bit_unsigned,
+ ADIS16201_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16201_read_14bit_signed,
+ ADIS16201_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16201_read_14bit_signed,
+ ADIS16201_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16201_read_12bit_signed,
+ adis16201_write_16bit,
+ ADIS16201_XACCL_OFFS);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16201_read_12bit_signed,
+ adis16201_write_16bit,
+ ADIS16201_YACCL_OFFS);
+static IIO_CONST_ATTR(accel_scale, "0.4625");
+
+static IIO_DEV_ATTR_INCLI_X(adis16201_read_14bit_signed,
+ ADIS16201_XINCL_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16201_read_14bit_signed,
+ ADIS16201_YINCL_OUT);
+static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16201_read_9bit_signed,
+ adis16201_write_16bit,
+ ADIS16201_XINCL_OFFS);
+static IIO_DEV_ATTR_INCLI_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16201_read_9bit_signed,
+ adis16201_write_16bit,
+ ADIS16201_YINCL_OFFS);
+static IIO_CONST_ATTR(incli_scale, "0.1");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16201_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16201_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16201");
+
+static struct attribute *adis16201_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16201_event_attribute_group = {
+ .attrs = adis16201_event_attributes,
+};
+
+static struct attribute *adis16201_attributes[] = {
+ &iio_dev_attr_in0_supply_raw.dev_attr.attr,
+ &iio_const_attr_in0_supply_scale.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_const_attr_in1_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_incli_x_raw.dev_attr.attr,
+ &iio_dev_attr_incli_y_raw.dev_attr.attr,
+ &iio_dev_attr_incli_x_offset.dev_attr.attr,
+ &iio_dev_attr_incli_y_offset.dev_attr.attr,
+ &iio_const_attr_incli_scale.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16201_attribute_group = {
+ .attrs = adis16201_attributes,
+};
+
+static int __devinit adis16201_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16201_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16201_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16201_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16201_event_attribute_group;
+ st->indio_dev->attrs = &adis16201_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16201_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16201_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16201");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16201_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16201_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16201_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16201_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16201_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16201_remove(struct spi_device *spi)
+{
+ struct adis16201_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16201_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16201_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16201_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16201_driver = {
+ .driver = {
+ .name = "adis16201",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16201_probe,
+ .remove = __devexit_p(adis16201_remove),
+};
+
+static __init int adis16201_init(void)
+{
+ return spi_register_driver(&adis16201_driver);
+}
+module_init(adis16201_init);
+
+static __exit void adis16201_exit(void)
+{
+ spi_unregister_driver(&adis16201_driver);
+}
+module_exit(adis16201_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
new file mode 100644
index 000000000000..e6870a2721f1
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -0,0 +1,218 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16201.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16201_SCAN_SUPPLY, ADIS16201_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(accel_x, ADIS16201_SCAN_ACC_X, ADIS16201_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16201_SCAN_ACC_Y, ADIS16201_YACCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(accel, s, 14, 16);
+static IIO_SCAN_EL_C(in0, ADIS16201_SCAN_AUX_ADC, ADIS16201_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16201_SCAN_TEMP, ADIS16201_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_C(incli_x, ADIS16201_SCAN_INCLI_X,
+ ADIS16201_XINCL_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16201_SCAN_INCLI_Y,
+ ADIS16201_YINCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(incli, s, 14, 16);
+static IIO_SCAN_EL_TIMESTAMP(7);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16201_scan_el_attrs[] = {
+ &iio_scan_el_in_supply.dev_attr.attr,
+ &iio_const_attr_in_supply_index.dev_attr.attr,
+ &iio_const_attr_in_supply_type.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_const_attr_accel_x_index.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_const_attr_accel_y_index.dev_attr.attr,
+ &iio_const_attr_accel_type.dev_attr.attr,
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_const_attr_in0_index.dev_attr.attr,
+ &iio_const_attr_in0_type.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_const_attr_temp_index.dev_attr.attr,
+ &iio_const_attr_temp_type.dev_attr.attr,
+ &iio_scan_el_incli_x.dev_attr.attr,
+ &iio_const_attr_incli_x_index.dev_attr.attr,
+ &iio_scan_el_incli_y.dev_attr.attr,
+ &iio_const_attr_incli_y_index.dev_attr.attr,
+ &iio_const_attr_incli_type.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ &iio_const_attr_timestamp_index.dev_attr.attr,
+ &iio_const_attr_timestamp_type.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16201_scan_el_group = {
+ .attrs = adis16201_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16201_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16201_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+ struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = time;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16201_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16201_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16201_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
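+ /*
+  * The data for each read command is clocked out during the following
+  * transfer, so ADIS16201_OUTPUTS + 1 transfers are queued and the
+  * receive buffer of transfer i holds the register requested by
+  * transfer i - 1 (hence no rx_buf on the first transfer).
+  */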
+ for (i = 0; i <= ADIS16201_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 20;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ st->tx[2 * i] = ADIS16201_READ_REG(ADIS16201_SUPPLY_OUT + 2 * i);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
+ * device specific to be rolled into the core.
+ */
+static void adis16201_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16201_state *st
+ = container_of(work_s, struct adis16201_state,
+ work_trigger_to_ring);
+ struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (ring->scan_count)
+ if (adis16201_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < ring->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
+
+ /* Guaranteed to be aligned with 8 byte boundary */
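+ /* ((i + 3)/4)*4 rounds the count of 16-bit samples up to a multiple of
+  * four, i.e. up to the next 8 byte boundary, where the s64 timestamp is
+  * stored. */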
+ if (ring->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ ring->access.store_to(ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16201_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16201_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16201_trigger_bh_to_ring);
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->bpe = 2;
+ ring->scan_el_attrs = &adis16201_scan_el_group;
+ ring->scan_timestamp = true;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ /* Set default scan mode */
+ iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+ iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(ring, iio_scan_el_temp.number);
+ iio_scan_mask_set(ring, iio_scan_el_in0.number);
+ iio_scan_mask_set(ring, iio_scan_el_incli_x.number);
+ iio_scan_mask_set(ring, iio_scan_el_incli_y.number);
+
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16201_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16201_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16201_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16201_trigger.c b/drivers/staging/iio/accel/adis16201_trigger.c
new file mode 100644
index 000000000000..8a9cea1986e7
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16201.h"
+
+/**
+ * adis16201_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16201_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16201_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ iio_trigger_poll(trig, timestamp);
+
+ return IRQ_HANDLED;
+}
+
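+/* Declares the shared event handler (iio_event_data_rdy_trig) that
+ * adis16201_data_rdy_trigger_set_state() below adds to, or removes from,
+ * the device's interrupt 0 event list.
+ */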
+IIO_EVENT_SH(data_rdy_trig, &adis16201_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16201_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16201_trigger_attr_group = {
+ .attrs = adis16201_trigger_attrs,
+};
+
+/**
+ * adis16201_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16201_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16201_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16201_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16201_trig_try_reen() try re-enabling the irq for the data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16201_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16201_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16201_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16201_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16201-dev%d",
+ indio_dev->id);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16201_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16201_trig_try_reen;
+ st->trig->control_attrs = &adis16201_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16201_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16201_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
new file mode 100644
index 000000000000..b39323eac9e3
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -0,0 +1,143 @@
+#ifndef SPI_ADIS16203_H_
+#define SPI_ADIS16203_H_
+
+#define ADIS16203_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16203_READ_REG(a) a
+#define ADIS16203_WRITE_REG(a) ((a) | 0x80)
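+
+/* Every access is a 16-bit SPI frame: the upper byte carries the register
+ * address (with bit 7 set for a write) and the lower byte carries write data.
+ * A 16-bit value is therefore split across two frames; as a rough sketch of
+ * what adis16203_spi_write_reg_16() does:
+ *
+ *   tx[0] = ADIS16203_WRITE_REG(reg);      low byte -> reg
+ *   tx[1] = value & 0xFF;
+ *   tx[2] = ADIS16203_WRITE_REG(reg + 1);  high byte -> reg + 1
+ *   tx[3] = (value >> 8) & 0xFF;
+ */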
+
+#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */
+#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */
+#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */
+#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */
+#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */
+#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
+#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
+#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */
+#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */
+#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */
+#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */
+
+#define ADIS16203_OUTPUTS 5
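+
+/* The ring code reads all ADIS16203_OUTPUTS data registers in one burst of
+ * ADIS16203_OUTPUTS + 1 chained two-byte transfers: the part returns the
+ * reply to a read command during the following chip-select frame, so
+ * transfer i carries the data requested in transfer i - 1 and a final dummy
+ * transfer clocks out the last reply (see adis16203_read_ring_data()).
+ */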
+
+/* MSC_CTRL */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST (1 << 10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN (1 << 9) /* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN (1 << 8) /* Self-test enable */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN (1 << 2) /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH (1 << 1) /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 (1 << 0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* DIAG_STAT */
+#define ADIS16203_DIAG_STAT_ALARM2 (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL (1<<5) /* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SPI_FAIL (1<<3) /* SPI communications failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT (1<<2) /* Flash update failure */
+#define ADIS16203_DIAG_STAT_POWER_HIGH (1<<1) /* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW (1<<0) /* Power supply below 3.15 V */
+
+/* GLOB_CMD */
+#define ADIS16203_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16203_GLOB_CMD_CLEAR_STAT (1<<4)
+#define ADIS16203_GLOB_CMD_FACTORY_CAL (1<<1)
+
+#define ADIS16203_MAX_TX 12
+#define ADIS16203_MAX_RX 10
+
+#define ADIS16203_ERROR_ACTIVE (1<<14)
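+
+/* Output words carry status in their upper bits: bit 14
+ * (ADIS16203_ERROR_ACTIVE) flags an error condition worth a DIAG_STAT read.
+ * Supply, aux ADC and temperature are 12-bit unsigned, the inclination
+ * outputs 14-bit twos complement, so a rough decode (as done by the sysfs
+ * read helpers) is:
+ *
+ *   supply = raw & 0x0FFF;             12-bit unsigned
+ *   incli  = (s16)(raw << 2) >> 2;     sign-extend 14 bits
+ */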
+
+/**
+ * struct adis16203_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16203_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16203_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16203_scan {
+ ADIS16203_SCAN_SUPPLY,
+ ADIS16203_SCAN_AUX_ADC,
+ ADIS16203_SCAN_TEMP,
+ ADIS16203_SCAN_INCLI_X,
+ ADIS16203_SCAN_INCLI_Y,
+};
+
+void adis16203_remove_trigger(struct iio_dev *indio_dev);
+int adis16203_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16203_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+int adis16203_configure_ring(struct iio_dev *indio_dev);
+void adis16203_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16203_initialize_ring(struct iio_ring_buffer *ring);
+void adis16203_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16203_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16203_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16203_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16203_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16203_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16203_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16203_H_ */
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
new file mode 100644
index 000000000000..b57f19087a93
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -0,0 +1,568 @@
+/*
+ * ADIS16203 Programmable Digital Vibration Sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16203.h"
+
+#define DRIVER_NAME "adis16203"
+
+static int adis16203_check_status(struct device *dev);
+
+/**
+ * adis16203_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16203_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16203_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16203_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16203_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16203_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16203_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16203_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16203_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16203_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16203_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16203_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16203_ERROR_ACTIVE)
+ adis16203_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16203_read_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 val;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16203_spi_read_reg_16(dev, ADIS16203_TEMP_OUT, (u16 *)&val);
+ if (ret)
+ goto error_ret;
+
+ if (val & ADIS16203_ERROR_ACTIVE)
+ adis16203_check_status(dev);
+
+ val &= 0xFFF;
+ ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static ssize_t adis16203_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16203_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16203_ERROR_ACTIVE)
+ adis16203_check_status(dev);
+
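+ /* sign extend the 14-bit twos complement result */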
+ val = ((s16)(val << 2) >> 2);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16203_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16203_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16203_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16203_spi_write_reg_8(dev,
+ ADIS16203_GLOB_CMD,
+ ADIS16203_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16203_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16203_reset(dev);
+ }
+ return -EINVAL;
+}
+
+int adis16203_set_irq(struct device *dev, bool enable)
+{
+ int ret = 0;
+ u16 msc;
+
+ ret = adis16203_spi_read_reg_16(dev, ADIS16203_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16203_MSC_CTRL_ACTIVE_HIGH;
+ msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_DIO1;
+ if (enable)
+ msc |= ADIS16203_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16203_spi_write_reg_16(dev, ADIS16203_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+static int adis16203_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16203_spi_read_reg_16(dev, ADIS16203_DIAG_STAT, &status);
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0x1F;
+
+ if (status & ADIS16203_DIAG_STAT_SELFTEST_FAIL)
+ dev_err(dev, "Self test failure\n");
+ if (status & ADIS16203_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16203_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16203_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 3.625V\n");
+ if (status & ADIS16203_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 3.15V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16203_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16203_spi_write_reg_16(dev,
+ ADIS16203_MSC_CTRL,
+ ADIS16203_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16203_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16203_initial_setup(struct adis16203_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16203_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16203_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16203_check_status(dev);
+ if (ret) {
+ adis16203_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16203_STARTUP_DELAY);
+ ret = adis16203_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16203_read_12bit_unsigned,
+ ADIS16203_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16203_read_12bit_unsigned,
+ ADIS16203_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_INCLI_X(adis16203_read_14bit_signed,
+ ADIS16203_XINCL_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16203_read_14bit_signed,
+ ADIS16203_YINCL_OUT);
+static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16203_read_14bit_signed,
+ adis16203_write_16bit,
+ ADIS16203_INCL_NULL);
+static IIO_CONST_ATTR(incli_scale, "0.025");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16203_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16203_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16203");
+
+static struct attribute *adis16203_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16203_event_attribute_group = {
+ .attrs = adis16203_event_attributes,
+};
+
+static struct attribute *adis16203_attributes[] = {
+ &iio_dev_attr_in0_supply_raw.dev_attr.attr,
+ &iio_const_attr_in0_supply_scale.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_const_attr_in1_scale.dev_attr.attr,
+ &iio_dev_attr_incli_x_raw.dev_attr.attr,
+ &iio_dev_attr_incli_y_raw.dev_attr.attr,
+ &iio_dev_attr_incli_x_offset.dev_attr.attr,
+ &iio_const_attr_incli_scale.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16203_attribute_group = {
+ .attrs = adis16203_attributes,
+};
+
+static int __devinit adis16203_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16203_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16203_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16203_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16203_event_attribute_group;
+ st->indio_dev->attrs = &adis16203_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16203_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16203_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16203");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16203_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16203_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16203_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16203_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16203_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16203_remove(struct spi_device *spi)
+{
+ struct adis16203_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16203_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16203_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16203_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16203_driver = {
+ .driver = {
+ .name = "adis16203",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16203_probe,
+ .remove = __devexit_p(adis16203_remove),
+};
+
+static __init int adis16203_init(void)
+{
+ return spi_register_driver(&adis16203_driver);
+}
+module_init(adis16203_init);
+
+static __exit void adis16203_exit(void)
+{
+ spi_unregister_driver(&adis16203_driver);
+}
+module_exit(adis16203_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
new file mode 100644
index 000000000000..3d774f7efa25
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -0,0 +1,211 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16203.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16203_SCAN_SUPPLY, ADIS16203_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(in0, ADIS16203_SCAN_AUX_ADC, ADIS16203_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16203_SCAN_TEMP, ADIS16203_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_C(incli_x, ADIS16203_SCAN_INCLI_X,
+ ADIS16203_XINCL_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16203_SCAN_INCLI_Y,
+ ADIS16203_YINCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(incli, s, 14, 16);
+static IIO_SCAN_EL_TIMESTAMP(5);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16203_scan_el_attrs[] = {
+ &iio_scan_el_in_supply.dev_attr.attr,
+ &iio_const_attr_in_supply_index.dev_attr.attr,
+ &iio_const_attr_in_supply_type.dev_attr.attr,
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_const_attr_in0_index.dev_attr.attr,
+ &iio_const_attr_in0_type.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_const_attr_temp_index.dev_attr.attr,
+ &iio_const_attr_temp_type.dev_attr.attr,
+ &iio_scan_el_incli_x.dev_attr.attr,
+ &iio_const_attr_incli_x_index.dev_attr.attr,
+ &iio_scan_el_incli_y.dev_attr.attr,
+ &iio_const_attr_incli_y_index.dev_attr.attr,
+ &iio_const_attr_incli_type.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ &iio_const_attr_timestamp_index.dev_attr.attr,
+ &iio_const_attr_timestamp_type.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16203_scan_el_group = {
+ .attrs = adis16203_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16203_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16203_poll_func_th(struct iio_dev *indio_dev, s64 timestamp)
+{
+ struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16203_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16203_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16203_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
+ for (i = 0; i <= ADIS16203_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 20;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ if (i < 1) /* SUPPLY_OUT: 0x02; the map then jumps to AUX_ADC: 0x08 */
+ st->tx[2 * i] = ADIS16203_READ_REG(ADIS16203_SUPPLY_OUT + 2 * i);
+ else
+ st->tx[2 * i] = ADIS16203_READ_REG(ADIS16203_SUPPLY_OUT + 2 * i + 4);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too
+ * device specific to be rolled into the core.
+ */
+static void adis16203_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16203_state *st
+ = container_of(work_s, struct adis16203_state,
+ work_trigger_to_ring);
+ struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (ring->scan_count)
+ if (adis16203_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < ring->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
+
+ /* Pad the scan data out to a multiple of four 16-bit words so the timestamp lands on an 8 byte boundary */
+ if (ring->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ ring->access.store_to(ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16203_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16203_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16203_trigger_bh_to_ring);
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->bpe = 2;
+ ring->scan_el_attrs = &adis16203_scan_el_group;
+ ring->scan_timestamp = true;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ /* Set default scan mode */
+ iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+ iio_scan_mask_set(ring, iio_scan_el_temp.number);
+ iio_scan_mask_set(ring, iio_scan_el_in0.number);
+ iio_scan_mask_set(ring, iio_scan_el_incli_x.number);
+ iio_scan_mask_set(ring, iio_scan_el_incli_y.number);
+
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16203_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16203_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16203_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16203_trigger.c b/drivers/staging/iio/accel/adis16203_trigger.c
new file mode 100644
index 000000000000..50be51c25dc2
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16203.h"
+
+/**
+ * adis16203_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16203_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16203_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ iio_trigger_poll(trig, timestamp);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16203_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16203_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16203_trigger_attr_group = {
+ .attrs = adis16203_trigger_attrs,
+};
+
+/**
+ * adis16203_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16203_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16203_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16203_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16203_trig_try_reen() try re-enabling the irq for the data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16203_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16203_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16203_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16203_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16203-dev%d",
+ indio_dev->id);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16203_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16203_trig_try_reen;
+ st->trig->control_attrs = &adis16203_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16203_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16203_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
new file mode 100644
index 000000000000..e9ed7cb048cf
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -0,0 +1,151 @@
+#ifndef SPI_ADIS16204_H_
+#define SPI_ADIS16204_H_
+
+#define ADIS16204_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16204_READ_REG(a) a
+#define ADIS16204_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
+#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
+#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
+#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
+#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
+#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
+#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
+#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
+#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
+#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
+#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
+#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
+#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
+#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
+#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
+#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
+#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
+#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
+#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
+#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
+#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
+
+#define ADIS16204_OUTPUTS 5
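+
+/* Unlike the adis16203, the five output registers (SUPPLY_OUT through
+ * TEMP_OUT, 0x02 - 0x0A) are contiguous, so the burst read in
+ * adis16204_ring.c addresses them with a simple SUPPLY_OUT + 2 * i stride.
+ */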
+
+/* MSC_CTRL */
+#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST (1 << 10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16204_MSC_CTRL_SELF_TEST_EN (1 << 8) /* Self-test enable */
+#define ADIS16204_MSC_CTRL_DATA_RDY_EN (1 << 2) /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16204_MSC_CTRL_ACTIVE_HIGH (1 << 1) /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 (1 << 0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+
+/* DIAG_STAT */
+#define ADIS16204_DIAG_STAT_ALARM2 (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16204_DIAG_STAT_ALARM1 (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16204_DIAG_STAT_SELFTEST_FAIL (1<<5) /* Self-test diagnostic error flag: 1 = error condition,
+ 0 = normal operation */
+#define ADIS16204_DIAG_STAT_SPI_FAIL (1<<3) /* SPI communications failure */
+#define ADIS16204_DIAG_STAT_FLASH_UPT (1<<2) /* Flash update failure */
+#define ADIS16204_DIAG_STAT_POWER_HIGH (1<<1) /* Power supply above 3.625 V */
+#define ADIS16204_DIAG_STAT_POWER_LOW (1<<0) /* Power supply below 2.975 V */
+
+/* GLOB_CMD */
+#define ADIS16204_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16204_GLOB_CMD_CLEAR_STAT (1<<4)
+#define ADIS16204_GLOB_CMD_FACTORY_CAL (1<<1)
+
+#define ADIS16204_MAX_TX 24
+#define ADIS16204_MAX_RX 24
+
+#define ADIS16204_ERROR_ACTIVE (1<<14)
+
+/**
+ * struct adis16204_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16204_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16204_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16204_scan {
+ ADIS16204_SCAN_SUPPLY,
+ ADIS16204_SCAN_ACC_X,
+ ADIS16204_SCAN_ACC_Y,
+ ADIS16204_SCAN_AUX_ADC,
+ ADIS16204_SCAN_TEMP,
+};
+
+void adis16204_remove_trigger(struct iio_dev *indio_dev);
+int adis16204_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16204_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+int adis16204_configure_ring(struct iio_dev *indio_dev);
+void adis16204_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16204_initialize_ring(struct iio_ring_buffer *ring);
+void adis16204_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16204_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16204_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16204_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16204_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16204_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16204_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
new file mode 100644
index 000000000000..cc15e40726fc
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -0,0 +1,613 @@
+/*
+ * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16204.h"
+
+#define DRIVER_NAME "adis16204"
+
+static int adis16204_check_status(struct device *dev);
+
+/**
+ * adis16204_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16204_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16204_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16204_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16204_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16204_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16204_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16204_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16204_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16204_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16204_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16204_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16204_ERROR_ACTIVE)
+ adis16204_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16204_read_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 val;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16204_spi_read_reg_16(dev, ADIS16204_TEMP_OUT, (u16 *)&val);
+ if (ret)
+ goto error_ret;
+
+ if (val & ADIS16204_ERROR_ACTIVE)
+ adis16204_check_status(dev);
+
+ val &= 0xFFF;
+ ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static ssize_t adis16204_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16204_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16204_ERROR_ACTIVE)
+ adis16204_check_status(dev);
+
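+ /* sign extend the 12-bit twos complement value */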
+ val = ((s16)(val << 4) >> 4);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16204_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16204_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16204_ERROR_ACTIVE)
+ adis16204_check_status(dev);
+
+ val = ((s16)(val << 2) >> 2);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16204_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16204_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16204_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16204_spi_write_reg_8(dev,
+ ADIS16204_GLOB_CMD,
+ ADIS16204_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16204_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16204_reset(dev);
+ }
+ return -EINVAL;
+}
+
+int adis16204_set_irq(struct device *dev, bool enable)
+{
+ int ret = 0;
+ u16 msc;
+
+ ret = adis16204_spi_read_reg_16(dev, ADIS16204_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16204_MSC_CTRL_ACTIVE_HIGH;
+ msc &= ~ADIS16204_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS16204_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16204_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16204_spi_write_reg_16(dev, ADIS16204_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+static int adis16204_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16204_spi_read_reg_16(dev, ADIS16204_DIAG_STAT, &status);
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0x1F;
+
+ if (status & ADIS16204_DIAG_STAT_SELFTEST_FAIL)
+ dev_err(dev, "Self test failure\n");
+ if (status & ADIS16204_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16204_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16204_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 3.625V\n");
+ if (status & ADIS16204_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 2.975V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16204_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16204_spi_write_reg_16(dev,
+ ADIS16204_MSC_CTRL,
+ ADIS16204_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16204_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16204_initial_setup(struct adis16204_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16204_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16204_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16204_check_status(dev);
+ if (ret) {
+ adis16204_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16204_STARTUP_DELAY);
+ ret = adis16204_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16204_read_12bit_unsigned,
+ ADIS16204_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16204_read_12bit_unsigned,
+ ADIS16204_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16204_read_14bit_signed,
+ ADIS16204_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16204_read_14bit_signed,
+ ADIS16204_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_XY(adis16204_read_14bit_signed,
+ ADIS16204_XY_RSS_OUT);
+static IIO_DEV_ATTR_ACCEL_XPEAK(adis16204_read_14bit_signed,
+ ADIS16204_X_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_YPEAK(adis16204_read_14bit_signed,
+ ADIS16204_Y_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_XYPEAK(adis16204_read_14bit_signed,
+ ADIS16204_XY_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16204_read_12bit_signed,
+ adis16204_write_16bit,
+ ADIS16204_XACCL_NULL);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16204_read_12bit_signed,
+ adis16204_write_16bit,
+ ADIS16204_YACCL_NULL);
+static IIO_CONST_ATTR(accel_x_scale, "0.017125");
+static IIO_CONST_ATTR(accel_y_scale, "0.008407");
+static IIO_CONST_ATTR(accel_xy_scale, "0.017125");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16204_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16204_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16204");
+
+static struct attribute *adis16204_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16204_event_attribute_group = {
+ .attrs = adis16204_event_attributes,
+};
+
+static struct attribute *adis16204_attributes[] = {
+ &iio_dev_attr_in0_supply_raw.dev_attr.attr,
+ &iio_const_attr_in0_supply_scale.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_const_attr_in1_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_xy.dev_attr.attr,
+ &iio_dev_attr_accel_xpeak.dev_attr.attr,
+ &iio_dev_attr_accel_ypeak.dev_attr.attr,
+ &iio_dev_attr_accel_xypeak.dev_attr.attr,
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_const_attr_accel_x_scale.dev_attr.attr,
+ &iio_const_attr_accel_y_scale.dev_attr.attr,
+ &iio_const_attr_accel_xy_scale.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16204_attribute_group = {
+ .attrs = adis16204_attributes,
+};
+
+static int __devinit adis16204_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16204_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16204_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16204_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16204_event_attribute_group;
+ st->indio_dev->attrs = &adis16204_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16204_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16204_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16204");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16204_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16204_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16204_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16204_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16204_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16204_remove(struct spi_device *spi)
+{
+ struct adis16204_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16204_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16204_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16204_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16204_driver = {
+ .driver = {
+ .name = "adis16204",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16204_probe,
+ .remove = __devexit_p(adis16204_remove),
+};
+
+static __init int adis16204_init(void)
+{
+ return spi_register_driver(&adis16204_driver);
+}
+module_init(adis16204_init);
+
+static __exit void adis16204_exit(void)
+{
+ spi_unregister_driver(&adis16204_driver);
+}
+module_exit(adis16204_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
new file mode 100644
index 000000000000..420b160fe3ab
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -0,0 +1,206 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16204.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16204_SCAN_SUPPLY, ADIS16204_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(accel_x, ADIS16204_SCAN_ACC_X, ADIS16204_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16204_SCAN_ACC_Y, ADIS16204_YACCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(accel, s, 14, 16);
+static IIO_SCAN_EL_C(in0, ADIS16204_SCAN_AUX_ADC, ADIS16204_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16204_SCAN_TEMP, ADIS16204_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_TIMESTAMP(5);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16204_scan_el_attrs[] = {
+ &iio_scan_el_in_supply.dev_attr.attr,
+ &iio_const_attr_in_supply_index.dev_attr.attr,
+ &iio_const_attr_in_supply_type.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_const_attr_accel_x_index.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_const_attr_accel_y_index.dev_attr.attr,
+ &iio_const_attr_accel_type.dev_attr.attr,
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_const_attr_in0_index.dev_attr.attr,
+ &iio_const_attr_in0_type.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_const_attr_temp_index.dev_attr.attr,
+ &iio_const_attr_temp_type.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ &iio_const_attr_timestamp_index.dev_attr.attr,
+ &iio_const_attr_timestamp_type.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16204_scan_el_group = {
+ .attrs = adis16204_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16204_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16204_poll_func_th(struct iio_dev *indio_dev, s64 timestamp)
+{
+ struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16204_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16204_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16204_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
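+ /*
+ * Queue one extra transfer: the reply to each register request
+ * arrives during the following transfer.
+ */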
+ for (i = 0; i <= ADIS16204_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 20;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ st->tx[2 * i] = ADIS16204_READ_REG(ADIS16204_SUPPLY_OUT + 2 * i);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16204_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16204_state *st
+ = container_of(work_s, struct adis16204_state,
+ work_trigger_to_ring);
+ struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
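+ /* Burst read the scan elements and convert from big-endian */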
+ if (ring->scan_count)
+ if (adis16204_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < ring->scan_count; i++)
+ data[i] = be16_to_cpup(
+ (__be16 *)&(st->rx[i*2]));
+
+ /* Pad the sample data so the s64 timestamp lands on an 8 byte boundary */
+ if (ring->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ ring->access.store_to(ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16204_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16204_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16204_trigger_bh_to_ring);
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->bpe = 2;
+ ring->scan_el_attrs = &adis16204_scan_el_group;
+ ring->scan_timestamp = true;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ /* Set default scan mode */
+ iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+ iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(ring, iio_scan_el_temp.number);
+ iio_scan_mask_set(ring, iio_scan_el_in0.number);
+
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16204_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16204_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16204_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16204_trigger.c b/drivers/staging/iio/accel/adis16204_trigger.c
new file mode 100644
index 000000000000..8e9db90e51eb
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16204.h"
+
+/**
+ * adis16204_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16204_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16204_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ iio_trigger_poll(trig, timestamp);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16204_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16204_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16204_trigger_attr_group = {
+ .attrs = adis16204_trigger_attrs,
+};
+
+/**
+ * adis16204_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16204_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16204_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16204_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16204_trig_try_reen() try re-enabling the irq for the data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16204_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16204_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16204_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16204_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "adis16204-dev%d",
+ indio_dev->id);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16204_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16204_trig_try_reen;
+ st->trig->control_attrs = &adis16204_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16204_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16204_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 033135c6f226..8eba0af98ed5 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -105,7 +105,7 @@ static int adis16209_read_ring_data(struct device *dev, u8 *rx)
xfers[i].bits_per_word = 8;
xfers[i].cs_change = 1;
xfers[i].len = 2;
- xfers[i].delay_usecs = 20;
+ xfers[i].delay_usecs = 30;
xfers[i].tx_buf = st->tx + 2 * i;
st->tx[2 * i]
= ADIS16209_READ_REG(ADIS16209_SUPPLY_OUT + 2 * i);
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 330d5d6dbba4..1fd088a11076 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -517,7 +517,7 @@ int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
if (ret)
- goto error_iio_sw_rb_free;;
+ goto error_iio_sw_rb_free;
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index acb67677e563..86869cd233ae 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -27,6 +27,41 @@ config MAX1363_RING_BUFFER
Say yes here to include ring buffer support in the MAX1363
ADC driver.
+config AD7150
+ tristate "Analog Devices ad7150/1/6 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors
+ (ad7150, ad7151, ad7156). Provides direct access via sysfs.
+
+config AD7152
+ tristate "Analog Devices ad7152/3 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors
+ (ad7152, ad7153). Provides direct access via sysfs.
+
+config AD7291
+ tristate "Analog Devices AD7291 temperature sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices AD7291
+ temperature sensors.
+
+config AD7298
+ tristate "Analog Devices AD7298 temperature sensor and ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7298
+ temperature sensors and ADC.
+
+config AD7314
+ tristate "Analog Devices AD7314 temperature sensor driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7314
+ temperature sensors.
+
config AD799X
tristate "Analog Devices AD799x ADC driver"
depends on I2C
@@ -50,9 +85,9 @@ config AD799X_RING_BUFFER
config AD7476
tristate "Analog Devices AD7475/6/7/8 AD7466/7/8 and AD7495 ADC driver"
depends on SPI
- select IIO_RING_BUFFER
+ select IIO_RING_BUFFER
select IIO_SW_RING
- select IIO_TRIGGER
+ select IIO_TRIGGER
help
Say yes here to build support for Analog Devices
AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468, AD7495
@@ -61,3 +96,55 @@ config AD7476
To compile this driver as a module, choose M here: the
module will be called ad7476.
+
+config AD7887
+ tristate "Analog Devices AD7887 ADC driver"
+ depends on SPI
+ select IIO_RING_BUFFER
+ select IIO_SW_RING
+ select IIO_TRIGGER
+ help
+ Say yes here to build support for Analog Devices
+ AD7887 SPI analog to digital converter (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7887.
+
+config AD7745
+ tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors
+ (AD7745, AD7746, AD7747). Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7745.
+
+config AD7816
+ tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7816/7/8
+ temperature sensors and ADC.
+
+config ADT75
+ tristate "Analog Devices ADT75 temperature sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices ADT75
+ temperature sensors.
+
+config ADT7310
+ tristate "Analog Devices ADT7310 temperature sensor driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADT7310
+ temperature sensors.
+
+config ADT7410
+ tristate "Analog Devices ADT7410 temperature sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices ADT7410
+ temperature sensors.
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index b62c319bcedd..6f231a2cb777 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -14,3 +14,18 @@ obj-$(CONFIG_AD799X) += ad799x.o
ad7476-y := ad7476_core.o
ad7476-$(CONFIG_IIO_RING_BUFFER) += ad7476_ring.o
obj-$(CONFIG_AD7476) += ad7476.o
+
+ad7887-y := ad7887_core.o
+ad7887-$(CONFIG_IIO_RING_BUFFER) += ad7887_ring.o
+obj-$(CONFIG_AD7887) += ad7887.o
+
+obj-$(CONFIG_AD7150) += ad7150.o
+obj-$(CONFIG_AD7152) += ad7152.o
+obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7298) += ad7298.o
+obj-$(CONFIG_AD7314) += ad7314.o
+obj-$(CONFIG_AD7745) += ad7745.o
+obj-$(CONFIG_AD7816) += ad7816.o
+obj-$(CONFIG_ADT75) += adt75.o
+obj-$(CONFIG_ADT7310) += adt7310.o
+obj-$(CONFIG_ADT7410) += adt7410.o
diff --git a/drivers/staging/iio/adc/ad7150.c b/drivers/staging/iio/adc/ad7150.c
new file mode 100644
index 000000000000..8555766109d8
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7150.c
@@ -0,0 +1,877 @@
+/*
+ * AD7150 capacitive sensor driver supporting AD7150/1/6
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7150 registers definition
+ */
+
+#define AD7150_STATUS 0
+#define AD7150_STATUS_OUT1 (1 << 3)
+#define AD7150_STATUS_OUT2 (1 << 5)
+#define AD7150_CH1_DATA_HIGH 1
+#define AD7150_CH1_DATA_LOW 2
+#define AD7150_CH2_DATA_HIGH 3
+#define AD7150_CH2_DATA_LOW 4
+#define AD7150_CH1_AVG_HIGH 5
+#define AD7150_CH1_AVG_LOW 6
+#define AD7150_CH2_AVG_HIGH 7
+#define AD7150_CH2_AVG_LOW 8
+#define AD7150_CH1_SENSITIVITY 9
+#define AD7150_CH1_THR_HOLD_H 9
+#define AD7150_CH1_TIMEOUT 10
+#define AD7150_CH1_THR_HOLD_L 10
+#define AD7150_CH1_SETUP 11
+#define AD7150_CH2_SENSITIVITY 12
+#define AD7150_CH2_THR_HOLD_H 12
+#define AD7150_CH2_TIMEOUT 13
+#define AD7150_CH2_THR_HOLD_L 13
+#define AD7150_CH2_SETUP 14
+#define AD7150_CFG 15
+#define AD7150_CFG_FIX (1 << 7)
+#define AD7150_PD_TIMER 16
+#define AD7150_CH1_CAPDAC 17
+#define AD7150_CH2_CAPDAC 18
+#define AD7150_SN3 19
+#define AD7150_SN2 20
+#define AD7150_SN1 21
+#define AD7150_SN0 22
+#define AD7150_ID 23
+
+#define AD7150_MAX_CONV_MODE 4
+
+/*
+ * struct ad7150_chip_info - chip specific information
+ */
+
+struct ad7150_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ bool inter;
+ s64 last_timestamp;
+ u16 ch1_threshold; /* Ch1 Threshold (in fixed threshold mode) */
+ u8 ch1_sensitivity; /* Ch1 Sensitivity (in adaptive threshold mode) */
+ u8 ch1_timeout; /* Ch1 Timeout (in adaptive threshold mode) */
+ u8 ch1_setup;
+ u16 ch2_threshold; /* Ch2 Threshold (in fixed threshold mode) */
+ u8 ch2_sensitivity; /* Ch2 Sensitivity (in adaptive threshold mode) */
+ u8 ch2_timeout; /* Ch2 Timeout (in adaptive threshold mode) */
+ u8 ch2_setup;
+ u8 powerdown_timer;
+ char threshold_mode[10]; /* adaptive/fixed threshold mode */
+ int old_state;
+ char *conversion_mode;
+};
+
+struct ad7150_conversion_mode {
+ char *name;
+ u8 reg_cfg;
+};
+
+struct ad7150_conversion_mode ad7150_conv_mode_table[AD7150_MAX_CONV_MODE] = {
+ { "idle", 0 },
+ { "continuous-conversion", 1 },
+ { "single-conversion", 2 },
+ { "power-down", 3 },
+};
+
+/*
+ * ad7150 register access by I2C
+ */
+
+static int ad7150_i2c_read(struct ad7150_chip_info *chip, u8 reg, u8 *data, int len)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
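+ /* Write the register address, then read back the data bytes */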
+ ret = i2c_master_send(client, &reg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write error\n");
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, data, len);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ad7150_i2c_write(struct ad7150_chip_info *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ u8 tx[2] = {
+ reg,
+ data,
+ };
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
+ IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(_show) \
+ IIO_DEVICE_ATTR(available_threshold_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_THRESHOLD_MODE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(threshold_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_THRESHOLD(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_threshold, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_THRESHOLD(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_threshold, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_SENSITIVITY(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_sensitivity, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SENSITIVITY(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_sensitivity, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_TIMEOUT(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_timeout, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_TIMEOUT(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_timeout, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_VALUE(_show) \
+ IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH2_VALUE(_show) \
+ IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_POWERDOWN_TIMER(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(powerdown_timer, _mode, _show, _store, 0)
+
+static ssize_t ad7150_show_conversion_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; i < AD7150_MAX_CONV_MODE; i++)
+ len += sprintf(buf + len, "%s\n", ad7150_conv_mode_table[i].name);
+
+ return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7150_show_conversion_modes);
+
+static ssize_t ad7150_show_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad7150_store_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ u8 cfg;
+ int i;
+
+ ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
+
+ for (i = 0; i < AD7150_MAX_CONV_MODE; i++) {
+ if (strncmp(buf, ad7150_conv_mode_table[i].name,
+ strlen(ad7150_conv_mode_table[i].name) - 1) == 0) {
+ chip->conversion_mode = ad7150_conv_mode_table[i].name;
+ cfg |= 0x18 | ad7150_conv_mode_table[i].reg_cfg;
+ ad7150_i2c_write(chip, AD7150_CFG, cfg);
+ return len;
+ }
+ }
+
+ dev_err(dev, "not supported conversion mode\n");
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+ ad7150_show_conversion_mode,
+ ad7150_store_conversion_mode);
+
+static ssize_t ad7150_show_threshold_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "adaptive\nfixed\n");
+}
+
+static IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(ad7150_show_threshold_modes);
+
+static ssize_t ad7150_show_ch1_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ u8 data[2];
+
+ ad7150_i2c_read(chip, AD7150_CH1_DATA_HIGH, data, 2);
+ return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH1_VALUE(ad7150_show_ch1_value);
+
+static ssize_t ad7150_show_ch2_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ u8 data[2];
+
+ ad7150_i2c_read(chip, AD7150_CH2_DATA_HIGH, data, 2);
+ return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH2_VALUE(ad7150_show_ch2_value);
+
+static ssize_t ad7150_show_threshold_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%s\n", chip->threshold_mode);
+}
+
+static ssize_t ad7150_store_threshold_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ u8 cfg;
+
+ ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
+
+ if (strncmp(buf, "fixed", 5) == 0) {
+ strcpy(chip->threshold_mode, "fixed");
+ cfg |= AD7150_CFG_FIX;
+ ad7150_i2c_write(chip, AD7150_CFG, cfg);
+
+ return len;
+ } else if (strncmp(buf, "adaptive", 8) == 0) {
+ strcpy(chip->threshold_mode, "adaptive");
+ cfg &= ~AD7150_CFG_FIX;
+ ad7150_i2c_write(chip, AD7150_CFG, cfg);
+
+ return len;
+ }
+
+ dev_err(dev, "not supported threshold mode\n");
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_THRESHOLD_MODE(S_IRUGO | S_IWUSR,
+ ad7150_show_threshold_mode,
+ ad7150_store_threshold_mode);
+
+static ssize_t ad7150_show_ch1_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch1_threshold);
+}
+
+static ssize_t ad7150_store_ch1_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_H, data >> 8);
+ ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_L, data);
+ chip->ch1_threshold = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_THRESHOLD(S_IRUGO | S_IWUSR,
+ ad7150_show_ch1_threshold,
+ ad7150_store_ch1_threshold);
+
+static ssize_t ad7150_show_ch2_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch2_threshold);
+}
+
+static ssize_t ad7150_store_ch2_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_H, data >> 8);
+ ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_L, data);
+ chip->ch2_threshold = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_THRESHOLD(S_IRUGO | S_IWUSR,
+ ad7150_show_ch2_threshold,
+ ad7150_store_ch2_threshold);
+
+static ssize_t ad7150_show_ch1_sensitivity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch1_sensitivity);
+}
+
+static ssize_t ad7150_store_ch1_sensitivity(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH1_SENSITIVITY, data);
+ chip->ch1_sensitivity = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SENSITIVITY(S_IRUGO | S_IWUSR,
+ ad7150_show_ch1_sensitivity,
+ ad7150_store_ch1_sensitivity);
+
+static ssize_t ad7150_show_ch2_sensitivity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch2_sensitivity);
+}
+
+static ssize_t ad7150_store_ch2_sensitivity(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH2_SENSITIVITY, data);
+ chip->ch2_sensitivity = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SENSITIVITY(S_IRUGO | S_IWUSR,
+ ad7150_show_ch2_sensitivity,
+ ad7150_store_ch2_sensitivity);
+
+static ssize_t ad7150_show_ch1_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch1_timeout);
+}
+
+static ssize_t ad7150_store_ch1_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH1_TIMEOUT, data);
+ chip->ch1_timeout = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_TIMEOUT(S_IRUGO | S_IWUSR,
+ ad7150_show_ch1_timeout,
+ ad7150_store_ch1_timeout);
+
+static ssize_t ad7150_show_ch2_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch2_timeout);
+}
+
+static ssize_t ad7150_store_ch2_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH2_TIMEOUT, data);
+ chip->ch2_timeout = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_TIMEOUT(S_IRUGO | S_IWUSR,
+ ad7150_show_ch2_timeout,
+ ad7150_store_ch2_timeout);
+
+static ssize_t ad7150_show_ch1_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->ch1_setup);
+}
+
+static ssize_t ad7150_store_ch1_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH1_SETUP, data);
+ chip->ch1_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
+ ad7150_show_ch1_setup,
+ ad7150_store_ch1_setup);
+
+static ssize_t ad7150_show_ch2_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->ch2_setup);
+}
+
+static ssize_t ad7150_store_ch2_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7150_i2c_write(chip, AD7150_CH2_SETUP, data);
+ chip->ch2_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
+ ad7150_show_ch2_setup,
+ ad7150_store_ch2_setup);
+
+static ssize_t ad7150_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7150_show_name, NULL, 0);
+
+static ssize_t ad7150_show_powerdown_timer(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->powerdown_timer);
+}
+
+static ssize_t ad7150_store_powerdown_timer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x40)) {
+ chip->powerdown_timer = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_POWERDOWN_TIMER(S_IRUGO | S_IWUSR,
+ ad7150_show_powerdown_timer,
+ ad7150_store_powerdown_timer);
+
+static struct attribute *ad7150_attributes[] = {
+ &iio_dev_attr_available_threshold_modes.dev_attr.attr,
+ &iio_dev_attr_threshold_mode.dev_attr.attr,
+ &iio_dev_attr_ch1_threshold.dev_attr.attr,
+ &iio_dev_attr_ch2_threshold.dev_attr.attr,
+ &iio_dev_attr_ch1_timeout.dev_attr.attr,
+ &iio_dev_attr_ch2_timeout.dev_attr.attr,
+ &iio_dev_attr_ch1_setup.dev_attr.attr,
+ &iio_dev_attr_ch2_setup.dev_attr.attr,
+ &iio_dev_attr_ch1_sensitivity.dev_attr.attr,
+ &iio_dev_attr_ch2_sensitivity.dev_attr.attr,
+ &iio_dev_attr_powerdown_timer.dev_attr.attr,
+ &iio_dev_attr_ch1_value.dev_attr.attr,
+ &iio_dev_attr_ch2_value.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7150_attribute_group = {
+ .attrs = ad7150_attributes,
+};
+
+/*
+ * threshold events
+ */
+
+#define IIO_EVENT_CODE_CH1_HIGH IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_CH1_LOW IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_CH2_HIGH IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_CH2_LOW IIO_BUFFER_EVENT_CODE(3)
+
+#define IIO_EVENT_ATTR_CH1_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ch1_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH2_HIGH_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ch2_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH1_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ch1_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH2_LOW_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ch2_low, _evlist, _show, _store, _mask)
+
+static void ad7150_interrupt_handler_bh(struct work_struct *work_s)
+{
+ struct ad7150_chip_info *chip =
+ container_of(work_s, struct ad7150_chip_info, thresh_work);
+ u8 int_status;
+
+ enable_irq(chip->client->irq);
+
+ ad7150_i2c_read(chip, AD7150_STATUS, &int_status, 1);
+
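+ /* Only report an event when an OUTx status bit has changed state */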
+ if ((int_status & AD7150_STATUS_OUT1) && !(chip->old_state & AD7150_STATUS_OUT1))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_CH1_HIGH,
+ chip->last_timestamp);
+ else if ((!(int_status & AD7150_STATUS_OUT1)) && (chip->old_state & AD7150_STATUS_OUT1))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_CH1_LOW,
+ chip->last_timestamp);
+
+ if ((int_status & AD7150_STATUS_OUT2) && !(chip->old_state & AD7150_STATUS_OUT2))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_CH2_HIGH,
+ chip->last_timestamp);
+ else if ((!(int_status & AD7150_STATUS_OUT2)) && (chip->old_state & AD7150_STATUS_OUT2))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_CH2_LOW,
+ chip->last_timestamp);
+}
+
+static int ad7150_interrupt_handler_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct ad7150_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(threshold, &ad7150_interrupt_handler_th);
+
+static ssize_t ad7150_query_out_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * AD7150 provides two logic output channels, which can be used as interrupts,
+ * but the pins are not configurable
+ */
+ return sprintf(buf, "1\n");
+}
+
+static ssize_t ad7150_set_out_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return len;
+}
+
+IIO_EVENT_ATTR_CH1_HIGH_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH2_HIGH_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH1_LOW_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH2_LOW_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+
+static struct attribute *ad7150_event_attributes[] = {
+ &iio_event_attr_ch1_high.dev_attr.attr,
+ &iio_event_attr_ch2_high.dev_attr.attr,
+ &iio_event_attr_ch1_low.dev_attr.attr,
+ &iio_event_attr_ch2_low.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad7150_event_attribute_group = {
+ .attrs = ad7150_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7150_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0, regdone = 0;
+ struct ad7150_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &ad7150_attribute_group;
+ chip->indio_dev->event_attrs = &ad7150_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)(chip);
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+ regdone = 1;
+
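+ /* Only hook up the event interface when the IRQ maps to a valid GPIO */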
+ if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "ad7150");
+ if (ret)
+ goto error_free_dev;
+
+ iio_add_event_to_list(iio_event_attr_ch2_low.listel,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, ad7150_interrupt_handler_bh);
+ }
+
+ dev_err(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
+
+ return 0;
+
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(chip->indio_dev);
+ else
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7150_remove(struct i2c_client *client)
+{
+ struct ad7150_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7150_id[] = {
+ { "ad7150", 0 },
+ { "ad7151", 0 },
+ { "ad7156", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7150_id);
+
+static struct i2c_driver ad7150_driver = {
+ .driver = {
+ .name = "ad7150",
+ },
+ .probe = ad7150_probe,
+ .remove = __devexit_p(ad7150_remove),
+ .id_table = ad7150_id,
+};
+
+static __init int ad7150_init(void)
+{
+ return i2c_add_driver(&ad7150_driver);
+}
+
+static __exit void ad7150_exit(void)
+{
+ i2c_del_driver(&ad7150_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7150/1/6 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7150_init);
+module_exit(ad7150_exit);
diff --git a/drivers/staging/iio/adc/ad7152.c b/drivers/staging/iio/adc/ad7152.c
new file mode 100644
index 000000000000..fa7f84062307
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7152.c
@@ -0,0 +1,610 @@
+/*
+ * AD7152 capacitive sensor driver supporting AD7152/3
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7152 registers definition
+ */
+
+#define AD7152_STATUS 0
+#define AD7152_STATUS_RDY1 (1 << 0)
+#define AD7152_STATUS_RDY2 (1 << 1)
+#define AD7152_CH1_DATA_HIGH 1
+#define AD7152_CH1_DATA_LOW 2
+#define AD7152_CH2_DATA_HIGH 3
+#define AD7152_CH2_DATA_LOW 4
+#define AD7152_CH1_OFFS_HIGH 5
+#define AD7152_CH1_OFFS_LOW 6
+#define AD7152_CH2_OFFS_HIGH 7
+#define AD7152_CH2_OFFS_LOW 8
+#define AD7152_CH1_GAIN_HIGH 9
+#define AD7152_CH1_GAIN_LOW 10
+#define AD7152_CH1_SETUP 11
+#define AD7152_CH2_GAIN_HIGH 12
+#define AD7152_CH2_GAIN_LOW 13
+#define AD7152_CH2_SETUP 14
+#define AD7152_CFG 15
+#define AD7152_RESERVED 16
+#define AD7152_CAPDAC_POS 17
+#define AD7152_CAPDAC_NEG 18
+#define AD7152_CFG2 26
+
+#define AD7152_MAX_CONV_MODE 6
+
+/*
+ * struct ad7152_chip_info - chip specific information
+ */
+
+struct ad7152_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ u16 ch1_offset; /* Channel 1 offset calibration coefficient */
+ u16 ch1_gain; /* Channel 1 gain coefficient */
+ u8 ch1_setup;
+ u16 ch2_offset; /* Channel 2 offset calibration coefficient */
+ u16 ch2_gain; /* Channel 2 gain coefficient */
+ u8 ch2_setup;
+ u8 filter_rate_setup; /* Capacitive channel digital filter setup; conversion time/update rate setup per channel */
+ char *conversion_mode;
+};
+
+struct ad7152_conversion_mode {
+ char *name;
+ u8 reg_cfg;
+};
+
+struct ad7152_conversion_mode ad7152_conv_mode_table[AD7152_MAX_CONV_MODE] = {
+ { "idle", 0 },
+ { "continuous-conversion", 1 },
+ { "single-conversion", 2 },
+ { "power-down", 3 },
+ { "offset-calibration", 5 },
+ { "gain-calibration", 6 },
+};
+
+/*
+ * ad7152 register access by I2C
+ */
+
+static int ad7152_i2c_read(struct ad7152_chip_info *chip, u8 reg, u8 *data, int len)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_master_send(client, &reg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write error\n");
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, data, len);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ }
+
+ return ret;
+}
+
+static int ad7152_i2c_write(struct ad7152_chip_info *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ u8 tx[2] = {
+ reg,
+ data,
+ };
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
+ IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_OFFSET(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_offset, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_OFFSET(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_offset, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_GAIN(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_GAIN(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_VALUE(_show) \
+ IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH2_VALUE(_show) \
+ IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_FILTER_RATE_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(filter_rate_setup, _mode, _show, _store, 0)
+
+static ssize_t ad7152_show_conversion_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
+ len += sprintf(buf + len, "%s ", ad7152_conv_mode_table[i].name);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7152_show_conversion_modes);
+
+static ssize_t ad7152_show_ch1_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ u8 data[2];
+
+ ad7152_i2c_read(chip, AD7152_CH1_DATA_HIGH, data, 2);
+ return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH1_VALUE(ad7152_show_ch1_value);
+
+static ssize_t ad7152_show_ch2_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ u8 data[2];
+
+ ad7152_i2c_read(chip, AD7152_CH2_DATA_HIGH, data, 2);
+ return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH2_VALUE(ad7152_show_ch2_value);
+
+static ssize_t ad7152_show_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad7152_store_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ u8 cfg;
+ int i;
+
+ ad7152_i2c_read(chip, AD7152_CFG, &cfg, 1);
+
+ for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
+ if (strncmp(buf, ad7152_conv_mode_table[i].name,
+ strlen(ad7152_conv_mode_table[i].name) - 1) == 0) {
+ chip->conversion_mode = ad7152_conv_mode_table[i].name;
+ cfg |= 0x18 | ad7152_conv_mode_table[i].reg_cfg;
+ ad7152_i2c_write(chip, AD7152_CFG, cfg);
+ return len;
+ }
+
+ dev_err(dev, "not supported conversion mode\n");
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+ ad7152_show_conversion_mode,
+ ad7152_store_conversion_mode);
+
+static ssize_t ad7152_show_ch1_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch1_offset);
+}
+
+static ssize_t ad7152_store_ch1_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7152_i2c_write(chip, AD7152_CH1_OFFS_HIGH, data >> 8);
+ ad7152_i2c_write(chip, AD7152_CH1_OFFS_LOW, data);
+ chip->ch1_offset = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_OFFSET(S_IRUGO | S_IWUSR,
+ ad7152_show_ch1_offset,
+ ad7152_store_ch1_offset);
+
+static ssize_t ad7152_show_ch2_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch2_offset);
+}
+
+static ssize_t ad7152_store_ch2_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7152_i2c_write(chip, AD7152_CH2_OFFS_HIGH, data >> 8);
+ ad7152_i2c_write(chip, AD7152_CH2_OFFS_LOW, data);
+ chip->ch2_offset = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_OFFSET(S_IRUGO | S_IWUSR,
+ ad7152_show_ch2_offset,
+ ad7152_store_ch2_offset);
+
+static ssize_t ad7152_show_ch1_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch1_gain);
+}
+
+static ssize_t ad7152_store_ch1_gain(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7152_i2c_write(chip, AD7152_CH1_GAIN_HIGH, data >> 8);
+ ad7152_i2c_write(chip, AD7152_CH1_GAIN_LOW, data);
+ chip->ch1_gain = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_GAIN(S_IRUGO | S_IWUSR,
+ ad7152_show_ch1_gain,
+ ad7152_store_ch1_gain);
+
+static ssize_t ad7152_show_ch2_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->ch2_gain);
+}
+
+static ssize_t ad7152_store_ch2_gain(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad7152_i2c_write(chip, AD7152_CH2_GAIN_HIGH, data >> 8);
+ ad7152_i2c_write(chip, AD7152_CH2_GAIN_LOW, data);
+ chip->ch2_gain = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_GAIN(S_IRUGO | S_IWUSR,
+ ad7152_show_ch2_gain,
+ ad7152_store_ch2_gain);
+
+static ssize_t ad7152_show_ch1_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->ch1_setup);
+}
+
+static ssize_t ad7152_store_ch1_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7152_i2c_write(chip, AD7152_CH1_SETUP, data);
+ chip->ch1_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
+ ad7152_show_ch1_setup,
+ ad7152_store_ch1_setup);
+
+static ssize_t ad7152_show_ch2_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->ch2_setup);
+}
+
+static ssize_t ad7152_store_ch2_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7152_i2c_write(chip, AD7152_CH2_SETUP, data);
+ chip->ch2_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
+ ad7152_show_ch2_setup,
+ ad7152_store_ch2_setup);
+
+static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->filter_rate_setup);
+}
+
+static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad7152_i2c_write(chip, AD7152_CFG2, data);
+ chip->filter_rate_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_FILTER_RATE_SETUP(S_IRUGO | S_IWUSR,
+ ad7152_show_filter_rate_setup,
+ ad7152_store_filter_rate_setup);
+
+static ssize_t ad7152_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7152_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7152_show_name, NULL, 0);
+
+static struct attribute *ad7152_attributes[] = {
+ &iio_dev_attr_available_conversion_modes.dev_attr.attr,
+ &iio_dev_attr_conversion_mode.dev_attr.attr,
+ &iio_dev_attr_ch1_gain.dev_attr.attr,
+ &iio_dev_attr_ch2_gain.dev_attr.attr,
+ &iio_dev_attr_ch1_offset.dev_attr.attr,
+ &iio_dev_attr_ch2_offset.dev_attr.attr,
+ &iio_dev_attr_ch1_value.dev_attr.attr,
+ &iio_dev_attr_ch2_value.dev_attr.attr,
+ &iio_dev_attr_ch1_setup.dev_attr.attr,
+ &iio_dev_attr_ch2_setup.dev_attr.attr,
+ &iio_dev_attr_filter_rate_setup.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7152_attribute_group = {
+ .attrs = ad7152_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7152_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct ad7152_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &ad7152_attribute_group;
+ chip->indio_dev->dev_data = (void *)(chip);
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7152_remove(struct i2c_client *client)
+{
+ struct ad7152_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7152_id[] = {
+ { "ad7152", 0 },
+ { "ad7153", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7152_id);
+
+static struct i2c_driver ad7152_driver = {
+ .driver = {
+ .name = "ad7152",
+ },
+ .probe = ad7152_probe,
+ .remove = __devexit_p(ad7152_remove),
+ .id_table = ad7152_id,
+};
+
+static __init int ad7152_init(void)
+{
+ return i2c_add_driver(&ad7152_driver);
+}
+
+static __exit void ad7152_exit(void)
+{
+ i2c_del_driver(&ad7152_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7152/3 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7152_init);
+module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
new file mode 100644
index 000000000000..34041a72aa52
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -0,0 +1,1039 @@
+/*
+ * AD7291 digital temperature sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7291 registers definition
+ */
+#define AD7291_COMMAND 0
+#define AD7291_VOLTAGE 1
+#define AD7291_T_SENSE 2
+#define AD7291_T_AVERAGE 3
+#define AD7291_VOLTAGE_LIMIT_BASE 4
+#define AD7291_VOLTAGE_LIMIT_COUNT 8
+#define AD7291_T_SENSE_HIGH 0x1c
+#define AD7291_T_SENSE_LOW 0x1d
+#define AD7291_T_SENSE_HYST 0x1e
+#define AD7291_VOLTAGE_ALERT_STATUS 0x1f
+#define AD7291_T_ALERT_STATUS 0x20
+
+/*
+ * AD7291 command
+ */
+#define AD7291_AUTOCYCLE 0x1
+#define AD7291_RESET 0x2
+#define AD7291_ALART_CLEAR 0x4
+#define AD7291_ALART_POLARITY 0x8
+#define AD7291_EXT_REF 0x10
+#define AD7291_NOISE_DELAY 0x20
+#define AD7291_T_SENSE_MASK 0x40
+#define AD7291_VOLTAGE_MASK 0xff00
+#define AD7291_VOLTAGE_OFFSET 0x8
+
+/*
+ * AD7291 value masks
+ */
+#define AD7291_CHANNEL_MASK 0xf000
+#define AD7291_VALUE_MASK 0xfff
+#define AD7291_T_VALUE_SIGN 0x400
+#define AD7291_T_VALUE_FLOAT_OFFSET 2
+#define AD7291_T_VALUE_FLOAT_MASK 0x2
+
+/*
+ * struct ad7291_chip_info - chip specific information
+ */
+
+struct ad7291_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ u16 command;
+ u8 channels; /* Active voltage channels */
+};
+
+/*
+ * struct ad7291_limit_regs - limit and hysteresis registers
+ */
+
+struct ad7291_limit_regs {
+ u16 data_high;
+ u16 data_low;
+ u16 hysteresis;
+};
+
+/*
+ * ad7291 register access by I2C
+ */
+static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
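+ /* SMBus returns the low byte first but the device sends the high byte first */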
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ *data = swab16((u16)ret);
+
+ return 0;
+}
+
+static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_write_word_data(client, reg, swab16(data));
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+/* Returns negative errno, or else the number of words read. */
+static int ad7291_i2c_read_data(struct ad7291_chip_info *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ u8 commands[4];
+ int ret = 0;
+ int i, count;
+
+ if (reg == AD7291_T_SENSE || reg == AD7291_T_AVERAGE)
+ count = 2;
+ else if (reg == AD7291_VOLTAGE) {
+ if (!chip->channels) {
+ dev_err(&client->dev, "No voltage channel is selected.\n");
+ return -EINVAL;
+ }
+ count = 2 + chip->channels * 2;
+ } else {
+ dev_err(&client->dev, "I2C wrong data register\n");
+ return -EINVAL;
+ }
+
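+ /* Send the command word and the register address, then read the result back */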
+ commands[0] = 0;
+ commands[1] = (chip->command >> 8) & 0xff;
+ commands[2] = chip->command & 0xff;
+ commands[3] = reg;
+
+ ret = i2c_master_send(client, commands, 4);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C master send error\n");
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, (u8 *)data, count);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C master receive error\n");
+ return ret;
+ }
+ ret >>= 2;
+
+ for (i = 0; i < ret; i++)
+ data[i] = swab16(data[i]);
+
+ return ret;
+}
+
+static ssize_t ad7291_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+
+ if (chip->command & AD7291_AUTOCYCLE)
+ return sprintf(buf, "autocycle\n");
+ else
+ return sprintf(buf, "command\n");
+}
+
+static ssize_t ad7291_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command & (~AD7291_AUTOCYCLE);
+ if (strcmp(buf, "autocycle"))
+ command |= AD7291_AUTOCYCLE;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+ if (ret)
+ return -EIO;
+
+ chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ ad7291_show_mode,
+ ad7291_store_mode,
+ 0);
+
+static ssize_t ad7291_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "command\nautocycle\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7291_show_available_modes, NULL, 0);
+
+static ssize_t ad7291_store_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command | AD7291_RESET;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+ if (ret)
+ return -EIO;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+ NULL,
+ ad7291_store_reset,
+ 0);
+
+static ssize_t ad7291_show_ext_ref(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->command & AD7291_EXT_REF));
+}
+
+static ssize_t ad7291_store_ext_ref(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command & (~AD7291_EXT_REF);
+	if (sysfs_streq(buf, "1"))
+ command |= AD7291_EXT_REF;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+ if (ret)
+ return -EIO;
+
+ chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(ext_ref, S_IRUGO | S_IWUSR,
+ ad7291_show_ext_ref,
+ ad7291_store_ext_ref,
+ 0);
+
+static ssize_t ad7291_show_noise_delay(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->command & AD7291_NOISE_DELAY));
+}
+
+static ssize_t ad7291_store_noise_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command & (~AD7291_NOISE_DELAY);
+	if (sysfs_streq(buf, "1"))
+ command |= AD7291_NOISE_DELAY;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+ if (ret)
+ return -EIO;
+
+ chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(noise_delay, S_IRUGO | S_IWUSR,
+ ad7291_show_noise_delay,
+ ad7291_store_noise_delay,
+ 0);
+
+static ssize_t ad7291_show_t_sense(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ ret = ad7291_i2c_read_data(chip, AD7291_T_SENSE, &data);
+	if (ret < 0)
+ return -EIO;
+
+ if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive magnitude */
+ data = (AD7291_T_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ (data >> AD7291_T_VALUE_FLOAT_OFFSET),
+ (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_sense, S_IRUGO, ad7291_show_t_sense, NULL, 0);
+
+static ssize_t ad7291_show_t_average(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ ret = ad7291_i2c_read_data(chip, AD7291_T_AVERAGE, &data);
+	if (ret < 0)
+ return -EIO;
+
+ if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive magnitude */
+ data = (AD7291_T_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ (data >> AD7291_T_VALUE_FLOAT_OFFSET),
+ (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_average, S_IRUGO, ad7291_show_t_average, NULL, 0);
+
+static ssize_t ad7291_show_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 data[AD7291_VOLTAGE_LIMIT_COUNT];
+	int i, size = 0, ret;
+
+ ret = ad7291_i2c_read_data(chip, AD7291_VOLTAGE, data);
+	if (ret < 0)
+ return -EIO;
+
+ for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (i + AD7291_VOLTAGE_OFFSET))) {
+ ret = sprintf(buf, "channel[%d]=%d\n", i,
+ data[i] & AD7291_VALUE_MASK);
+ if (ret < 0)
+ break;
+ buf += ret;
+ size += ret;
+ }
+ }
+
+ return size;
+}
+
+static IIO_DEVICE_ATTR(voltage, S_IRUGO, ad7291_show_voltage, NULL, 0);
+
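+/*
+ * channel_mask is an eight-bit hex bitmask with one bit per voltage
+ * input; writing it updates the command register and recounts the
+ * number of active channels used when reading conversion results.
+ */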
+static ssize_t ad7291_show_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%x\n", (chip->command & AD7291_VOLTAGE_MASK) >>
+ AD7291_VOLTAGE_OFFSET);
+}
+
+static ssize_t ad7291_store_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ unsigned long data;
+ int i, ret;
+
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data > 0xff)
+ return -EINVAL;
+
+ command = chip->command & (~AD7291_VOLTAGE_MASK);
+ command |= data << AD7291_VOLTAGE_OFFSET;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+ if (ret)
+ return -EIO;
+
+ chip->command = command;
+
+ for (i = 0, chip->channels = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (i + AD7291_VOLTAGE_OFFSET)))
+ chip->channels++;
+ }
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(channel_mask, S_IRUGO | S_IWUSR,
+ ad7291_show_channel_mask,
+ ad7291_store_channel_mask,
+ 0);
+
+static ssize_t ad7291_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7291_show_name, NULL, 0);
+
+static struct attribute *ad7291_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_ext_ref.dev_attr.attr,
+ &iio_dev_attr_noise_delay.dev_attr.attr,
+ &iio_dev_attr_t_sense.dev_attr.attr,
+ &iio_dev_attr_t_average.dev_attr.attr,
+ &iio_dev_attr_voltage.dev_attr.attr,
+ &iio_dev_attr_channel_mask.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7291_attribute_group = {
+ .attrs = ad7291_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_AD7291_T_SENSE_HIGH IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_AD7291_T_SENSE_LOW IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_AD7291_T_AVG_HIGH IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_AD7291_T_AVG_LOW IIO_BUFFER_EVENT_CODE(3)
+#define IIO_EVENT_CODE_AD7291_VOLTAGE_BASE IIO_BUFFER_EVENT_CODE(4)
+
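+/*
+ * Alert handling is split in two: ad7291_interrupt() runs in interrupt
+ * context, records the timestamp and schedules this work function,
+ * which reads both alert status registers, pulses the ALART_CLEAR bit
+ * in the command register, re-enables the IRQ and pushes one IIO event
+ * per asserted status bit.
+ */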
+static void ad7291_interrupt_bh(struct work_struct *work_s)
+{
+ struct ad7291_chip_info *chip =
+ container_of(work_s, struct ad7291_chip_info, thresh_work);
+ u16 t_status, v_status;
+ u16 command;
+ int i;
+
+ if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
+ return;
+
+ if (ad7291_i2c_read(chip, AD7291_VOLTAGE_ALERT_STATUS, &v_status))
+ return;
+
+ if (!(t_status || v_status))
+ return;
+
+ command = chip->command | AD7291_ALART_CLEAR;
+ ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+ command = chip->command & ~AD7291_ALART_CLEAR;
+ ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+ enable_irq(chip->client->irq);
+
+ for (i = 0; i < 4; i++) {
+ if (t_status & (1 << i))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_AD7291_T_SENSE_HIGH + i,
+ chip->last_timestamp);
+ }
+
+ for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT*2; i++) {
+ if (v_status & (1 << i))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_AD7291_VOLTAGE_BASE + i,
+ chip->last_timestamp);
+ }
+}
+
+static int ad7291_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(ad7291, &ad7291_interrupt);
+
+static inline ssize_t ad7291_show_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ ret = ad7291_i2c_read(chip, bound_reg, &data);
+ if (ret)
+ return -EIO;
+
+ data &= AD7291_VALUE_MASK;
+ if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive magnitude */
+ data = (AD7291_T_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ data >> AD7291_T_VALUE_FLOAT_OFFSET,
+ (data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static inline ssize_t ad7291_set_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+	long tmp1, tmp2 = 0;
+ u16 data;
+ char *pos;
+ int ret;
+
+ pos = strchr(buf, '.');
+
+ ret = strict_strtol(buf, 10, &tmp1);
+
+ if (ret || tmp1 > 127 || tmp1 < -128)
+ return -EINVAL;
+
+ if (pos) {
+ len = strlen(pos);
+ if (len > AD7291_T_VALUE_FLOAT_OFFSET)
+ len = AD7291_T_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 25) * 25;
+ }
+
+ if (tmp1 < 0)
+ data = (u16)(-tmp1);
+ else
+ data = (u16)tmp1;
+ data = (data << AD7291_T_VALUE_FLOAT_OFFSET) |
+ (tmp2 & AD7291_T_VALUE_FLOAT_MASK);
+ if (tmp1 < 0)
+		/* convert positive magnitude to two's complement */
+ data = (AD7291_T_VALUE_SIGN << 1) - data;
+
+ ret = ad7291_i2c_write(chip, bound_reg, data);
+ if (ret)
+ return -EIO;
+
+	return len;
+}
+
+static ssize_t ad7291_show_t_sense_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return ad7291_show_t_bound(dev, attr,
+ AD7291_T_SENSE_HIGH, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return ad7291_set_t_bound(dev, attr,
+ AD7291_T_SENSE_HIGH, buf, len);
+}
+
+static ssize_t ad7291_show_t_sense_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return ad7291_show_t_bound(dev, attr,
+ AD7291_T_SENSE_LOW, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return ad7291_set_t_bound(dev, attr,
+ AD7291_T_SENSE_LOW, buf, len);
+}
+
+static ssize_t ad7291_show_t_sense_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return ad7291_show_t_bound(dev, attr,
+ AD7291_T_SENSE_HYST, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return ad7291_set_t_bound(dev, attr,
+ AD7291_T_SENSE_HYST, buf, len);
+}
+
+static inline ssize_t ad7291_show_v_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ int ret;
+
+ if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
+ bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
+ AD7291_VOLTAGE_LIMIT_COUNT)
+ return -EINVAL;
+
+ ret = ad7291_i2c_read(chip, bound_reg, &data);
+ if (ret)
+ return -EIO;
+
+ data &= AD7291_VALUE_MASK;
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static inline ssize_t ad7291_set_v_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7291_chip_info *chip = dev_info->dev_data;
+ unsigned long value;
+ u16 data;
+ int ret;
+
+ if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
+ bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
+ AD7291_VOLTAGE_LIMIT_COUNT)
+ return -EINVAL;
+
+ ret = strict_strtoul(buf, 10, &value);
+
+ if (ret || value >= 4096)
+ return -EINVAL;
+
+ data = (u16)value;
+ ret = ad7291_i2c_write(chip, bound_reg, data);
+ if (ret)
+ return -EIO;
+
+	return len;
+}
+
+static int ad7291_get_voltage_limit_regs(const char *channel)
+{
+ int index;
+
+	if (strlen(channel) < 3 || channel[0] != 'v')
+ return -EINVAL;
+
+ index = channel[1] - '0';
+ if (index >= AD7291_VOLTAGE_LIMIT_COUNT)
+ return -EINVAL;
+
+ return index;
+}
+
+static ssize_t ad7291_show_voltage_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_show_t_bound(dev, attr, regs, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_set_t_bound(dev, attr, regs, buf, len);
+}
+
+static ssize_t ad7291_show_voltage_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_show_t_bound(dev, attr, regs+1, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_set_t_bound(dev, attr, regs+1, buf, len);
+}
+
+static ssize_t ad7291_show_voltage_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_show_t_bound(dev, attr, regs+2, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int regs;
+
+ regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+ if (regs < 0)
+ return regs;
+
+ return ad7291_set_t_bound(dev, attr, regs+2, buf, len);
+}
+
+IIO_EVENT_ATTR_SH(t_sense_high, iio_event_ad7291,
+ ad7291_show_t_sense_high, ad7291_set_t_sense_high, 0);
+IIO_EVENT_ATTR_SH(t_sense_low, iio_event_ad7291,
+ ad7291_show_t_sense_low, ad7291_set_t_sense_low, 0);
+IIO_EVENT_ATTR_SH(t_sense_hyst, iio_event_ad7291,
+ ad7291_show_t_sense_hyst, ad7291_set_t_sense_hyst, 0);
+
+IIO_EVENT_ATTR_SH(v0_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v0_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v0_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v1_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v1_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v1_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v2_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v2_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v2_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v3_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v3_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v3_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v4_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v4_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v4_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v5_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v5_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v5_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v6_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v6_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v6_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v7_high, iio_event_ad7291,
+ ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v7_low, iio_event_ad7291,
+ ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v7_hyst, iio_event_ad7291,
+ ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+
+static struct attribute *ad7291_event_attributes[] = {
+ &iio_event_attr_t_sense_high.dev_attr.attr,
+ &iio_event_attr_t_sense_low.dev_attr.attr,
+ &iio_event_attr_t_sense_hyst.dev_attr.attr,
+ &iio_event_attr_v0_high.dev_attr.attr,
+ &iio_event_attr_v0_low.dev_attr.attr,
+ &iio_event_attr_v0_hyst.dev_attr.attr,
+ &iio_event_attr_v1_high.dev_attr.attr,
+ &iio_event_attr_v1_low.dev_attr.attr,
+ &iio_event_attr_v1_hyst.dev_attr.attr,
+ &iio_event_attr_v2_high.dev_attr.attr,
+ &iio_event_attr_v2_low.dev_attr.attr,
+ &iio_event_attr_v2_hyst.dev_attr.attr,
+ &iio_event_attr_v3_high.dev_attr.attr,
+ &iio_event_attr_v3_low.dev_attr.attr,
+ &iio_event_attr_v3_hyst.dev_attr.attr,
+ &iio_event_attr_v4_high.dev_attr.attr,
+ &iio_event_attr_v4_low.dev_attr.attr,
+ &iio_event_attr_v4_hyst.dev_attr.attr,
+ &iio_event_attr_v5_high.dev_attr.attr,
+ &iio_event_attr_v5_low.dev_attr.attr,
+ &iio_event_attr_v5_hyst.dev_attr.attr,
+ &iio_event_attr_v6_high.dev_attr.attr,
+ &iio_event_attr_v6_low.dev_attr.attr,
+ &iio_event_attr_v6_hyst.dev_attr.attr,
+ &iio_event_attr_v7_high.dev_attr.attr,
+ &iio_event_attr_v7_low.dev_attr.attr,
+ &iio_event_attr_v7_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad7291_event_attribute_group = {
+ .attrs = ad7291_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7291_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad7291_chip_info *chip;
+ int ret = 0;
+
+ chip = kzalloc(sizeof(struct ad7291_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+ chip->command = AD7291_NOISE_DELAY | AD7291_T_SENSE_MASK;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &ad7291_attribute_group;
+ chip->indio_dev->event_attrs = &ad7291_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (client->irq > 0) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_LOW,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+ /*
+		 * The event handler list element refers to iio_event_ad7291.
+		 * All event attributes bind to the same event handler,
+		 * so the handler only needs to be registered once.
+ */
+ iio_add_event_to_list(&iio_event_ad7291,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, ad7291_interrupt_bh);
+
+		/* set the ALERT/interrupt polarity to active low */
+ chip->command |= AD7291_ALART_POLARITY;
+ }
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_irq;
+ }
+
+ dev_info(&client->dev, "%s temperature sensor registered.\n",
+ id->name);
+
+ return 0;
+
+error_unreg_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit ad7291_remove(struct i2c_client *client)
+{
+ struct ad7291_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ if (client->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7291_id[] = {
+ { "ad7291", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7291_id);
+
+static struct i2c_driver ad7291_driver = {
+ .driver = {
+ .name = "ad7291",
+ },
+ .probe = ad7291_probe,
+ .remove = __devexit_p(ad7291_remove),
+ .id_table = ad7291_id,
+};
+
+static __init int ad7291_init(void)
+{
+ return i2c_add_driver(&ad7291_driver);
+}
+
+static __exit void ad7291_exit(void)
+{
+ i2c_del_driver(&ad7291_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7291 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7291_init);
+module_exit(ad7291_exit);
diff --git a/drivers/staging/iio/adc/ad7298.c b/drivers/staging/iio/adc/ad7298.c
new file mode 100644
index 000000000000..1a080c977637
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7298.c
@@ -0,0 +1,501 @@
+/*
+ * AD7298 digital temperature sensor and ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7298 command
+ */
+#define AD7298_PD 0x1
+#define AD7298_T_AVG_MASK 0x2
+#define AD7298_EXT_REF 0x4
+#define AD7298_T_SENSE_MASK 0x20
+#define AD7298_VOLTAGE_MASK 0x3fc0
+#define AD7298_VOLTAGE_OFFSET 0x6
+#define AD7298_VOLTAGE_LIMIT_COUNT 8
+#define AD7298_REPEAT 0x40
+#define AD7298_WRITE 0x80
+
+/*
+ * AD7298 value masks
+ */
+#define AD7298_CHANNEL_MASK 0xf000
+#define AD7298_VALUE_MASK 0xfff
+#define AD7298_T_VALUE_SIGN 0x400
+#define AD7298_T_VALUE_FLOAT_OFFSET 2
+#define AD7298_T_VALUE_FLOAT_MASK 0x2
+
+/*
+ * struct ad7298_chip_info - chip specific information
+ */
+
+struct ad7298_chip_info {
+ const char *name;
+ struct spi_device *spi_dev;
+ struct iio_dev *indio_dev;
+ u16 command;
+ u16 busy_pin;
+ u8 channels; /* Active voltage channels */
+};
+
+/*
+ * ad7298 register access by SPI
+ */
+static int ad7298_spi_write(struct ad7298_chip_info *chip, u16 data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+
+ data |= AD7298_WRITE;
+ data = cpu_to_be16(data);
+ ret = spi_write(spi_dev, (u8 *)&data, sizeof(data));
+ if (ret < 0)
+ dev_err(&spi_dev->dev, "SPI write error\n");
+
+ return ret;
+}
+
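+/*
+ * A read first rewrites the command register for the requested source
+ * (temperature, averaged temperature or the enabled voltage channels),
+ * reads back one throwaway word, then polls the busy GPIO until the
+ * conversion completes before reading back the big-endian results.
+ */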
+static int ad7298_spi_read(struct ad7298_chip_info *chip, u16 mask, u16 *data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+ u8 count = chip->channels;
+ u16 command;
+ int i;
+
+ if (mask & AD7298_T_SENSE_MASK) {
+ command = chip->command & ~(AD7298_T_AVG_MASK | AD7298_VOLTAGE_MASK);
+ command |= AD7298_T_SENSE_MASK;
+ count = 1;
+ } else if (mask & AD7298_T_AVG_MASK) {
+ command = chip->command & ~AD7298_VOLTAGE_MASK;
+ command |= AD7298_T_SENSE_MASK | AD7298_T_AVG_MASK;
+ count = 2;
+	} else if (mask & AD7298_VOLTAGE_MASK) {
+		command = chip->command & ~(AD7298_T_AVG_MASK | AD7298_T_SENSE_MASK);
+		count = chip->channels;
+	} else {
+		/* no known conversion source requested */
+		return -EINVAL;
+	}
+
+	ret = ad7298_spi_write(chip, command);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write command error\n");
+ return ret;
+ }
+
+ ret = spi_read(spi_dev, (u8 *)&command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read error\n");
+ return ret;
+ }
+
+ i = 10000;
+ while (i && gpio_get_value(chip->busy_pin)) {
+ cpu_relax();
+ i--;
+ }
+ if (!i) {
+		dev_err(&spi_dev->dev, "Timed out waiting for the busy pin to clear\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = spi_read(spi_dev, (u8 *)&data[i], sizeof(data[i]));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read error\n");
+ return ret;
+ }
+		data[i] = be16_to_cpu(data[i]);
+ }
+
+ return 0;
+}
+
+static ssize_t ad7298_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+
+ if (chip->command & AD7298_REPEAT)
+ return sprintf(buf, "repeat\n");
+ else
+ return sprintf(buf, "normal\n");
+}
+
+static ssize_t ad7298_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+
+	if (sysfs_streq(buf, "repeat"))
+ chip->command |= AD7298_REPEAT;
+ else
+ chip->command &= (~AD7298_REPEAT);
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ ad7298_show_mode,
+ ad7298_store_mode,
+ 0);
+
+static ssize_t ad7298_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "normal\nrepeat\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7298_show_available_modes, NULL, 0);
+
+static ssize_t ad7298_store_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command & ~AD7298_PD;
+
+ ret = ad7298_spi_write(chip, command);
+ if (ret)
+ return -EIO;
+
+ command = chip->command | AD7298_PD;
+
+ ret = ad7298_spi_write(chip, command);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+ NULL,
+ ad7298_store_reset,
+ 0);
+
+static ssize_t ad7298_show_ext_ref(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->command & AD7298_EXT_REF));
+}
+
+static ssize_t ad7298_store_ext_ref(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ u16 command;
+ int ret;
+
+ command = chip->command & (~AD7298_EXT_REF);
+	if (sysfs_streq(buf, "1"))
+ command |= AD7298_EXT_REF;
+
+ ret = ad7298_spi_write(chip, command);
+ if (ret)
+ return -EIO;
+
+ chip->command = command;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(ext_ref, S_IRUGO | S_IWUSR,
+ ad7298_show_ext_ref,
+ ad7298_store_ext_ref,
+ 0);
+
+static ssize_t ad7298_show_t_sense(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ ret = ad7298_spi_read(chip, AD7298_T_SENSE_MASK, &data);
+ if (ret)
+ return -EIO;
+
+ if (data & AD7298_T_VALUE_SIGN) {
+		/* convert two's complement to positive magnitude */
+ data = (AD7298_T_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ (data >> AD7298_T_VALUE_FLOAT_OFFSET),
+ (data & AD7298_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_sense, S_IRUGO, ad7298_show_t_sense, NULL, 0);
+
+static ssize_t ad7298_show_t_average(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ u16 data[2];
+ char sign = ' ';
+ int ret;
+
+ ret = ad7298_spi_read(chip, AD7298_T_AVG_MASK, data);
+ if (ret)
+ return -EIO;
+
+ if (data[1] & AD7298_T_VALUE_SIGN) {
+		/* convert two's complement to positive magnitude */
+ data[1] = (AD7298_T_VALUE_SIGN << 1) - data[1];
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ (data[1] >> AD7298_T_VALUE_FLOAT_OFFSET),
+ (data[1] & AD7298_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_average, S_IRUGO, ad7298_show_t_average, NULL, 0);
+
+static ssize_t ad7298_show_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ u16 data[AD7298_VOLTAGE_LIMIT_COUNT];
+	int i, size = 0, ret;
+
+ ret = ad7298_spi_read(chip, AD7298_VOLTAGE_MASK, data);
+ if (ret)
+ return -EIO;
+
+ for (i = 0; i < AD7298_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (i + AD7298_VOLTAGE_OFFSET))) {
+ ret = sprintf(buf, "channel[%d]=%d\n", i,
+ data[i] & AD7298_VALUE_MASK);
+ if (ret < 0)
+ break;
+ buf += ret;
+ size += ret;
+ }
+ }
+
+ return size;
+}
+
+static IIO_DEVICE_ATTR(voltage, S_IRUGO, ad7298_show_voltage, NULL, 0);
+
+static ssize_t ad7298_show_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%x\n", (chip->command & AD7298_VOLTAGE_MASK) >>
+ AD7298_VOLTAGE_OFFSET);
+}
+
+static ssize_t ad7298_store_channel_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int i, ret;
+
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data > 0xff)
+ return -EINVAL;
+
+ chip->command &= (~AD7298_VOLTAGE_MASK);
+ chip->command |= data << AD7298_VOLTAGE_OFFSET;
+
+ for (i = 0, chip->channels = 0; i < AD7298_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (i + AD7298_VOLTAGE_OFFSET)))
+ chip->channels++;
+ }
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(channel_mask, S_IRUGO | S_IWUSR,
+ ad7298_show_channel_mask,
+ ad7298_store_channel_mask,
+ 0);
+
+static ssize_t ad7298_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7298_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7298_show_name, NULL, 0);
+
+static struct attribute *ad7298_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_ext_ref.dev_attr.attr,
+ &iio_dev_attr_t_sense.dev_attr.attr,
+ &iio_dev_attr_t_average.dev_attr.attr,
+ &iio_dev_attr_voltage.dev_attr.attr,
+ &iio_dev_attr_channel_mask.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7298_attribute_group = {
+ .attrs = ad7298_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+static int __devinit ad7298_probe(struct spi_device *spi_dev)
+{
+ struct ad7298_chip_info *chip;
+ unsigned short *pins = spi_dev->dev.platform_data;
+ int ret = 0;
+
+ chip = kzalloc(sizeof(struct ad7298_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ dev_set_drvdata(&spi_dev->dev, chip);
+
+ chip->spi_dev = spi_dev;
+ chip->name = spi_dev->modalias;
+ chip->busy_pin = pins[0];
+
+ ret = gpio_request(chip->busy_pin, chip->name);
+ if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request busy GPIO pin %d.\n",
+ chip->busy_pin);
+ goto error_free_chip;
+ }
+ gpio_direction_input(chip->busy_pin);
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_gpio;
+ }
+
+ chip->indio_dev->dev.parent = &spi_dev->dev;
+ chip->indio_dev->attrs = &ad7298_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
+ chip->name);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_gpio:
+ gpio_free(chip->busy_pin);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit ad7298_remove(struct spi_device *spi_dev)
+{
+ struct ad7298_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ dev_set_drvdata(&spi_dev->dev, NULL);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ gpio_free(chip->busy_pin);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7298_id[] = {
+ { "ad7298", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, ad7298_id);
+
+static struct spi_driver ad7298_driver = {
+ .driver = {
+ .name = "ad7298",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7298_probe,
+ .remove = __devexit_p(ad7298_remove),
+ .id_table = ad7298_id,
+};
+
+static __init int ad7298_init(void)
+{
+ return spi_register_driver(&ad7298_driver);
+}
+
+static __exit void ad7298_exit(void)
+{
+ spi_unregister_driver(&ad7298_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7298 digital"
+ " temperature sensor and ADC driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7298_init);
+module_exit(ad7298_exit);
diff --git a/drivers/staging/iio/adc/ad7314.c b/drivers/staging/iio/adc/ad7314.c
new file mode 100644
index 000000000000..8c17b1fe9026
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7314.c
@@ -0,0 +1,308 @@
+/*
+ * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7314 power mode
+ */
+#define AD7314_PD 0x2000
+
+/*
+ * AD7314 temperature masks
+ */
+#define AD7314_TEMP_SIGN 0x200
+#define AD7314_TEMP_MASK 0x7FE0
+#define AD7314_TEMP_OFFSET 5
+#define AD7314_TEMP_FLOAT_OFFSET 2
+#define AD7314_TEMP_FLOAT_MASK 0x3
+
+/*
+ * ADT7301 and ADT7302 temperature masks
+ */
+#define ADT7301_TEMP_SIGN 0x2000
+#define ADT7301_TEMP_MASK	0x3FFF
+#define ADT7301_TEMP_FLOAT_OFFSET 5
+#define ADT7301_TEMP_FLOAT_MASK 0x1F
+
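+/*
+ * The conversion below assumes the AD7314 reports a 10-bit result in
+ * bits 14:5 with 0.25 degC resolution, while the ADT7301/ADT7302 report
+ * a 14-bit result with 1/32 degC resolution; ad7314_show_temperature()
+ * picks the decoding based on the probed device name.
+ */
+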
+/*
+ * struct ad7314_chip_info - chip specific information
+ */
+
+struct ad7314_chip_info {
+ const char *name;
+ struct spi_device *spi_dev;
+ struct iio_dev *indio_dev;
+ s64 last_timestamp;
+ u8 mode;
+};
+
+/*
+ * ad7314 register access by SPI
+ */
+
+static int ad7314_spi_read(struct ad7314_chip_info *chip, u16 *data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+ u16 value;
+
+ ret = spi_read(spi_dev, (u8 *)&value, sizeof(value));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read error\n");
+ return ret;
+ }
+
+ *data = be16_to_cpu((u16)value);
+
+ return ret;
+}
+
+static int ad7314_spi_write(struct ad7314_chip_info *chip, u16 data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+ u16 value = cpu_to_be16(data);
+
+ ret = spi_write(spi_dev, (u8 *)&value, sizeof(value));
+ if (ret < 0)
+ dev_err(&spi_dev->dev, "SPI write error\n");
+
+ return ret;
+}
+
+static ssize_t ad7314_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7314_chip_info *chip = dev_info->dev_data;
+
+ if (chip->mode)
+ return sprintf(buf, "power-save\n");
+ else
+ return sprintf(buf, "full\n");
+}
+
+static ssize_t ad7314_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7314_chip_info *chip = dev_info->dev_data;
+ u16 mode = 0;
+ int ret;
+
+	if (sysfs_streq(buf, "power-save"))
+ mode = AD7314_PD;
+
+ ret = ad7314_spi_write(chip, mode);
+ if (ret)
+ return -EIO;
+
+ chip->mode = mode;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ ad7314_show_mode,
+ ad7314_store_mode,
+ 0);
+
+static ssize_t ad7314_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "full\npower-save\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7314_show_available_modes, NULL, 0);
+
+static ssize_t ad7314_show_temperature(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7314_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ if (chip->mode) {
+ ret = ad7314_spi_write(chip, 0);
+ if (ret)
+ return -EIO;
+ }
+
+ ret = ad7314_spi_read(chip, &data);
+ if (ret)
+ return -EIO;
+
+ if (chip->mode)
+ ad7314_spi_write(chip, chip->mode);
+
+	if (strcmp(chip->name, "ad7314") == 0) {
+ data = (data & AD7314_TEMP_MASK) >>
+ AD7314_TEMP_OFFSET;
+ if (data & AD7314_TEMP_SIGN) {
+ data = (AD7314_TEMP_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ data >> AD7314_TEMP_FLOAT_OFFSET,
+ (data & AD7314_TEMP_FLOAT_MASK) * 25);
+ } else {
+ data &= ADT7301_TEMP_MASK;
+ if (data & ADT7301_TEMP_SIGN) {
+ data = (ADT7301_TEMP_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.5d\n", sign,
+ data >> ADT7301_TEMP_FLOAT_OFFSET,
+ (data & ADT7301_TEMP_FLOAT_MASK) * 3125);
+ }
+}
+
+static IIO_DEVICE_ATTR(temperature, S_IRUGO, ad7314_show_temperature, NULL, 0);
+
+static ssize_t ad7314_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7314_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL, 0);
+
+static struct attribute *ad7314_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_temperature.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7314_attribute_group = {
+ .attrs = ad7314_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7314_probe(struct spi_device *spi_dev)
+{
+ struct ad7314_chip_info *chip;
+ int ret = 0;
+
+ chip = kzalloc(sizeof(struct ad7314_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ dev_set_drvdata(&spi_dev->dev, chip);
+
+ chip->spi_dev = spi_dev;
+ chip->name = spi_dev->modalias;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = &spi_dev->dev;
+ chip->indio_dev->attrs = &ad7314_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
+ chip->name);
+
+ return 0;
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit ad7314_remove(struct spi_device *spi_dev)
+{
+ struct ad7314_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ dev_set_drvdata(&spi_dev->dev, NULL);
+ if (spi_dev->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7314_id[] = {
+ { "adt7301", 0 },
+ { "adt7302", 0 },
+ { "ad7314", 0 },
+ {}
+};
+
+static struct spi_driver ad7314_driver = {
+ .driver = {
+ .name = "ad7314",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7314_probe,
+ .remove = __devexit_p(ad7314_remove),
+ .id_table = ad7314_id,
+};
+
+static __init int ad7314_init(void)
+{
+ return spi_register_driver(&ad7314_driver);
+}
+
+static __exit void ad7314_exit(void)
+{
+ spi_unregister_driver(&ad7314_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7314_init);
+module_exit(ad7314_exit);
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index deb68c8a6e18..b8b54da67c63 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -68,7 +68,7 @@ static ssize_t ad7476_show_scale(struct device *dev,
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
- return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
}
static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
diff --git a/drivers/staging/iio/adc/ad7745.c b/drivers/staging/iio/adc/ad7745.c
new file mode 100644
index 000000000000..ab7ef8450ae2
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7745.c
@@ -0,0 +1,734 @@
+/*
+ * AD774X capacitive sensor driver supporting AD7745/6/7
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD774X registers definition
+ */
+
+#define AD774X_STATUS 0
+#define AD774X_STATUS_RDY (1 << 2)
+#define AD774X_STATUS_RDYVT (1 << 1)
+#define AD774X_STATUS_RDYCAP (1 << 0)
+#define AD774X_CAP_DATA_HIGH 1
+#define AD774X_CAP_DATA_MID 2
+#define AD774X_CAP_DATA_LOW 3
+#define AD774X_VT_DATA_HIGH 4
+#define AD774X_VT_DATA_MID 5
+#define AD774X_VT_DATA_LOW 6
+#define AD774X_CAP_SETUP 7
+#define AD774X_VT_SETUP 8
+#define AD774X_EXEC_SETUP 9
+#define AD774X_CFG 10
+#define AD774X_CAPDACA 11
+#define AD774X_CAPDACB 12
+#define AD774X_CAPDAC_EN (1 << 7)
+#define AD774X_CAP_OFFH 13
+#define AD774X_CAP_OFFL 14
+#define AD774X_CAP_GAINH 15
+#define AD774X_CAP_GAINL 16
+#define AD774X_VOLT_GAINH 17
+#define AD774X_VOLT_GAINL 18
+
+#define AD774X_MAX_CONV_MODE 6
+
+/*
+ * struct ad774x_chip_info - chip specific information
+ */
+
+struct ad774x_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ bool inter;
+ s64 last_timestamp;
+ u16 cap_offs; /* Capacitive offset */
+ u16 cap_gain; /* Capacitive gain calibration */
+ u16 volt_gain; /* Voltage gain calibration */
+ u8 cap_setup;
+ u8 vt_setup;
+ u8 exec_setup;
+
+ char *conversion_mode;
+};
+
+struct ad774x_conversion_mode {
+ char *name;
+ u8 reg_cfg;
+};
+
+struct ad774x_conversion_mode ad774x_conv_mode_table[AD774X_MAX_CONV_MODE] = {
+ { "idle", 0 },
+ { "continuous-conversion", 1 },
+ { "single-conversion", 2 },
+ { "power-down", 3 },
+ { "offset-calibration", 5 },
+ { "gain-calibration", 6 },
+};
+
+/*
+ * ad774x register access by I2C
+ */
+
+static int ad774x_i2c_read(struct ad774x_chip_info *chip, u8 reg, u8 *data, int len)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_master_send(client, &reg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C write error\n");
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, data, len);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ad774x_i2c_write(struct ad774x_chip_info *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ u8 tx[2] = {
+ reg,
+ data,
+ };
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show) \
+ IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(cap_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_VT_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(in0_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_EXEC_SETUP(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(exec_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_VOLT_GAIN(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(in0_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_OFFS(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(cap_offs, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_GAIN(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(cap_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_DATA(_show) \
+ IIO_DEVICE_ATTR(cap0_raw, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_VT_DATA(_show) \
+ IIO_DEVICE_ATTR(in0_raw, S_IRUGO, _show, NULL, 0)
+
+static ssize_t ad774x_show_conversion_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; i < AD774X_MAX_CONV_MODE; i++)
+ len += sprintf(buf + len, "%s ", ad774x_conv_mode_table[i].name);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad774x_show_conversion_modes);
+
+static ssize_t ad774x_show_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad774x_store_conversion_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ u8 cfg;
+ int i;
+
+ ad774x_i2c_read(chip, AD774X_CFG, &cfg, 1);
+
+ for (i = 0; i < AD774X_MAX_CONV_MODE; i++) {
+ if (strncmp(buf, ad774x_conv_mode_table[i].name,
+ strlen(ad774x_conv_mode_table[i].name) - 1) == 0) {
+ chip->conversion_mode = ad774x_conv_mode_table[i].name;
+ cfg |= 0x18 | ad774x_conv_mode_table[i].reg_cfg;
+ ad774x_i2c_write(chip, AD774X_CFG, cfg);
+ return len;
+ }
+ }
+
+	dev_err(dev, "unsupported conversion mode\n");
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+ ad774x_show_conversion_mode,
+ ad774x_store_conversion_mode);
+
+static ssize_t ad774x_show_dac_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 data;
+
+ ad774x_i2c_read(chip, this_attr->address, &data, 1);
+
+ return sprintf(buf, "%02x\n", data & 0x7F);
+}
+
+static ssize_t ad774x_store_dac_value(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if (!ret) {
+ ad774x_i2c_write(chip, this_attr->address,
+ (data ? AD774X_CAPDAC_EN : 0) | (data & 0x7F));
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(capdac0_raw, S_IRUGO | S_IWUSR,
+ ad774x_show_dac_value,
+ ad774x_store_dac_value,
+ AD774X_CAPDACA);
+
+static IIO_DEVICE_ATTR(capdac1_raw, S_IRUGO | S_IWUSR,
+ ad774x_show_dac_value,
+ ad774x_store_dac_value,
+ AD774X_CAPDACB);
+
+static ssize_t ad774x_show_cap_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->cap_setup);
+}
+
+static ssize_t ad774x_store_cap_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad774x_i2c_write(chip, AD774X_CAP_SETUP, data);
+ chip->cap_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_SETUP(S_IRUGO | S_IWUSR,
+ ad774x_show_cap_setup,
+ ad774x_store_cap_setup);
+
+static ssize_t ad774x_show_vt_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->vt_setup);
+}
+
+static ssize_t ad774x_store_vt_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad774x_i2c_write(chip, AD774X_VT_SETUP, data);
+ chip->vt_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VT_SETUP(S_IRUGO | S_IWUSR,
+ ad774x_show_vt_setup,
+ ad774x_store_vt_setup);
+
+static ssize_t ad774x_show_exec_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%02x\n", chip->exec_setup);
+}
+
+static ssize_t ad774x_store_exec_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x100)) {
+ ad774x_i2c_write(chip, AD774X_EXEC_SETUP, data);
+ chip->exec_setup = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_EXEC_SETUP(S_IRUGO | S_IWUSR,
+ ad774x_show_exec_setup,
+ ad774x_store_exec_setup);
+
+static ssize_t ad774x_show_volt_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->volt_gain);
+}
+
+static ssize_t ad774x_store_volt_gain(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad774x_i2c_write(chip, AD774X_VOLT_GAINH, data >> 8);
+ ad774x_i2c_write(chip, AD774X_VOLT_GAINL, data);
+ chip->volt_gain = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VOLT_GAIN(S_IRUGO | S_IWUSR,
+ ad774x_show_volt_gain,
+ ad774x_store_volt_gain);
+
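+/*
+ * Conversion results are 24-bit values spread over three consecutive
+ * registers (HIGH/MID/LOW); the two readers below fetch all three bytes
+ * in one burst starting at the HIGH register and assemble them MSB
+ * first.
+ */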
+static ssize_t ad774x_show_cap_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+	u8 tmp[3];
+
+ ad774x_i2c_read(chip, AD774X_CAP_DATA_HIGH, tmp, 3);
+ data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
+
+ return sprintf(buf, "%ld\n", data);
+}
+
+static IIO_DEV_ATTR_CAP_DATA(ad774x_show_cap_data);
+
+static ssize_t ad774x_show_vt_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+	u8 tmp[3];
+
+ ad774x_i2c_read(chip, AD774X_VT_DATA_HIGH, tmp, 3);
+ data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
+
+ return sprintf(buf, "%ld\n", data);
+}
+
+static IIO_DEV_ATTR_VT_DATA(ad774x_show_vt_data);
+
+static ssize_t ad774x_show_cap_offs(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->cap_offs);
+}
+
+static ssize_t ad774x_store_cap_offs(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad774x_i2c_write(chip, AD774X_CAP_OFFH, data >> 8);
+ ad774x_i2c_write(chip, AD774X_CAP_OFFL, data);
+ chip->cap_offs = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_OFFS(S_IRUGO | S_IWUSR,
+ ad774x_show_cap_offs,
+ ad774x_store_cap_offs);
+
+static ssize_t ad774x_show_cap_gain(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->cap_gain);
+}
+
+static ssize_t ad774x_store_cap_gain(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+
+ if ((!ret) && (data < 0x10000)) {
+ ad774x_i2c_write(chip, AD774X_CAP_GAINH, data >> 8);
+ ad774x_i2c_write(chip, AD774X_CAP_GAINL, data);
+ chip->cap_gain = data;
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_GAIN(S_IRUGO | S_IWUSR,
+ ad774x_show_cap_gain,
+ ad774x_store_cap_gain);
+
+static ssize_t ad774x_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad774x_show_name, NULL, 0);
+
+static struct attribute *ad774x_attributes[] = {
+ &iio_dev_attr_available_conversion_modes.dev_attr.attr,
+ &iio_dev_attr_conversion_mode.dev_attr.attr,
+ &iio_dev_attr_cap_setup.dev_attr.attr,
+ &iio_dev_attr_in0_setup.dev_attr.attr,
+ &iio_dev_attr_exec_setup.dev_attr.attr,
+ &iio_dev_attr_cap_offs.dev_attr.attr,
+ &iio_dev_attr_cap_gain.dev_attr.attr,
+ &iio_dev_attr_in0_gain.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_cap0_raw.dev_attr.attr,
+ &iio_dev_attr_capdac0_raw.dev_attr.attr,
+ &iio_dev_attr_capdac1_raw.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad774x_attribute_group = {
+ .attrs = ad774x_attributes,
+};
+
+/*
+ * data ready events
+ */
+
+#define IIO_EVENT_CODE_CAP_RDY IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_VT_RDY IIO_BUFFER_EVENT_CODE(1)
+
+#define IIO_EVENT_ATTR_CAP_RDY_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(cap_rdy, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_VT_RDY_SH(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(vt_rdy, _evlist, _show, _store, _mask)
+
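+/*
+ * The data-ready interrupt is handled in two stages: the top half
+ * (ad774x_interrupt_handler_th) records the timestamp and schedules this
+ * work function, which re-enables the IRQ, reads the status register and
+ * pushes a capacitance and/or voltage/temperature ready event to the
+ * IIO core.
+ */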
+static void ad774x_interrupt_handler_bh(struct work_struct *work_s)
+{
+ struct ad774x_chip_info *chip =
+ container_of(work_s, struct ad774x_chip_info, thresh_work);
+ u8 int_status;
+
+ enable_irq(chip->client->irq);
+
+ ad774x_i2c_read(chip, AD774X_STATUS, &int_status, 1);
+
+ if (int_status & AD774X_STATUS_RDYCAP)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_CAP_RDY,
+ chip->last_timestamp);
+
+ if (int_status & AD774X_STATUS_RDYVT)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_VT_RDY,
+ chip->last_timestamp);
+}
+
+static int ad774x_interrupt_handler_th(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct ad774x_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(data_rdy, &ad774x_interrupt_handler_th);
+
+static ssize_t ad774x_query_out_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * The AD774X provides a single /RDY pin, which can be used as an
+ * interrupt, but the pin itself is not configurable.
+ */
+ return sprintf(buf, "1\n");
+}
+
+static ssize_t ad774x_set_out_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return len;
+}
+
+IIO_EVENT_ATTR_CAP_RDY_SH(iio_event_data_rdy, ad774x_query_out_mode, ad774x_set_out_mode, 0);
+IIO_EVENT_ATTR_VT_RDY_SH(iio_event_data_rdy, ad774x_query_out_mode, ad774x_set_out_mode, 0);
+
+static struct attribute *ad774x_event_attributes[] = {
+ &iio_event_attr_cap_rdy.dev_attr.attr,
+ &iio_event_attr_vt_rdy.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad774x_event_attribute_group = {
+ .attrs = ad774x_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad774x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0, regdone = 0;
+ struct ad774x_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &ad774x_attribute_group;
+ chip->indio_dev->event_attrs = &ad774x_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)(chip);
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+ regdone = 1;
+
+ if (client->irq) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ad774x");
+ if (ret)
+ goto error_free_dev;
+
+ iio_add_event_to_list(iio_event_attr_cap_rdy.listel,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, ad774x_interrupt_handler_bh);
+ }
+
+ dev_info(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
+
+ return 0;
+
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(chip->indio_dev);
+ else
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad774x_remove(struct i2c_client *client)
+{
+ struct ad774x_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ if (client->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad774x_id[] = {
+ { "ad7745", 0 },
+ { "ad7746", 0 },
+ { "ad7747", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad774x_id);
+
+static struct i2c_driver ad774x_driver = {
+ .driver = {
+ .name = "ad774x",
+ },
+ .probe = ad774x_probe,
+ .remove = __devexit_p(ad774x_remove),
+ .id_table = ad774x_id,
+};
+
+static __init int ad774x_init(void)
+{
+ return i2c_add_driver(&ad774x_driver);
+}
+
+static __exit void ad774x_exit(void)
+{
+ i2c_del_driver(&ad774x_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7745/6/7 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad774x_init);
+module_exit(ad774x_exit);
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
new file mode 100644
index 000000000000..ad7415a6b8d9
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -0,0 +1,535 @@
+/*
+ * AD7816 digital temperature sensor driver supporting AD7816/7/8
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7816 config masks
+ */
+#define AD7816_FULL 0x1
+#define AD7816_PD 0x2
+#define AD7816_CS_MASK 0x7
+#define AD7816_CS_MAX 0x4
+
+/*
+ * AD7816 temperature masks
+ */
+#define AD7816_VALUE_OFFSET 6
+#define AD7816_BOUND_VALUE_BASE 0x8
+#define AD7816_BOUND_VALUE_MIN -95
+#define AD7816_BOUND_VALUE_MAX 152
+#define AD7816_TEMP_FLOAT_OFFSET 2
+#define AD7816_TEMP_FLOAT_MASK 0x3
+
+
+/*
+ * struct ad7816_chip_info - chip specific information
+ */
+
+struct ad7816_chip_info {
+ const char *name;
+ struct spi_device *spi_dev;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ u16 rdwr_pin;
+ u16 convert_pin;
+ u16 busy_pin;
+ u8 oti_data[AD7816_CS_MAX+1];
+ u8 channel_id; /* channel 0 is always temperature */
+ u8 mode;
+};
+
+/*
+ * ad7816 data access by SPI
+ */
+static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+
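+ /*
+ * Latch the channel selection: pulse RD/WR, clock the channel id out
+ * over SPI, then raise RD/WR again before starting a conversion.
+ */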
+ gpio_set_value(chip->rdwr_pin, 1);
+ gpio_set_value(chip->rdwr_pin, 0);
+ ret = spi_write(spi_dev, &chip->channel_id, sizeof(chip->channel_id));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI channel setting error\n");
+ return ret;
+ }
+ gpio_set_value(chip->rdwr_pin, 1);
+
+
+ if (chip->mode == AD7816_PD) { /* operating mode 2 */
+ gpio_set_value(chip->convert_pin, 1);
+ gpio_set_value(chip->convert_pin, 0);
+ } else { /* operating mode 1 */
+ gpio_set_value(chip->convert_pin, 0);
+ gpio_set_value(chip->convert_pin, 1);
+ }
+
+ while (gpio_get_value(chip->busy_pin))
+ cpu_relax();
+
+ gpio_set_value(chip->rdwr_pin, 0);
+ gpio_set_value(chip->rdwr_pin, 1);
+ ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI data read error\n");
+ return ret;
+ }
+
+ *data = be16_to_cpu(*data);
+
+ return ret;
+}
+
+static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ int ret = 0;
+
+ gpio_set_value(chip->rdwr_pin, 1);
+ gpio_set_value(chip->rdwr_pin, 0);
+ ret = spi_write(spi_dev, &data, sizeof(data));
+ if (ret < 0)
+ dev_err(&spi_dev->dev, "SPI oti data write error\n");
+
+ return ret;
+}
+
+static ssize_t ad7816_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+
+ if (chip->mode == AD7816_PD)
+ return sprintf(buf, "power-save\n");
+ else
+ return sprintf(buf, "full\n");
+}
+
+static ssize_t ad7816_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+
+ if (strcmp(buf, "full")) {
+ gpio_set_value(chip->rdwr_pin, 1);
+ chip->mode = AD7816_FULL;
+ } else {
+ gpio_set_value(chip->rdwr_pin, 0);
+ chip->mode = AD7816_PD;
+ }
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ ad7816_show_mode,
+ ad7816_store_mode,
+ 0);
+
+static ssize_t ad7816_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "full\npower-save\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes, NULL, 0);
+
+static ssize_t ad7816_show_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", chip->channel_id);
+}
+
+static ssize_t ad7816_store_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ if (data > AD7816_CS_MAX && data != AD7816_CS_MASK) {
+ dev_err(&chip->spi_dev->dev, "Invalid channel id %lu for %s.\n",
+ data, chip->name);
+ return -EINVAL;
+ } else if (strcmp(chip->name, "ad7818") == 0 && data > 1) {
+ dev_err(&chip->spi_dev->dev,
+ "Invalid channel id %lu for ad7818.\n", data);
+ return -EINVAL;
+ } else if (strcmp(chip->name, "ad7816") == 0 && data > 0) {
+ dev_err(&chip->spi_dev->dev,
+ "Invalid channel id %lu for ad7816.\n", data);
+ return -EINVAL;
+ }
+
+ chip->channel_id = data;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(channel, S_IRUGO | S_IWUSR,
+ ad7816_show_channel,
+ ad7816_store_channel,
+ 0);
+
+
+static ssize_t ad7816_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ s8 value;
+ int ret;
+
+ ret = ad7816_spi_read(chip, &data);
+ if (ret)
+ return -EIO;
+
+ data >>= AD7816_VALUE_OFFSET;
+
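+ /*
+ * Channel 0 is the on-chip temperature sensor: the integer part is
+ * (code / 4) - 103 degC and the low two bits give the fraction in
+ * 0.25 degC steps (e.g. code 512 -> 25.00 degC).
+ */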
+ if (chip->channel_id == 0) {
+ value = (s8)((data >> AD7816_TEMP_FLOAT_OFFSET) - 103);
+ data &= AD7816_TEMP_FLOAT_MASK;
+ if (value < 0)
+ data = (1 << AD7816_TEMP_FLOAT_OFFSET) - data;
+ return sprintf(buf, "%d.%.2d\n", value, data * 25);
+ } else
+ return sprintf(buf, "%u\n", data);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, ad7816_show_value, NULL, 0);
+
+static ssize_t ad7816_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7816_show_name, NULL, 0);
+
+static struct attribute *ad7816_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_channel.dev_attr.attr,
+ &iio_dev_attr_value.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7816_attribute_group = {
+ .attrs = ad7816_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_AD7816_OTI IIO_BUFFER_EVENT_CODE(0)
+
+static void ad7816_interrupt_bh(struct work_struct *work_s)
+{
+ struct ad7816_chip_info *chip =
+ container_of(work_s, struct ad7816_chip_info, thresh_work);
+
+ enable_irq(chip->spi_dev->irq);
+
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_AD7816_OTI,
+ chip->last_timestamp);
+}
+
+static int ad7816_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(ad7816, &ad7816_interrupt);
+
+static ssize_t ad7816_show_oti(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+ int value;
+
+ if (chip->channel_id > AD7816_CS_MAX) {
+ dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
+ return -EINVAL;
+ } else if (chip->channel_id == 0) {
+ value = AD7816_BOUND_VALUE_MIN +
+ (chip->oti_data[chip->channel_id] -
+ AD7816_BOUND_VALUE_BASE);
+ return sprintf(buf, "%d\n", value);
+ } else
+ return sprintf(buf, "%u\n", chip->oti_data[chip->channel_id]);
+}
+
+static inline ssize_t ad7816_set_oti(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7816_chip_info *chip = dev_info->dev_data;
+ long value;
+ u8 data;
+ int ret;
+
+ ret = strict_strtol(buf, 10, &value);
+
+ if (chip->channel_id > AD7816_CS_MAX) {
+ dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
+ return -EINVAL;
+ } else if (chip->channel_id == 0) {
+ if (ret || value < AD7816_BOUND_VALUE_MIN ||
+ value > AD7816_BOUND_VALUE_MAX)
+ return -EINVAL;
+
+ data = (u8)(value - AD7816_BOUND_VALUE_MIN +
+ AD7816_BOUND_VALUE_BASE);
+ } else {
+ if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
+ return -EINVAL;
+
+ data = (u8)value;
+ }
+
+ ret = ad7816_spi_write(chip, data);
+ if (ret)
+ return -EIO;
+
+ chip->oti_data[chip->channel_id] = data;
+
+ return len;
+}
+
+IIO_EVENT_ATTR_SH(oti, iio_event_ad7816,
+ ad7816_show_oti, ad7816_set_oti, 0);
+
+static struct attribute *ad7816_event_attributes[] = {
+ &iio_event_attr_oti.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad7816_event_attribute_group = {
+ .attrs = ad7816_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7816_probe(struct spi_device *spi_dev)
+{
+ struct ad7816_chip_info *chip;
+ unsigned short *pins = spi_dev->dev.platform_data;
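+ /*
+ * The platform data is expected to be an array of GPIO numbers:
+ * pins[0] = RD/WR, pins[1] = CONVST, pins[2] = BUSY (used below).
+ */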
+ int ret = 0;
+ int i;
+
+ if (!pins) {
+ dev_err(&spi_dev->dev, "No necessary GPIO platform data.\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(struct ad7816_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ dev_set_drvdata(&spi_dev->dev, chip);
+
+ chip->spi_dev = spi_dev;
+ chip->name = spi_dev->modalias;
+ for (i = 0; i <= AD7816_CS_MAX; i++)
+ chip->oti_data[i] = 203;
+ chip->rdwr_pin = pins[0];
+ chip->convert_pin = pins[1];
+ chip->busy_pin = pins[2];
+
+ ret = gpio_request(chip->rdwr_pin, chip->name);
+ if (ret) {
+ dev_err(&spi_dev->dev, "Fail to request rdwr gpio PIN %d.\n",
+ chip->rdwr_pin);
+ goto error_free_chip;
+ }
+ gpio_direction_input(chip->rdwr_pin);
+ ret = gpio_request(chip->convert_pin, chip->name);
+ if (ret) {
+ dev_err(&spi_dev->dev, "Fail to request convert gpio PIN %d.\n",
+ chip->convert_pin);
+ goto error_free_gpio_rdwr;
+ }
+ gpio_direction_input(chip->convert_pin);
+ ret = gpio_request(chip->busy_pin, chip->name);
+ if (ret) {
+ dev_err(&spi_dev->dev, "Fail to request busy gpio PIN %d.\n",
+ chip->busy_pin);
+ goto error_free_gpio_convert;
+ }
+ gpio_direction_input(chip->busy_pin);
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_gpio;
+ }
+
+ chip->indio_dev->dev.parent = &spi_dev->dev;
+ chip->indio_dev->attrs = &ad7816_attribute_group;
+ chip->indio_dev->event_attrs = &ad7816_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (spi_dev->irq) {
+ /* Only a low-level trigger is supported on the ad7816/7/8 */
+ ret = iio_register_interrupt_line(spi_dev->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_LOW,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+ /*
+ * The event handler list element refers to iio_event_ad7816.
+ * All event attributes bind to the same event handler,
+ * so the handler is only registered once.
+ */
+ iio_add_event_to_list(&iio_event_ad7816,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, ad7816_interrupt_bh);
+ }
+
+ dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
+ chip->name);
+
+ return 0;
+
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_gpio:
+ gpio_free(chip->busy_pin);
+error_free_gpio_convert:
+ gpio_free(chip->convert_pin);
+error_free_gpio_rdwr:
+ gpio_free(chip->rdwr_pin);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit ad7816_remove(struct spi_device *spi_dev)
+{
+ struct ad7816_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ dev_set_drvdata(&spi_dev->dev, NULL);
+ if (spi_dev->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ gpio_free(chip->busy_pin);
+ gpio_free(chip->convert_pin);
+ gpio_free(chip->rdwr_pin);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7816_id[] = {
+ { "ad7816", 0 },
+ { "ad7817", 0 },
+ { "ad7818", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, ad7816_id);
+
+static struct spi_driver ad7816_driver = {
+ .driver = {
+ .name = "ad7816",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7816_probe,
+ .remove = __devexit_p(ad7816_remove),
+ .id_table = ad7816_id,
+};
+
+static __init int ad7816_init(void)
+{
+ return spi_register_driver(&ad7816_driver);
+}
+
+static __exit void ad7816_exit(void)
+{
+ spi_unregister_driver(&ad7816_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7816_init);
+module_exit(ad7816_exit);
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
new file mode 100644
index 000000000000..8c2a218c9496
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -0,0 +1,105 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_ADC_AD7887_H_
+#define IIO_ADC_AD7887_H_
+
+#define AD7887_REF_DIS (1 << 5) /* on-chip reference disable */
+#define AD7887_DUAL (1 << 4) /* dual-channel mode */
+#define AD7887_CH_AIN1 (1 << 3) /* convert on channel 1, DUAL=1 */
+#define AD7887_CH_AIN0 (0 << 3) /* convert on channel 0, DUAL=0,1 */
+#define AD7887_PM_MODE1 (0) /* CS based shutdown */
+#define AD7887_PM_MODE2 (1) /* full on */
+#define AD7887_PM_MODE3 (2) /* auto shutdown after conversion */
+#define AD7887_PM_MODE4 (3) /* standby mode */
+
+enum ad7887_channels {
+ AD7887_CH0,
+ AD7887_CH0_CH1,
+ AD7887_CH1,
+};
+
+#define RES_MASK(bits) ((1 << (bits)) - 1) /* TODO: move this into a common header */
+
+/*
+ * TODO: struct ad7887_platform_data needs to go into include/linux/iio
+ */
+
+struct ad7887_platform_data {
+ /* External Vref voltage applied */
+ u16 vref_mv;
+ /*
+ * AD7887:
+ * In single channel mode (en_dual = false) the AIN1/Vref pin assumes its
+ * Vref function. In dual channel mode (en_dual = true) AIN1 becomes the
+ * second input channel, and Vref is internally connected to Vdd.
+ */
+ bool en_dual;
+ /*
+ * AD7887:
+ * If use_onchip_ref = true, Vref is internally connected to the 2.500V
+ * on-chip voltage reference. If use_onchip_ref = false, the reference
+ * voltage is supplied via AIN1/Vref.
+ */
+ bool use_onchip_ref;
+};
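+
+/*
+ * Board code passes the structure above via spi_board_info.platform_data.
+ * A minimal sketch (the exact values are board-specific assumptions):
+ *
+ *	static struct ad7887_platform_data ad7887_pdata = {
+ *		.vref_mv        = 2500,
+ *		.en_dual        = false,
+ *		.use_onchip_ref = true,
+ *	};
+ */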
+
+struct ad7887_chip_info {
+ u8 bits; /* number of ADC bits */
+ u8 storagebits; /* number of bits read from the ADC */
+ u8 left_shift; /* number of bits the sample must be shifted */
+ char sign; /* [s]igned or [u]nsigned */
+ u16 int_vref_mv; /* internal reference voltage */
+};
+
+struct ad7887_state {
+ struct iio_dev *indio_dev;
+ struct spi_device *spi;
+ const struct ad7887_chip_info *chip_info;
+ struct regulator *reg;
+ struct work_struct poll_work;
+ atomic_t protect_ring;
+ u16 int_vref_mv;
+ bool en_dual;
+ struct spi_transfer xfer[4];
+ struct spi_message msg[3];
+ struct spi_message *ring_msg;
+ unsigned char tx_cmd_buf[8];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+
+ unsigned char data[4] ____cacheline_aligned;
+};
+
+enum ad7887_supported_device_ids {
+ ID_AD7887
+};
+
+#ifdef CONFIG_IIO_RING_BUFFER
+int ad7887_scan_from_ring(struct ad7887_state *st, long mask);
+int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
+void ad7887_ring_cleanup(struct iio_dev *indio_dev);
+#else /* CONFIG_IIO_RING_BUFFER */
+static inline int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+{
+ return 0;
+}
+
+static inline int
+ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void ad7887_ring_cleanup(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* IIO_ADC_AD7887_H_ */
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
new file mode 100644
index 000000000000..5d85efab658c
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -0,0 +1,305 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "adc.h"
+
+#include "ad7887.h"
+
+static int ad7887_scan_direct(struct ad7887_state *st, unsigned ch)
+{
+ int ret = spi_sync(st->spi, &st->msg[ch]);
+ if (ret)
+ return ret;
+
+ return (st->data[(ch * 2)] << 8) | st->data[(ch * 2) + 1];
+}
+
+static ssize_t ad7887_scan(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7887_state *st = dev_info->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+
+ mutex_lock(&dev_info->mlock);
+ if (iio_ring_enabled(dev_info))
+ ret = ad7887_scan_from_ring(st, 1 << this_attr->address);
+ else
+ ret = ad7887_scan_direct(st, this_attr->address);
+ mutex_unlock(&dev_info->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", (ret >> st->chip_info->left_shift) &
+ RES_MASK(st->chip_info->bits));
+}
+static IIO_DEV_ATTR_IN_RAW(0, ad7887_scan, 0);
+static IIO_DEV_ATTR_IN_RAW(1, ad7887_scan, 1);
+
+static ssize_t ad7887_show_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /* The driver currently only supports the internal vref */
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+ /* Corresponds to Vref / 2^(bits) */
+ unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
+
+ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
+}
+static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
+
+static ssize_t ad7887_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+
+ return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7887_show_name, NULL, 0);
+
+static struct attribute *ad7887_attributes[] = {
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_dev_attr_in_scale.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static mode_t ad7887_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+
+ mode_t mode = attr->mode;
+
+ if ((attr == &iio_dev_attr_in1_raw.dev_attr.attr) && !st->en_dual)
+ mode = 0;
+
+ return mode;
+}
+
+static const struct attribute_group ad7887_attribute_group = {
+ .attrs = ad7887_attributes,
+ .is_visible = ad7887_attr_is_visible,
+};
+
+static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+ /*
+ * More devices may be added in the future.
+ */
+ [ID_AD7887] = {
+ .bits = 12,
+ .storagebits = 16,
+ .left_shift = 0,
+ .sign = IIO_SCAN_EL_TYPE_UNSIGNED,
+ .int_vref_mv = 2500,
+ },
+};
+
+static int __devinit ad7887_probe(struct spi_device *spi)
+{
+ struct ad7887_platform_data *pdata = spi->dev.platform_data;
+ struct ad7887_state *st;
+ int ret, voltage_uv = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ st->chip_info =
+ &ad7887_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ spi_set_drvdata(spi, st);
+
+ atomic_set(&st->protect_ring, 0);
+ st->spi = spi;
+
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+
+ /* Establish that the iio_dev is a child of the spi device */
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->attrs = &ad7887_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Setup default message */
+
+ st->tx_cmd_buf[0] = AD7887_CH_AIN0 | AD7887_PM_MODE4 |
+ ((pdata && pdata->use_onchip_ref) ?
+ 0 : AD7887_REF_DIS);
+
+ st->xfer[0].rx_buf = &st->data[0];
+ st->xfer[0].tx_buf = &st->tx_cmd_buf[0];
+ st->xfer[0].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH0]);
+ spi_message_add_tail(&st->xfer[0], &st->msg[AD7887_CH0]);
+
+ if (pdata && pdata->en_dual) {
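+ /*
+ * In dual mode three messages are prepared: msg[AD7887_CH0] (set up
+ * above) for CH0 only, msg[AD7887_CH0_CH1] with two chained transfers
+ * to read both channels, and msg[AD7887_CH1] for CH1 only.
+ */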
+ st->tx_cmd_buf[0] |= AD7887_DUAL | AD7887_REF_DIS;
+
+ st->tx_cmd_buf[2] = AD7887_CH_AIN1 | AD7887_DUAL |
+ AD7887_REF_DIS | AD7887_PM_MODE4;
+ st->tx_cmd_buf[4] = AD7887_CH_AIN0 | AD7887_DUAL |
+ AD7887_REF_DIS | AD7887_PM_MODE4;
+ st->tx_cmd_buf[6] = AD7887_CH_AIN1 | AD7887_DUAL |
+ AD7887_REF_DIS | AD7887_PM_MODE4;
+
+ st->xfer[1].rx_buf = &st->data[0];
+ st->xfer[1].tx_buf = &st->tx_cmd_buf[2];
+ st->xfer[1].len = 2;
+
+ st->xfer[2].rx_buf = &st->data[2];
+ st->xfer[2].tx_buf = &st->tx_cmd_buf[4];
+ st->xfer[2].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH0_CH1]);
+ spi_message_add_tail(&st->xfer[1], &st->msg[AD7887_CH0_CH1]);
+ spi_message_add_tail(&st->xfer[2], &st->msg[AD7887_CH0_CH1]);
+
+ st->xfer[3].rx_buf = &st->data[0];
+ st->xfer[3].tx_buf = &st->tx_cmd_buf[6];
+ st->xfer[3].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH1]);
+ spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
+
+ st->en_dual = true;
+
+ if (pdata && pdata->vref_mv)
+ st->int_vref_mv = pdata->vref_mv;
+ else if (voltage_uv)
+ st->int_vref_mv = voltage_uv / 1000;
+ else
+ dev_warn(&spi->dev, "reference voltage unspecified\n");
+
+ } else {
+ if (pdata && pdata->vref_mv)
+ st->int_vref_mv = pdata->vref_mv;
+ else if (pdata && pdata->use_onchip_ref)
+ st->int_vref_mv = st->chip_info->int_vref_mv;
+ else
+ dev_warn(&spi->dev, "reference voltage unspecified\n");
+ }
+
+
+ ret = ad7887_register_ring_funcs_and_init(st->indio_dev);
+ if (ret)
+ goto error_free_device;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_device;
+
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
+ if (ret)
+ goto error_cleanup_ring;
+ return 0;
+
+error_cleanup_ring:
+ ad7887_ring_cleanup(st->indio_dev);
+ iio_device_unregister(st->indio_dev);
+error_free_device:
+ iio_free_device(st->indio_dev);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7887_remove(struct spi_device *spi)
+{
+ struct ad7887_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+ iio_ring_buffer_unregister(indio_dev->ring);
+ ad7887_ring_cleanup(indio_dev);
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ kfree(st);
+ return 0;
+}
+
+static const struct spi_device_id ad7887_id[] = {
+ {"ad7887", ID_AD7887},
+ {}
+};
+
+static struct spi_driver ad7887_driver = {
+ .driver = {
+ .name = "ad7887",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7887_probe,
+ .remove = __devexit_p(ad7887_remove),
+ .id_table = ad7887_id,
+};
+
+static int __init ad7887_init(void)
+{
+ return spi_register_driver(&ad7887_driver);
+}
+module_init(ad7887_init);
+
+static void __exit ad7887_exit(void)
+{
+ spi_unregister_driver(&ad7887_driver);
+}
+module_exit(ad7887_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7887");
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
new file mode 100644
index 000000000000..6b9cb1f95a1e
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2010 Analog Devices Inc.
+ * Copyright (C) 2008 Jonathan Cameron
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * ad7887_ring.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../ring_generic.h"
+#include "../ring_sw.h"
+#include "../trigger.h"
+#include "../sysfs.h"
+
+#include "ad7887.h"
+
+static IIO_SCAN_EL_C(in0, 0, 0, NULL);
+static IIO_SCAN_EL_C(in1, 1, 0, NULL);
+
+static ssize_t ad7887_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = ring->indio_dev;
+ struct ad7887_state *st = indio_dev->dev_data;
+
+ return sprintf(buf, "%c%d/%d>>%d\n", st->chip_info->sign,
+ st->chip_info->bits, st->chip_info->storagebits,
+ st->chip_info->left_shift);
+}
+static IIO_DEVICE_ATTR(in_type, S_IRUGO, ad7887_show_type, NULL, 0);
+
+static struct attribute *ad7887_scan_el_attrs[] = {
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_const_attr_in0_index.dev_attr.attr,
+ &iio_scan_el_in1.dev_attr.attr,
+ &iio_const_attr_in1_index.dev_attr.attr,
+ &iio_dev_attr_in_type.dev_attr.attr,
+ NULL,
+};
+
+static mode_t ad7887_scan_el_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = ring->indio_dev;
+ struct ad7887_state *st = indio_dev->dev_data;
+
+ mode_t mode = attr->mode;
+
+ if ((attr == &iio_scan_el_in1.dev_attr.attr) ||
+ (attr == &iio_const_attr_in1_index.dev_attr.attr))
+ if (!st->en_dual)
+ mode = 0;
+
+ return mode;
+}
+
+static struct attribute_group ad7887_scan_el_group = {
+ .name = "scan_elements",
+ .attrs = ad7887_scan_el_attrs,
+ .is_visible = ad7887_scan_el_attr_is_visible,
+};
+
+int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+{
+ struct iio_ring_buffer *ring = st->indio_dev->ring;
+ int count = 0, ret;
+ u16 *ring_data;
+
+ if (!(ring->scan_mask & mask)) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+
+ ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL);
+ if (ring_data == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ret = ring->access.read_last(ring, (u8 *) ring_data);
+ if (ret)
+ goto error_free_ring_data;
+
+ /*
+ * When both channels are scanned, the CH1 result is stored at offset 1;
+ * otherwise the requested result is at offset 0.
+ */
+ if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1)))
+ count = 1;
+
+ ret = be16_to_cpu(ring_data[count]);
+
+error_free_ring_data:
+ kfree(ring_data);
+error_ret:
+ return ret;
+}
+
+/**
+ * ad7887_ring_preenable() setup the parameters of the ring before enabling
+ *
+ * The complex nature of the setting of the number of bytes per datum is due
+ * to this driver currently ensuring that the timestamp is stored at an 8
+ * byte boundary.
+ **/
+static int ad7887_ring_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7887_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring = indio_dev->ring;
+ size_t d_size;
+
+ if (indio_dev->ring->access.set_bytes_per_datum) {
+ d_size = st->chip_info->storagebits / 8 + sizeof(s64);
+ if (d_size % 8)
+ d_size += 8 - (d_size % 8);
+ indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring,
+ d_size);
+ }
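+ /*
+ * For the AD7887 (16 storage bits) this gives 2 + 8 = 10 bytes,
+ * padded up to 16 so the s64 timestamp stays 8-byte aligned.
+ */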
+
+ switch (ring->scan_mask) {
+ case (1 << 0):
+ st->ring_msg = &st->msg[AD7887_CH0];
+ break;
+ case (1 << 1):
+ st->ring_msg = &st->msg[AD7887_CH1];
+ /* Dummy read: push CH1 setting down to hardware */
+ spi_sync(st->spi, st->ring_msg);
+ break;
+ case ((1 << 1) | (1 << 0)):
+ st->ring_msg = &st->msg[AD7887_CH0_CH1];
+ break;
+ }
+
+ return 0;
+}
+
+static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7887_state *st = indio_dev->dev_data;
+
+ /* dummy read: restore default CH0 setting */
+ return spi_sync(st->spi, &st->msg[AD7887_CH0]);
+}
+
+/**
+ * ad7887_poll_func_th() th of trigger launched polling to ring buffer
+ *
+ * As sampling only occurs when the SPI transfer takes place, leave
+ * timestamping until then. Some triggers will generate their own
+ * timestamp; currently there is no way of notifying them when no one cares.
+ **/
+static void ad7887_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+ struct ad7887_state *st = indio_dev->dev_data;
+
+ schedule_work(&st->poll_work);
+ return;
+}
+/**
+ * ad7887_poll_bh_to_ring() bh of trigger launched polling to ring buffer
+ * @work_s: the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * Only one copy of this function is allowed to run at a time; this guards
+ * against the reads locking up the machine if the trigger rate is set far
+ * too high.
+ **/
+static void ad7887_poll_bh_to_ring(struct work_struct *work_s)
+{
+ struct ad7887_state *st = container_of(work_s, struct ad7887_state,
+ poll_work);
+ struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
+ struct iio_ring_buffer *ring = indio_dev->ring;
+ s64 time_ns;
+ __u8 *buf;
+ int b_sent;
+ size_t d_size;
+
+ unsigned int bytes = ring->scan_count * st->chip_info->storagebits / 8;
+
+ /* Ensure the timestamp is 8 byte aligned */
+ d_size = bytes + sizeof(s64);
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+
+ /* Ensure only one copy of this function running at a time */
+ if (atomic_inc_return(&st->protect_ring) > 1)
+ return;
+
+ buf = kzalloc(d_size, GFP_KERNEL);
+ if (buf == NULL)
+ return;
+
+ b_sent = spi_sync(st->spi, st->ring_msg);
+ if (b_sent)
+ goto done;
+
+ time_ns = iio_get_time_ns();
+
+ memcpy(buf, st->data, bytes);
+ memcpy(buf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+
+ indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns);
+done:
+ kfree(buf);
+ atomic_dec(&st->protect_ring);
+}
+
+int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ struct ad7887_state *st = indio_dev->dev_data;
+ int ret;
+
+ indio_dev->ring = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->ring) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&indio_dev->ring->access);
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &ad7887_poll_func_th);
+ if (ret)
+ goto error_deallocate_sw_rb;
+
+ /* Ring buffer functions - here trigger setup related */
+
+ indio_dev->ring->preenable = &ad7887_ring_preenable;
+ indio_dev->ring->postenable = &iio_triggered_ring_postenable;
+ indio_dev->ring->predisable = &iio_triggered_ring_predisable;
+ indio_dev->ring->postdisable = &ad7887_ring_postdisable;
+ indio_dev->ring->scan_el_attrs = &ad7887_scan_el_group;
+
+ INIT_WORK(&st->poll_work, &ad7887_poll_bh_to_ring);
+
+ /* Flag that polled ring buffering is possible */
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+error_deallocate_sw_rb:
+ iio_sw_rb_free(indio_dev->ring);
+error_ret:
+ return ret;
+}
+
+void ad7887_ring_cleanup(struct iio_dev *indio_dev)
+{
+ /* ensure that the trigger has been detached */
+ if (indio_dev->trig) {
+ iio_put_trigger(indio_dev->trig);
+ iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+ }
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 6309d521a864..89ccf375a188 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -432,7 +432,7 @@ static ssize_t ad799x_show_scale(struct device *dev,
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
- return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
}
static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
new file mode 100644
index 000000000000..771a409ee94c
--- /dev/null
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -0,0 +1,952 @@
+/*
+ * ADT7310 digital temperature sensor driver supporting ADT7310
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT7310 registers definition
+ */
+
+#define ADT7310_STATUS 0
+#define ADT7310_CONFIG 1
+#define ADT7310_TEMPERATURE 2
+#define ADT7310_ID 3
+#define ADT7310_T_CRIT 4
+#define ADT7310_T_HYST 5
+#define ADT7310_T_ALARM_HIGH 6
+#define ADT7310_T_ALARM_LOW 7
+
+/*
+ * ADT7310 status
+ */
+#define ADT7310_STAT_T_LOW 0x10
+#define ADT7310_STAT_T_HIGH 0x20
+#define ADT7310_STAT_T_CRIT 0x40
+#define ADT7310_STAT_NOT_RDY 0x80
+
+/*
+ * ADT7310 config
+ */
+#define ADT7310_FAULT_QUEUE_MASK 0x3
+#define ADT7310_CT_POLARITY 0x4
+#define ADT7310_INT_POLARITY 0x8
+#define ADT7310_EVENT_MODE 0x10
+#define ADT7310_MODE_MASK 0x60
+#define ADT7310_ONESHOT 0x20
+#define ADT7310_SPS 0x40
+#define ADT7310_PD 0x60
+#define ADT7310_RESOLUTION 0x80
+
+/*
+ * ADT7310 masks
+ */
+#define ADT7310_T16_VALUE_SIGN 0x8000
+#define ADT7310_T16_VALUE_FLOAT_OFFSET 7
+#define ADT7310_T16_VALUE_FLOAT_MASK 0x7F
+#define ADT7310_T13_VALUE_SIGN 0x1000
+#define ADT7310_T13_VALUE_OFFSET 3
+#define ADT7310_T13_VALUE_FLOAT_OFFSET 4
+#define ADT7310_T13_VALUE_FLOAT_MASK 0xF
+#define ADT7310_T_HYST_MASK 0xF
+#define ADT7310_DEVICE_ID_MASK 0x7
+#define ADT7310_MANUFACTORY_ID_MASK 0xF8
+#define ADT7310_MANUFACTORY_ID_OFFSET 3
+
+
+#define ADT7310_CMD_REG_MASK 0x28
+#define ADT7310_CMD_REG_OFFSET 3
+#define ADT7310_CMD_READ 0x40
+#define ADT7310_CMD_CON_READ 0x4
+
+#define ADT7310_IRQS 2
+
+/*
+ * struct adt7310_chip_info - chip specific information
+ */
+
+struct adt7310_chip_info {
+ const char *name;
+ struct spi_device *spi_dev;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ u8 config;
+};
+
+/*
+ * adt7310 register access by SPI
+ */
+
+static int adt7310_spi_read_word(struct adt7310_chip_info *chip, u8 reg, u16 *data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+ int ret = 0;
+
+ command |= ADT7310_CMD_READ;
+ ret = spi_write(spi_dev, &command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write command error\n");
+ return ret;
+ }
+
+ ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read word error\n");
+ return ret;
+ }
+
+ *data = be16_to_cpu(*data);
+
+ return 0;
+}
+
+static int adt7310_spi_write_word(struct adt7310_chip_info *chip, u8 reg, u16 data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ u8 buf[3];
+ int ret = 0;
+
+ buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+ buf[1] = (u8)(data >> 8);
+ buf[2] = (u8)(data & 0xFF);
+
+ ret = spi_write(spi_dev, buf, 3);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write word error\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adt7310_spi_read_byte(struct adt7310_chip_info *chip, u8 reg, u8 *data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+ int ret = 0;
+
+ command |= ADT7310_CMD_READ;
+ ret = spi_write(spi_dev, &command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write command error\n");
+ return ret;
+ }
+
+ ret = spi_read(spi_dev, data, sizeof(*data));
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read byte error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adt7310_spi_write_byte(struct adt7310_chip_info *chip, u8 reg, u8 data)
+{
+ struct spi_device *spi_dev = chip->spi_dev;
+ u8 buf[2];
+ int ret = 0;
+
+ buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+ buf[1] = data;
+
+ ret = spi_write(spi_dev, buf, 2);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write byte error\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static ssize_t adt7310_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u8 config;
+
+ config = chip->config & ADT7310_MODE_MASK;
+
+ switch (config) {
+ case ADT7310_PD:
+ return sprintf(buf, "power-down\n");
+ case ADT7310_ONESHOT:
+ return sprintf(buf, "one-shot\n");
+ case ADT7310_SPS:
+ return sprintf(buf, "sps\n");
+ default:
+ return sprintf(buf, "full\n");
+ }
+}
+
+static ssize_t adt7310_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u16 config;
+ int ret;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & (~ADT7310_MODE_MASK);
+ if (strcmp(buf, "power-down"))
+ config |= ADT7310_PD;
+ else if (strcmp(buf, "one-shot"))
+ config |= ADT7310_ONESHOT;
+ else if (strcmp(buf, "sps"))
+ config |= ADT7310_SPS;
+
+ ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ adt7310_show_mode,
+ adt7310_store_mode,
+ 0);
+
+static ssize_t adt7310_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "full\none-shot\nsps\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt7310_show_available_modes, NULL, 0);
+
+static ssize_t adt7310_show_resolution(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ int ret;
+ int bits;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ if (chip->config & ADT7310_RESOLUTION)
+ bits = 16;
+ else
+ bits = 13;
+
+ return sprintf(buf, "%d bits\n", bits);
+}
+
+static ssize_t adt7310_store_resolution(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ u16 config;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & (~ADT7310_RESOLUTION);
+ if (data)
+ config |= ADT7310_RESOLUTION;
+
+ ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(resolution, S_IRUGO | S_IWUSR,
+ adt7310_show_resolution,
+ adt7310_store_resolution,
+ 0);
+
+static ssize_t adt7310_show_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u8 id;
+ int ret;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_ID, &id);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "device id: 0x%x\nmanufactory id: 0x%x\n",
+ id & ADT7310_DEVICE_ID_MASK,
+ (id & ADT7310_MANUFACTORY_ID_MASK) >> ADT7310_MANUFACTORY_ID_OFFSET);
+}
+
+static IIO_DEVICE_ATTR(id, S_IRUGO | S_IWUSR,
+ adt7310_show_id,
+ NULL,
+ 0);
+
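+/*
+ * Temperature values are fixed point: in 16-bit mode the low 7 bits hold
+ * the fraction in 1/128 degC steps (hence the * 78125 with 7 fractional
+ * digits below); in 13-bit mode the low 4 bits are 1/16 degC steps
+ * (* 625 with 4 digits). For example, 0x0190 in 16-bit mode prints 3.1250000.
+ */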
+static ssize_t adt7310_convert_temperature(struct adt7310_chip_info *chip,
+ u16 data, char *buf)
+{
+ char sign = ' ';
+
+ if (chip->config & ADT7310_RESOLUTION) {
+ if (data & ADT7310_T16_VALUE_SIGN) {
+ /* convert from two's complement to a positive value */
+ data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
+ sign = '-';
+ }
+ return sprintf(buf, "%c%d.%.7d\n", sign,
+ (data >> ADT7310_T16_VALUE_FLOAT_OFFSET),
+ (data & ADT7310_T16_VALUE_FLOAT_MASK) * 78125);
+ } else {
+ if (data & ADT7310_T13_VALUE_SIGN) {
+ /* convert from two's complement to a positive value */
+ data >>= ADT7310_T13_VALUE_OFFSET;
+ data = (ADT7310_T13_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+ return sprintf(buf, "%c%d.%.4d\n", sign,
+ (data >> ADT7310_T13_VALUE_FLOAT_OFFSET),
+ (data & ADT7310_T13_VALUE_FLOAT_MASK) * 625);
+ }
+}
+
+static ssize_t adt7310_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u8 status;
+ u16 data;
+ int ret, i = 0;
+
+ do {
+ ret = adt7310_spi_read_byte(chip, ADT7310_STATUS, &status);
+ if (ret)
+ return -EIO;
+ i++;
+ if (i == 10000)
+ return -EIO;
+ } while (status & ADT7310_STAT_NOT_RDY);
+
+ ret = adt7310_spi_read_word(chip, ADT7310_TEMPERATURE, &data);
+ if (ret)
+ return -EIO;
+
+ return adt7310_convert_temperature(chip, data, buf);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt7310_show_value, NULL, 0);
+
+static ssize_t adt7310_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7310_show_name, NULL, 0);
+
+static struct attribute *adt7310_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_resolution.dev_attr.attr,
+ &iio_dev_attr_id.dev_attr.attr,
+ &iio_dev_attr_value.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adt7310_attribute_group = {
+ .attrs = adt7310_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT7310_ABOVE_ALARM IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7310_BELLOW_ALARM IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7310_ABOVE_CRIT IIO_BUFFER_EVENT_CODE(2)
+
+static void adt7310_interrupt_bh(struct work_struct *work_s)
+{
+ struct adt7310_chip_info *chip =
+ container_of(work_s, struct adt7310_chip_info, thresh_work);
+ u8 status;
+
+ if (adt7310_spi_read_byte(chip, ADT7310_STATUS, &status))
+ return;
+
+ if (status & ADT7310_STAT_T_HIGH)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7310_ABOVE_ALARM,
+ chip->last_timestamp);
+ if (status & ADT7310_STAT_T_LOW)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7310_BELLOW_ALARM,
+ chip->last_timestamp);
+ if (status & ADT7310_STAT_T_CRIT)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7310_ABOVE_CRIT,
+ chip->last_timestamp);
+}
+
+static int adt7310_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(adt7310, &adt7310_interrupt);
+IIO_EVENT_SH(adt7310_ct, &adt7310_interrupt);
+
+static ssize_t adt7310_show_event_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ if (chip->config & ADT7310_EVENT_MODE)
+ return sprintf(buf, "interrupt\n");
+ else
+ return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt7310_set_event_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u16 config;
+ int ret;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT7310_EVENT_MODE;
+ if (!sysfs_streq(buf, "comparator"))
+ config |= ADT7310_EVENT_MODE;
+
+ ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static ssize_t adt7310_show_available_event_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt7310_show_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", chip->config & ADT7310_FAULT_QUEUE_MASK);
+}
+
+static ssize_t adt7310_set_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+ u8 config;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret || data > 3)
+ return -EINVAL;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT7310_FAULT_QUEUE_MASK;
+ config |= data;
+ ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static inline ssize_t adt7310_show_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ int ret;
+
+ ret = adt7310_spi_read_word(chip, bound_reg, &data);
+ if (ret)
+ return -EIO;
+
+ return adt7310_convert_temperature(chip, data, buf);
+}
+
+static inline ssize_t adt7310_set_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ long tmp1, tmp2;
+ u16 data;
+ char *pos;
+ int ret;
+
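+ /*
+ * Bounds are written as "<integer>[.<fraction>]" in degC; the integer
+ * part is range-checked below and the fractional digits are meant to
+ * be quantized to the device step size (1/128 degC in 16-bit mode,
+ * 1/16 degC in 13-bit mode) before being packed into register format.
+ */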
+ pos = strchr(buf, '.');
+
+ ret = strict_strtol(buf, 10, &tmp1);
+
+ if (ret || tmp1 > 127 || tmp1 < -128)
+ return -EINVAL;
+
+ if (pos) {
+ len = strlen(pos);
+
+ if (chip->config & ADT7310_RESOLUTION) {
+ if (len > ADT7310_T16_VALUE_FLOAT_OFFSET)
+ len = ADT7310_T16_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 78125) * 78125;
+ } else {
+ if (len > ADT7310_T13_VALUE_FLOAT_OFFSET)
+ len = ADT7310_T13_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 625) * 625;
+ }
+ }
+
+ if (tmp1 < 0)
+ data = (u16)(-tmp1);
+ else
+ data = (u16)tmp1;
+
+ if (chip->config & ADT7310_RESOLUTION) {
+ data = (data << ADT7310_T16_VALUE_FLOAT_OFFSET) |
+ (tmp2 & ADT7310_T16_VALUE_FLOAT_MASK);
+
+ if (tmp1 < 0)
+ /* convert the positive magnitude to two's complement */
+ data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
+ } else {
+ data = (data << ADT7310_T13_VALUE_FLOAT_OFFSET) |
+ (tmp2 & ADT7310_T13_VALUE_FLOAT_MASK);
+
+ if (tmp1 < 0)
+ /* convert the positive magnitude to two's complement */
+ data = (ADT7310_T13_VALUE_SIGN << 1) - data;
+ data <<= ADT7310_T13_VALUE_OFFSET;
+ }
+
+ ret = adt7310_spi_write_word(chip, bound_reg, data);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt7310_show_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7310_show_t_bound(dev, attr,
+ ADT7310_T_ALARM_HIGH, buf);
+}
+
+static inline ssize_t adt7310_set_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7310_set_t_bound(dev, attr,
+ ADT7310_T_ALARM_HIGH, buf, len);
+}
+
+static ssize_t adt7310_show_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7310_show_t_bound(dev, attr,
+ ADT7310_T_ALARM_LOW, buf);
+}
+
+static inline ssize_t adt7310_set_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7310_set_t_bound(dev, attr,
+ ADT7310_T_ALARM_LOW, buf, len);
+}
+
+static ssize_t adt7310_show_t_crit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7310_show_t_bound(dev, attr,
+ ADT7310_T_CRIT, buf);
+}
+
+static inline ssize_t adt7310_set_t_crit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7310_set_t_bound(dev, attr,
+ ADT7310_T_CRIT, buf, len);
+}
+
+static ssize_t adt7310_show_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ int ret;
+ u8 t_hyst;
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_T_HYST, &t_hyst);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", t_hyst & ADT7310_T_HYST_MASK);
+}
+
+static inline ssize_t adt7310_set_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7310_chip_info *chip = dev_info->dev_data;
+ int ret;
+ unsigned long data;
+ u8 t_hyst;
+
+ ret = strict_strtol(buf, 10, &data);
+
+ if (ret || data > ADT7310_T_HYST_MASK)
+ return -EINVAL;
+
+ t_hyst = (u8)data;
+
+ ret = adt7310_spi_write_byte(chip, ADT7310_T_HYST, t_hyst);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+IIO_EVENT_ATTR_SH(event_mode, iio_event_adt7310,
+ adt7310_show_event_mode, adt7310_set_event_mode, 0);
+IIO_EVENT_ATTR_SH(available_event_modes, iio_event_adt7310,
+ adt7310_show_available_event_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt7310,
+ adt7310_show_fault_queue, adt7310_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_alarm_high, iio_event_adt7310,
+ adt7310_show_t_alarm_high, adt7310_set_t_alarm_high, 0);
+IIO_EVENT_ATTR_SH(t_alarm_low, iio_event_adt7310,
+ adt7310_show_t_alarm_low, adt7310_set_t_alarm_low, 0);
+IIO_EVENT_ATTR_SH(t_crit, iio_event_adt7310_ct,
+ adt7310_show_t_crit, adt7310_set_t_crit, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt7310,
+ adt7310_show_t_hyst, adt7310_set_t_hyst, 0);
+
+static struct attribute *adt7310_event_int_attributes[] = {
+ &iio_event_attr_event_mode.dev_attr.attr,
+ &iio_event_attr_available_event_modes.dev_attr.attr,
+ &iio_event_attr_fault_queue.dev_attr.attr,
+ &iio_event_attr_t_alarm_high.dev_attr.attr,
+ &iio_event_attr_t_alarm_low.dev_attr.attr,
+ &iio_event_attr_t_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *adt7310_event_ct_attributes[] = {
+ &iio_event_attr_event_mode.dev_attr.attr,
+ &iio_event_attr_available_event_modes.dev_attr.attr,
+ &iio_event_attr_fault_queue.dev_attr.attr,
+ &iio_event_attr_t_crit.dev_attr.attr,
+ &iio_event_attr_t_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
+ {
+ .attrs = adt7310_event_int_attributes,
+ },
+ {
+ .attrs = adt7310_event_ct_attributes,
+ }
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7310_probe(struct spi_device *spi_dev)
+{
+ struct adt7310_chip_info *chip;
+ int ret = 0;
+ unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
+ unsigned long irq_flags;
+
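+ /*
+ * As used below, the platform data is an array of unsigned longs:
+ * [0] = interrupt number for the INT line (0 if unused), [1] = its
+ * IRQ trigger flags, [2] = optional trigger-flags override for the
+ * CT line (spi_dev->irq).
+ */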
+ chip = kzalloc(sizeof(struct adt7310_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ dev_set_drvdata(&spi_dev->dev, chip);
+
+ chip->spi_dev = spi_dev;
+ chip->name = spi_dev->modalias;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = &spi_dev->dev;
+ chip->indio_dev->attrs = &adt7310_attribute_group;
+ chip->indio_dev->event_attrs = adt7310_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = ADT7310_IRQS;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ /* CT critical temperature event: interrupt line 0 */
+ if (spi_dev->irq) {
+ if (adt7310_platform_data[2])
+ irq_flags = adt7310_platform_data[2];
+ else
+ irq_flags = IRQF_TRIGGER_LOW;
+ ret = iio_register_interrupt_line(spi_dev->irq,
+ chip->indio_dev,
+ 0,
+ irq_flags,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+ /*
+ * The event handler list element refers to iio_event_adt7310.
+ * All event attributes bind to the same event handler.
+ * One event handler can only be added to one event list.
+ */
+ iio_add_event_to_list(&iio_event_adt7310,
+ &chip->indio_dev->interrupts[0]->ev_list);
+ }
+
+ /* INT bound temperature alarm event. line 1 */
+ if (adt7310_platform_data[0]) {
+ ret = iio_register_interrupt_line(adt7310_platform_data[0],
+ chip->indio_dev,
+ 1,
+ adt7310_platform_data[1],
+ chip->name);
+ if (ret)
+ goto error_unreg_ct_irq;
+
+ /*
+ * The event handler list element refers to iio_event_adt7310.
+ * All event attributes bind to the same event handler.
+ * One event handler can only be added to one event list.
+ */
+ iio_add_event_to_list(&iio_event_adt7310_ct,
+ &chip->indio_dev->interrupts[1]->ev_list);
+ }
+
+ if (spi_dev->irq && adt7310_platform_data[0]) {
+ INIT_WORK(&chip->thresh_work, adt7310_interrupt_bh);
+
+ ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_int_irq;
+ }
+
+ /* set the CT pin to active-low polarity */
+ chip->config &= ~ADT7310_CT_POLARITY;
+
+ if (adt7310_platform_data[1] & IRQF_TRIGGER_HIGH)
+ chip->config |= ADT7310_INT_POLARITY;
+ else
+ chip->config &= ~ADT7310_INT_POLARITY;
+
+ ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_int_irq;
+ }
+ }
+
+ dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
+ chip->name);
+
+ return 0;
+
+error_unreg_int_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 1);
+error_unreg_ct_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit adt7310_remove(struct spi_device *spi_dev)
+{
+ struct adt7310_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+ struct iio_dev *indio_dev = chip->indio_dev;
+ unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
+
+ dev_set_drvdata(&spi_dev->dev, NULL);
+ if (adt7310_platform_data[0])
+ iio_unregister_interrupt_line(indio_dev, 1);
+ if (spi_dev->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct spi_device_id adt7310_id[] = {
+ { "adt7310", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, adt7310_id);
+
+static struct spi_driver adt7310_driver = {
+ .driver = {
+ .name = "adt7310",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = adt7310_probe,
+ .remove = __devexit_p(adt7310_remove),
+ .id_table = adt7310_id,
+};
+
+static __init int adt7310_init(void)
+{
+ return spi_register_driver(&adt7310_driver);
+}
+
+static __exit void adt7310_exit(void)
+{
+ spi_unregister_driver(&adt7310_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7310_init);
+module_exit(adt7310_exit);
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
new file mode 100644
index 000000000000..c345f27ec7fc
--- /dev/null
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -0,0 +1,915 @@
+/*
+ * ADT7410 digital temperature sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT7410 registers definition
+ */
+
+#define ADT7410_TEMPERATURE 0
+#define ADT7410_STATUS 2
+#define ADT7410_CONFIG 3
+#define ADT7410_T_ALARM_HIGH 4
+#define ADT7410_T_ALARM_LOW 6
+#define ADT7410_T_CRIT 8
+#define ADT7410_T_HYST 0xA
+#define ADT7410_ID 0xB
+#define ADT7410_RESET 0x2F
+
+/*
+ * ADT7410 status
+ */
+#define ADT7410_STAT_T_LOW 0x10
+#define ADT7410_STAT_T_HIGH 0x20
+#define ADT7410_STAT_T_CRIT 0x40
+#define ADT7410_STAT_NOT_RDY 0x80
+
+/*
+ * ADT7410 config
+ */
+#define ADT7410_FAULT_QUEUE_MASK 0x3
+#define ADT7410_CT_POLARITY 0x4
+#define ADT7410_INT_POLARITY 0x8
+#define ADT7410_EVENT_MODE 0x10
+#define ADT7410_MODE_MASK 0x60
+#define ADT7410_ONESHOT 0x20
+#define ADT7410_SPS 0x40
+#define ADT7410_PD 0x60
+#define ADT7410_RESOLUTION 0x80
+
+/*
+ * ADT7410 masks
+ */
+#define ADT7410_T16_VALUE_SIGN 0x8000
+#define ADT7410_T16_VALUE_FLOAT_OFFSET 7
+#define ADT7410_T16_VALUE_FLOAT_MASK 0x7F
+#define ADT7410_T13_VALUE_SIGN 0x1000
+#define ADT7410_T13_VALUE_OFFSET 3
+#define ADT7410_T13_VALUE_FLOAT_OFFSET 4
+#define ADT7410_T13_VALUE_FLOAT_MASK 0xF
+#define ADT7410_T_HYST_MASK 0xF
+#define ADT7410_DEVICE_ID_MASK 0xF
+#define ADT7410_MANUFACTORY_ID_MASK 0xF0
+#define ADT7410_MANUFACTORY_ID_OFFSET 4
+
+#define ADT7410_IRQS 2
+
+/*
+ * struct adt7410_chip_info - chip specific information
+ */
+
+struct adt7410_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ u8 config;
+};
+
+/*
+ * adt7410 register access by I2C
+ */
+
+static int adt7410_i2c_read_word(struct adt7410_chip_info *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ *data = swab16((u16)ret);
+
+ return 0;
+}
+
+static int adt7410_i2c_write_word(struct adt7410_chip_info *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_write_word_data(client, reg, swab16(data));
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int adt7410_i2c_read_byte(struct adt7410_chip_info *chip, u8 reg, u8 *data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ *data = (u8)ret;
+
+ return 0;
+}
+
+static int adt7410_i2c_write_byte(struct adt7410_chip_info *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static ssize_t adt7410_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u8 config;
+
+ config = chip->config & ADT7410_MODE_MASK;
+
+ switch (config) {
+ case ADT7410_PD:
+ return sprintf(buf, "power-down\n");
+ case ADT7410_ONESHOT:
+ return sprintf(buf, "one-shot\n");
+ case ADT7410_SPS:
+ return sprintf(buf, "sps\n");
+ default:
+ return sprintf(buf, "full\n");
+ }
+}
+
+static ssize_t adt7410_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u16 config;
+ int ret;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & (~ADT7410_MODE_MASK);
+ if (strcmp(buf, "power-down"))
+ config |= ADT7410_PD;
+ else if (strcmp(buf, "one-shot"))
+ config |= ADT7410_ONESHOT;
+ else if (strcmp(buf, "sps"))
+ config |= ADT7410_SPS;
+
+ ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ adt7410_show_mode,
+ adt7410_store_mode,
+ 0);
+
+static ssize_t adt7410_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "full\none-shot\nsps\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt7410_show_available_modes, NULL, 0);
+
+static ssize_t adt7410_show_resolution(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ int ret;
+ int bits;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ if (chip->config & ADT7410_RESOLUTION)
+ bits = 16;
+ else
+ bits = 13;
+
+ return sprintf(buf, "%d bits\n", bits);
+}
+
+static ssize_t adt7410_store_resolution(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ u16 config;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & (~ADT7410_RESOLUTION);
+ if (data)
+ config |= ADT7410_RESOLUTION;
+
+ ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(resolution, S_IRUGO | S_IWUSR,
+ adt7410_show_resolution,
+ adt7410_store_resolution,
+ 0);
+
+static ssize_t adt7410_show_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u8 id;
+ int ret;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_ID, &id);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "device id: 0x%x\nmanufactory id: 0x%x\n",
+ id & ADT7410_DEVICE_ID_MASK,
+ (id & ADT7410_MANUFACTORY_ID_MASK) >> ADT7410_MANUFACTORY_ID_OFFSET);
+}
+
+static IIO_DEVICE_ATTR(id, S_IRUGO,
+ adt7410_show_id,
+ NULL,
+ 0);
+
+static ssize_t adt7410_convert_temperature(struct adt7410_chip_info *chip,
+ u16 data, char *buf)
+{
+ char sign = ' ';
+
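+ /*
+ * Scaling follows the register format: in 16-bit mode 1 LSB is
+ * 1/128 degC, so the 7 fractional bits print as a 7-digit decimal
+ * (x 78125); in 13-bit mode 1 LSB is 1/16 degC, so the 4 fractional
+ * bits print as a 4-digit decimal (x 625).
+ */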
+ if (chip->config & ADT7410_RESOLUTION) {
+ if (data & ADT7410_T16_VALUE_SIGN) {
+ /* convert two's complement to positive value */
+ data = (u16)((ADT7410_T16_VALUE_SIGN << 1) - (u32)data);
+ sign = '-';
+ }
+ return sprintf(buf, "%c%d.%.7d\n", sign,
+ (data >> ADT7410_T16_VALUE_FLOAT_OFFSET),
+ (data & ADT7410_T16_VALUE_FLOAT_MASK) * 78125);
+ } else {
+ if (data & ADT7410_T13_VALUE_SIGN) {
+ /* convert two's complement to positive value */
+ data >>= ADT7410_T13_VALUE_OFFSET;
+ data = (ADT7410_T13_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+ return sprintf(buf, "%c%d.%.4d\n", sign,
+ (data >> ADT7410_T13_VALUE_FLOAT_OFFSET),
+ (data & ADT7410_T13_VALUE_FLOAT_MASK) * 625);
+ }
+}
+
+static ssize_t adt7410_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u8 status;
+ u16 data;
+ int ret, i = 0;
+
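+ /* wait for the NOT_RDY flag to clear, giving up after 10000 status reads */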
+ do {
+ ret = adt7410_i2c_read_byte(chip, ADT7410_STATUS, &status);
+ if (ret)
+ return -EIO;
+ i++;
+ if (i == 10000)
+ return -EIO;
+ } while (status & ADT7410_STAT_NOT_RDY);
+
+ ret = adt7410_i2c_read_word(chip, ADT7410_TEMPERATURE, &data);
+ if (ret)
+ return -EIO;
+
+ return adt7410_convert_temperature(chip, data, buf);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt7410_show_value, NULL, 0);
+
+static ssize_t adt7410_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7410_show_name, NULL, 0);
+
+static struct attribute *adt7410_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_resolution.dev_attr.attr,
+ &iio_dev_attr_id.dev_attr.attr,
+ &iio_dev_attr_value.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adt7410_attribute_group = {
+ .attrs = adt7410_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT7410_ABOVE_ALARM IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7410_BELOW_ALARM IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7410_ABOVE_CRIT IIO_BUFFER_EVENT_CODE(2)
+
+static void adt7410_interrupt_bh(struct work_struct *work_s)
+{
+ struct adt7410_chip_info *chip =
+ container_of(work_s, struct adt7410_chip_info, thresh_work);
+ u8 status;
+
+ if (adt7410_i2c_read_byte(chip, ADT7410_STATUS, &status))
+ return;
+
+ enable_irq(chip->client->irq);
+
+ if (status & ADT7410_STAT_T_HIGH)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7410_ABOVE_ALARM,
+ chip->last_timestamp);
+ if (status & ADT7410_STAT_T_LOW)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7410_BELOW_ALARM,
+ chip->last_timestamp);
+ if (status & ADT7410_STAT_T_CRIT)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7410_ABOVE_CRIT,
+ chip->last_timestamp);
+}
+
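+ /*
+ * Called from the IIO interrupt path: only record the timestamp and
+ * defer the sleeping I2C status read to the workqueue handler above.
+ */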
+static int adt7410_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(adt7410, &adt7410_interrupt);
+IIO_EVENT_SH(adt7410_ct, &adt7410_interrupt);
+
+static ssize_t adt7410_show_event_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ if (chip->config & ADT7410_EVENT_MODE)
+ return sprintf(buf, "interrupt\n");
+ else
+ return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt7410_set_event_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u16 config;
+ int ret;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config &= ~ADT7410_EVENT_MODE;
+ if (strcmp(buf, "comparator") != 0)
+ config |= ADT7410_EVENT_MODE;
+
+ ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static ssize_t adt7410_show_available_event_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt7410_show_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", chip->config & ADT7410_FAULT_QUEUE_MASK);
+}
+
+static ssize_t adt7410_set_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+ u8 config;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret || data > 3)
+ return -EINVAL;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT7410_FAULT_QUEUE_MASK;
+ config |= data;
+ ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static inline ssize_t adt7410_show_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ int ret;
+
+ ret = adt7410_i2c_read_word(chip, bound_reg, &data);
+ if (ret)
+ return -EIO;
+
+ return adt7410_convert_temperature(chip, data, buf);
+}
+
+static inline ssize_t adt7410_set_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ long tmp1, tmp2 = 0;
+ u16 data;
+ char *pos;
+ int ret;
+
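+ /*
+ * The bound is written as a decimal string, e.g. "25.0625": the
+ * integer part is parsed into tmp1 and the fraction into tmp2,
+ * truncated to the step size of the selected resolution.
+ */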
+ pos = strchr(buf, '.');
+
+ ret = strict_strtol(buf, 10, &tmp1);
+
+ if (ret || tmp1 > 127 || tmp1 < -128)
+ return -EINVAL;
+
+ if (pos) {
+ len = strlen(pos);
+
+ if (chip->config & ADT7410_RESOLUTION) {
+ if (len > ADT7410_T16_VALUE_FLOAT_OFFSET)
+ len = ADT7410_T16_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 78125) * 78125;
+ } else {
+ if (len > ADT7410_T13_VALUE_FLOAT_OFFSET)
+ len = ADT7410_T13_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 625) * 625;
+ }
+ }
+
+ if (tmp1 < 0)
+ data = (u16)(-tmp1);
+ else
+ data = (u16)tmp1;
+
+ if (chip->config & ADT7410_RESOLUTION) {
+ data = (data << ADT7410_T16_VALUE_FLOAT_OFFSET) |
+ (tmp2 & ADT7410_T16_VALUE_FLOAT_MASK);
+
+ if (tmp1 < 0)
+ /* convert positive value to two's complement */
+ data = (u16)((ADT7410_T16_VALUE_SIGN << 1) - (u32)data);
+ } else {
+ data = (data << ADT7410_T13_VALUE_FLOAT_OFFSET) |
+ (tmp2 & ADT7410_T13_VALUE_FLOAT_MASK);
+
+ if (tmp1 < 0)
+ /* convert positive value to two's complement */
+ data = (ADT7410_T13_VALUE_SIGN << 1) - data;
+ data <<= ADT7410_T13_VALUE_OFFSET;
+ }
+
+ ret = adt7410_i2c_write_word(chip, bound_reg, data);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt7410_show_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7410_show_t_bound(dev, attr,
+ ADT7410_T_ALARM_HIGH, buf);
+}
+
+static inline ssize_t adt7410_set_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7410_set_t_bound(dev, attr,
+ ADT7410_T_ALARM_HIGH, buf, len);
+}
+
+static ssize_t adt7410_show_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7410_show_t_bound(dev, attr,
+ ADT7410_T_ALARM_LOW, buf);
+}
+
+static inline ssize_t adt7410_set_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7410_set_t_bound(dev, attr,
+ ADT7410_T_ALARM_LOW, buf, len);
+}
+
+static ssize_t adt7410_show_t_crit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7410_show_t_bound(dev, attr,
+ ADT7410_T_CRIT, buf);
+}
+
+static inline ssize_t adt7410_set_t_crit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7410_set_t_bound(dev, attr,
+ ADT7410_T_CRIT, buf, len);
+}
+
+static ssize_t adt7410_show_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ int ret;
+ u8 t_hyst;
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_T_HYST, &t_hyst);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", t_hyst & ADT7410_T_HYST_MASK);
+}
+
+static inline ssize_t adt7410_set_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7410_chip_info *chip = dev_info->dev_data;
+ int ret;
+ unsigned long data;
+ u8 t_hyst;
+
+ ret = strict_strtol(buf, 10, &data);
+
+ if (ret || data > ADT7410_T_HYST_MASK)
+ return -EINVAL;
+
+ t_hyst = (u8)data;
+
+ ret = adt7410_i2c_write_byte(chip, ADT7410_T_HYST, t_hyst);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+IIO_EVENT_ATTR_SH(event_mode, iio_event_adt7410,
+ adt7410_show_event_mode, adt7410_set_event_mode, 0);
+IIO_EVENT_ATTR_SH(available_event_modes, iio_event_adt7410,
+ adt7410_show_available_event_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt7410,
+ adt7410_show_fault_queue, adt7410_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_alarm_high, iio_event_adt7410,
+ adt7410_show_t_alarm_high, adt7410_set_t_alarm_high, 0);
+IIO_EVENT_ATTR_SH(t_alarm_low, iio_event_adt7410,
+ adt7410_show_t_alarm_low, adt7410_set_t_alarm_low, 0);
+IIO_EVENT_ATTR_SH(t_crit, iio_event_adt7410_ct,
+ adt7410_show_t_crit, adt7410_set_t_crit, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt7410,
+ adt7410_show_t_hyst, adt7410_set_t_hyst, 0);
+
+static struct attribute *adt7410_event_int_attributes[] = {
+ &iio_event_attr_event_mode.dev_attr.attr,
+ &iio_event_attr_available_event_modes.dev_attr.attr,
+ &iio_event_attr_fault_queue.dev_attr.attr,
+ &iio_event_attr_t_alarm_high.dev_attr.attr,
+ &iio_event_attr_t_alarm_low.dev_attr.attr,
+ &iio_event_attr_t_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *adt7410_event_ct_attributes[] = {
+ &iio_event_attr_event_mode.dev_attr.attr,
+ &iio_event_attr_available_event_modes.dev_attr.attr,
+ &iio_event_attr_fault_queue.dev_attr.attr,
+ &iio_event_attr_t_crit.dev_attr.attr,
+ &iio_event_attr_t_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
+ {
+ .attrs = adt7410_event_int_attributes,
+ },
+ {
+ .attrs = adt7410_event_ct_attributes,
+ }
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7410_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adt7410_chip_info *chip;
+ int ret = 0;
+ unsigned long *adt7410_platform_data = client->dev.platform_data;
+
+ chip = kzalloc(sizeof(struct adt7410_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &adt7410_attribute_group;
+ chip->indio_dev->event_attrs = adt7410_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = ADT7410_IRQS;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ /* CT critical temperature event, line 0 */
+ if (client->irq) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_LOW,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+ /*
+ * The event handler list element refers to iio_event_adt7410.
+ * All event attributes bind to the same event handler.
+ * One event handler can only be added to one event list.
+ */
+ iio_add_event_to_list(&iio_event_adt7410,
+ &chip->indio_dev->interrupts[0]->ev_list);
+ }
+
+ /* INT bound temperature alarm event. line 1 */
+ if (adt7410_platform_data[0]) {
+ ret = iio_register_interrupt_line(adt7410_platform_data[0],
+ chip->indio_dev,
+ 1,
+ adt7410_platform_data[1],
+ chip->name);
+ if (ret)
+ goto error_unreg_ct_irq;
+
+ /*
+ * The event handler list element refers to iio_event_adt7410.
+ * All event attributes bind to the same event handler.
+ * One event handler can only be added to one event list.
+ */
+ iio_add_event_to_list(&iio_event_adt7410_ct,
+ &chip->indio_dev->interrupts[1]->ev_list);
+ }
+
+ if (client->irq && adt7410_platform_data[0]) {
+ INIT_WORK(&chip->thresh_work, adt7410_interrupt_bh);
+
+ ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_int_irq;
+ }
+
+ /* set the CT pin to active-low polarity */
+ chip->config &= ~ADT7410_CT_POLARITY;
+
+ if (adt7410_platform_data[1] & IRQF_TRIGGER_HIGH)
+ chip->config |= ADT7410_INT_POLARITY;
+ else
+ chip->config &= ~ADT7410_INT_POLARITY;
+
+ ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_int_irq;
+ }
+ }
+
+ dev_info(&client->dev, "%s temperature sensor registered.\n",
+ id->name);
+
+ return 0;
+
+error_unreg_int_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 1);
+error_unreg_ct_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit adt7410_remove(struct i2c_client *client)
+{
+ struct adt7410_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+ unsigned long *adt7410_platform_data = client->dev.platform_data;
+
+ if (adt7410_platform_data[0])
+ iio_unregister_interrupt_line(indio_dev, 1);
+ if (client->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id adt7410_id[] = {
+ { "adt7410", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, adt7410_id);
+
+static struct i2c_driver adt7410_driver = {
+ .driver = {
+ .name = "adt7410",
+ },
+ .probe = adt7410_probe,
+ .remove = __devexit_p(adt7410_remove),
+ .id_table = adt7410_id,
+};
+
+static __init int adt7410_init(void)
+{
+ return i2c_add_driver(&adt7410_driver);
+}
+
+static __exit void adt7410_exit(void)
+{
+ i2c_del_driver(&adt7410_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7410 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7410_init);
+module_exit(adt7410_exit);
diff --git a/drivers/staging/iio/adc/adt75.c b/drivers/staging/iio/adc/adt75.c
new file mode 100644
index 000000000000..aff4d31eb89c
--- /dev/null
+++ b/drivers/staging/iio/adc/adt75.c
@@ -0,0 +1,732 @@
+/*
+ * ADT75 digital temperature sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT75 registers definition
+ */
+
+#define ADT75_TEMPERATURE 0
+#define ADT75_CONFIG 1
+#define ADT75_T_HYST 2
+#define ADT75_T_OS 3
+#define ADT75_ONESHOT 4
+
+/*
+ * ADT75 config
+ */
+#define ADT75_PD 0x1
+#define ADT75_OS_INT 0x2
+#define ADT75_OS_POLARITY 0x4
+#define ADT75_FAULT_QUEUE_MASK 0x18
+#define ADT75_FAULT_QUEUE_OFFSET 3
+#define ADT75_SMBUS_ALART 0x8
+
+/*
+ * ADT75 masks
+ */
+#define ADT75_VALUE_SIGN 0x800
+#define ADT75_VALUE_OFFSET 4
+#define ADT75_VALUE_FLOAT_OFFSET 4
+#define ADT75_VALUE_FLOAT_MASK 0xF
+
+
+/*
+ * struct adt75_chip_info - chip specific information
+ */
+
+struct adt75_chip_info {
+ const char *name;
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ u8 config;
+};
+
+/*
+ * adt75 register access by I2C
+ */
+
+static int adt75_i2c_read(struct adt75_chip_info *chip, u8 reg, u8 *data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0, len;
+
+ ret = i2c_smbus_write_byte(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read register address error\n");
+ return ret;
+ }
+
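+ /* the config and one-shot registers are one byte wide, the rest are two */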
+ if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
+ len = 1;
+ else
+ len = 2;
+
+ ret = i2c_master_recv(client, data, len);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ /* i2c_master_recv() returns the byte count on success */
+ return 0;
+}
+
+static int adt75_i2c_write(struct adt75_chip_info *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ else
+ ret = i2c_smbus_write_word_data(client, reg, data);
+
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static ssize_t adt75_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+
+ if (chip->config & ADT75_PD)
+ return sprintf(buf, "power-save\n");
+ else
+ return sprintf(buf, "full\n");
+}
+
+static ssize_t adt75_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ int ret;
+ u8 config;
+
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT75_PD;
+ if (!strcmp(buf, "full"))
+ config |= ADT75_PD;
+
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ adt75_show_mode,
+ adt75_store_mode,
+ 0);
+
+static ssize_t adt75_show_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "full\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt75_show_available_modes, NULL, 0);
+
+static ssize_t adt75_show_oneshot(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->config & ADT75_ONESHOT));
+}
+
+static ssize_t adt75_store_oneshot(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ unsigned long data = 0;
+ int ret;
+ u8 config;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT75_ONESHOT;
+ if (data)
+ config |= ADT75_ONESHOT;
+
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(oneshot, S_IRUGO | S_IWUSR,
+ adt75_show_oneshot,
+ adt75_store_oneshot,
+ 0);
+
+static ssize_t adt75_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ if (chip->config & ADT75_PD) {
+ dev_err(dev, "Can't read value in power-down mode.\n");
+ return -EIO;
+ }
+
+ if (chip->config & ADT75_ONESHOT) {
+ /* writing the one-shot register triggers a single conversion */
+ ret = i2c_smbus_write_byte(chip->client, ADT75_ONESHOT);
+ if (ret)
+ return -EIO;
+ }
+
+ ret = adt75_i2c_read(chip, ADT75_TEMPERATURE, (u8 *)&data);
+ if (ret)
+ return -EIO;
+
+ data = swab16(data) >> ADT75_VALUE_OFFSET;
+ if (data & ADT75_VALUE_SIGN) {
+ /* convert two's complement to positive value */
+ data = (ADT75_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.4d\n", sign,
+ (data >> ADT75_VALUE_FLOAT_OFFSET),
+ (data & ADT75_VALUE_FLOAT_MASK) * 625);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt75_show_value, NULL, 0);
+
+static ssize_t adt75_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt75_show_name, NULL, 0);
+
+static struct attribute *adt75_attributes[] = {
+ &iio_dev_attr_available_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_oneshot.dev_attr.attr,
+ &iio_dev_attr_value.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adt75_attribute_group = {
+ .attrs = adt75_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT75_OTI IIO_BUFFER_EVENT_CODE(0)
+
+static void adt75_interrupt_bh(struct work_struct *work_s)
+{
+ struct adt75_chip_info *chip =
+ container_of(work_s, struct adt75_chip_info, thresh_work);
+
+ enable_irq(chip->client->irq);
+
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT75_OTI,
+ chip->last_timestamp);
+}
+
+static int adt75_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adt75_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(adt75, &adt75_interrupt);
+
+static ssize_t adt75_show_oti_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ if (chip->config & ADT75_OS_INT)
+ return sprintf(buf, "interrupt\n");
+ else
+ return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt75_set_oti_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ int ret;
+ u8 config;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT75_OS_INT;
+ if (strcmp(buf, "comparator") != 0)
+ config |= ADT75_OS_INT;
+
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static ssize_t adt75_show_available_oti_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt75_show_smbus_alart(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", !!(chip->config & ADT75_SMBUS_ALART));
+}
+
+static ssize_t adt75_set_smbus_alart(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ unsigned long data = 0;
+ int ret;
+ u8 config;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT75_SMBUS_ALART;
+ if (data)
+ config |= ADT75_SMBUS_ALART;
+
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static ssize_t adt75_show_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ int ret;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", (chip->config & ADT75_FAULT_QUEUE_MASK) >>
+ ADT75_FAULT_QUEUE_OFFSET);
+}
+
+static ssize_t adt75_set_fault_queue(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+ u8 config;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret || data > 3)
+ return -EINVAL;
+
+ /* read the current config register */
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret)
+ return -EIO;
+
+ config = chip->config & ~ADT75_FAULT_QUEUE_MASK;
+ config |= (data << ADT75_FAULT_QUEUE_OFFSET);
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ if (ret)
+ return -EIO;
+
+ chip->config = config;
+
+ return len;
+}
+
+static inline ssize_t adt75_show_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ u16 data;
+ char sign = ' ';
+ int ret;
+
+ ret = adt75_i2c_read(chip, bound_reg, (u8 *)&data);
+ if (ret)
+ return -EIO;
+
+ data = swab16(data) >> ADT75_VALUE_OFFSET;
+ if (data & ADT75_VALUE_SIGN) {
+ /* convert two's complement to positive value */
+ data = (ADT75_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.4d\n", sign,
+ (data >> ADT75_VALUE_FLOAT_OFFSET),
+ (data & ADT75_VALUE_FLOAT_MASK) * 625);
+}
+
+static inline ssize_t adt75_set_t_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt75_chip_info *chip = dev_info->dev_data;
+ long tmp1, tmp2 = 0;
+ u16 data;
+ char *pos;
+ int ret;
+
+ pos = strchr(buf, '.');
+
+ ret = strict_strtol(buf, 10, &tmp1);
+
+ if (ret || tmp1 > 127 || tmp1 < -128)
+ return -EINVAL;
+
+ if (pos) {
+ len = strlen(pos);
+ if (len > ADT75_VALUE_FLOAT_OFFSET)
+ len = ADT75_VALUE_FLOAT_OFFSET;
+ pos[len] = 0;
+ ret = strict_strtol(pos, 10, &tmp2);
+
+ if (!ret)
+ tmp2 = (tmp2 / 625) * 625;
+ }
+
+ if (tmp1 < 0)
+ data = (u16)(-tmp1);
+ else
+ data = (u16)tmp1;
+ data = (data << ADT75_VALUE_FLOAT_OFFSET) | (tmp2 & ADT75_VALUE_FLOAT_MASK);
+ if (tmp1 < 0)
+ /* convert positive value to two's complement */
+ data = (ADT75_VALUE_SIGN << 1) - data;
+ data <<= ADT75_VALUE_OFFSET;
+ data = swab16(data);
+
+ ret = adt75_i2c_write(chip, bound_reg, data);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt75_show_t_os(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt75_show_t_bound(dev, attr,
+ ADT75_T_OS, buf);
+}
+
+static inline ssize_t adt75_set_t_os(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt75_set_t_bound(dev, attr,
+ ADT75_T_OS, buf, len);
+}
+
+static ssize_t adt75_show_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt75_show_t_bound(dev, attr,
+ ADT75_T_HYST, buf);
+}
+
+static inline ssize_t adt75_set_t_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt75_set_t_bound(dev, attr,
+ ADT75_T_HYST, buf, len);
+}
+
+IIO_EVENT_ATTR_SH(oti_mode, iio_event_adt75,
+ adt75_show_oti_mode, adt75_set_oti_mode, 0);
+IIO_EVENT_ATTR_SH(available_oti_modes, iio_event_adt75,
+ adt75_show_available_oti_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(smbus_alart, iio_event_adt75,
+ adt75_show_smbus_alart, adt75_set_smbus_alart, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt75,
+ adt75_show_fault_queue, adt75_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_os, iio_event_adt75,
+ adt75_show_t_os, adt75_set_t_os, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt75,
+ adt75_show_t_hyst, adt75_set_t_hyst, 0);
+
+static struct attribute *adt75_event_attributes[] = {
+ &iio_event_attr_oti_mode.dev_attr.attr,
+ &iio_event_attr_available_oti_modes.dev_attr.attr,
+ &iio_event_attr_smbus_alart.dev_attr.attr,
+ &iio_event_attr_fault_queue.dev_attr.attr,
+ &iio_event_attr_t_os.dev_attr.attr,
+ &iio_event_attr_t_hyst.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adt75_event_attribute_group = {
+ .attrs = adt75_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt75_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adt75_chip_info *chip;
+ int ret = 0;
+
+ chip = kzalloc(sizeof(struct adt75_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, chip);
+
+ chip->client = client;
+ chip->name = id->name;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = &client->dev;
+ chip->indio_dev->attrs = &adt75_attribute_group;
+ chip->indio_dev->event_attrs = &adt75_event_attribute_group;
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (client->irq > 0) {
+ ret = iio_register_interrupt_line(client->irq,
+ chip->indio_dev,
+ 0,
+ IRQF_TRIGGER_LOW,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+ /*
+ * The event handler list element refers to iio_event_adt75.
+ * All event attributes bind to the same event handler.
+ * So, only register event handler once.
+ */
+ iio_add_event_to_list(&iio_event_adt75,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, adt75_interrupt_bh);
+
+ ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_irq;
+ }
+
+ /* set the OS/ALERT pin to active-low polarity */
+ chip->config &= ~ADT75_OS_POLARITY;
+
+ ret = adt75_i2c_write(chip, ADT75_CONFIG, chip->config);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_irq;
+ }
+ }
+
+ dev_info(&client->dev, "%s temperature sensor registered.\n",
+ id->name);
+
+ return 0;
+error_unreg_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit adt75_remove(struct i2c_client *client)
+{
+ struct adt75_chip_info *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = chip->indio_dev;
+
+ if (client->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+ iio_device_unregister(indio_dev);
+ iio_free_device(chip->indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id adt75_id[] = {
+ { "adt75", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, adt75_id);
+
+static struct i2c_driver adt75_driver = {
+ .driver = {
+ .name = "adt75",
+ },
+ .probe = adt75_probe,
+ .remove = __devexit_p(adt75_remove),
+ .id_table = adt75_id,
+};
+
+static __init int adt75_init(void)
+{
+ return i2c_add_driver(&adt75_driver);
+}
+
+static __exit void adt75_exit(void)
+{
+ i2c_del_driver(&adt75_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT75 digital"
+ " temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt75_init);
+module_exit(adt75_exit);
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
new file mode 100644
index 000000000000..9847baf02700
--- /dev/null
+++ b/drivers/staging/iio/addac/Kconfig
@@ -0,0 +1,25 @@
+#
+# ADDAC drivers
+#
+comment "Analog digital bi-direction convertors"
+
+config ADT7316
+ tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
+ help
+ Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
+ and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
+
+config ADT7316_SPI
+ tristate "support SPI bus connection"
+ depends on SPI && ADT7316
+ default y
+ help
+ Say yes here to build SPI bus support for Analog Devices ADT7316/7/8
+ and ADT7516/7/9.
+
+config ADT7316_I2C
+ tristate "support I2C bus connection"
+ depends on I2C && ADT7316
+ help
+ Say yes here to build I2C bus support for Analog Devices ADT7316/7/8
+ and ADT7516/7/9.
diff --git a/drivers/staging/iio/addac/Makefile b/drivers/staging/iio/addac/Makefile
new file mode 100644
index 000000000000..4c7686133692
--- /dev/null
+++ b/drivers/staging/iio/addac/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for industrial I/O ADDAC drivers
+#
+
+obj-$(CONFIG_ADT7316) += adt7316.o
+obj-$(CONFIG_ADT7316_SPI) += adt7316-spi.o
+obj-$(CONFIG_ADT7316_I2C) += adt7316-i2c.o
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
new file mode 100644
index 000000000000..52d1ea349635
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -0,0 +1,170 @@
+/*
+ * I2C bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
+ * sensor, ADC and DAC
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include "adt7316.h"
+
+/*
+ * adt7316 register access by I2C
+ */
+static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
+{
+ struct i2c_client *cl = client;
+ int ret = 0;
+
+ ret = i2c_smbus_write_byte(cl, reg);
+ if (ret < 0) {
+ dev_err(&cl->dev, "I2C fail to select reg\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(&cl->dev, "I2C read error\n");
+ return ret;
+ }
+
+ *data = (u8)ret;
+
+ return 0;
+}
+
+static int adt7316_i2c_write(void *client, u8 reg, u8 data)
+{
+ struct i2c_client *cl = client;
+ int ret = 0;
+
+ ret = i2c_smbus_write_byte_data(cl, reg, data);
+ if (ret < 0)
+ dev_err(&cl->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
+{
+ struct i2c_client *cl = client;
+ int i, ret = 0;
+
+ if (count > ADT7316_REG_MAX_ADDR)
+ count = ADT7316_REG_MAX_ADDR;
+
+ for (i = 0; i < count; i++) {
+ ret = adt7316_i2c_read(cl, reg, &data[i]);
+ if (ret < 0) {
+ dev_err(&cl->dev, "I2C multi read error\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data)
+{
+ struct i2c_client *cl = client;
+ int i, ret = 0;
+
+ if (count > ADT7316_REG_MAX_ADDR)
+ count = ADT7316_REG_MAX_ADDR;
+
+ for (i = 0; i < count; i++) {
+ ret = adt7316_i2c_write(cl, reg, data[i]);
+ if (ret < 0) {
+ dev_err(&cl->dev, "I2C multi write error\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7316_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adt7316_bus bus = {
+ .client = client,
+ .irq = client->irq,
+ .irq_flags = IRQF_TRIGGER_LOW,
+ .read = adt7316_i2c_read,
+ .write = adt7316_i2c_write,
+ .multi_read = adt7316_i2c_multi_read,
+ .multi_write = adt7316_i2c_multi_write,
+ };
+
+ return adt7316_probe(&client->dev, &bus, id->name);
+}
+
+static int __devexit adt7316_i2c_remove(struct i2c_client *client)
+{
+ return adt7316_remove(&client->dev);;
+}
+
+static const struct i2c_device_id adt7316_i2c_id[] = {
+ { "adt7316", 0 },
+ { "adt7317", 0 },
+ { "adt7318", 0 },
+ { "adt7516", 0 },
+ { "adt7517", 0 },
+ { "adt7519", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
+
+#ifdef CONFIG_PM
+static int adt7316_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ return adt7316_disable(&client->dev);
+}
+
+static int adt7316_i2c_resume(struct i2c_client *client)
+{
+ return adt7316_enable(&client->dev);
+}
+#else
+# define adt7316_i2c_suspend NULL
+# define adt7316_i2c_resume NULL
+#endif
+
+static struct i2c_driver adt7316_driver = {
+ .driver = {
+ .name = "adt7316",
+ .owner = THIS_MODULE,
+ },
+ .probe = adt7316_i2c_probe,
+ .remove = __devexit_p(adt7316_i2c_remove),
+ .suspend = adt7316_i2c_suspend,
+ .resume = adt7316_i2c_resume,
+ .id_table = adt7316_i2c_id,
+};
+
+static __init int adt7316_i2c_init(void)
+{
+ return i2c_add_driver(&adt7316_driver);
+}
+
+static __exit void adt7316_i2c_exit(void)
+{
+ i2c_del_driver(&adt7316_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and"
+ "ADT7516/7/8 digital temperature sensor, ADC and DAC");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7316_i2c_init);
+module_exit(adt7316_i2c_exit);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
new file mode 100644
index 000000000000..369d4d01ed97
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -0,0 +1,180 @@
+/*
+ * SPI bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
+ * sensor, ADC and DAC
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+
+#include "adt7316.h"
+
+#define ADT7316_SPI_MAX_FREQ_HZ 5000000
+#define ADT7316_SPI_CMD_READ 0x91
+#define ADT7316_SPI_CMD_WRITE 0x90
+
+/*
+ * adt7316 register access by SPI
+ */
+
+static int adt7316_spi_multi_read(void *client, u8 reg, u8 count, u8 *data)
+{
+ struct spi_device *spi_dev = client;
+ u8 cmd[2];
+ int ret = 0;
+
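+ /*
+ * A register read is two transfers: a write command selects the
+ * start register, then a read command clocks out "count" bytes.
+ */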
+ if (count > ADT7316_REG_MAX_ADDR)
+ count = ADT7316_REG_MAX_ADDR;
+
+ cmd[0] = ADT7316_SPI_CMD_WRITE;
+ cmd[1] = reg;
+
+ ret = spi_write(spi_dev, cmd, 2);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI fail to select reg\n");
+ return ret;
+ }
+
+ cmd[0] = ADT7316_SPI_CMD_READ;
+
+ ret = spi_write_then_read(spi_dev, cmd, 1, data, count);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI read data error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adt7316_spi_multi_write(void *client, u8 reg, u8 count, u8 *data)
+{
+ struct spi_device *spi_dev = client;
+ u8 buf[ADT7316_REG_MAX_ADDR + 2];
+ int i, ret = 0;
+
+ if (count > ADT7316_REG_MAX_ADDR)
+ count = ADT7316_REG_MAX_ADDR;
+
+ buf[0] = ADT7316_SPI_CMD_WRITE;
+ buf[1] = reg;
+ for (i = 0; i < count; i++)
+ buf[i + 2] = data[i];
+
+ ret = spi_write(spi_dev, buf, count + 2);
+ if (ret < 0) {
+ dev_err(&spi_dev->dev, "SPI write error\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adt7316_spi_read(void *client, u8 reg, u8 *data)
+{
+ return adt7316_spi_multi_read(client, reg, 1, data);
+}
+
+static int adt7316_spi_write(void *client, u8 reg, u8 val)
+{
+ return adt7316_spi_multi_write(client, reg, 1, &val);
+}
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7316_spi_probe(struct spi_device *spi_dev)
+{
+ struct adt7316_bus bus = {
+ .client = spi_dev,
+ .irq = spi_dev->irq,
+ .irq_flags = IRQF_TRIGGER_LOW,
+ .read = adt7316_spi_read,
+ .write = adt7316_spi_write,
+ .multi_read = adt7316_spi_multi_read,
+ .multi_write = adt7316_spi_multi_write,
+ };
+
+ /* don't exceed max specified SPI CLK frequency */
+ if (spi_dev->max_speed_hz > ADT7316_SPI_MAX_FREQ_HZ) {
+ dev_err(&spi_dev->dev, "SPI CLK %d Hz?\n",
+ spi_dev->max_speed_hz);
+ return -EINVAL;
+ }
+
+ /* switch from default I2C protocol to SPI protocol */
+ adt7316_spi_write(spi_dev, 0, 0);
+ adt7316_spi_write(spi_dev, 0, 0);
+ adt7316_spi_write(spi_dev, 0, 0);
+
+ return adt7316_probe(&spi_dev->dev, &bus, spi_dev->modalias);
+}
+
+static int __devexit adt7316_spi_remove(struct spi_device *spi_dev)
+{
+ return adt7316_remove(&spi_dev->dev);
+}
+
+static const struct spi_device_id adt7316_spi_id[] = {
+ { "adt7316", 0 },
+ { "adt7317", 0 },
+ { "adt7318", 0 },
+ { "adt7516", 0 },
+ { "adt7517", 0 },
+ { "adt7519", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
+
+#ifdef CONFIG_PM
+static int adt7316_spi_suspend(struct spi_device *spi_dev, pm_message_t message)
+{
+ return adt7316_disable(&spi_dev->dev);
+}
+
+static int adt7316_spi_resume(struct spi_device *spi_dev)
+{
+ return adt7316_enable(&spi_dev->dev);
+}
+#else
+# define adt7316_spi_suspend NULL
+# define adt7316_spi_resume NULL
+#endif
+
+static struct spi_driver adt7316_driver = {
+ .driver = {
+ .name = "adt7316",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = adt7316_spi_probe,
+ .remove = __devexit_p(adt7316_spi_remove),
+ .suspend = adt7316_spi_suspend,
+ .resume = adt7316_spi_resume,
+ .id_table = adt7316_spi_id,
+};
+
+static __init int adt7316_spi_init(void)
+{
+ return spi_register_driver(&adt7316_driver);
+}
+
+static __exit void adt7316_spi_exit(void)
+{
+ spi_unregister_driver(&adt7316_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and"
+ "ADT7516/7/9 digital temperature sensor, ADC and DAC");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7316_spi_init);
+module_exit(adt7316_spi_exit);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
new file mode 100644
index 000000000000..d1b5b13629d9
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -0,0 +1,2402 @@
+/*
+ * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "adt7316.h"
+
+/*
+ * ADT7316 registers definition
+ */
+#define ADT7316_INT_STAT1 0x0
+#define ADT7316_INT_STAT2 0x1
+#define ADT7316_LSB_IN_TEMP_VDD 0x3
+#define ADT7316_LSB_IN_TEMP_MASK 0x3
+#define ADT7316_LSB_VDD_MASK 0xC
+#define ADT7316_LSB_VDD_OFFSET 2
+#define ADT7316_LSB_EX_TEMP_AIN 0x4
+#define ADT7316_LSB_EX_TEMP_MASK 0x3
+#define ADT7516_LSB_AIN_SHIFT 2
+#define ADT7316_AD_MSB_DATA_BASE 0x6
+#define ADT7316_AD_MSB_DATA_REGS 3
+#define ADT7516_AD_MSB_DATA_REGS 6
+#define ADT7316_MSB_VDD 0x6
+#define ADT7316_MSB_IN_TEMP 0x7
+#define ADT7316_MSB_EX_TEMP 0x8
+#define ADT7516_MSB_AIN1 0x8
+#define ADT7516_MSB_AIN2 0x9
+#define ADT7516_MSB_AIN3 0xA
+#define ADT7516_MSB_AIN4 0xB
+#define ADT7316_DA_DATA_BASE 0x10
+#define ADT7316_DA_MSB_DATA_REGS 4
+#define ADT7316_LSB_DAC_A 0x10
+#define ADT7316_MSB_DAC_A 0x11
+#define ADT7316_LSB_DAC_B 0x12
+#define ADT7316_MSB_DAC_B 0x13
+#define ADT7316_LSB_DAC_C 0x14
+#define ADT7316_MSB_DAC_C 0x15
+#define ADT7316_LSB_DAC_D 0x16
+#define ADT7316_MSB_DAC_D 0x17
+#define ADT7316_CONFIG1 0x18
+#define ADT7316_CONFIG2 0x19
+#define ADT7316_CONFIG3 0x1A
+#define ADT7316_LDAC_CONFIG 0x1B
+#define ADT7316_DAC_CONFIG 0x1C
+#define ADT7316_INT_MASK1 0x1D
+#define ADT7316_INT_MASK2 0x1E
+#define ADT7316_IN_TEMP_OFFSET 0x1F
+#define ADT7316_EX_TEMP_OFFSET 0x20
+#define ADT7316_IN_ANALOG_TEMP_OFFSET 0x21
+#define ADT7316_EX_ANALOG_TEMP_OFFSET 0x22
+#define ADT7316_VDD_HIGH 0x23
+#define ADT7316_VDD_LOW 0x24
+#define ADT7316_IN_TEMP_HIGH 0x25
+#define ADT7316_IN_TEMP_LOW 0x26
+#define ADT7316_EX_TEMP_HIGH 0x27
+#define ADT7316_EX_TEMP_LOW 0x28
+#define ADT7516_AIN2_HIGH 0x2B
+#define ADT7516_AIN2_LOW 0x2C
+#define ADT7516_AIN3_HIGH 0x2D
+#define ADT7516_AIN3_LOW 0x2E
+#define ADT7516_AIN4_HIGH 0x2F
+#define ADT7516_AIN4_LOW 0x30
+#define ADT7316_DEVICE_ID 0x4D
+#define ADT7316_MANUFACTURE_ID 0x4E
+#define ADT7316_DEVICE_REV 0x4F
+#define ADT7316_SPI_LOCK_STAT 0x7F
+
+/*
+ * ADT7316 config1
+ */
+#define ADT7316_EN 0x1
+#define ADT7516_SEL_EX_TEMP 0x4
+#define ADT7516_SEL_AIN1_2_EX_TEMP_MASK 0x6
+#define ADT7516_SEL_AIN3 0x8
+#define ADT7316_INT_EN 0x20
+#define ADT7316_INT_POLARITY 0x40
+#define ADT7316_PD 0x80
+
+/*
+ * ADT7316 config2
+ */
+#define ADT7316_AD_SINGLE_CH_MASK 0x3
+#define ADT7516_AD_SINGLE_CH_MASK 0x7
+#define ADT7316_AD_SINGLE_CH_VDD 0
+#define ADT7316_AD_SINGLE_CH_IN 1
+#define ADT7316_AD_SINGLE_CH_EX 2
+#define ADT7516_AD_SINGLE_CH_AIN1 2
+#define ADT7516_AD_SINGLE_CH_AIN2 3
+#define ADT7516_AD_SINGLE_CH_AIN3 4
+#define ADT7516_AD_SINGLE_CH_AIN4 5
+#define ADT7316_AD_SINGLE_CH_MODE 0x10
+#define ADT7316_DISABLE_AVERAGING 0x20
+#define ADT7316_EN_SMBUS_TIMEOUT 0x40
+#define ADT7316_RESET 0x80
+
+/*
+ * ADT7316 config3
+ */
+#define ADT7316_ADCLK_22_5 0x1
+#define ADT7316_DA_HIGH_RESOLUTION 0x2
+#define ADT7316_DA_EN_VIA_DAC_LDCA 0x4
+#define ADT7516_AIN_IN_VREF 0x10
+#define ADT7316_EN_IN_TEMP_PROP_DACA 0x20
+#define ADT7316_EN_EX_TEMP_PROP_DACB 0x40
+
+/*
+ * ADT7316 DAC config
+ */
+#define ADT7316_DA_2VREF_CH_MASK 0xF
+#define ADT7316_DA_EN_MODE_MASK 0x30
+#define ADT7316_DA_EN_MODE_SINGLE 0x00
+#define ADT7316_DA_EN_MODE_AB_CD 0x10
+#define ADT7316_DA_EN_MODE_ABCD 0x20
+#define ADT7316_DA_EN_MODE_LDAC 0x30
+#define ADT7316_VREF_BYPASS_DAC_AB 0x40
+#define ADT7316_VREF_BYPASS_DAC_CD 0x80
+
+/*
+ * ADT7316 LDAC config
+ */
+#define ADT7316_LDAC_EN_DA_MASK 0xF
+#define ADT7316_DAC_IN_VREF 0x10
+#define ADT7516_DAC_AB_IN_VREF 0x10
+#define ADT7516_DAC_CD_IN_VREF 0x20
+#define ADT7516_DAC_IN_VREF_OFFSET 4
+#define ADT7516_DAC_IN_VREF_MASK 0x30
+
+/*
+ * ADT7316 INT_MASK2
+ */
+#define ADT7316_INT_MASK2_VDD 0x10
+
+/*
+ * ADT7316 value masks
+ */
+#define ADT7316_VALUE_MASK 0xfff
+#define ADT7316_T_VALUE_SIGN 0x400
+#define ADT7316_T_VALUE_FLOAT_OFFSET 2
+#define ADT7316_T_VALUE_FLOAT_MASK 0x3
+
+/*
+ * Chip ID
+ */
+#define ID_ADT7316 0x1
+#define ID_ADT7317 0x2
+#define ID_ADT7318 0x3
+#define ID_ADT7516 0x11
+#define ID_ADT7517 0x12
+#define ID_ADT7519 0x14
+
+#define ID_FAMILY_MASK 0xF0
+#define ID_ADT73XX 0x0
+#define ID_ADT75XX 0x10
+
+/*
+ * struct adt7316_chip_info - chip specific information
+ */
+
+struct adt7316_chip_info {
+ const char *name;
+ struct iio_dev *indio_dev;
+ struct work_struct thresh_work;
+ s64 last_timestamp;
+ struct adt7316_bus bus;
+ u16 ldac_pin;
+ u16 int_mask; /* logical interrupt mask */
+ u8 config1;
+ u8 config2;
+ u8 config3;
+ u8 dac_config; /* DAC config */
+ u8 ldac_config; /* LDAC config */
+ u8 dac_bits; /* 8, 10, 12 */
+ u8 id; /* chip id */
+};
+
+/*
+ * Logical interrupt mask used by user applications to enable
+ * individual interrupts; it is mapped onto the hardware INT_MASK
+ * registers when written.
+ */
+#define ADT7316_IN_TEMP_HIGH_INT_MASK 0x1
+#define ADT7316_IN_TEMP_LOW_INT_MASK 0x2
+#define ADT7316_EX_TEMP_HIGH_INT_MASK 0x4
+#define ADT7316_EX_TEMP_LOW_INT_MASK 0x8
+#define ADT7316_EX_TEMP_FAULT_INT_MASK 0x10
+#define ADT7516_AIN1_INT_MASK 0x4
+#define ADT7516_AIN2_INT_MASK 0x20
+#define ADT7516_AIN3_INT_MASK 0x40
+#define ADT7516_AIN4_INT_MASK 0x80
+#define ADT7316_VDD_INT_MASK 0x100
+#define ADT7316_TEMP_INT_MASK 0x1F
+#define ADT7516_AIN_INT_MASK 0xE0
+#define ADT7316_TEMP_AIN_INT_MASK \
+ (ADT7316_TEMP_INT_MASK | ADT7516_AIN_INT_MASK)
+
+/*
+ * struct adt7316_limit_regs - a pair of high/low limit register values
+ */
+
+struct adt7316_limit_regs {
+ u16 data_high;
+ u16 data_low;
+};
+
+static ssize_t adt7316_show_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN));
+}
+
+static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip,
+ int enable)
+{
+ u8 config1;
+ int ret;
+
+ if (enable)
+ config1 = chip->config1 | ADT7316_EN;
+ else
+ config1 = chip->config1 & ~ADT7316_EN;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+ if (ret)
+ return -EIO;
+
+ chip->config1 = config1;
+
+ return ret;
+}
+
+static ssize_t adt7316_store_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ int enable;
+
+ if (!memcmp(buf, "1", 1))
+ enable = 1;
+ else
+ enable = 0;
+
+ if (_adt7316_store_enabled(chip, enable) < 0)
+ return -EIO;
+ else
+ return len;
+}
+
+static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR,
+ adt7316_show_enabled,
+ adt7316_store_enabled,
+ 0);
+
+static ssize_t adt7316_show_select_ex_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+ return -EPERM;
+
+ return sprintf(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP));
+}
+
+static ssize_t adt7316_store_select_ex_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config1;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+ return -EPERM;
+
+ config1 = chip->config1 & (~ADT7516_SEL_EX_TEMP);
+ if (!memcmp(buf, "1", 1))
+ config1 |= ADT7516_SEL_EX_TEMP;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+ if (ret)
+ return -EIO;
+
+ chip->config1 = config1;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR,
+ adt7316_show_select_ex_temp,
+ adt7316_store_select_ex_temp,
+ 0);
+
+static ssize_t adt7316_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE)
+ return sprintf(buf, "single_channel\n");
+ else
+ return sprintf(buf, "round_robin\n");
+}
+
+static ssize_t adt7316_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config2;
+ int ret;
+
+ config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MODE);
+ if (!memcmp(buf, "single_channel", 14))
+ config2 |= ADT7316_AD_SINGLE_CH_MODE;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+ if (ret)
+ return -EIO;
+
+ chip->config2 = config2;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ adt7316_show_mode,
+ adt7316_store_mode,
+ 0);
+
+static ssize_t adt7316_show_all_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "single_channel\nround_robin\n");
+}
+
+static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0);
+
+static ssize_t adt7316_show_ad_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+ return -EPERM;
+
+ switch (chip->config2 & ADT7516_AD_SINGLE_CH_MASK) {
+ case ADT7316_AD_SINGLE_CH_VDD:
+ return sprintf(buf, "0 - VDD\n");
+ case ADT7316_AD_SINGLE_CH_IN:
+ return sprintf(buf, "1 - Internal Temperature\n");
+ case ADT7316_AD_SINGLE_CH_EX:
+ if (((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) &&
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
+ return sprintf(buf, "2 - AIN1\n");
+ else
+ return sprintf(buf, "2 - External Temperature\n");
+ case ADT7516_AD_SINGLE_CH_AIN2:
+ if ((chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
+ return sprintf(buf, "3 - AIN2\n");
+ else
+ return sprintf(buf, "N/A\n");
+ case ADT7516_AD_SINGLE_CH_AIN3:
+ if (chip->config1 & ADT7516_SEL_AIN3)
+ return sprintf(buf, "4 - AIN3\n");
+ else
+ return sprintf(buf, "N/A\n");
+ case ADT7516_AD_SINGLE_CH_AIN4:
+ return sprintf(buf, "5 - AIN4\n");
+ default:
+ return sprintf(buf, "N/A\n");
+ }
+}
+
+static ssize_t adt7316_store_ad_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config2;
+ unsigned long data = 0;
+ int ret;
+
+ if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+ if (data > 5)
+ return -EINVAL;
+
+ config2 = chip->config2 & (~ADT7516_AD_SINGLE_CH_MASK);
+ } else {
+ if (data > 2)
+ return -EINVAL;
+
+ config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK);
+ }
+
+ config2 |= data;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+ if (ret)
+ return -EIO;
+
+ chip->config2 = config2;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR,
+ adt7316_show_ad_channel,
+ adt7316_store_ad_channel,
+ 0);
+
+static ssize_t adt7316_show_all_ad_channels(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+ return -EPERM;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+ "2 - External Temperature or AIN1\n"
+ "3 - AIN2\n4 - AIN3\n5 - AIN4\n");
+ else
+ return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+ "2 - External Temperature\n");
+}
+
+static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO,
+ adt7316_show_all_ad_channels, NULL, 0);
+
+static ssize_t adt7316_show_disable_averaging(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->config2 & ADT7316_DISABLE_AVERAGING));
+}
+
+static ssize_t adt7316_store_disable_averaging(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config2;
+ int ret;
+
+ config2 = chip->config2 & (~ADT7316_DISABLE_AVERAGING);
+ if (!memcmp(buf, "1", 1))
+ config2 |= ADT7316_DISABLE_AVERAGING;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+ if (ret)
+ return -EIO;
+
+ chip->config2 = config2;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR,
+ adt7316_show_disable_averaging,
+ adt7316_store_disable_averaging,
+ 0);
+
+static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT));
+}
+
+static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config2;
+ int ret;
+
+ config2 = chip->config2 & (~ADT7316_EN_SMBUS_TIMEOUT);
+ if (!memcmp(buf, "1", 1))
+ config2 |= ADT7316_EN_SMBUS_TIMEOUT;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+ if (ret)
+ return -EIO;
+
+ chip->config2 = config2;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
+ adt7316_show_enable_smbus_timeout,
+ adt7316_store_enable_smbus_timeout,
+ 0);
+
+static ssize_t adt7316_store_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config2;
+ int ret;
+
+ config2 = chip->config2 | ADT7316_RESET;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+ NULL,
+ adt7316_store_reset,
+ 0);
+
+static ssize_t adt7316_show_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD));
+}
+
+static ssize_t adt7316_store_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config1;
+ int ret;
+
+ config1 = chip->config1 & (~ADT7316_PD);
+ if (!memcmp(buf, "1", 1))
+ config1 |= ADT7316_PD;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+ if (ret)
+ return -EIO;
+
+ chip->config1 = config1;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR,
+ adt7316_show_powerdown,
+ adt7316_store_powerdown,
+ 0);
+
+static ssize_t adt7316_show_fast_ad_clock(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5));
+}
+
+static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config3;
+ int ret;
+
+ config3 = chip->config3 & (~ADT7316_ADCLK_22_5);
+ if (!memcmp(buf, "1", 1))
+ config3 |= ADT7316_ADCLK_22_5;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+ if (ret)
+ return -EIO;
+
+ chip->config3 = config3;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR,
+ adt7316_show_fast_ad_clock,
+ adt7316_store_fast_ad_clock,
+ 0);
+
+static ssize_t adt7316_show_da_high_resolution(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
+ if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+ return sprintf(buf, "1 (12 bits)\n");
+ else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ return sprintf(buf, "1 (10 bits)\n");
+ }
+
+ return sprintf(buf, "0 (8 bits)\n");
+}
+
+static ssize_t adt7316_store_da_high_resolution(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config3;
+ int ret;
+
+ chip->dac_bits = 8;
+
+ if (!memcmp(buf, "1", 1)) {
+ config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
+ if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+ chip->dac_bits = 12;
+ else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ chip->dac_bits = 10;
+ } else
+ config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+ if (ret)
+ return -EIO;
+
+ chip->config3 = config3;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR,
+ adt7316_show_da_high_resolution,
+ adt7316_store_da_high_resolution,
+ 0);
+
+static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+ return -EPERM;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->config3 & ADT7516_AIN_IN_VREF));
+}
+
+static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config3;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+ return -EPERM;
+
+ if (memcmp(buf, "1", 1))
+ config3 = chip->config3 & (~ADT7516_AIN_IN_VREF);
+ else
+ config3 = chip->config3 | ADT7516_AIN_IN_VREF;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+ if (ret)
+ return -EIO;
+
+ chip->config3 = config3;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR,
+ adt7316_show_AIN_internal_Vref,
+ adt7316_store_AIN_internal_Vref,
+ 0);
+
+static ssize_t adt7316_show_enable_prop_DACA(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA));
+}
+
+static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config3;
+ int ret;
+
+ config3 = chip->config3 & (~ADT7316_EN_IN_TEMP_PROP_DACA);
+ if (!memcmp(buf, "1", 1))
+ config3 |= ADT7316_EN_IN_TEMP_PROP_DACA;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+ if (ret)
+ return -EIO;
+
+ chip->config3 = config3;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR,
+ adt7316_show_enable_prop_DACA,
+ adt7316_store_enable_prop_DACA,
+ 0);
+
+static ssize_t adt7316_show_enable_prop_DACB(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB));
+}
+
+static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config3;
+ int ret;
+
+ config3 = chip->config3 & (~ADT7316_EN_EX_TEMP_PROP_DACB);
+ if (!memcmp(buf, "1", 1))
+ config3 |= ADT7316_EN_EX_TEMP_PROP_DACB;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+ if (ret)
+ return -EIO;
+
+ chip->config3 = config3;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR,
+ adt7316_show_enable_prop_DACB,
+ adt7316_store_enable_prop_DACB,
+ 0);
+
+static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%x\n",
+ chip->dac_config & ADT7316_DA_2VREF_CH_MASK);
+}
+
+static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 dac_config;
+ unsigned long data = 0;
+ int ret;
+
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data > ADT7316_DA_2VREF_CH_MASK)
+ return -EINVAL;
+
+ dac_config = chip->dac_config & (~ADT7316_DA_2VREF_CH_MASK);
+ dac_config |= data;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+ if (ret)
+ return -EIO;
+
+ chip->dac_config = dac_config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR,
+ adt7316_show_DAC_2Vref_ch_mask,
+ adt7316_store_DAC_2Vref_ch_mask,
+ 0);
+
+static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+ return sprintf(buf, "manual\n");
+ else {
+ switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
+ case ADT7316_DA_EN_MODE_SINGLE:
+ return sprintf(buf, "0 - auto at any MSB DAC writing\n");
+ case ADT7316_DA_EN_MODE_AB_CD:
+ return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n");
+ case ADT7316_DA_EN_MODE_ABCD:
+ return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n");
+ default: /* ADT7316_DA_EN_MODE_LDAC */
+ return sprintf(buf, "3 - manual\n");
+ }
+ }
+}
+
+static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 dac_config;
+ unsigned long data;
+ int ret;
+
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret || data > ADT7316_DA_EN_MODE_MASK)
+ return -EINVAL;
+
+ dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK);
+ dac_config |= data;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+ if (ret)
+ return -EIO;
+
+ chip->dac_config = dac_config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_update_mode, S_IRUGO | S_IWUSR,
+ adt7316_show_DAC_update_mode,
+ adt7316_store_DAC_update_mode,
+ 0);
+
+static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)
+ return sprintf(buf, "0 - auto at any MSB DAC writing\n"
+ "1 - auto at MSB DAC AB and CD writing\n"
+ "2 - auto at MSB DAC ABCD writing\n"
+ "3 - manual\n");
+ else
+ return sprintf(buf, "manual\n");
+}
+
+static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO,
+ adt7316_show_all_DAC_update_modes, NULL, 0);
+
+static ssize_t adt7316_store_update_DAC(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 ldac_config;
+ unsigned long data;
+ int ret;
+
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) {
+ if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) !=
+ ADT7316_DA_EN_MODE_LDAC)
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data > ADT7316_LDAC_EN_DA_MASK)
+ return -EINVAL;
+
+ ldac_config = chip->ldac_config & (~ADT7316_LDAC_EN_DA_MASK);
+ ldac_config |= data;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG,
+ ldac_config);
+ if (ret)
+ return -EIO;
+ } else {
+ gpio_set_value(chip->ldac_pin, 0);
+ gpio_set_value(chip->ldac_pin, 1);
+ }
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR,
+ NULL,
+ adt7316_store_update_DAC,
+ 0);
+
+static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return -EPERM;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB));
+}
+
+static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 dac_config;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return -EPERM;
+
+ dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB);
+ if (!memcmp(buf, "1", 1))
+ dac_config |= ADT7316_VREF_BYPASS_DAC_AB;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+ if (ret)
+ return -EIO;
+
+ chip->dac_config = dac_config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR,
+ adt7316_show_DA_AB_Vref_bypass,
+ adt7316_store_DA_AB_Vref_bypass,
+ 0);
+
+static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return -EPERM;
+
+ return sprintf(buf, "%d\n",
+ !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD));
+}
+
+static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 dac_config;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return -EPERM;
+
+ dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD);
+ if (!memcmp(buf, "1", 1))
+ dac_config |= ADT7316_VREF_BYPASS_DAC_CD;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+ if (ret)
+ return -EIO;
+
+ chip->dac_config = dac_config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR,
+ adt7316_show_DA_CD_Vref_bypass,
+ adt7316_store_DA_CD_Vref_bypass,
+ 0);
+
+static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return sprintf(buf, "0x%x\n",
+ (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >>
+ ADT7516_DAC_IN_VREF_OFFSET);
+ else
+ return sprintf(buf, "%d\n",
+ !!(chip->dac_config & ADT7316_DAC_IN_VREF));
+}
+
+static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 ldac_config;
+ unsigned long data;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data > 3)
+ return -EINVAL;
+
+ ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
+ if (data & 0x1)
+ ldac_config |= ADT7516_DAC_AB_IN_VREF;
+ if (data & 0x2)
+ ldac_config |= ADT7516_DAC_CD_IN_VREF;
+ } else {
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret)
+ return -EINVAL;
+
+ ldac_config = chip->ldac_config & (~ADT7316_DAC_IN_VREF);
+ if (data)
+ ldac_config = chip->ldac_config | ADT7316_DAC_IN_VREF;
+ }
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config);
+ if (ret)
+ return -EIO;
+
+ chip->ldac_config = ldac_config;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR,
+ adt7316_show_DAC_internal_Vref,
+ adt7316_store_DAC_internal_Vref,
+ 0);
+
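+/*
+ * Read one A/D conversion result. Each 10-bit value is split between a
+ * shared LSB register and a per-channel MSB register. Internal and
+ * external temperature readings are sign-extended and printed with a
+ * fractional part; VDD and the ADT751x external/AIN channels are
+ * printed as raw unsigned codes.
+ */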
+static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
+ int channel, char *buf)
+{
+ u16 data;
+ u8 msb, lsb;
+ char sign = ' ';
+ int ret;
+
+ if ((chip->config2 & ADT7316_AD_SINGLE_CH_MODE) &&
+ channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK))
+ return -EPERM;
+
+ switch (channel) {
+ case ADT7316_AD_SINGLE_CH_IN:
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_LSB_IN_TEMP_VDD, &lsb);
+ if (ret)
+ return -EIO;
+
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+ if (ret)
+ return -EIO;
+
+ data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+ data |= lsb & ADT7316_LSB_IN_TEMP_MASK;
+ break;
+ case ADT7316_AD_SINGLE_CH_VDD:
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_LSB_IN_TEMP_VDD, &lsb);
+ if (ret)
+ return -EIO;
+
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+ if (ret)
+ return -EIO;
+
+ data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+ data |= (lsb & ADT7316_LSB_VDD_MASK) >> ADT7316_LSB_VDD_OFFSET;
+ return sprintf(buf, "%d\n", data);
+ default: /* ex_temp and ain */
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_LSB_EX_TEMP_AIN, &lsb);
+ if (ret)
+ return -EIO;
+
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+ if (ret)
+ return -EIO;
+
+ data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+ data |= lsb & (ADT7316_LSB_EX_TEMP_MASK <<
+ (ADT7516_LSB_AIN_SHIFT * (channel -
+ (ADT7316_MSB_EX_TEMP - ADT7316_AD_MSB_DATA_BASE))));
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ return sprintf(buf, "%d\n", data);
+ else
+ break;
+ }
+
+ if (data & ADT7316_T_VALUE_SIGN) {
+ /* convert supplement to positive value */
+ data = (ADT7316_T_VALUE_SIGN << 1) - data;
+ sign = '-';
+ }
+
+ return sprintf(buf, "%c%d.%.2d\n", sign,
+ (data >> ADT7316_T_VALUE_FLOAT_OFFSET),
+ (data & ADT7316_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static ssize_t adt7316_show_VDD(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf);
+}
+static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0);
+
+static ssize_t adt7316_show_in_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf);
+}
+
+static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0);
+
+static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
+}
+
+static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+
+static ssize_t adt7316_show_AIN2(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf);
+}
+static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0);
+
+static ssize_t adt7316_show_AIN3(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf);
+}
+static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0);
+
+static ssize_t adt7316_show_AIN4(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf);
+}
+static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0);
+
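+/*
+ * Temperature offset registers hold a signed 8-bit two's complement
+ * value, so only -128..127 is accepted and negative values are
+ * converted to/from their unsigned register encoding here.
+ */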
+static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
+ int offset_addr, char *buf)
+{
+ int data;
+ u8 val;
+ int ret;
+
+ ret = chip->bus.read(chip->bus.client, offset_addr, &val);
+ if (ret)
+ return -EIO;
+
+ data = (int)val;
+ if (val & 0x80)
+ data -= 256;
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip,
+ int offset_addr, const char *buf, size_t len)
+{
+ long data;
+ u8 val;
+ int ret;
+
+ ret = strict_strtol(buf, 10, &data);
+ if (ret || data > 127 || data < -128)
+ return -EINVAL;
+
+ if (data < 0)
+ data += 256;
+
+ val = (u8)data;
+
+ ret = chip->bus.write(chip->bus.client, offset_addr, val);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt7316_show_in_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_in_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
+ adt7316_show_in_temp_offset,
+ adt7316_store_in_temp_offset, 0);
+
+static ssize_t adt7316_show_ex_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
+ adt7316_show_ex_temp_offset,
+ adt7316_store_ex_temp_offset, 0);
+
+static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_temp_offset(chip,
+ ADT7316_IN_ANALOG_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_temp_offset(chip,
+ ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR,
+ adt7316_show_in_analog_temp_offset,
+ adt7316_store_in_analog_temp_offset, 0);
+
+static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_temp_offset(chip,
+ ADT7316_EX_ANALOG_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_temp_offset(chip,
+ ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR,
+ adt7316_show_ex_analog_temp_offset,
+ adt7316_store_ex_analog_temp_offset, 0);
+
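+/*
+ * DAC codes are split across an LSB/MSB register pair per channel. In
+ * 8-bit mode only the MSB register is used; in 10- or 12-bit mode the
+ * low-order bits are kept in the LSB register, as selected by dac_bits.
+ */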
+static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
+ int channel, char *buf)
+{
+ u16 data;
+ u8 msb, lsb = 0, offset; /* lsb stays 0 for 8-bit DACs */
+ int ret;
+
+ if (channel >= ADT7316_DA_MSB_DATA_REGS ||
+ (channel == 0 &&
+ (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+ (channel == 1 &&
+ (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+ return -EPERM;
+
+ offset = chip->dac_bits - 8;
+
+ if (chip->dac_bits > 8) {
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_DA_DATA_BASE + channel * 2, &lsb);
+ if (ret)
+ return -EIO;
+ }
+
+ ret = chip->bus.read(chip->bus.client,
+ ADT7316_DA_DATA_BASE + 1 + channel * 2, &msb);
+ if (ret)
+ return -EIO;
+
+ data = (msb << offset) + (lsb & ((1 << offset) - 1));
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
+ int channel, const char *buf, size_t len)
+{
+ u8 msb, lsb, offset;
+ unsigned long data;
+ int ret;
+
+ if (channel >= ADT7316_DA_MSB_DATA_REGS ||
+ (channel == 0 &&
+ (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+ (channel == 1 &&
+ (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+ return -EPERM;
+
+ offset = chip->dac_bits - 8;
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret || data >= (1 << chip->dac_bits))
+ return -EINVAL;
+
+ if (chip->dac_bits > 8) {
+ lsb = data & ((1 << offset) - 1);
+ ret = chip->bus.write(chip->bus.client,
+ ADT7316_DA_DATA_BASE + channel * 2, lsb);
+ if (ret)
+ return -EIO;
+ }
+
+ msb = data >> offset;
+ ret = chip->bus.write(chip->bus.client,
+ ADT7316_DA_DATA_BASE + 1 + channel * 2, msb);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt7316_show_DAC_A(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_DAC(chip, 0, buf);
+}
+
+static ssize_t adt7316_store_DAC_A(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_DAC(chip, 0, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A,
+ adt7316_store_DAC_A, 0);
+
+static ssize_t adt7316_show_DAC_B(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_DAC(chip, 1, buf);
+}
+
+static ssize_t adt7316_store_DAC_B(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_DAC(chip, 1, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B,
+ adt7316_store_DAC_B, 0);
+
+static ssize_t adt7316_show_DAC_C(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_DAC(chip, 2, buf);
+}
+
+static ssize_t adt7316_store_DAC_C(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_DAC(chip, 2, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C,
+ adt7316_store_DAC_C, 0);
+
+static ssize_t adt7316_show_DAC_D(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_show_DAC(chip, 3, buf);
+}
+
+static ssize_t adt7316_store_DAC_D(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return adt7316_store_DAC(chip, 3, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D,
+ adt7316_store_DAC_D, 0);
+
+static ssize_t adt7316_show_device_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 id;
+ int ret;
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_ID, &id);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", id);
+}
+
+static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0);
+
+static ssize_t adt7316_show_manufactorer_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 id;
+ int ret;
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_MANUFACTURE_ID, &id);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", id);
+}
+
+static IIO_DEVICE_ATTR(manufactorer_id, S_IRUGO,
+ adt7316_show_manufactorer_id, NULL, 0);
+
+static ssize_t adt7316_show_device_rev(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 rev;
+ int ret;
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_REV, &rev);
+ if (ret)
+ return -EIO;
+
+ return sprintf(buf, "%d\n", rev);
+}
+
+static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0);
+
+static ssize_t adt7316_show_bus_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 stat;
+ int ret;
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_SPI_LOCK_STAT, &stat);
+ if (ret)
+ return -EIO;
+
+ if (stat)
+ return sprintf(buf, "spi\n");
+ else
+ return sprintf(buf, "i2c\n");
+}
+
+static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
+
+static ssize_t adt7316_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7316_show_name, NULL, 0);
+
+static struct attribute *adt7316_attributes[] = {
+ &iio_dev_attr_all_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_enabled.dev_attr.attr,
+ &iio_dev_attr_ad_channel.dev_attr.attr,
+ &iio_dev_attr_all_ad_channels.dev_attr.attr,
+ &iio_dev_attr_disable_averaging.dev_attr.attr,
+ &iio_dev_attr_enable_smbus_timeout.dev_attr.attr,
+ &iio_dev_attr_powerdown.dev_attr.attr,
+ &iio_dev_attr_fast_ad_clock.dev_attr.attr,
+ &iio_dev_attr_da_high_resolution.dev_attr.attr,
+ &iio_dev_attr_enable_proportion_DACA.dev_attr.attr,
+ &iio_dev_attr_enable_proportion_DACB.dev_attr.attr,
+ &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr,
+ &iio_dev_attr_DAC_update_mode.dev_attr.attr,
+ &iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
+ &iio_dev_attr_update_DAC.dev_attr.attr,
+ &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
+ &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
+ &iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
+ &iio_dev_attr_VDD.dev_attr.attr,
+ &iio_dev_attr_in_temp.dev_attr.attr,
+ &iio_dev_attr_ex_temp.dev_attr.attr,
+ &iio_dev_attr_in_temp_offset.dev_attr.attr,
+ &iio_dev_attr_ex_temp_offset.dev_attr.attr,
+ &iio_dev_attr_in_analog_temp_offset.dev_attr.attr,
+ &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr,
+ &iio_dev_attr_DAC_A.dev_attr.attr,
+ &iio_dev_attr_DAC_B.dev_attr.attr,
+ &iio_dev_attr_DAC_C.dev_attr.attr,
+ &iio_dev_attr_DAC_D.dev_attr.attr,
+ &iio_dev_attr_device_id.dev_attr.attr,
+ &iio_dev_attr_manufactorer_id.dev_attr.attr,
+ &iio_dev_attr_device_rev.dev_attr.attr,
+ &iio_dev_attr_bus_type.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adt7316_attribute_group = {
+ .attrs = adt7316_attributes,
+};
+
+static struct attribute *adt7516_attributes[] = {
+ &iio_dev_attr_all_modes.dev_attr.attr,
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_select_ex_temp.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_enabled.dev_attr.attr,
+ &iio_dev_attr_ad_channel.dev_attr.attr,
+ &iio_dev_attr_all_ad_channels.dev_attr.attr,
+ &iio_dev_attr_disable_averaging.dev_attr.attr,
+ &iio_dev_attr_enable_smbus_timeout.dev_attr.attr,
+ &iio_dev_attr_powerdown.dev_attr.attr,
+ &iio_dev_attr_fast_ad_clock.dev_attr.attr,
+ &iio_dev_attr_AIN_internal_Vref.dev_attr.attr,
+ &iio_dev_attr_da_high_resolution.dev_attr.attr,
+ &iio_dev_attr_enable_proportion_DACA.dev_attr.attr,
+ &iio_dev_attr_enable_proportion_DACB.dev_attr.attr,
+ &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr,
+ &iio_dev_attr_DAC_update_mode.dev_attr.attr,
+ &iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
+ &iio_dev_attr_update_DAC.dev_attr.attr,
+ &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
+ &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
+ &iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
+ &iio_dev_attr_VDD.dev_attr.attr,
+ &iio_dev_attr_in_temp.dev_attr.attr,
+ &iio_dev_attr_ex_temp_AIN1.dev_attr.attr,
+ &iio_dev_attr_AIN2.dev_attr.attr,
+ &iio_dev_attr_AIN3.dev_attr.attr,
+ &iio_dev_attr_AIN4.dev_attr.attr,
+ &iio_dev_attr_in_temp_offset.dev_attr.attr,
+ &iio_dev_attr_ex_temp_offset.dev_attr.attr,
+ &iio_dev_attr_in_analog_temp_offset.dev_attr.attr,
+ &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr,
+ &iio_dev_attr_DAC_A.dev_attr.attr,
+ &iio_dev_attr_DAC_B.dev_attr.attr,
+ &iio_dev_attr_DAC_C.dev_attr.attr,
+ &iio_dev_attr_DAC_D.dev_attr.attr,
+ &iio_dev_attr_device_id.dev_attr.attr,
+ &iio_dev_attr_manufactorer_id.dev_attr.attr,
+ &iio_dev_attr_device_rev.dev_attr.attr,
+ &iio_dev_attr_bus_type.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group adt7516_attribute_group = {
+ .attrs = adt7516_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT7316_IN_TEMP_HIGH IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7316_IN_TEMP_LOW IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_HIGH IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_LOW IIO_BUFFER_EVENT_CODE(3)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_FAULT IIO_BUFFER_EVENT_CODE(4)
+#define IIO_EVENT_CODE_ADT7516_AIN1 IIO_BUFFER_EVENT_CODE(5)
+#define IIO_EVENT_CODE_ADT7516_AIN2 IIO_BUFFER_EVENT_CODE(6)
+#define IIO_EVENT_CODE_ADT7516_AIN3 IIO_BUFFER_EVENT_CODE(7)
+#define IIO_EVENT_CODE_ADT7516_AIN4 IIO_BUFFER_EVENT_CODE(8)
+#define IIO_EVENT_CODE_ADT7316_VDD IIO_BUFFER_EVENT_CODE(9)
+
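+/*
+ * Threshold interrupt bottom half: read both interrupt status
+ * registers, push one IIO event per asserted status bit and re-enable
+ * the interrupt line.
+ */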
+static void adt7316_interrupt_bh(struct work_struct *work_s)
+{
+ struct adt7316_chip_info *chip =
+ container_of(work_s, struct adt7316_chip_info, thresh_work);
+ u8 stat1, stat2;
+ int i, ret, count;
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT1, &stat1);
+ if (!ret) {
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ count = 8;
+ else
+ count = 5;
+
+ for (i = 0; i < count; i++) {
+ if (stat1 & (1 << i))
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7316_IN_TEMP_HIGH + i,
+ chip->last_timestamp);
+ }
+ }
+
+ ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2);
+ if (!ret) {
+ if (stat2 & ADT7316_INT_MASK2_VDD)
+ iio_push_event(chip->indio_dev, 0,
+ IIO_EVENT_CODE_ADT7316_VDD,
+ chip->last_timestamp);
+ }
+
+ enable_irq(chip->bus.irq);
+}
+
+static int adt7316_interrupt(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ chip->last_timestamp = timestamp;
+ schedule_work(&chip->thresh_work);
+
+ return 0;
+}
+
+IIO_EVENT_SH(adt7316, &adt7316_interrupt);
+
+/*
+ * Show mask of enabled interrupts in Hex.
+ */
+static ssize_t adt7316_show_int_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "0x%x\n", chip->int_mask);
+}
+
+/*
+ * Set 1 to the mask in Hex to enabled interrupts.
+ */
+static ssize_t adt7316_set_int_mask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ unsigned long data;
+ int ret;
+ u8 mask;
+
+ ret = strict_strtoul(buf, 16, &data);
+ if (ret || data >= ADT7316_VDD_INT_MASK + 1)
+ return -EINVAL;
+
+ if (data & ADT7316_VDD_INT_MASK)
+ mask = 0; /* enable vdd int */
+ else
+ mask = ADT7316_INT_MASK2_VDD; /* disable vdd int */
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK2, mask);
+ if (!ret) {
+ chip->int_mask &= ~ADT7316_VDD_INT_MASK;
+ chip->int_mask |= data & ADT7316_VDD_INT_MASK;
+ }
+
+ if (data & ADT7316_TEMP_AIN_INT_MASK) {
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX)
+ /* mask in reg is opposite, set 1 to disable */
+ mask = (~data) & ADT7316_TEMP_INT_MASK;
+ else
+ /* mask in reg is opposite, set 1 to disable */
+ mask = (~data) & ADT7316_TEMP_AIN_INT_MASK;
+ }
+ ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK1, mask);
+
+ chip->int_mask = mask;
+
+ return len;
+}
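+
+/*
+ * A/D limit registers hold signed 8-bit values, except on the ADT751x
+ * parts when AIN1/AIN2 are selected instead of the external temperature,
+ * in which case the bound is an unsigned 0..255 code.
+ */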
+static inline ssize_t adt7316_show_ad_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 val;
+ int data;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
+ bound_reg > ADT7316_EX_TEMP_LOW)
+ return -EPERM;
+
+ ret = chip->bus.read(chip->bus.client, bound_reg, &val);
+ if (ret)
+ return -EIO;
+
+ data = (int)val;
+
+ if (!((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) {
+ if (data & 0x80)
+ data -= 256;
+ }
+
+ return sprintf(buf, "%d\n", data);
+}
+
+static inline ssize_t adt7316_set_ad_bound(struct device *dev,
+ struct device_attribute *attr,
+ u8 bound_reg,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ long data;
+ u8 val;
+ int ret;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
+ bound_reg > ADT7316_EX_TEMP_LOW)
+ return -EPERM;
+
+ ret = strict_strtol(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
+ (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) {
+ if (data > 255 || data < 0)
+ return -EINVAL;
+ } else {
+ if (data > 127 || data < -128)
+ return -EINVAL;
+
+ if (data < 0)
+ data += 256;
+ }
+
+ val = (u8)data;
+
+ ret = chip->bus.write(chip->bus.client, bound_reg, val);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t adt7316_show_in_temp_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7316_IN_TEMP_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_in_temp_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7316_IN_TEMP_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_in_temp_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7316_IN_TEMP_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_in_temp_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7316_IN_TEMP_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ex_temp_ain1_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7316_EX_TEMP_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ex_temp_ain1_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7316_EX_TEMP_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ex_temp_ain1_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7316_EX_TEMP_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ex_temp_ain1_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7316_EX_TEMP_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain2_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN2_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain2_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN2_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain2_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN2_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain2_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN2_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain3_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN3_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain3_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN3_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain3_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN3_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain3_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN3_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain4_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN4_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain4_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN4_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain4_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adt7316_show_ad_bound(dev, attr,
+ ADT7516_AIN4_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain4_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return adt7316_set_ad_bound(dev, attr,
+ ADT7516_AIN4_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_int_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN));
+}
+
+static ssize_t adt7316_set_int_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+ u8 config1;
+ int ret;
+
+ config1 = chip->config1 & (~ADT7316_INT_EN);
+ if (!memcmp(buf, "1", 1))
+ config1 |= ADT7316_INT_EN;
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+ if (ret)
+ return -EIO;
+
+ chip->config1 = config1;
+
+ return len;
+}
+
+IIO_EVENT_ATTR_SH(int_mask, iio_event_adt7316,
+ adt7316_show_int_mask, adt7316_set_int_mask, 0);
+IIO_EVENT_ATTR_SH(in_temp_high, iio_event_adt7316,
+ adt7316_show_in_temp_high, adt7316_set_in_temp_high, 0);
+IIO_EVENT_ATTR_SH(in_temp_low, iio_event_adt7316,
+ adt7316_show_in_temp_low, adt7316_set_in_temp_low, 0);
+IIO_EVENT_ATTR_SH(ex_temp_high, iio_event_adt7316,
+ adt7316_show_ex_temp_ain1_high,
+ adt7316_set_ex_temp_ain1_high, 0);
+IIO_EVENT_ATTR_SH(ex_temp_low, iio_event_adt7316,
+ adt7316_show_ex_temp_ain1_low,
+ adt7316_set_ex_temp_ain1_low, 0);
+IIO_EVENT_ATTR_SH(ex_temp_ain1_high, iio_event_adt7316,
+ adt7316_show_ex_temp_ain1_high,
+ adt7316_set_ex_temp_ain1_high, 0);
+IIO_EVENT_ATTR_SH(ex_temp_ain1_low, iio_event_adt7316,
+ adt7316_show_ex_temp_ain1_low,
+ adt7316_set_ex_temp_ain1_low, 0);
+IIO_EVENT_ATTR_SH(ain2_high, iio_event_adt7316,
+ adt7316_show_ain2_high, adt7316_set_ain2_high, 0);
+IIO_EVENT_ATTR_SH(ain2_low, iio_event_adt7316,
+ adt7316_show_ain2_low, adt7316_set_ain2_low, 0);
+IIO_EVENT_ATTR_SH(ain3_high, iio_event_adt7316,
+ adt7316_show_ain3_high, adt7316_set_ain3_high, 0);
+IIO_EVENT_ATTR_SH(ain3_low, iio_event_adt7316,
+ adt7316_show_ain3_low, adt7316_set_ain3_low, 0);
+IIO_EVENT_ATTR_SH(ain4_high, iio_event_adt7316,
+ adt7316_show_ain4_high, adt7316_set_ain4_high, 0);
+IIO_EVENT_ATTR_SH(ain4_low, iio_event_adt7316,
+ adt7316_show_ain4_low, adt7316_set_ain4_low, 0);
+IIO_EVENT_ATTR_SH(int_enabled, iio_event_adt7316,
+ adt7316_show_int_enabled, adt7316_set_int_enabled, 0);
+
+static struct attribute *adt7316_event_attributes[] = {
+ &iio_event_attr_int_mask.dev_attr.attr,
+ &iio_event_attr_in_temp_high.dev_attr.attr,
+ &iio_event_attr_in_temp_low.dev_attr.attr,
+ &iio_event_attr_ex_temp_high.dev_attr.attr,
+ &iio_event_attr_ex_temp_low.dev_attr.attr,
+ &iio_event_attr_int_enabled.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adt7316_event_attribute_group = {
+ .attrs = adt7316_event_attributes,
+};
+
+static struct attribute *adt7516_event_attributes[] = {
+ &iio_event_attr_int_mask.dev_attr.attr,
+ &iio_event_attr_in_temp_high.dev_attr.attr,
+ &iio_event_attr_in_temp_low.dev_attr.attr,
+ &iio_event_attr_ex_temp_ain1_high.dev_attr.attr,
+ &iio_event_attr_ex_temp_ain1_low.dev_attr.attr,
+ &iio_event_attr_ain2_high.dev_attr.attr,
+ &iio_event_attr_ain2_low.dev_attr.attr,
+ &iio_event_attr_ain3_high.dev_attr.attr,
+ &iio_event_attr_ain3_low.dev_attr.attr,
+ &iio_event_attr_ain4_high.dev_attr.attr,
+ &iio_event_attr_ain4_low.dev_attr.attr,
+ &iio_event_attr_int_enabled.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adt7516_event_attribute_group = {
+ .attrs = adt7516_event_attributes,
+};
+
+#ifdef CONFIG_PM
+int adt7316_disable(struct device *dev)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return _adt7316_store_enabled(chip, 0);
+}
+EXPORT_SYMBOL(adt7316_disable);
+
+int adt7316_enable(struct device *dev)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = dev_info->dev_data;
+
+ return _adt7316_store_enabled(chip, 1);
+}
+EXPORT_SYMBOL(adt7316_enable);
+#endif
+
+/*
+ * device probe and remove
+ */
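+/*
+ * platform_data carries two unsigned shorts: [0] optional IRQ trigger
+ * flags for the interrupt line and [1] the LDAC pin setting, which
+ * selects how DAC updates are triggered.
+ */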
+int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
+ const char *name)
+{
+ struct adt7316_chip_info *chip;
+ unsigned short *adt7316_platform_data = dev->platform_data;
+ int ret = 0;
+
+ chip = kzalloc(sizeof(struct adt7316_chip_info), GFP_KERNEL);
+
+ if (chip == NULL)
+ return -ENOMEM;
+
+ /* this is only used for device removal purposes */
+ dev_set_drvdata(dev, chip);
+
+ chip->bus = *bus;
+ chip->name = name;
+
+ if (name[4] == '3') {
+ chip->id = ID_ADT7316 + (name[6] - '6');
+ } else if (name[4] == '5') {
+ chip->id = ID_ADT7516 + (name[6] - '6');
+ } else {
+ ret = -ENODEV;
+ goto error_free_chip;
+ }
+
+ chip->ldac_pin = adt7316_platform_data[1];
+ if (chip->ldac_pin) {
+ chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA;
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ chip->config1 |= ADT7516_SEL_AIN3;
+ }
+ chip->int_mask = ADT7316_TEMP_INT_MASK | ADT7316_VDD_INT_MASK;
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+ chip->int_mask |= ADT7516_AIN_INT_MASK;
+
+ chip->indio_dev = iio_allocate_device();
+ if (chip->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_chip;
+ }
+
+ chip->indio_dev->dev.parent = dev;
+ if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+ chip->indio_dev->attrs = &adt7516_attribute_group;
+ chip->indio_dev->event_attrs = &adt7516_event_attribute_group;
+ } else {
+ chip->indio_dev->attrs = &adt7316_attribute_group;
+ chip->indio_dev->event_attrs = &adt7316_event_attribute_group;
+ }
+ chip->indio_dev->dev_data = (void *)chip;
+ chip->indio_dev->driver_module = THIS_MODULE;
+ chip->indio_dev->num_interrupt_lines = 1;
+ chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(chip->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (chip->bus.irq > 0) {
+ if (adt7316_platform_data[0])
+ chip->bus.irq_flags = adt7316_platform_data[0];
+
+ ret = iio_register_interrupt_line(chip->bus.irq,
+ chip->indio_dev,
+ 0,
+ chip->bus.irq_flags,
+ chip->name);
+ if (ret)
+ goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7316.
+		 * All event attributes bind to the same event handler, so the
+		 * handler is registered only once.
+		 */
+ iio_add_event_to_list(&iio_event_adt7316,
+ &chip->indio_dev->interrupts[0]->ev_list);
+
+ INIT_WORK(&chip->thresh_work, adt7316_interrupt_bh);
+
+ if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH)
+ chip->config1 |= ADT7316_INT_POLARITY;
+ }
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_irq;
+ }
+
+ ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, chip->config3);
+ if (ret) {
+ ret = -EIO;
+ goto error_unreg_irq;
+ }
+
+ dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
+ chip->name);
+
+ return 0;
+
+error_unreg_irq:
+ iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+ iio_device_unregister(chip->indio_dev);
+error_free_dev:
+ iio_free_device(chip->indio_dev);
+error_free_chip:
+ kfree(chip);
+
+ return ret;
+}
+EXPORT_SYMBOL(adt7316_probe);
+
+int __devexit adt7316_remove(struct device *dev)
+{
+	struct adt7316_chip_info *chip = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	dev_set_drvdata(dev, NULL);
+	if (chip->bus.irq > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(indio_dev);
+ kfree(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL(adt7316_remove);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital"
+ " temperature sensor, ADC and DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
new file mode 100644
index 000000000000..d34bd679bb4e
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -0,0 +1,33 @@
+/*
+ * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _ADT7316_H_
+#define _ADT7316_H_
+
+#include <linux/types.h>
+
+#define ADT7316_REG_MAX_ADDR 0x3F
+
+struct adt7316_bus {
+ void *client;
+ int irq;
+ int irq_flags;
+ int (*read) (void *client, u8 reg, u8 *data);
+ int (*write) (void *client, u8 reg, u8 val);
+ int (*multi_read) (void *client, u8 first_reg, u8 count, u8 *data);
+ int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
+};
+
+#ifdef CONFIG_PM
+int adt7316_disable(struct device *dev);
+int adt7316_enable(struct device *dev);
+#endif
+int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
+int adt7316_remove(struct device *dev);
+
+#endif
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
new file mode 100644
index 000000000000..9191bd23cc08
--- /dev/null
+++ b/drivers/staging/iio/dac/Kconfig
@@ -0,0 +1,21 @@
+#
+# DAC drivers
+#
+comment "Digital to analog converters"
+
+config AD5624R_SPI
+ tristate "Analog Devices AD5624/44/64R DAC spi driver"
+ depends on SPI
+ help
+	  Say yes here to build support for Analog Devices AD5624R, AD5644R
+	  and AD5664R converters (DACs). This driver uses the common SPI
+	  interface.
+
+config AD5446
+ tristate "Analog Devices AD5444/6, AD5620/40/60 and AD5541A/12A DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5444, AD5446,
+ AD5620, AD5640, AD5660 and AD5541A, AD5512A DACs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5446.
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
new file mode 100644
index 000000000000..7cf331b4e001
--- /dev/null
+++ b/drivers/staging/iio/dac/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O DAC drivers
+#
+
+obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
+obj-$(CONFIG_AD5446) += ad5446.o
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
new file mode 100644
index 000000000000..0f87ecac82fc
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -0,0 +1,323 @@
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#include "ad5446.h"
+
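+/*
+ * Pack a raw sample into the chip specific 16- or 24-bit data frame.
+ * For example, the AD5444 (12 bits, left_shift = 2) places the sample in
+ * D13..D2 with the AD5446_LOAD control bits in D15..D14.
+ */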
+static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
+{
+ st->data.d16 = cpu_to_be16(AD5446_LOAD |
+ (val << st->chip_info->left_shift));
+}
+
+static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
+{
+ st->data.d16 = cpu_to_be16(val << st->chip_info->left_shift);
+}
+
+static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
+{
+ st->data.d16 = cpu_to_be16(AD5620_LOAD |
+ (val << st->chip_info->left_shift));
+}
+
+static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
+{
+ val |= AD5660_LOAD;
+ st->data.d24[0] = (val >> 16) & 0xFF;
+ st->data.d24[1] = (val >> 8) & 0xFF;
+ st->data.d24[2] = val & 0xFF;
+}
+
+static ssize_t ad5446_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad5446_state *st = dev_info->dev_data;
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+
+	if (val < 0 || val > RES_MASK(st->chip_info->bits)) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ mutex_lock(&dev_info->mlock);
+ st->chip_info->store_sample(st, val);
+ ret = spi_sync(st->spi, &st->msg);
+ mutex_unlock(&dev_info->mlock);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0);
+
+static ssize_t ad5446_show_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+ /* Corresponds to Vref / 2^(bits) */
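+	/*
+	 * e.g. a 2500 mV reference on a 12-bit part gives
+	 * 2500000 uV >> 12 ~= 610, reported as "0.610" (mV per LSB)
+	 */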
+ unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
+
+ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
+}
+static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
+
+static ssize_t ad5446_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+
+ return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad5446_show_name, NULL, 0);
+
+static struct attribute *ad5446_attributes[] = {
+ &iio_dev_attr_out0_raw.dev_attr.attr,
+ &iio_dev_attr_out_scale.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5446_attribute_group = {
+ .attrs = ad5446_attributes,
+};
+
+static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
+ [ID_AD5444] = {
+ .bits = 12,
+ .storagebits = 16,
+ .left_shift = 2,
+ .store_sample = ad5446_store_sample,
+ },
+ [ID_AD5446] = {
+ .bits = 14,
+ .storagebits = 16,
+ .left_shift = 0,
+ .store_sample = ad5446_store_sample,
+ },
+ [ID_AD5542A] = {
+ .bits = 16,
+ .storagebits = 16,
+ .left_shift = 0,
+ .store_sample = ad5542_store_sample,
+ },
+ [ID_AD5512A] = {
+ .bits = 12,
+ .storagebits = 16,
+ .left_shift = 4,
+ .store_sample = ad5542_store_sample,
+ },
+ [ID_AD5620_2500] = {
+ .bits = 12,
+ .storagebits = 16,
+ .left_shift = 2,
+ .int_vref_mv = 2500,
+ .store_sample = ad5620_store_sample,
+ },
+ [ID_AD5620_1250] = {
+ .bits = 12,
+ .storagebits = 16,
+ .left_shift = 2,
+ .int_vref_mv = 1250,
+ .store_sample = ad5620_store_sample,
+ },
+ [ID_AD5640_2500] = {
+ .bits = 14,
+ .storagebits = 16,
+ .left_shift = 0,
+ .int_vref_mv = 2500,
+ .store_sample = ad5620_store_sample,
+ },
+ [ID_AD5640_1250] = {
+ .bits = 14,
+ .storagebits = 16,
+ .left_shift = 0,
+ .int_vref_mv = 1250,
+ .store_sample = ad5620_store_sample,
+ },
+ [ID_AD5660_2500] = {
+ .bits = 16,
+ .storagebits = 24,
+ .left_shift = 0,
+ .int_vref_mv = 2500,
+ .store_sample = ad5660_store_sample,
+ },
+ [ID_AD5660_1250] = {
+ .bits = 16,
+ .storagebits = 24,
+ .left_shift = 0,
+ .int_vref_mv = 1250,
+ .store_sample = ad5660_store_sample,
+ },
+};
+
+static int __devinit ad5446_probe(struct spi_device *spi)
+{
+ struct ad5446_state *st;
+ int ret, voltage_uv = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ st->chip_info =
+ &ad5446_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ spi_set_drvdata(spi, st);
+
+ st->spi = spi;
+
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+
+	/* Establish that the iio_dev is a child of the spi device */
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->attrs = &ad5446_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Setup default message */
+
+ st->xfer.tx_buf = &st->data;
+ st->xfer.len = st->chip_info->storagebits / 8;
+
+ spi_message_init(&st->msg);
+ spi_message_add_tail(&st->xfer, &st->msg);
+
+ switch (spi_get_device_id(spi)->driver_data) {
+ case ID_AD5620_2500:
+ case ID_AD5620_1250:
+ case ID_AD5640_2500:
+ case ID_AD5640_1250:
+ case ID_AD5660_2500:
+ case ID_AD5660_1250:
+ st->vref_mv = st->chip_info->int_vref_mv;
+ break;
+ default:
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ dev_warn(&spi->dev,
+ "reference voltage unspecified\n");
+ }
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_device;
+
+ return 0;
+
+error_free_device:
+ iio_free_device(st->indio_dev);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad5446_remove(struct spi_device *spi)
+{
+ struct ad5446_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ kfree(st);
+ return 0;
+}
+
+static const struct spi_device_id ad5446_id[] = {
+ {"ad5444", ID_AD5444},
+ {"ad5446", ID_AD5446},
+ {"ad5542a", ID_AD5542A},
+ {"ad5512a", ID_AD5512A},
+	{"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
+	{"ad5620-1250", ID_AD5620_1250}, /* part numbers may look different */
+ {"ad5640-2500", ID_AD5640_2500},
+ {"ad5640-1250", ID_AD5640_1250},
+ {"ad5660-2500", ID_AD5660_2500},
+ {"ad5660-1250", ID_AD5660_1250},
+ {}
+};
+
+static struct spi_driver ad5446_driver = {
+ .driver = {
+ .name = "ad5446",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5446_probe,
+ .remove = __devexit_p(ad5446_remove),
+ .id_table = ad5446_id,
+};
+
+static int __init ad5446_init(void)
+{
+ return spi_register_driver(&ad5446_driver);
+}
+module_init(ad5446_init);
+
+static void __exit ad5446_exit(void)
+{
+ spi_unregister_driver(&ad5446_driver);
+}
+module_exit(ad5446_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5446");
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
new file mode 100644
index 000000000000..902542e22c4a
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -0,0 +1,96 @@
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DAC_AD5446_H_
+#define IIO_DAC_AD5446_H_
+
+/* DAC Control Bits */
+
+#define AD5446_LOAD (0x0 << 14) /* Load and update */
+#define AD5446_SDO_DIS (0x1 << 14) /* Disable SDO */
+#define AD5446_NOP (0x2 << 14) /* No operation */
+#define AD5446_CLK_RISING (0x3 << 14) /* Clock data on rising edge */
+
+#define AD5620_LOAD (0x0 << 14) /* Load and update Norm Operation*/
+#define AD5620_PWRDWN_1k (0x1 << 14) /* Power-down: 1kOhm to GND */
+#define AD5620_PWRDWN_100k (0x2 << 14) /* Power-down: 100kOhm to GND */
+#define AD5620_PWRDWN_TRISTATE (0x3 << 14) /* Power-down: Three-state */
+
+#define AD5660_LOAD (0x0 << 16) /* Load and update Norm Operation*/
+#define AD5660_PWRDWN_1k (0x1 << 16) /* Power-down: 1kOhm to GND */
+#define AD5660_PWRDWN_100k (0x2 << 16) /* Power-down: 100kOhm to GND */
+#define AD5660_PWRDWN_TRISTATE (0x3 << 16) /* Power-down: Three-state */
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
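+/* e.g. RES_MASK(12) == 0x0fff */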
+
+/**
+ * struct ad5446_state - driver instance specific data
+ * @indio_dev: the industrial I/O device
+ * @spi: spi_device
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator
+ * @poll_work: bottom half of polling interrupt handler
+ * @vref_mv: actual reference voltage used
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @data: spi transmit buffer
+ */
+
+struct ad5446_state {
+ struct iio_dev *indio_dev;
+ struct spi_device *spi;
+ const struct ad5446_chip_info *chip_info;
+ struct regulator *reg;
+ struct work_struct poll_work;
+ unsigned short vref_mv;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ union {
+ unsigned short d16;
+ unsigned char d24[3];
+ } data;
+};
+
+/**
+ * struct ad5446_chip_info - chip specific information
+ * @bits: accuracy of the DAC in bits
+ * @storagebits: number of bits written to the DAC
+ * @left_shift: number of bits the datum must be shifted
+ * @int_vref_mv: AD5620/40/60: the internal reference voltage
+ * @store_sample: chip specific helper function to store the datum
+ */
+
+struct ad5446_chip_info {
+ u8 bits;
+ u8 storagebits;
+ u8 left_shift;
+ u16 int_vref_mv;
+ void (*store_sample) (struct ad5446_state *st, unsigned val);
+};
+
+/**
+ * ad5446_supported_device_ids:
+ * The AD5620/40/60 parts are available with different fixed internal
+ * reference voltage options. The actual part numbers look different
+ * (and a bit cryptic); this naming style simply makes clear which
+ * variants are supported here.
+ */
+
+enum ad5446_supported_device_ids {
+ ID_AD5444,
+ ID_AD5446,
+ ID_AD5542A,
+ ID_AD5512A,
+ ID_AD5620_2500,
+ ID_AD5620_1250,
+ ID_AD5640_2500,
+ ID_AD5640_1250,
+ ID_AD5660_2500,
+ ID_AD5660_1250,
+};
+
+#endif /* IIO_DAC_AD5446_H_ */
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
new file mode 100644
index 000000000000..ce518be652b7
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -0,0 +1,21 @@
+#ifndef SPI_AD5624R_H_
+#define SPI_AD5624R_H_
+
+#define AD5624R_DAC_CHANNELS 4
+
+#define AD5624R_ADDR_DAC0 0x0
+#define AD5624R_ADDR_DAC1 0x1
+#define AD5624R_ADDR_DAC2 0x2
+#define AD5624R_ADDR_DAC3 0x3
+#define AD5624R_ADDR_ALL_DAC 0x7
+
+#define AD5624R_CMD_WRITE_INPUT_N 0x0
+#define AD5624R_CMD_UPDATE_DAC_N 0x1
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5624R_CMD_POWERDOWN_DAC 0x4
+#define AD5624R_CMD_RESET 0x5
+#define AD5624R_CMD_LDAC_SETUP 0x6
+#define AD5624R_CMD_INTERNAL_REFER_SETUP 0x7
+
+#endif
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
new file mode 100644
index 000000000000..2b1c6dde4fdd
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -0,0 +1,300 @@
+/*
+ * AD5624R, AD5644R, AD5664R digital-to-analog converter SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+#include "ad5624r.h"
+
+/**
+ * struct ad5624r_state - device related storage
+ * @indio_dev:		associated industrial IO device
+ * @us:			spi device
+ * @data_len:		number of data bits for the given part (12, 14 or 16)
+ * @ldac_mode:		cached LDAC setup register value
+ * @dac_power_mode:	cached power mode per DAC channel
+ * @internal_ref:	whether the internal reference is enabled
+ */
+struct ad5624r_state {
+ struct iio_dev *indio_dev;
+ struct spi_device *us;
+ int data_len;
+ int ldac_mode;
+ int dac_power_mode[AD5624R_DAC_CHANNELS];
+ int internal_ref;
+};
+
+static int ad5624r_spi_write(struct spi_device *spi,
+ u8 cmd, u8 addr, u16 val, u8 len)
+{
+ u32 data;
+ u8 msg[3];
+
+	/*
+	 * The input shift register is 24 bits wide. The first two bits are
+	 * don't care bits. The next three are the command bits, C2 to C0,
+	 * followed by the 3-bit DAC address, A2 to A0, and then the 16-, 14-,
+	 * or 12-bit data-word. The data-word comprises the 16-, 14-, or
+	 * 12-bit input code followed by 0, 2, or 4 don't care bits, for the
+	 * AD5664R, AD5644R, and AD5624R, respectively.
+	 */
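+	/*
+	 * Worked example (AD5624R, len = 12): writing code 0x123 to DAC0
+	 * with the "write and update" command (0x3) yields
+	 * data = 0x181230, i.e. the bytes 0x18 0x12 0x30 on the wire.
+	 */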
+ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+ msg[0] = data >> 16;
+ msg[1] = data >> 8;
+ msg[2] = data;
+
+ return spi_write(spi, msg, 3);
+}
+
+static ssize_t ad5624r_write_dac(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ long readin;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = strict_strtol(buf, 10, &readin);
+ if (ret)
+ return ret;
+
+ ret = ad5624r_spi_write(st->us, AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+ this_attr->address, readin, st->data_len);
+ return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_ldac_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+
+ return sprintf(buf, "%x\n", st->ldac_mode);
+}
+
+static ssize_t ad5624r_write_ldac_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ long readin;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+
+ ret = strict_strtol(buf, 16, &readin);
+ if (ret)
+ return ret;
+
+ ret = ad5624r_spi_write(st->us, AD5624R_CMD_LDAC_SETUP, 0,
+ readin & 0xF, 16);
+ st->ldac_mode = readin & 0xF;
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_dac_power_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sprintf(buf, "%d\n", st->dac_power_mode[this_attr->address]);
+}
+
+static ssize_t ad5624r_write_dac_power_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ long readin;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = strict_strtol(buf, 10, &readin);
+ if (ret)
+ return ret;
+
+ ret = ad5624r_spi_write(st->us, AD5624R_CMD_POWERDOWN_DAC, 0,
+ ((readin & 0x3) << 4) |
+ (1 << this_attr->address), 16);
+
+ st->dac_power_mode[this_attr->address] = readin & 0x3;
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_internal_ref_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+
+ return sprintf(buf, "%d\n", st->internal_ref);
+}
+
+static ssize_t ad5624r_write_internal_ref_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ long readin;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5624r_state *st = indio_dev->dev_data;
+
+ ret = strict_strtol(buf, 10, &readin);
+ if (ret)
+ return ret;
+
+ ret = ad5624r_spi_write(st->us, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
+ !!readin, 16);
+
+ st->internal_ref = !!readin;
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
+static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
+static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
+static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
+
+static IIO_DEVICE_ATTR(ldac_mode, S_IRUGO | S_IWUSR, ad5624r_read_ldac_mode,
+ ad5624r_write_ldac_mode, 0);
+static IIO_DEVICE_ATTR(internal_ref, S_IRUGO | S_IWUSR,
+ ad5624r_read_internal_ref_mode,
+ ad5624r_write_internal_ref_mode, 0);
+
+#define IIO_DEV_ATTR_DAC_POWER_MODE(_num, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dac_power_mode_##_num, S_IRUGO | S_IWUSR, _show, _store, _addr)
+
+static IIO_DEV_ATTR_DAC_POWER_MODE(0, ad5624r_read_dac_power_mode,
+ ad5624r_write_dac_power_mode, 0);
+static IIO_DEV_ATTR_DAC_POWER_MODE(1, ad5624r_read_dac_power_mode,
+ ad5624r_write_dac_power_mode, 1);
+static IIO_DEV_ATTR_DAC_POWER_MODE(2, ad5624r_read_dac_power_mode,
+ ad5624r_write_dac_power_mode, 2);
+static IIO_DEV_ATTR_DAC_POWER_MODE(3, ad5624r_read_dac_power_mode,
+ ad5624r_write_dac_power_mode, 3);
+
+static struct attribute *ad5624r_attributes[] = {
+ &iio_dev_attr_out0_raw.dev_attr.attr,
+ &iio_dev_attr_out1_raw.dev_attr.attr,
+ &iio_dev_attr_out2_raw.dev_attr.attr,
+ &iio_dev_attr_out3_raw.dev_attr.attr,
+ &iio_dev_attr_dac_power_mode_0.dev_attr.attr,
+ &iio_dev_attr_dac_power_mode_1.dev_attr.attr,
+ &iio_dev_attr_dac_power_mode_2.dev_attr.attr,
+ &iio_dev_attr_dac_power_mode_3.dev_attr.attr,
+ &iio_dev_attr_ldac_mode.dev_attr.attr,
+ &iio_dev_attr_internal_ref.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5624r_attribute_group = {
+ .attrs = ad5624r_attributes,
+};
+
+static int __devinit ad5624r_probe(struct spi_device *spi)
+{
+ struct ad5624r_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ st->data_len = spi_get_device_id(spi)->driver_data;
+
+ st->us = spi;
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 0;
+ st->indio_dev->event_attrs = NULL;
+
+ st->indio_dev->attrs = &ad5624r_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ spi->mode = SPI_MODE_0;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->indio_dev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad5624r_remove(struct spi_device *spi)
+{
+ struct ad5624r_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->indio_dev);
+ kfree(st);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5624r_id[] = {
+ {"ad5624r", 12},
+ {"ad5644r", 14},
+ {"ad5664r", 16},
+ {}
+};
+
+static struct spi_driver ad5624r_driver = {
+ .driver = {
+ .name = "ad5624r",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5624r_probe,
+ .remove = __devexit_p(ad5624r_remove),
+ .id_table = ad5624r_id,
+};
+
+static __init int ad5624r_spi_init(void)
+{
+ return spi_register_driver(&ad5624r_driver);
+}
+module_init(ad5624r_spi_init);
+
+static __exit void ad5624r_spi_exit(void)
+{
+ spi_unregister_driver(&ad5624r_driver);
+}
+module_exit(ad5624r_spi_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5624/44/64R DAC spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/dac.h b/drivers/staging/iio/dac/dac.h
new file mode 100644
index 000000000000..1d82f353241c
--- /dev/null
+++ b/drivers/staging/iio/dac/dac.h
@@ -0,0 +1,6 @@
+/*
+ * dac.h - sysfs attributes associated with DACs
+ */
+
+#define IIO_DEV_ATTR_OUT_RAW(_num, _store, _addr) \
+ IIO_DEVICE_ATTR(out##_num##_raw, S_IWUSR, NULL, _store, _addr)
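+
+/*
+ * Example: IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0) declares a write-only
+ * sysfs attribute named "out0_raw" whose store callback is ad5446_write.
+ */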
diff --git a/drivers/staging/iio/dds/Kconfig b/drivers/staging/iio/dds/Kconfig
new file mode 100644
index 000000000000..a047da62daf0
--- /dev/null
+++ b/drivers/staging/iio/dds/Kconfig
@@ -0,0 +1,56 @@
+#
+# Direct Digital Synthesis drivers
+#
+comment "Direct Digital Synthesis"
+
+config AD5930
+	tristate "Analog Devices ad5930/5932 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chips
+	  AD5930 and AD5932; the driver provides direct access via sysfs.
+
+config AD9832
+	tristate "Analog Devices ad9832/5 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chips
+	  AD9832 and AD9835; the driver provides direct access via sysfs.
+
+config AD9834
+	tristate "Analog Devices ad9833/4 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chips
+	  AD9833 and AD9834; the driver provides direct access via sysfs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad9834.
+
+config AD9850
+	tristate "Analog Devices ad9850/1 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chips
+	  AD9850 and AD9851; the driver provides direct access via sysfs.
+
+config AD9852
+	tristate "Analog Devices ad9852/4 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chips
+	  AD9852 and AD9854; the driver provides direct access via sysfs.
+
+config AD9910
+	tristate "Analog Devices ad9910 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chip
+	  AD9910; the driver provides direct access via sysfs.
+
+config AD9951
+	tristate "Analog Devices ad9951 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices DDS chip
+	  AD9951; the driver provides direct access via sysfs.
diff --git a/drivers/staging/iio/dds/Makefile b/drivers/staging/iio/dds/Makefile
new file mode 100644
index 000000000000..147746176b9b
--- /dev/null
+++ b/drivers/staging/iio/dds/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Direct Digital Synthesis drivers
+#
+
+obj-$(CONFIG_AD5930) += ad5930.o
+obj-$(CONFIG_AD9832) += ad9832.o
+obj-$(CONFIG_AD9834) += ad9834.o
+obj-$(CONFIG_AD9850) += ad9850.o
+obj-$(CONFIG_AD9852) += ad9852.o
+obj-$(CONFIG_AD9910) += ad9910.o
+obj-$(CONFIG_AD9951) += ad9951.o
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
new file mode 100644
index 000000000000..f80039c5d539
--- /dev/null
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -0,0 +1,170 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad5930
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad5930"
+
+#define value_mask (u16)0xf000
+#define addr_shift 12
+
+/* Register format: 4 bits addr + 12 bits value */
+struct ad5930_config {
+ u16 control;
+ u16 incnum;
+ u16 frqdelt[2];
+ u16 incitvl;
+ u16 buritvl;
+ u16 strtfrq[2];
+};
+
+struct ad5930_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad5930_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+	struct spi_transfer xfer = { 0 };
+	int ret;
+	struct ad5930_config *config = (struct ad5930_config *)buf;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad5930_state *st = idev->dev_data;
+
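+	/*
+	 * User space writes a raw struct ad5930_config; keep the lower
+	 * 12 data bits of each field and stamp the 4-bit register address
+	 * expected by the AD5930 into the top nibble.
+	 */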
+	config->control &= ~value_mask;
+	config->incnum = (config->incnum & ~value_mask) | (1 << addr_shift);
+	config->frqdelt[0] = (config->frqdelt[0] & ~value_mask) | (2 << addr_shift);
+	config->frqdelt[1] = (config->frqdelt[1] & ~value_mask) | (3 << addr_shift);
+	config->incitvl = (config->incitvl & ~value_mask) | (4 << addr_shift);
+	config->buritvl = (config->buritvl & ~value_mask) | (8 << addr_shift);
+	config->strtfrq[0] = (config->strtfrq[0] & ~value_mask) | (0xc << addr_shift);
+	config->strtfrq[1] = (config->strtfrq[1] & ~value_mask) | (0xd << addr_shift);
+
+ xfer.len = len;
+ xfer.tx_buf = config;
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad5930_set_parameter, 0);
+
+static struct attribute *ad5930_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5930_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad5930_attributes,
+};
+
+static int __devinit ad5930_probe(struct spi_device *spi)
+{
+ struct ad5930_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad5930_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 16;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad5930_remove(struct spi_device *spi)
+{
+ struct ad5930_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad5930_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5930_probe,
+ .remove = __devexit_p(ad5930_remove),
+};
+
+static __init int ad5930_spi_init(void)
+{
+ return spi_register_driver(&ad5930_driver);
+}
+module_init(ad5930_spi_init);
+
+static __exit void ad5930_spi_exit(void)
+{
+ spi_unregister_driver(&ad5930_driver);
+}
+module_exit(ad5930_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad5930 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
new file mode 100644
index 000000000000..e911893b3db0
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -0,0 +1,264 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9832
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9832"
+
+#define value_mask (u16)0xf000
+#define cmd_shift 12
+#define add_shift 8
+#define AD9832_SYNC (1 << 13)
+#define AD9832_SELSRC (1 << 12)
+#define AD9832_SLEEP (1 << 13)
+#define AD9832_RESET (1 << 12)
+#define AD9832_CLR (1 << 11)
+
+#define ADD_FREQ0LL 0x0
+#define ADD_FREQ0HL 0x1
+#define ADD_FREQ0LM 0x2
+#define ADD_FREQ0HM 0x3
+#define ADD_FREQ1LL 0x4
+#define ADD_FREQ1HL 0x5
+#define ADD_FREQ1LM 0x6
+#define ADD_FREQ1HM 0x7
+#define ADD_PHASE0L 0x8
+#define ADD_PHASE0H 0x9
+#define ADD_PHASE1L 0xa
+#define ADD_PHASE1H 0xb
+#define ADD_PHASE2L 0xc
+#define ADD_PHASE2H 0xd
+#define ADD_PHASE3L 0xe
+#define ADD_PHASE3H 0xf
+
+#define CMD_PHA8BITSW 0x1
+#define CMD_PHA16BITSW 0x0
+#define CMD_FRE8BITSW 0x3
+#define CMD_FRE16BITSW 0x2
+#define CMD_SELBITSCTL 0x6
+
+struct ad9832_setting {
+ u16 freq0[4];
+ u16 freq1[4];
+ u16 phase0[2];
+ u16 phase1[2];
+ u16 phase2[2];
+ u16 phase3[2];
+};
+
+struct ad9832_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad9832_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+	struct spi_transfer xfer = { 0 };
+ int ret;
+ struct ad9832_setting config;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad9832_state *st = idev->dev_data;
+
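+	/*
+	 * The attribute is written as a raw byte array: buf[0..7] carry the
+	 * FREQ0/FREQ1 register bytes (LL, HL, LM, HM order) and buf[9..16]
+	 * the PHASE0..PHASE3 register bytes (L, H order); buf[8] is not
+	 * used by this driver.
+	 */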
+	config.freq0[0] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ0LL << add_shift | buf[0]);
+	config.freq0[1] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ0HL << add_shift | buf[1]);
+	config.freq0[2] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ0LM << add_shift | buf[2]);
+	config.freq0[3] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ0HM << add_shift | buf[3]);
+	config.freq1[0] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ1LL << add_shift | buf[4]);
+	config.freq1[1] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ1HL << add_shift | buf[5]);
+	config.freq1[2] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ1LM << add_shift | buf[6]);
+	config.freq1[3] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ1HM << add_shift | buf[7]);
+
+	config.phase0[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE0L << add_shift | buf[9]);
+	config.phase0[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE0H << add_shift | buf[10]);
+	config.phase1[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE1L << add_shift | buf[11]);
+	config.phase1[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE1H << add_shift | buf[12]);
+	config.phase2[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE2L << add_shift | buf[13]);
+	config.phase2[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE2H << add_shift | buf[14]);
+	config.phase3[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE3L << add_shift | buf[15]);
+	config.phase3[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE3H << add_shift | buf[16]);
+
+	xfer.len = sizeof(config);
+ xfer.tx_buf = &config;
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9832_set_parameter, 0);
+
+static struct attribute *ad9832_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9832_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad9832_attributes,
+};
+
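+/*
+ * Power-up sequence: assert SLEEP/RESET/CLR, program SYNC and SELSRC,
+ * hand FSELECT/PSELECT control to the serial interface (CMD_SELBITSCTL),
+ * then release SLEEP/RESET/CLR.
+ */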
+static void ad9832_init(struct ad9832_state *st)
+{
+ struct spi_message msg;
+	struct spi_transfer xfer = { 0 };
+ int ret;
+ u16 config = 0;
+
+ config = 0x3 << 14 | AD9832_SLEEP | AD9832_RESET | AD9832_CLR;
+
+ mutex_lock(&st->lock);
+
+ xfer.len = 2;
+ xfer.tx_buf = &config;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ config = 0x2 << 14 | AD9832_SYNC | AD9832_SELSRC;
+ xfer.len = 2;
+ xfer.tx_buf = &config;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ config = CMD_SELBITSCTL << cmd_shift;
+ xfer.len = 2;
+ xfer.tx_buf = &config;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ config = 0x3 << 14;
+
+ xfer.len = 2;
+ xfer.tx_buf = &config;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+static int __devinit ad9832_probe(struct spi_device *spi)
+{
+ struct ad9832_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad9832_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 16;
+ spi_setup(spi);
+ ad9832_init(st);
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9832_remove(struct spi_device *spi)
+{
+ struct ad9832_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad9832_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9832_probe,
+ .remove = __devexit_p(ad9832_remove),
+};
+
+static __init int ad9832_spi_init(void)
+{
+ return spi_register_driver(&ad9832_driver);
+}
+module_init(ad9832_spi_init);
+
+static __exit void ad9832_spi_exit(void)
+{
+ spi_unregister_driver(&ad9832_driver);
+}
+module_exit(ad9832_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9832 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
new file mode 100644
index 000000000000..eb1a681874f9
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -0,0 +1,477 @@
+/*
+ * AD9834 SPI DDS driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <asm/div64.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dds.h"
+
+#include "ad9834.h"
+
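+/*
+ * The output frequency is programmed as a 28-bit tuning word:
+ * freqreg = fout * 2^28 / mclk. For example, with mclk = 25 MHz a 1 kHz
+ * output corresponds to freqreg = 1000 * 2^28 / 25000000 ~= 10737.
+ */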
+static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
+{
+ unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS);
+ do_div(freqreg, mclk);
+ return freqreg;
+}
+
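+/*
+ * With the B28 control bit set (see ad9834_probe()), the 28-bit tuning
+ * word is transferred as two consecutive 14-bit writes to the selected
+ * frequency register, LSBs first.
+ */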
+static int ad9834_write_frequency(struct ad9834_state *st,
+ unsigned long addr, unsigned long fout)
+{
+ unsigned long regval;
+
+ if (fout > (st->mclk / 2))
+ return -EINVAL;
+
+ regval = ad9834_calc_freqreg(st->mclk, fout);
+
+ st->freq_data[0] = cpu_to_be16(addr | (regval &
+ RES_MASK(AD9834_FREQ_BITS / 2)));
+ st->freq_data[1] = cpu_to_be16(addr | ((regval >>
+ (AD9834_FREQ_BITS / 2)) &
+ RES_MASK(AD9834_FREQ_BITS / 2)));
+
+	return spi_sync(st->spi, &st->freq_msg);
+}
+
+static int ad9834_write_phase(struct ad9834_state *st,
+ unsigned long addr, unsigned long phase)
+{
+ if (phase > (1 << AD9834_PHASE_BITS))
+ return -EINVAL;
+ st->data = cpu_to_be16(addr | phase);
+
+ return spi_sync(st->spi, &st->msg);
+}
+
+static ssize_t ad9834_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = dev_info->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+	unsigned long val;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+
+ mutex_lock(&dev_info->mlock);
+ switch (this_attr->address) {
+ case AD9834_REG_FREQ0:
+ case AD9834_REG_FREQ1:
+ ret = ad9834_write_frequency(st, this_attr->address, val);
+ break;
+ case AD9834_REG_PHASE0:
+ case AD9834_REG_PHASE1:
+ ret = ad9834_write_phase(st, this_attr->address, val);
+ break;
+ case AD9834_OPBITEN:
+ if (st->control & AD9834_MODE) {
+			ret = -EINVAL; /* AD9834 reserved mode */
+ break;
+ }
+
+ if (val)
+ st->control |= AD9834_OPBITEN;
+ else
+ st->control &= ~AD9834_OPBITEN;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_PIN_SW:
+ if (val)
+ st->control |= AD9834_PIN_SW;
+ else
+ st->control &= ~AD9834_PIN_SW;
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_FSEL:
+ case AD9834_PSEL:
+ if (val == 0)
+ st->control &= ~(this_attr->address | AD9834_PIN_SW);
+ else if (val == 1) {
+ st->control |= this_attr->address;
+ st->control &= ~AD9834_PIN_SW;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_RESET:
+ if (val)
+ st->control &= ~AD9834_RESET;
+ else
+ st->control |= AD9834_RESET;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ mutex_unlock(&dev_info->mlock);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ad9834_store_wavetype(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = dev_info->dev_data;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret = 0;
+ bool is_ad9833 = st->devid == ID_AD9833;
+
+ mutex_lock(&dev_info->mlock);
+
+ switch (this_attr->address) {
+ case 0:
+ if (sysfs_streq(buf, "sine")) {
+ st->control &= ~AD9834_MODE;
+ if (is_ad9833)
+ st->control &= ~AD9834_OPBITEN;
+ } else if (sysfs_streq(buf, "triangle")) {
+ if (is_ad9833) {
+ st->control &= ~AD9834_OPBITEN;
+ st->control |= AD9834_MODE;
+ } else if (st->control & AD9834_OPBITEN) {
+				ret = -EINVAL; /* AD9834 reserved mode */
+ } else {
+ st->control |= AD9834_MODE;
+ }
+ } else if (is_ad9833 && sysfs_streq(buf, "square")) {
+ st->control &= ~AD9834_MODE;
+ st->control |= AD9834_OPBITEN;
+ } else {
+ ret = -EINVAL;
+ }
+
+ break;
+ case 1:
+ if (sysfs_streq(buf, "square") &&
+ !(st->control & AD9834_MODE)) {
+ st->control &= ~AD9834_MODE;
+ st->control |= AD9834_OPBITEN;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ }
+ mutex_unlock(&dev_info->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad9834_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+
+ return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad9834_show_name, NULL, 0);
+
+static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+ char *str;
+
+ if (st->devid == ID_AD9833)
+ str = "sine triangle square";
+ else if (st->control & AD9834_OPBITEN)
+ str = "sine";
+ else
+ str = "sine triangle";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+
+static IIO_DEVICE_ATTR(dds0_out0_wavetype_available, S_IRUGO,
+ ad9834_show_out0_wavetype_available, NULL, 0);
+
+static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+ char *str;
+
+ if (st->control & AD9834_MODE)
+ str = "";
+ else
+ str = "square";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static IIO_DEVICE_ATTR(dds0_out1_wavetype_available, S_IRUGO,
+ ad9834_show_out1_wavetype_available, NULL, 0);
+
+/**
+ * see dds.h for further information
+ */
+
+static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
+static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
+static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
+static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
+
+static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
+static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
+static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
+static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
+
+static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+ ad9834_write, AD9834_PIN_SW);
+static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
+static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
+ ad9834_write, AD9834_OPBITEN);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);
+
+static struct attribute *ad9834_attributes[] = {
+ &iio_dev_attr_dds0_freq0.dev_attr.attr,
+ &iio_dev_attr_dds0_freq1.dev_attr.attr,
+ &iio_const_attr_dds0_freq_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_phase0.dev_attr.attr,
+ &iio_dev_attr_dds0_phase1.dev_attr.attr,
+ &iio_const_attr_dds0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_dds0_pincontrol_en.dev_attr.attr,
+ &iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_dds0_out_enable.dev_attr.attr,
+ &iio_dev_attr_dds0_out1_enable.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr,
+ &iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
+ &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ NULL,
+};
+
+static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+
+ mode_t mode = attr->mode;
+
+ if (st->devid == ID_AD9834)
+ return mode;
+
+ if ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
+ (attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) ||
+ (attr ==
+ &iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr))
+ mode = 0;
+
+ return mode;
+}
+
+static const struct attribute_group ad9834_attribute_group = {
+ .attrs = ad9834_attributes,
+ .is_visible = ad9834_attr_is_visible,
+};
+
+static int __devinit ad9834_probe(struct spi_device *spi)
+{
+ struct ad9834_platform_data *pdata = spi->dev.platform_data;
+ struct ad9834_state *st;
+ int ret;
+
+ if (!pdata) {
+ dev_dbg(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+ }
+
+ st->mclk = pdata->mclk;
+
+ spi_set_drvdata(spi, st);
+
+ st->spi = spi;
+ st->devid = spi_get_device_id(spi)->driver_data;
+
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->attrs = &ad9834_attribute_group;
+ st->indio_dev->dev_data = (void *) st;
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Setup default messages */
+
+ st->xfer.tx_buf = &st->data;
+ st->xfer.len = 2;
+
+ spi_message_init(&st->msg);
+ spi_message_add_tail(&st->xfer, &st->msg);
+
+ st->freq_xfer[0].tx_buf = &st->freq_data[0];
+ st->freq_xfer[0].len = 2;
+ st->freq_xfer[0].cs_change = 1;
+ st->freq_xfer[1].tx_buf = &st->freq_data[1];
+ st->freq_xfer[1].len = 2;
+
+ spi_message_init(&st->freq_msg);
+ spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
+ spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
+
+ st->control = AD9834_B28 | AD9834_RESET;
+
+ if (!pdata->en_div2)
+ st->control |= AD9834_DIV2;
+
+ if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
+ st->control |= AD9834_SIGN_PIB;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret) {
+ dev_err(&spi->dev, "device init failed\n");
+ goto error_free_device;
+ }
+
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
+ if (ret)
+ goto error_free_device;
+
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
+ if (ret)
+ goto error_free_device;
+
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
+ if (ret)
+ goto error_free_device;
+
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
+ if (ret)
+ goto error_free_device;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_device;
+
+ return 0;
+
+error_free_device:
+ iio_free_device(st->indio_dev);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9834_remove(struct spi_device *spi)
+{
+ struct ad9834_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->indio_dev);
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ kfree(st);
+ return 0;
+}
+
+static const struct spi_device_id ad9834_id[] = {
+ {"ad9833", ID_AD9833},
+ {"ad9834", ID_AD9834},
+ {}
+};
+
+static struct spi_driver ad9834_driver = {
+ .driver = {
+ .name = "ad9834",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9834_probe,
+ .remove = __devexit_p(ad9834_remove),
+ .id_table = ad9834_id,
+};
+
+static int __init ad9834_init(void)
+{
+ return spi_register_driver(&ad9834_driver);
+}
+module_init(ad9834_init);
+
+static void __exit ad9834_exit(void)
+{
+ spi_unregister_driver(&ad9834_driver);
+}
+module_exit(ad9834_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD9833/AD9834 DDS");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad9834");
diff --git a/drivers/staging/iio/dds/ad9834.h b/drivers/staging/iio/dds/ad9834.h
new file mode 100644
index 000000000000..0fc3b8859e9e
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9834.h
@@ -0,0 +1,112 @@
+/*
+ * AD9834 SPI DDS driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DDS_AD9834_H_
+#define IIO_DDS_AD9834_H_
+
+/* Registers */
+
+#define AD9834_REG_CMD (0 << 14)
+#define AD9834_REG_FREQ0 (1 << 14)
+#define AD9834_REG_FREQ1 (2 << 14)
+#define AD9834_REG_PHASE0 (6 << 13)
+#define AD9834_REG_PHASE1 (7 << 13)
+
+/* Command Control Bits */
+
+#define AD9834_B28 (1 << 13)
+#define AD9834_HLB (1 << 12)
+#define AD9834_FSEL (1 << 11)
+#define AD9834_PSEL (1 << 10)
+#define AD9834_PIN_SW (1 << 9)
+#define AD9834_RESET (1 << 8)
+#define AD9834_SLEEP1 (1 << 7)
+#define AD9834_SLEEP12 (1 << 6)
+#define AD9834_OPBITEN (1 << 5)
+#define AD9834_SIGN_PIB (1 << 4)
+#define AD9834_DIV2 (1 << 3)
+#define AD9834_MODE (1 << 1)
+
+#define AD9834_FREQ_BITS 28
+#define AD9834_PHASE_BITS 12
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+/**
+ * struct ad9834_state - driver instance specific data
+ * @indio_dev: the industrial I/O device
+ * @spi: spi_device
+ * @reg: supply regulator
+ * @mclk: external master clock
+ * @control: cached control word
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @data: spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9834_state {
+ struct iio_dev *indio_dev;
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned int mclk;
+ unsigned short control;
+ unsigned short devid;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[2];
+ struct spi_message freq_msg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ unsigned short data ____cacheline_aligned;
+	unsigned short freq_data[2];
+};
+
+
+/*
+ * TODO: struct ad9834_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad9834_platform_data - platform specific information
+ * @mclk: master clock in Hz
+ * @freq0: power up freq0 tuning word in Hz
+ * @freq1: power up freq1 tuning word in Hz
+ * @phase0: power up phase0 value [0..4095] correlates with 0..2PI
+ * @phase1: power up phase1 value [0..4095] correlates with 0..2PI
+ * @en_div2: digital output/2 is passed to the SIGN BIT OUT pin
+ * @en_signbit_msb_out: the MSB (or MSB/2) of the DAC data is connected to the
+ * SIGN BIT OUT pin. en_div2 controls whether it is the MSB
+ * or MSB/2 that is output. if en_signbit_msb_out=false,
+ * the on-board comparator is connected to SIGN BIT OUT
+ */
+
+struct ad9834_platform_data {
+ unsigned int mclk;
+ unsigned int freq0;
+ unsigned int freq1;
+ unsigned short phase0;
+ unsigned short phase1;
+ bool en_div2;
+ bool en_signbit_msb_out;
+};
+
+/**
+ * ad9834_supported_device_ids:
+ */
+
+enum ad9834_supported_device_ids {
+ ID_AD9833,
+ ID_AD9834,
+};
+
+#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
new file mode 100644
index 000000000000..b259bfeaf5aa
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -0,0 +1,156 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9850
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9850"
+
+/*
+ * The AD9850 is programmed with a single 5-byte (40-bit) serial word:
+ * a 32-bit frequency tuning word followed by phase, power-down and
+ * control bits. User space writes the five raw bytes to the "dds"
+ * attribute.
+ */
+struct ad9850_config {
+ u8 control[5];
+};
+
+struct ad9850_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad9850_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+	struct spi_transfer xfer = { 0 };
+ int ret;
+ struct ad9850_config *config = (struct ad9850_config *)buf;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad9850_state *st = idev->dev_data;
+
+ xfer.len = len;
+ xfer.tx_buf = config;
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9850_set_parameter, 0);
+
+static struct attribute *ad9850_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9850_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad9850_attributes,
+};
+
+static int __devinit ad9850_probe(struct spi_device *spi)
+{
+ struct ad9850_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad9850_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 16;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9850_remove(struct spi_device *spi)
+{
+ struct ad9850_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad9850_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9850_probe,
+ .remove = __devexit_p(ad9850_remove),
+};
+
+static __init int ad9850_spi_init(void)
+{
+ return spi_register_driver(&ad9850_driver);
+}
+module_init(ad9850_spi_init);
+
+static __exit void ad9850_spi_exit(void)
+{
+ spi_unregister_driver(&ad9850_driver);
+}
+module_exit(ad9850_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9850 driver");
+MODULE_LICENSE("GPL v2");
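The "dds" attribute above takes the raw 5-byte control word (struct ad9850_config) and clocks it straight out over SPI. A minimal user-space sketch, assuming a sysfs path of roughly this shape (the exact device number and path are system-dependent and hypothetical; the register bytes are illustrative only):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* 32-bit frequency tuning word followed by one control byte */
	unsigned char control[5] = { 0x00, 0x00, 0x10, 0xc6, 0x00 };
	/* hypothetical path; the trailing group name matches DRV_NAME ("ad9850") */
	int fd = open("/sys/bus/iio/devices/device0/ad9850/dds", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, control, sizeof(control)) != sizeof(control)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}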
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
new file mode 100644
index 000000000000..594fb6a94331
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -0,0 +1,305 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9852
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9852"
+
+#define addr_phaad1 0x0
+#define addr_phaad2 0x1
+#define addr_fretu1 0x2
+#define addr_fretu2 0x3
+#define addr_delfre 0x4
+#define addr_updclk 0x5
+#define addr_ramclk 0x6
+#define addr_contrl 0x7
+#define addr_optskm 0x8
+#define addr_optskr 0xa
+#define addr_dacctl 0xb
+
+#define COMPPD (1 << 4)
+#define REFMULT2 (1 << 2)
+#define BYPPLL (1 << 5)
+#define PLLRANG (1 << 6)
+#define IEUPCLK (1)
+#define OSKEN (1 << 5)
+
+#define read_bit (1 << 7)
+
+/* Register format: 1 byte addr + value */
+struct ad9852_config {
+ u8 phajst0[3];
+ u8 phajst1[3];
+ u8 fretun1[6];
+ u8 fretun2[6];
+ u8 dltafre[6];
+ u8 updtclk[5];
+ u8 ramprat[4];
+ u8 control[5];
+ u8 outpskm[3];
+ u8 outpskr[2];
+ u8 daccntl[3];
+};
+
+struct ad9852_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad9852_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ struct ad9852_config *config = (struct ad9852_config *)buf;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad9852_state *st = idev->dev_data;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->phajst0[0];
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->phajst1[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 6;
+ xfer.tx_buf = &config->fretun1[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 6;
+ xfer.tx_buf = &config->fretun2[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 6;
+ xfer.tx_buf = &config->dltafre[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->updtclk[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 4;
+ xfer.tx_buf = &config->ramprat[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->control[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->outpskm[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 2;
+ xfer.tx_buf = &config->outpskr[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->daccntl[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9852_set_parameter, 0);
+
+static void ad9852_init(struct ad9852_state *st)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ u8 config[5];
+
+ config[0] = addr_contrl;
+ config[1] = COMPPD;
+ config[2] = REFMULT2 | BYPPLL | PLLRANG;
+ config[3] = IEUPCLK;
+ config[4] = OSKEN;
+
+ mutex_lock(&st->lock);
+
+ xfer.len = 5;
+ xfer.tx_buf = &config;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9852_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9852_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad9852_attributes,
+};
+
+static int __devinit ad9852_probe(struct spi_device *spi)
+{
+ struct ad9852_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad9852_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+ ad9852_init(st);
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9852_remove(struct spi_device *spi)
+{
+ struct ad9852_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad9852_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9852_probe,
+ .remove = __devexit_p(ad9852_remove),
+};
+
+static __init int ad9852_spi_init(void)
+{
+ return spi_register_driver(&ad9852_driver);
+}
+module_init(ad9852_spi_init);
+
+static __exit void ad9852_spi_exit(void)
+{
+ spi_unregister_driver(&ad9852_driver);
+}
+module_exit(ad9852_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9852 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
new file mode 100644
index 000000000000..e8fb75cb66ec
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -0,0 +1,440 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9910
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9910"
+
+#define CFR1 0x0
+#define CFR2 0x1
+#define CFR3 0x2
+
+#define AUXDAC 0x3
+#define IOUPD 0x4
+#define FTW 0x7
+#define POW 0x8
+#define ASF 0x9
+#define MULTC 0x0A
+#define DIG_RAMPL 0x0B
+#define DIG_RAMPS 0x0C
+#define DIG_RAMPR 0x0D
+#define SIN_TONEP0 0x0E
+#define SIN_TONEP1 0x0F
+#define SIN_TONEP2 0x10
+#define SIN_TONEP3 0x11
+#define SIN_TONEP4 0x12
+#define SIN_TONEP5 0x13
+#define SIN_TONEP6 0x14
+#define SIN_TONEP7 0x15
+
+#define RAM_ENABLE (1 << 7)
+
+#define MANUAL_OSK (1 << 7)
+#define INVSIC (1 << 6)
+#define DDS_SINEOP (1)
+
+#define AUTO_OSK (1)
+#define OSKEN (1 << 1)
+#define LOAD_ARR (1 << 2)
+#define CLR_PHA (1 << 3)
+#define CLR_DIG (1 << 4)
+#define ACLR_PHA (1 << 5)
+#define ACLR_DIG (1 << 6)
+#define LOAD_LRR (1 << 7)
+
+#define LSB_FST (1)
+#define SDIO_IPT (1 << 1)
+#define EXT_PWD (1 << 3)
+#define ADAC_PWD (1 << 4)
+#define REFCLK_PWD (1 << 5)
+#define DAC_PWD (1 << 6)
+#define DIG_PWD (1 << 7)
+
+#define ENA_AMP (1)
+#define READ_FTW (1)
+#define DIGR_LOW (1 << 1)
+#define DIGR_HIGH (1 << 2)
+#define DIGR_ENA (1 << 3)
+#define SYNCCLK_ENA (1 << 6)
+#define ITER_IOUPD (1 << 7)
+
+#define TX_ENA (1 << 1)
+#define PDCLK_INV (1 << 2)
+#define PDCLK_ENB (1 << 3)
+
+#define PARA_ENA (1 << 4)
+#define SYNC_DIS (1 << 5)
+#define DATA_ASS (1 << 6)
+#define MATCH_ENA (1 << 7)
+
+#define PLL_ENA (1)
+#define PFD_RST (1 << 2)
+#define REFCLK_RST (1 << 6)
+#define REFCLK_BYP (1 << 7)
+
+/* Register format: 1 byte addr + value */
+struct ad9910_config {
+ u8 auxdac[5];
+ u8 ioupd[5];
+ u8 ftw[5];
+ u8 pow[3];
+ u8 asf[5];
+ u8 multc[5];
+ u8 dig_rampl[9];
+ u8 dig_ramps[9];
+ u8 dig_rampr[5];
+ u8 sin_tonep0[9];
+ u8 sin_tonep1[9];
+ u8 sin_tonep2[9];
+ u8 sin_tonep3[9];
+ u8 sin_tonep4[9];
+ u8 sin_tonep5[9];
+ u8 sin_tonep6[9];
+ u8 sin_tonep7[9];
+};
+
+struct ad9910_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad9910_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ struct ad9910_config *config = (struct ad9910_config *)buf;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad9910_state *st = idev->dev_data;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->auxdac[0];
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->ioupd[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->ftw[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->pow[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->asf[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->multc[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->dig_rampl[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->dig_ramps[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->dig_rampr[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep0[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep1[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep2[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep3[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep4[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep5[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep6[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 9;
+ xfer.tx_buf = &config->sin_tonep7[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9910_set_parameter, 0);
+
+static void ad9910_init(struct ad9910_state *st)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ u8 cfr[5];
+
+ cfr[0] = CFR1;
+ cfr[1] = 0;
+ cfr[2] = MANUAL_OSK | INVSIC | DDS_SINEOP;
+ cfr[3] = AUTO_OSK | OSKEN | ACLR_PHA | ACLR_DIG | LOAD_LRR;
+ cfr[4] = 0;
+
+ mutex_lock(&st->lock);
+
+ xfer.len = 5;
+ xfer.tx_buf = &cfr;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ cfr[0] = CFR2;
+ cfr[1] = ENA_AMP;
+ cfr[2] = READ_FTW | DIGR_ENA | ITER_IOUPD;
+ cfr[3] = TX_ENA | PDCLK_INV | PDCLK_ENB;
+ cfr[4] = PARA_ENA;
+
+ xfer.len = 5;
+ xfer.tx_buf = &cfr;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ cfr[0] = CFR3;
+ cfr[1] = PLL_ENA;
+ cfr[2] = 0;
+ cfr[3] = REFCLK_RST | REFCLK_BYP;
+ cfr[4] = 0;
+
+ xfer.len = 5;
+ xfer.tx_buf = &cfr;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9910_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9910_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad9910_attributes,
+};
+
+static int __devinit ad9910_probe(struct spi_device *spi)
+{
+ struct ad9910_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad9910_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+ ad9910_init(st);
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9910_remove(struct spi_device *spi)
+{
+ struct ad9910_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad9910_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9910_probe,
+ .remove = __devexit_p(ad9910_remove),
+};
+
+static __init int ad9910_spi_init(void)
+{
+ return spi_register_driver(&ad9910_driver);
+}
+module_init(ad9910_spi_init);
+
+static __exit void ad9910_spi_exit(void)
+{
+ spi_unregister_driver(&ad9910_driver);
+}
+module_exit(ad9910_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9910 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
new file mode 100644
index 000000000000..57eddf6d4713
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -0,0 +1,249 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9951
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9951"
+
+#define CFR1 0x0
+#define CFR2 0x1
+
+#define AUTO_OSK (1)
+#define OSKEN (1 << 1)
+#define LOAD_ARR (1 << 2)
+
+#define AUTO_SYNC (1 << 7)
+
+#define LSB_FST (1)
+#define SDIO_IPT (1 << 1)
+#define CLR_PHA (1 << 2)
+#define SINE_OPT (1 << 4)
+#define ACLR_PHA (1 << 5)
+
+#define VCO_RANGE (1 << 2)
+
+#define CRS_OPT (1 << 1)
+#define HMANU_SYNC (1 << 2)
+#define HSPD_SYNC (1 << 3)
+
+/* Register format: 1 byte addr + value */
+struct ad9951_config {
+ u8 asf[3];
+ u8 arr[2];
+ u8 ftw0[5];
+ u8 ftw1[3];
+};
+
+struct ad9951_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+};
+
+static ssize_t ad9951_set_parameter(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ struct ad9951_config *config = (struct ad9951_config *)buf;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad9951_state *st = idev->dev_data;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->asf[0];
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 2;
+ xfer.tx_buf = &config->arr[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 5;
+ xfer.tx_buf = &config->ftw0[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ xfer.len = 3;
+ xfer.tx_buf = &config->ftw1[0];
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9951_set_parameter, 0);
+
+static void ad9951_init(struct ad9951_state *st)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero unused fields such as rx_buf and cs_change */
+ int ret;
+ u8 cfr[5];
+
+ cfr[0] = CFR1;
+ cfr[1] = 0;
+ cfr[2] = LSB_FST | CLR_PHA | SINE_OPT | ACLR_PHA;
+ cfr[3] = AUTO_OSK | OSKEN | LOAD_ARR;
+ cfr[4] = 0;
+
+ mutex_lock(&st->lock);
+
+ xfer.len = 5;
+ xfer.tx_buf = &cfr;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+ cfr[0] = CFR2;
+ cfr[1] = VCO_RANGE;
+ cfr[2] = HSPD_SYNC;
+ cfr[3] = 0;
+
+ xfer.len = 4;
+ xfer.tx_buf = &cfr;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9951_attributes[] = {
+ &iio_dev_attr_dds.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9951_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad9951_attributes,
+};
+
+static int __devinit ad9951_probe(struct spi_device *spi)
+{
+ struct ad9951_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad9951_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+ spi->max_speed_hz = 2000000;
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+ ad9951_init(st);
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad9951_remove(struct spi_device *spi)
+{
+ struct ad9951_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad9951_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9951_probe,
+ .remove = __devexit_p(ad9951_remove),
+};
+
+static __init int ad9951_spi_init(void)
+{
+ return spi_register_driver(&ad9951_driver);
+}
+module_init(ad9951_spi_init);
+
+static __exit void ad9951_spi_exit(void)
+{
+ spi_unregister_driver(&ad9951_driver);
+}
+module_exit(ad9951_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9951 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/dds.h b/drivers/staging/iio/dds/dds.h
new file mode 100644
index 000000000000..d8ac3a93baf6
--- /dev/null
+++ b/drivers/staging/iio/dds/dds.h
@@ -0,0 +1,110 @@
+/*
+ * dds.h - sysfs attributes associated with DDS devices
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqY
+ */
+
+#define IIO_DEV_ATTR_FREQ(_channel, _num, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_freq##_num, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqY_scale
+ */
+
+#define IIO_CONST_ATTR_FREQ_SCALE(_channel, _string) \
+ IIO_CONST_ATTR(dds##_channel##_freq_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqsymbol
+ */
+
+#define IIO_DEV_ATTR_FREQSYMBOL(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_freqsymbol, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phaseY
+ */
+
+#define IIO_DEV_ATTR_PHASE(_channel, _num, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_phase##_num, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phaseY_scale
+ */
+
+#define IIO_CONST_ATTR_PHASE_SCALE(_channel, _string) \
+ IIO_CONST_ATTR(dds##_channel##_phase_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phasesymbol
+ */
+
+#define IIO_DEV_ATTR_PHASESYMBOL(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_phasesymbol, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(dds##_channel##_pincontrol_en, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_freq_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_FREQ_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(dds##_channel##_pincontrol_freq_en, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_phase_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_PHASE_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(dds##_channel##_pincontrol_phase_en, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_out_enable
+ */
+
+#define IIO_DEV_ATTR_OUT_ENABLE(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_out_enable, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_enable
+ */
+
+#define IIO_DEV_ATTR_OUTY_ENABLE(_channel, _output, \
+ _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_out##_output##_enable, \
+ _mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_wavetype
+ */
+
+#define IIO_DEV_ATTR_OUT_WAVETYPE(_channel, _output, _store, _addr) \
+ IIO_DEVICE_ATTR(dds##_channel##_out##_output##_wavetype, \
+ S_IWUSR, NULL, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_wavetype_available
+ */
+
+#define IIO_CONST_ATTR_OUT_WAVETYPES_AVAILABLE(_channel, _output, _modes)\
+ IIO_CONST_ATTR(dds##_channel##_out##_output##_wavetype_available,\
+ _modes);
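A usage sketch only, not part of this patch's drivers: a DDS driver built on this header might declare its sysfs attributes as below. ad9833_write_freq() is a hypothetical store callback and the address argument is arbitrary.

/* Expands to IIO_DEVICE_ATTR(dds0_freq0, ...) and appears in sysfs as
 * .../dds0_freq0; the callback and address are made up for illustration. */
static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9833_write_freq, 0);
static IIO_CONST_ATTR_FREQ_SCALE(0, "1");	/* 1 Hz per LSB, illustrative */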
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index c4043610c0df..236f15fdbfc9 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -3,11 +3,45 @@
#
comment "Digital gyroscope sensors"
+config ADIS16060
+ tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices adis16060 wide bandwidth
+ yaw rate gyroscope with SPI.
+
+config ADIS16080
+ tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices adis16080/100 Yaw Rate
+ Gyroscope with SPI.
+
+config ADIS16130
+ tristate "Analog Devices ADIS16130 High Precision Angular Rate Sensor driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Analog Devices ADIS16130 high
+ precision angular rate sensor.
+
config ADIS16260
- tristate "Analog Devices ADIS16260/5 Digital Gyroscope Sensor SPI driver"
+ tristate "Analog Devices ADIS16260 ADIS16265 Digital Gyroscope Sensor SPI driver"
depends on SPI
select IIO_TRIGGER if IIO_RING_BUFFER
select IIO_SW_RING if IIO_RING_BUFFER
help
- Say yes here to build support for Analog Devices adis16260/5
+ Say yes here to build support for Analog Devices ADIS16260 ADIS16265
programmable digital gyroscope sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called adis16260.
+
+config ADIS16251
+ tristate "Analog Devices ADIS16251 Digital Gyroscope Sensor SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices adis16251 programmable
+ digital gyroscope sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called adis16251.
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index b5f0dc01122c..2764c15025a5 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -2,6 +2,18 @@
# Makefile for digital gyroscope sensor drivers
#
+adis16060-y := adis16060_core.o
+obj-$(CONFIG_ADIS16060) += adis16060.o
+
+adis16080-y := adis16080_core.o
+obj-$(CONFIG_ADIS16080) += adis16080.o
+
+adis16130-y := adis16130_core.o
+obj-$(CONFIG_ADIS16130) += adis16130.o
+
adis16260-y := adis16260_core.o
adis16260-$(CONFIG_IIO_RING_BUFFER) += adis16260_ring.o adis16260_trigger.o
obj-$(CONFIG_ADIS16260) += adis16260.o
+
+adis16251-y := adis16251_core.o
+obj-$(CONFIG_ADIS16251) += adis16251.o
diff --git a/drivers/staging/iio/gyro/adis16060.h b/drivers/staging/iio/gyro/adis16060.h
new file mode 100644
index 000000000000..5c00e5385ee0
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16060.h
@@ -0,0 +1,101 @@
+#ifndef SPI_ADIS16060_H_
+#define SPI_ADIS16060_H_
+
+#define ADIS16060_GYRO 0x20 /* Measure Angular Rate (Gyro) */
+#define ADIS16060_SUPPLY_OUT 0x10 /* Measure Temperature */
+#define ADIS16060_AIN2 0x80 /* Measure AIN2 */
+#define ADIS16060_AIN1 0x40 /* Measure AIN1 */
+#define ADIS16060_TEMP_OUT 0x22 /* Set Positive Self-Test and Output for Angular Rate */
+#define ADIS16060_ANGL_OUT 0x21 /* Set Negative Self-Test and Output for Angular Rate */
+
+#define ADIS16060_MAX_TX 3
+#define ADIS16060_MAX_RX 3
+
+/**
+ * struct adis16060_state - device instance specific data
+ * @us_w: actual spi_device used for writes
+ * @us_r: actual spi_device used for reads
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16060_state {
+ struct spi_device *us_w;
+ struct spi_device *us_r;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16060_scan {
+ ADIS16060_SCAN_GYRO,
+ ADIS16060_SCAN_TEMP,
+ ADIS16060_SCAN_ADC_1,
+ ADIS16060_SCAN_ADC_2,
+};
+
+void adis16060_remove_trigger(struct iio_dev *indio_dev);
+int adis16060_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16060_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16060_configure_ring(struct iio_dev *indio_dev);
+void adis16060_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16060_initialize_ring(struct iio_ring_buffer *ring);
+void adis16060_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16060_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16060_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16060_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16060_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16060_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16060_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16060_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16060_H_ */
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
new file mode 100644
index 000000000000..fc48aca04bd3
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -0,0 +1,319 @@
+/*
+ * ADIS16060 Wide Bandwidth Yaw Rate Gyroscope with SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16060.h"
+
+#define DRIVER_NAME "adis16060"
+
+struct adis16060_state *adis16060_st;
+
+int adis16060_spi_write(struct device *dev,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = 0;
+ st->tx[1] = 0;
+ st->tx[2] = val; /* The last 8 bits clocked in are latched */
+
+ ret = spi_write(st->us_w, st->tx, 3);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+int adis16060_spi_read(struct device *dev,
+ u16 *val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+
+ ret = spi_read(st->us_r, st->rx, 3);
+
+ /* The internal successive approximation ADC begins the conversion process
+ * on the falling edge of MSEL1 and starts to place data MSB first on the
+ * DOUT line at the 6th falling edge of SCLK
+ */
+ if (ret == 0)
+ *val = ((st->rx[0] & 0x3) << 12) | (st->rx[1] << 4) | ((st->rx[2] >> 4) & 0xF);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static ssize_t adis16060_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ u16 val;
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16060_spi_read(dev, &val);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret == 0)
+ return sprintf(buf, "%d\n", val);
+ else
+ return ret;
+}
+
+static ssize_t adis16060_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 16, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16060_spi_write(dev, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_IN(_show) \
+ IIO_DEVICE_ATTR(in, S_IRUGO, _show, NULL, 0)
+
+#define IIO_DEV_ATTR_OUT(_store) \
+ IIO_DEVICE_ATTR(out, S_IWUSR, NULL, _store, 0)
+
+static IIO_DEV_ATTR_IN(adis16060_read);
+static IIO_DEV_ATTR_OUT(adis16060_write);
+
+static IIO_CONST_ATTR(name, "adis16060");
+
+static struct attribute *adis16060_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16060_event_attribute_group = {
+ .attrs = adis16060_event_attributes,
+};
+
+static struct attribute *adis16060_attributes[] = {
+ &iio_dev_attr_in.dev_attr.attr,
+ &iio_dev_attr_out.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16060_attribute_group = {
+ .attrs = adis16060_attributes,
+};
+
+static int __devinit adis16060_r_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16060_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16060_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16060_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us_r = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16060_event_attribute_group;
+ st->indio_dev->attrs = &adis16060_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16060_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16060_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16060");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16060_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ adis16060_st = st;
+ return 0;
+
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16060_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16060_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16060_r_remove(struct spi_device *spi)
+{
+ struct adis16060_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16060_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16060_uninitialize_ring(indio_dev->ring);
+ adis16060_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static int __devinit adis16060_w_probe(struct spi_device *spi)
+{
+ int ret;
+ struct adis16060_state *st = adis16060_st;
+ if (!st) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+ st->us_w = spi;
+ return 0;
+
+error_ret:
+ return ret;
+}
+
+static int adis16060_w_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+static struct spi_driver adis16060_r_driver = {
+ .driver = {
+ .name = "adis16060_r",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16060_r_probe,
+ .remove = __devexit_p(adis16060_r_remove),
+};
+
+static struct spi_driver adis16060_w_driver = {
+ .driver = {
+ .name = "adis16060_w",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16060_w_probe,
+ .remove = __devexit_p(adis16060_w_remove),
+};
+
+static __init int adis16060_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&adis16060_r_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = spi_register_driver(&adis16060_w_driver);
+ if (ret < 0) {
+ spi_unregister_driver(&adis16060_r_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(adis16060_init);
+
+static __exit void adis16060_exit(void)
+{
+ spi_unregister_driver(&adis16060_w_driver);
+ spi_unregister_driver(&adis16060_r_driver);
+}
+module_exit(adis16060_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16080.h b/drivers/staging/iio/gyro/adis16080.h
new file mode 100644
index 000000000000..3fcbe67f7c31
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16080.h
@@ -0,0 +1,102 @@
+#ifndef SPI_ADIS16080_H_
+#define SPI_ADIS16080_H_
+
+#define ADIS16080_DIN_CODE 4 /* Output data format setting. 0: Twos complement. 1: Offset binary. */
+#define ADIS16080_DIN_GYRO (0 << 10) /* Gyroscope output */
+#define ADIS16080_DIN_TEMP (1 << 10) /* Temperature output */
+#define ADIS16080_DIN_AIN1 (2 << 10)
+#define ADIS16080_DIN_AIN2 (3 << 10)
+#define ADIS16080_DIN_WRITE (1 << 15) /* 1: Write contents on DIN to control register.
+ * 0: No changes to control register.
+ */
+
+#define ADIS16080_MAX_TX 2
+#define ADIS16080_MAX_RX 2
+
+/**
+ * struct adis16080_state - device instance specific data
+ * @us: actual spi_device used for communication
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16080_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16080_scan {
+ ADIS16080_SCAN_GYRO,
+ ADIS16080_SCAN_TEMP,
+ ADIS16080_SCAN_ADC_1,
+ ADIS16080_SCAN_ADC_2,
+};
+
+void adis16080_remove_trigger(struct iio_dev *indio_dev);
+int adis16080_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16080_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16080_configure_ring(struct iio_dev *indio_dev);
+void adis16080_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16080_initialize_ring(struct iio_ring_buffer *ring);
+void adis16080_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16080_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16080_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16080_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16080_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16080_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16080_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16080_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16080_H_ */
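A sketch of how a DIN control word can be composed from the definitions above; adis16080_select_temp() is a hypothetical helper, and adis16080_spi_write() is the routine added in adis16080_core.c below:

/* Hypothetical helper: select the temperature channel.  The write
 * strobe lives in bit 15 and the channel select in bits 11:10. */
static int adis16080_select_temp(struct device *dev)
{
	u16 din = ADIS16080_DIN_WRITE | ADIS16080_DIN_TEMP;

	return adis16080_spi_write(dev, din);
}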
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
new file mode 100644
index 000000000000..0efb768db7d3
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -0,0 +1,271 @@
+/*
+ * ADIS16080/100 Yaw Rate Gyroscope with SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16080.h"
+
+#define DRIVER_NAME "adis16080"
+
+struct adis16080_state *adis16080_st;
+
+int adis16080_spi_write(struct device *dev,
+ u16 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = val >> 8;
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+int adis16080_spi_read(struct device *dev,
+ u16 *val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+
+ ret = spi_read(st->us, st->rx, 2);
+
+ if (ret == 0)
+ *val = ((st->rx[0] & 0xF) << 8) | st->rx[1];
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static ssize_t adis16080_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ u16 val;
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16080_spi_read(dev, &val);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret == 0)
+ return sprintf(buf, "%d\n", val);
+ else
+ return ret;
+}
+
+static ssize_t adis16080_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 16, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16080_spi_write(dev, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_IN(_show) \
+ IIO_DEVICE_ATTR(in, S_IRUGO, _show, NULL, 0)
+
+#define IIO_DEV_ATTR_OUT(_store) \
+ IIO_DEVICE_ATTR(out, S_IWUSR, NULL, _store, 0)
+
+static IIO_DEV_ATTR_IN(adis16080_read);
+static IIO_DEV_ATTR_OUT(adis16080_write);
+
+static IIO_CONST_ATTR(name, "adis16080");
+
+static struct attribute *adis16080_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16080_event_attribute_group = {
+ .attrs = adis16080_event_attributes,
+};
+
+static struct attribute *adis16080_attributes[] = {
+ &iio_dev_attr_in.dev_attr.attr,
+ &iio_dev_attr_out.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16080_attribute_group = {
+ .attrs = adis16080_attributes,
+};
+
+static int __devinit adis16080_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16080_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16080_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16080_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16080_event_attribute_group;
+ st->indio_dev->attrs = &adis16080_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16080_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16080_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16080");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16080_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ adis16080_st = st;
+ return 0;
+
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16080_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16080_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16080_remove(struct spi_device *spi)
+{
+ struct adis16080_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16080_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16080_uninitialize_ring(indio_dev->ring);
+ adis16080_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16080_driver = {
+ .driver = {
+ .name = "adis16080",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16080_probe,
+ .remove = __devexit_p(adis16080_remove),
+};
+
+static __init int adis16080_init(void)
+{
+ return spi_register_driver(&adis16080_driver);
+}
+module_init(adis16080_init);
+
+static __exit void adis16080_exit(void)
+{
+ spi_unregister_driver(&adis16080_driver);
+}
+module_exit(adis16080_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16130.h b/drivers/staging/iio/gyro/adis16130.h
new file mode 100644
index 000000000000..ab80ef6a8961
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16130.h
@@ -0,0 +1,108 @@
+#ifndef SPI_ADIS16130_H_
+#define SPI_ADIS16130_H_
+
+#define ADIS16130_CON 0x0
+#define ADIS16130_CON_RD (1 << 6)
+#define ADIS16130_IOP 0x1
+#define ADIS16130_IOP_ALL_RDY (1 << 3) /* 1 = data-ready signal low when unread data on all channels; */
+#define ADIS16130_IOP_SYNC (1 << 0) /* 1 = synchronization enabled */
+#define ADIS16130_RATEDATA 0x8 /* Gyroscope output, rate of rotation */
+#define ADIS16130_TEMPDATA 0xA /* Temperature output */
+#define ADIS16130_RATECS 0x28 /* Gyroscope channel setup */
+#define ADIS16130_RATECS_EN (1 << 3) /* 1 = channel enable; */
+#define ADIS16130_TEMPCS 0x2A /* Temperature channel setup */
+#define ADIS16130_TEMPCS_EN (1 << 3)
+#define ADIS16130_RATECONV 0x30
+#define ADIS16130_TEMPCONV 0x32
+#define ADIS16130_MODE 0x38
+#define ADIS16130_MODE_24BIT (1 << 1) /* 1 = 24-bit resolution; */
+
+#define ADIS16130_MAX_TX 4
+#define ADIS16130_MAX_RX 4
+
+/**
+ * struct adis16130_state - device instance specific data
+ * @us: actual spi_device used for communication
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @mode: resolution mode, 1 = 24-bit, 0 = 16-bit
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16130_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ u32 mode; /* 1: 24bits mode 0:16bits mode */
+ struct mutex buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16130_scan {
+ ADIS16130_SCAN_GYRO,
+ ADIS16130_SCAN_TEMP,
+};
+
+void adis16130_remove_trigger(struct iio_dev *indio_dev);
+int adis16130_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16130_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16130_configure_ring(struct iio_dev *indio_dev);
+void adis16130_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16130_initialize_ring(struct iio_ring_buffer *ring);
+void adis16130_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16130_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16130_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16130_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16130_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16130_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16130_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16130_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16130_H_ */
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
new file mode 100644
index 000000000000..49ffc7b26e8a
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -0,0 +1,313 @@
+/*
+ * ADIS16130 Digital Output, High Precision Angular Rate Sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16130.h"
+
+#define DRIVER_NAME "adis16130"
+
+struct adis16130_state *adis16130_st;
+
+int adis16130_spi_write(struct device *dev, u8 reg_addr,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = reg_addr;
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+int adis16130_spi_read(struct device *dev, u8 reg_addr,
+ u32 *val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+
+ st->tx[0] = ADIS16130_CON_RD | reg_addr;
+ if (st->mode)
+ ret = spi_read(st->us, st->rx, 4);
+ else
+ ret = spi_read(st->us, st->rx, 3);
+
+ if (ret == 0) {
+ if (st->mode)
+ *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+ else
+ *val = (st->rx[1] << 8) | st->rx[2];
+ }
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static ssize_t adis16130_gyro_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ u32 val;
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16130_spi_read(dev, ADIS16130_RATEDATA, &val);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret == 0)
+ return sprintf(buf, "%d\n", val);
+ else
+ return ret;
+}
+
+static ssize_t adis16130_temp_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ u32 val;
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16130_spi_read(dev, ADIS16130_TEMPDATA, &val);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret == 0)
+ return sprintf(buf, "%d\n", val);
+ else
+ return ret;
+}
+
+static ssize_t adis16130_bitsmode_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+ return sprintf(buf, "%d\n", st->mode);
+}
+
+static ssize_t adis16130_bitsmode_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 16, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16130_spi_write(dev, ADIS16130_MODE, !!val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16130_temp_read);
+
+static IIO_CONST_ATTR(name, "adis16130");
+
+static IIO_DEV_ATTR_GYRO(adis16130_gyro_read,
+ ADIS16130_RATEDATA);
+
+#define IIO_DEV_ATTR_BITS_MODE(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bits_mode, _mode, _show, _store, _addr)
+
+static IIO_DEV_ATTR_BITS_MODE(S_IWUSR | S_IRUGO, adis16130_bitsmode_read, adis16130_bitsmode_write,
+ ADIS16130_MODE);
+
+static struct attribute *adis16130_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16130_event_attribute_group = {
+ .attrs = adis16130_event_attributes,
+};
+
+static struct attribute *adis16130_attributes[] = {
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_gyro_raw.dev_attr.attr,
+ &iio_dev_attr_bits_mode.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16130_attribute_group = {
+ .attrs = adis16130_attributes,
+};
+
+static int __devinit adis16130_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16130_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16130_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16130_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16130_event_attribute_group;
+ st->indio_dev->attrs = &adis16130_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+ st->mode = 1;
+
+ ret = adis16130_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16130_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16130");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16130_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ adis16130_st = st;
+ return 0;
+
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16130_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16130_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16130_remove(struct spi_device *spi)
+{
+ struct adis16130_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16130_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16130_uninitialize_ring(indio_dev->ring);
+ adis16130_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16130_driver = {
+ .driver = {
+ .name = "adis16130",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16130_probe,
+ .remove = __devexit_p(adis16130_remove),
+};
+
+static __init int adis16130_init(void)
+{
+ return spi_register_driver(&adis16130_driver);
+}
+module_init(adis16130_init);
+
+static __exit void adis16130_exit(void)
+{
+ spi_unregister_driver(&adis16130_driver);
+}
+module_exit(adis16130_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16251.h b/drivers/staging/iio/gyro/adis16251.h
new file mode 100644
index 000000000000..d23852cf78e8
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16251.h
@@ -0,0 +1,185 @@
+#ifndef SPI_ADIS16251_H_
+#define SPI_ADIS16251_H_
+
+#define ADIS16251_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16251_READ_REG(a) a
+#define ADIS16251_WRITE_REG(a) ((a) | 0x80)
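+/*
+ * For example (values from the register map below): reading GYRO_OUT puts
+ * address 0x04 on the bus, while writing the low byte of GYRO_OFF sends
+ * 0x94 (0x14 | 0x80).
+ */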
+
+#define ADIS16251_ENDURANCE 0x00 /* Flash memory write count */
+#define ADIS16251_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16251_GYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16251_AUX_ADC 0x0A /* analog input channel measurement */
+#define ADIS16251_TEMP_OUT 0x0C /* internal temperature measurement */
+#define ADIS16251_ANGL_OUT 0x0E /* angle displacement */
+#define ADIS16251_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
+#define ADIS16251_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
+#define ADIS16251_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
+#define ADIS16251_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
+#define ADIS16251_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
+#define ADIS16251_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
+#define ADIS16251_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16251_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16251_GPIO_CTRL 0x32 /* Control, digital I/O line */
+#define ADIS16251_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
+#define ADIS16251_SMPL_PRD 0x36 /* Control, internal sample rate */
+#define ADIS16251_SENS_AVG 0x38 /* Control, dynamic range, filtering */
+#define ADIS16251_SLP_CNT 0x3A /* Control, sleep mode initiation */
+#define ADIS16251_DIAG_STAT 0x3C /* Diagnostic, error flags */
+#define ADIS16251_GLOB_CMD 0x3E /* Control, global commands */
+
+#define ADIS16251_ERROR_ACTIVE (1<<14)
+#define ADIS16251_NEW_DATA (1<<14)
+
+/* MSC_CTRL */
+#define ADIS16251_MSC_CTRL_INT_SELF_TEST (1<<10) /* Internal self-test enable */
+#define ADIS16251_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16251_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16251_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16251_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16251_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16251_SMPL_PRD_TIME_BASE (1<<7) /* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
+#define ADIS16251_SMPL_PRD_DIV_MASK 0x7F
+
+/* SLP_CNT */
+#define ADIS16251_SLP_CNT_POWER_OFF 0x80
+
+/* DIAG_STAT */
+#define ADIS16251_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16251_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16251_DIAG_STAT_SELF_TEST (1<<5)
+#define ADIS16251_DIAG_STAT_OVERFLOW (1<<4)
+#define ADIS16251_DIAG_STAT_SPI_FAIL (1<<3)
+#define ADIS16251_DIAG_STAT_FLASH_UPT (1<<2)
+#define ADIS16251_DIAG_STAT_POWER_HIGH (1<<1)
+#define ADIS16251_DIAG_STAT_POWER_LOW (1<<0)
+
+#define ADIS16251_DIAG_STAT_ERR_MASK (ADIS16251_DIAG_STAT_ALARM2 | \
+ ADIS16251_DIAG_STAT_ALARM1 | \
+ ADIS16251_DIAG_STAT_SELF_TEST | \
+ ADIS16251_DIAG_STAT_OVERFLOW | \
+ ADIS16251_DIAG_STAT_SPI_FAIL | \
+ ADIS16251_DIAG_STAT_FLASH_UPT | \
+ ADIS16251_DIAG_STAT_POWER_HIGH | \
+ ADIS16251_DIAG_STAT_POWER_LOW)
+
+/* GLOB_CMD */
+#define ADIS16251_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16251_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16251_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16251_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16251_GLOB_CMD_AUTO_NULL (1<<0)
+
+#define ADIS16251_MAX_TX 24
+#define ADIS16251_MAX_RX 24
+
+#define ADIS16251_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16251_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16251_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16251_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16251_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16251_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val);
+
+int adis16251_spi_read_burst(struct device *dev, u8 *rx);
+
+int adis16251_spi_read_sequence(struct device *dev,
+ u8 *tx, u8 *rx, int num);
+
+int adis16251_set_irq(struct device *dev, bool enable);
+
+int adis16251_reset(struct device *dev);
+
+int adis16251_stop_device(struct device *dev);
+
+int adis16251_check_status(struct device *dev);
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16251_scan {
+ ADIS16251_SCAN_SUPPLY,
+ ADIS16251_SCAN_GYRO,
+ ADIS16251_SCAN_TEMP,
+ ADIS16251_SCAN_ADC_0,
+};
+
+void adis16251_remove_trigger(struct iio_dev *indio_dev);
+int adis16251_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16251_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16251_configure_ring(struct iio_dev *indio_dev);
+void adis16251_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16251_initialize_ring(struct iio_ring_buffer *ring);
+void adis16251_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16251_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16251_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16251_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16251_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16251_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16251_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16251_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16251_H_ */
diff --git a/drivers/staging/iio/gyro/adis16251_core.c b/drivers/staging/iio/gyro/adis16251_core.c
new file mode 100644
index 000000000000..a0d400f7ee62
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16251_core.c
@@ -0,0 +1,777 @@
+/*
+ * ADIS16251 Programmable Digital Gyroscope Sensor Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16251.h"
+
+#define DRIVER_NAME "adis16251"
+
+/* At the moment the spi framework doesn't allow a global setting of
+ * cs_change (it is listed among the likely additions in the comment at the
+ * top of spi.h). This means the spi_write()/spi_read() helpers cannot be
+ * used for the multi-transfer register accesses below, which are instead
+ * built from explicit spi_transfer arrays with cs_change set.
+ */
+
+/**
+ * adis16251_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+int adis16251_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16251_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16251_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *	register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16251_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16251_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16251_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16251_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *	register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16251_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16251_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+/**
+ * adis16251_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+int adis16251_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 24,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16251_READ_REG(ADIS16251_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = min(ADIS16251_SPI_BURST, old_speed_hz);
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+/**
+ * adis16251_spi_read_sequence() - read a sequence of 16-bit registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @tx: register addresses in bytes 0,2,4,6... (min size is 2*num bytes)
+ * @rx: somewhere to pass back the value read (min size is 2*num bytes)
+ **/
+int adis16251_spi_read_sequence(struct device *dev,
+ u8 *tx, u8 *rx, int num)
+{
+ struct spi_message msg;
+ struct spi_transfer *xfers;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+ int ret, i;
+
+ xfers = kzalloc(sizeof(*xfers) * (num + 1), GFP_KERNEL);
+ if (xfers == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed");
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* tx: |addr1|addr2|addr3|...|addrN |zero|
+ * rx: |zero|res1 |res2 |...|resN-1|resN| */
+ spi_message_init(&msg);
+ for (i = 0; i < num + 1; i++) {
+ if (i > 0)
+ xfers[i].rx_buf = st->rx + 2*(i - 1);
+ if (i < num)
+ xfers[i].tx_buf = st->tx + 2*i;
+ xfers[i].bits_per_word = 8;
+ xfers[i].len = 2;
+ xfers[i].cs_change = 1;
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ mutex_lock(&st->buf_lock);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when reading sequence");
+
+ mutex_unlock(&st->buf_lock);
+ kfree(xfers);
+
+error_ret:
+ return ret;
+}
+
+static ssize_t adis16251_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16251_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16251_ERROR_ACTIVE)
+ adis16251_check_status(dev);
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16251_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16251_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16251_ERROR_ACTIVE)
+ adis16251_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16251_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16251_spi_read_signed(dev, attr, buf, 14);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16251_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16251_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16251_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16251_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t adis16251_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = adis16251_spi_read_reg_16(dev,
+ ADIS16251_SMPL_PRD,
+ &t);
+ if (ret)
+ return ret;
+ sps = (t & ADIS16251_SMPL_PRD_TIME_BASE) ? 8 : 256;
+ sps /= (t & ADIS16251_SMPL_PRD_DIV_MASK) + 1;
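+ /*
+ * The mapping above gives, for example, 256 SPS for SMPL_PRD = 0x00,
+ * 32 SPS for 0x07, and an 8 SPS base rate once the time-base bit is
+ * set.
+ */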
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t adis16251_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+
+ t = (256 / val);
+ if (t > 0)
+ t--;
+ t &= ADIS16251_SMPL_PRD_DIV_MASK;
+ if ((t & ADIS16251_SMPL_PRD_DIV_MASK) >= 0x0A)
+ st->us->max_speed_hz = ADIS16251_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADIS16251_SPI_FAST;
+
+ ret = adis16251_spi_write_reg_8(dev,
+ ADIS16251_SMPL_PRD,
+ t);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t adis16251_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ ret = adis16251_reset(dev);
+ return ret ? ret : len;
+ }
+ return -EINVAL;
+}
+
+
+
+int adis16251_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 msc;
+ ret = adis16251_spi_read_reg_16(dev, ADIS16251_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16251_MSC_CTRL_DATA_RDY_POL_HIGH;
+ if (enable)
+ msc |= ADIS16251_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16251_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16251_spi_write_reg_16(dev, ADIS16251_MSC_CTRL, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+int adis16251_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16251_spi_write_reg_8(dev,
+ ADIS16251_GLOB_CMD,
+ ADIS16251_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+/* Power down the device */
+int adis16251_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val = ADIS16251_SLP_CNT_POWER_OFF;
+
+ ret = adis16251_spi_write_reg_16(dev, ADIS16251_SLP_CNT, val);
+ if (ret)
+ dev_err(dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16251_self_test(struct device *dev)
+{
+ int ret;
+
+ ret = adis16251_spi_write_reg_16(dev,
+ ADIS16251_MSC_CTRL,
+ ADIS16251_MSC_CTRL_INT_SELF_TEST);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ ret = adis16251_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+int adis16251_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16251_spi_read_reg_16(dev, ADIS16251_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+
+ if (!(status & ADIS16251_DIAG_STAT_ERR_MASK)) {
+ ret = 0;
+ goto error_ret;
+ }
+
+ ret = -EFAULT;
+
+ if (status & ADIS16251_DIAG_STAT_ALARM2)
+ dev_err(dev, "Alarm 2 active\n");
+ if (status & ADIS16251_DIAG_STAT_ALARM1)
+ dev_err(dev, "Alarm 1 active\n");
+ if (status & ADIS16251_DIAG_STAT_SELF_TEST)
+ dev_err(dev, "Self test error\n");
+ if (status & ADIS16251_DIAG_STAT_OVERFLOW)
+ dev_err(dev, "Sensor overrange\n");
+ if (status & ADIS16251_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16251_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16251_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16251_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16251_initial_setup(struct adis16251_state *st)
+{
+ int ret;
+ u16 smp_prd;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use low spi speed for init */
+ st->us->max_speed_hz = ADIS16251_SPI_SLOW;
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = adis16251_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16251_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16251_check_status(dev);
+ if (ret) {
+ adis16251_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16251_STARTUP_DELAY);
+ ret = adis16251_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+ /* use high spi speed if possible */
+ ret = adis16251_spi_read_reg_16(dev, ADIS16251_SMPL_PRD, &smp_prd);
+ if (!ret && (smp_prd & ADIS16251_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->us->max_speed_hz = ADIS16251_SPI_FAST;
+ spi_setup(st->us);
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16251_read_12bit_signed,
+ ADIS16251_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.0018315");
+
+static IIO_DEV_ATTR_GYRO(adis16251_read_14bit_signed,
+ ADIS16251_GYRO_OUT);
+static IIO_DEV_ATTR_GYRO_SCALE(S_IWUSR | S_IRUGO,
+ adis16251_read_12bit_signed,
+ adis16251_write_16bit,
+ ADIS16251_GYRO_SCALE);
+static IIO_DEV_ATTR_GYRO_OFFSET(S_IWUSR | S_IRUGO,
+ adis16251_read_12bit_signed,
+ adis16251_write_16bit,
+ ADIS16251_GYRO_OFF);
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16251_read_12bit_signed);
+static IIO_CONST_ATTR(temp_offset, "25 K");
+static IIO_CONST_ATTR(temp_scale, "0.1453 K");
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(1, aux, adis16251_read_12bit_unsigned,
+ ADIS16251_AUX_ADC);
+static IIO_CONST_ATTR(in1_aux_scale, "0.0006105");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16251_read_frequency,
+ adis16251_write_frequency);
+static IIO_DEV_ATTR_ANGL(adis16251_read_14bit_signed,
+ ADIS16251_ANGL_OUT);
+
+static IIO_DEV_ATTR_RESET(adis16251_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("0.129 ~ 256");
+
+static IIO_CONST_ATTR(name, "adis16251");
+
+static struct attribute *adis16251_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16251_event_attribute_group = {
+ .attrs = adis16251_event_attributes,
+};
+
+static struct attribute *adis16251_attributes[] = {
+ &iio_dev_attr_in0_supply_raw.dev_attr.attr,
+ &iio_const_attr_in0_supply_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_offset.dev_attr.attr,
+ &iio_dev_attr_angl_raw.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in1_aux_raw.dev_attr.attr,
+ &iio_const_attr_in1_aux_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16251_attribute_group = {
+ .attrs = adis16251_attributes,
+};
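+
+/*
+ * Illustrative use from userspace (not part of the original submission; the
+ * parent directory depends on where the IIO core of this tree places the
+ * device, "deviceN" is an example):
+ *
+ * cat .../deviceN/gyro_raw - raw angular rate reading
+ * cat .../deviceN/sampling_frequency - e.g. "256 SPS"
+ * echo 1 > .../deviceN/reset - software reset via GLOB_CMD
+ */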
+
+static int __devinit adis16251_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16251_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16251_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16251_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16251_event_attribute_group;
+ st->indio_dev->attrs = &adis16251_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16251_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16251_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16251");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16251_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16251_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ adis16251_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16251_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16251_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16251_remove(struct spi_device *spi)
+{
+ int ret;
+ struct adis16251_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = adis16251_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ adis16251_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16251_uninitialize_ring(indio_dev->ring);
+ adis16251_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver adis16251_driver = {
+ .driver = {
+ .name = "adis16251",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16251_probe,
+ .remove = __devexit_p(adis16251_remove),
+};
+
+static __init int adis16251_init(void)
+{
+ return spi_register_driver(&adis16251_driver);
+}
+module_init(adis16251_init);
+
+static __exit void adis16251_exit(void)
+{
+ spi_unregister_driver(&adis16251_driver);
+}
+module_exit(adis16251_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16251 Digital Gyroscope Sensor SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 812440af57d6..c1fd4364287f 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -1,5 +1,6 @@
#ifndef SPI_ADIS16260_H_
#define SPI_ADIS16260_H_
+#include "adis16260_platform_data.h"
#define ADIS16260_STARTUP_DELAY 220 /* ms */
@@ -92,6 +93,7 @@
* @tx: transmit buffer
* @rx: recieve buffer
* @buf_lock: mutex to protect tx and rx
+ * @negate: negate the scale parameter
**/
struct adis16260_state {
struct spi_device *us;
@@ -102,6 +104,7 @@ struct adis16260_state {
u8 *tx;
u8 *rx;
struct mutex buf_lock;
+ unsigned negate:1;
};
int adis16260_set_irq(struct device *dev, bool enable);
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 7d7716e5857c..045e27da980a 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -1,5 +1,5 @@
/*
- * ADIS16260 Programmable Digital Gyroscope Sensor Driver
+ * ADIS16260/ADIS16265 Programmable Digital Gyroscope Sensor Driver
*
* Copyright 2010 Analog Devices Inc.
*
@@ -134,8 +134,6 @@ static int adis16260_spi_read_reg_16(struct device *dev,
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16260_READ_REG(lower_reg_address);
st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
@@ -293,6 +291,22 @@ static ssize_t adis16260_write_frequency(struct device *dev,
return ret ? ret : len;
}
+static ssize_t adis16260_read_gyro_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ ssize_t ret = 0;
+
+ if (st->negate)
+ ret = sprintf(buf, "-");
+ ret += sprintf(buf + ret, "%s\n", "0.00127862821");
+
+ return ret;
+}
+
static int adis16260_reset(struct device *dev)
{
int ret;
@@ -447,18 +461,6 @@ static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply,
ADIS16260_SUPPLY_OUT);
static IIO_CONST_ATTR_IN_NAMED_SCALE(0, supply, "0.0018315");
-static IIO_DEV_ATTR_GYRO(adis16260_read_14bit_signed,
- ADIS16260_GYRO_OUT);
-static IIO_CONST_ATTR_GYRO_SCALE("0.00127862821");
-static IIO_DEV_ATTR_GYRO_CALIBSCALE(S_IWUSR | S_IRUGO,
- adis16260_read_14bit_signed,
- adis16260_write_16bit,
- ADIS16260_GYRO_SCALE);
-static IIO_DEV_ATTR_GYRO_CALIBBIAS(S_IWUSR | S_IRUGO,
- adis16260_read_12bit_signed,
- adis16260_write_16bit,
- ADIS16260_GYRO_OFF);
-
static IIO_DEV_ATTR_TEMP_RAW(adis16260_read_12bit_unsigned);
static IIO_CONST_ATTR_TEMP_OFFSET("25");
static IIO_CONST_ATTR_TEMP_SCALE("0.1453");
@@ -470,8 +472,6 @@ static IIO_CONST_ATTR(in1_scale, "0.0006105");
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
adis16260_read_frequency,
adis16260_write_frequency);
-static IIO_DEV_ATTR_ANGL(adis16260_read_14bit_signed,
- ADIS16260_ANGL_OUT);
static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16260_write_reset, 0);
@@ -487,38 +487,70 @@ static struct attribute_group adis16260_event_attribute_group = {
.attrs = adis16260_event_attributes,
};
-static struct attribute *adis16260_attributes[] = {
- &iio_dev_attr_in0_supply_raw.dev_attr.attr,
- &iio_const_attr_in0_supply_scale.dev_attr.attr,
- &iio_dev_attr_gyro_raw.dev_attr.attr,
- &iio_const_attr_gyro_scale.dev_attr.attr,
- &iio_dev_attr_gyro_calibscale.dev_attr.attr,
- &iio_dev_attr_gyro_calibbias.dev_attr.attr,
- &iio_dev_attr_angl_raw.dev_attr.attr,
- &iio_dev_attr_temp_raw.dev_attr.attr,
- &iio_const_attr_temp_offset.dev_attr.attr,
- &iio_const_attr_temp_scale.dev_attr.attr,
- &iio_dev_attr_in1_raw.dev_attr.attr,
- &iio_const_attr_in1_scale.dev_attr.attr,
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
- &iio_const_attr_name.dev_attr.attr,
- NULL
-};
+#define ADIS16260_GYRO_ATTR_SET(axis) \
+ IIO_DEV_ATTR_GYRO##axis(adis16260_read_14bit_signed, \
+ ADIS16260_GYRO_OUT); \
+ static IIO_DEV_ATTR_GYRO##axis##_SCALE(S_IRUGO, \
+ adis16260_read_gyro_scale, \
+ NULL, \
+ 0); \
+ static IIO_DEV_ATTR_GYRO##axis##_CALIBSCALE(S_IRUGO | S_IWUSR, \
+ adis16260_read_12bit_unsigned, \
+ adis16260_write_16bit, \
+ ADIS16260_GYRO_SCALE); \
+ static IIO_DEV_ATTR_GYRO##axis##_CALIBBIAS(S_IWUSR | S_IRUGO, \
+ adis16260_read_12bit_signed, \
+ adis16260_write_16bit, \
+ ADIS16260_GYRO_OFF); \
+ static IIO_DEV_ATTR_ANGL##axis(adis16260_read_14bit_signed, \
+ ADIS16260_ANGL_OUT);
+
+static ADIS16260_GYRO_ATTR_SET();
+static ADIS16260_GYRO_ATTR_SET(_X);
+static ADIS16260_GYRO_ATTR_SET(_Y);
+static ADIS16260_GYRO_ATTR_SET(_Z);
+
+#define ADIS16260_ATTR_GROUP(axis) \
+ struct attribute *adis16260_attributes##axis[] = { \
+ &iio_dev_attr_in0_supply_raw.dev_attr.attr, \
+ &iio_const_attr_in0_supply_scale.dev_attr.attr, \
+ &iio_dev_attr_gyro##axis##_raw.dev_attr.attr, \
+ &iio_dev_attr_gyro##axis##_scale.dev_attr.attr, \
+ &iio_dev_attr_gyro##axis##_calibscale.dev_attr.attr, \
+ &iio_dev_attr_gyro##axis##_calibbias.dev_attr.attr, \
+ &iio_dev_attr_angl##axis##_raw.dev_attr.attr, \
+ &iio_dev_attr_temp_raw.dev_attr.attr, \
+ &iio_const_attr_temp_offset.dev_attr.attr, \
+ &iio_const_attr_temp_scale.dev_attr.attr, \
+ &iio_dev_attr_in1_raw.dev_attr.attr, \
+ &iio_const_attr_in1_scale.dev_attr.attr, \
+ &iio_dev_attr_sampling_frequency.dev_attr.attr, \
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr, \
+ &iio_dev_attr_reset.dev_attr.attr, \
+ &iio_const_attr_name.dev_attr.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group adis16260_attribute_group##axis \
+ = { \
+ .attrs = adis16260_attributes##axis, \
+ };
-static const struct attribute_group adis16260_attribute_group = {
- .attrs = adis16260_attributes,
-};
+static ADIS16260_ATTR_GROUP();
+static ADIS16260_ATTR_GROUP(_x);
+static ADIS16260_ATTR_GROUP(_y);
+static ADIS16260_ATTR_GROUP(_z);
static int __devinit adis16260_probe(struct spi_device *spi)
{
int ret, regdone = 0;
+ struct adis16260_platform_data *pd = spi->dev.platform_data;
struct adis16260_state *st = kzalloc(sizeof *st, GFP_KERNEL);
if (!st) {
ret = -ENOMEM;
goto error_ret;
}
+ if (pd)
+ st->negate = pd->negate;
/* this is only used for removal purposes */
spi_set_drvdata(spi, st);
@@ -545,7 +577,24 @@ static int __devinit adis16260_probe(struct spi_device *spi)
st->indio_dev->dev.parent = &spi->dev;
st->indio_dev->num_interrupt_lines = 1;
st->indio_dev->event_attrs = &adis16260_event_attribute_group;
- st->indio_dev->attrs = &adis16260_attribute_group;
+ if (pd && pd->direction)
+ switch (pd->direction) {
+ case 'x':
+ st->indio_dev->attrs = &adis16260_attribute_group_x;
+ break;
+ case 'y':
+ st->indio_dev->attrs = &adis16260_attribute_group_y;
+ break;
+ case 'z':
+ st->indio_dev->attrs = &adis16260_attribute_group_z;
+ break;
+ default:
+ st->indio_dev->attrs = &adis16260_attribute_group;
+ break;
+ }
+ else
+ st->indio_dev->attrs = &adis16260_attribute_group;
+
st->indio_dev->dev_data = (void *)(st);
st->indio_dev->driver_module = THIS_MODULE;
st->indio_dev->modes = INDIO_DIRECT_MODE;
@@ -635,6 +684,18 @@ err_ret:
return ret;
}
+/*
+ * These parts do not need to be differentiated until someone adds
+ * support for the on chip filtering.
+ */
+static const struct spi_device_id adis16260_id[] = {
+ {"adis16260", 0},
+ {"adis16265", 0},
+ {"adis16250", 0},
+ {"adis16255", 0},
+ {}
+};
+
static struct spi_driver adis16260_driver = {
.driver = {
.name = "adis16260",
@@ -642,6 +703,7 @@ static struct spi_driver adis16260_driver = {
},
.probe = adis16260_probe,
.remove = __devexit_p(adis16260_remove),
+ .id_table = adis16260_id,
};
static __init int adis16260_init(void)
diff --git a/drivers/staging/iio/gyro/adis16260_platform_data.h b/drivers/staging/iio/gyro/adis16260_platform_data.h
new file mode 100644
index 000000000000..12802e97be92
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260_platform_data.h
@@ -0,0 +1,19 @@
+/*
+ * ADIS16260 Programmable Digital Gyroscope Sensor Driver Platform Data
+ *
+ * Based on adis16255.h Matthia Brugger <m_brugger&web.de>
+ *
+ * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/**
+ * struct adis16260_platform_data - instance specific data
+ * @direction: x y or z
+ * @negate: flag to indicate value should be inverted.
+ **/
+struct adis16260_platform_data {
+ char direction;
+ unsigned negate:1;
+};
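+
+/*
+ * Illustration only (not part of this driver): a board file might supply
+ * this structure through the standard spi_board_info mechanism, e.g.
+ *
+ * static struct adis16260_platform_data adis16260_pd = {
+ * .direction = 'y',
+ * .negate = 1,
+ * };
+ *
+ * static struct spi_board_info board_spi_devices[] __initdata = {
+ * {
+ * .modalias = "adis16260",
+ * .max_speed_hz = 1000000,
+ * .bus_num = 0,
+ * .chip_select = 0,
+ * .platform_data = &adis16260_pd,
+ * },
+ * };
+ *
+ * The bus number, chip select and speed above are placeholders.
+ */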
diff --git a/drivers/staging/iio/gyro/gyro.h b/drivers/staging/iio/gyro/gyro.h
index 98b837b775a2..b4ea5bf161ff 100644
--- a/drivers/staging/iio/gyro/gyro.h
+++ b/drivers/staging/iio/gyro/gyro.h
@@ -71,3 +71,12 @@
#define IIO_DEV_ATTR_ANGL(_show, _addr) \
IIO_DEVICE_ATTR(angl_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_X(_show, _addr) \
+ IIO_DEVICE_ATTR(angl_x_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_Y(_show, _addr) \
+ IIO_DEVICE_ATTR(angl_y_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_Z(_show, _addr) \
+ IIO_DEVICE_ATTR(angl_z_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/imu/adis16350_core.c b/drivers/staging/iio/imu/adis16350_core.c
index 97c1ec8594ce..cf7176bc766b 100644
--- a/drivers/staging/iio/imu/adis16350_core.c
+++ b/drivers/staging/iio/imu/adis16350_core.c
@@ -570,6 +570,7 @@ static struct attribute *adis16350_attributes[] = {
&iio_dev_attr_temp_y_raw.dev_attr.attr,
&iio_dev_attr_temp_z_raw.dev_attr.attr,
&iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
&iio_dev_attr_in1_raw.dev_attr.attr,
&iio_const_attr_in1_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
new file mode 100644
index 000000000000..12e36e460693
--- /dev/null
+++ b/drivers/staging/iio/meter/Kconfig
@@ -0,0 +1,61 @@
+#
+# IIO meter drivers configuration
+#
+comment "Active energy metering IC"
+
+config ADE7753
+ tristate "Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
+ Metering IC with di/dt Sensor Interface.
+
+config ADE7754
+ tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADE7754 Polyphase
+ Multifunction Energy Metering IC Driver.
+
+config ADE7758
+ tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADE7758 Polyphase
+ Multifunction Energy Metering IC with Per Phase Information Driver.
+
+config ADE7759
+ tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADE7759 Active Energy
+ Metering IC with di/dt Sensor Interface.
+
+config ADE7854
+ tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
+ depends on SPI || I2C
+ help
+ Say yes here to build support for Analog Devices ADE7854/58/68/78 Polyphase
+ Multifunction Energy Metering IC Driver.
+
+config ADE7854_I2C
+ tristate "support I2C bus connection"
+ depends on ADE7854 && I2C
+ default y
+ help
+ Say Y here if you have ADE7854/58/68/78 hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ade7854-i2c.
+
+config ADE7854_SPI
+ tristate "support SPI bus connection"
+ depends on ADE7854 && SPI
+ default y
+ help
+ Say Y here if you have ADE7854/58/68/78 hooked to a SPI bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ade7854-spi.
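+
+# Illustration only: a .config fragment enabling these drivers as modules
+# might look like
+# CONFIG_ADE7753=m
+# CONFIG_ADE7758=m
+# CONFIG_ADE7854=m
+# CONFIG_ADE7854_SPI=m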
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
new file mode 100644
index 000000000000..0cc7d5140dfe
--- /dev/null
+++ b/drivers/staging/iio/meter/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for metering ic drivers
+#
+
+obj-$(CONFIG_ADE7753) += ade7753.o
+obj-$(CONFIG_ADE7754) += ade7754.o
+
+ade7758-y := ade7758_core.o
+ade7758-$(CONFIG_IIO_RING_BUFFER) += ade7758_ring.o ade7758_trigger.o
+obj-$(CONFIG_ADE7758) += ade7758.o
+
+obj-$(CONFIG_ADE7759) += ade7759.o
+obj-$(CONFIG_ADE7854) += ade7854.o
+obj-$(CONFIG_ADE7854_I2C) += ade7854-i2c.o
+obj-$(CONFIG_ADE7854_SPI) += ade7854-spi.o
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
new file mode 100644
index 000000000000..e72afbd2b841
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -0,0 +1,730 @@
+/*
+ * ADE7753 Single-Phase Multifunction Metering IC with di/dt Sensor Interface Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7753.h"
+
+int ade7753_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7753_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7753_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 3,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7753_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7753_spi_read_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7753_READ_REG(reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7753_spi_read_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7753_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7753_spi_read_reg_24(struct device *dev,
+ u8 reg_address,
+ u32 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 4,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7753_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t ade7753_read_8bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7753_spi_read_reg_8(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7753_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7753_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7753_read_24bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7753_spi_read_reg_24(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7753_write_8bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7753_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7753_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7753_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int ade7753_reset(struct device *dev)
+{
+ int ret;
+ u16 val;
+
+ ret = ade7753_spi_read_reg_16(dev,
+ ADE7753_MODE,
+ &val);
+ if (ret)
+ return ret;
+ val |= 1 << 6; /* Software Chip Reset */
+ ret = ade7753_spi_write_reg_16(dev,
+ ADE7753_MODE,
+ val);
+
+ return ret;
+}
+
+static ssize_t ade7753_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ ret = ade7753_reset(dev);
+ return ret ? ret : len;
+ }
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
+static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
+static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
+static IIO_DEV_ATTR_LVAENERGY(ade7753_read_24bit, ADE7753_LVAENERGY);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_CFDEN);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_CFNUM);
+static IIO_DEV_ATTR_CHKSUM(ade7753_read_8bit, ADE7753_CHKSUM);
+static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_PHCAL);
+static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_APOS);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_SAGCYC);
+static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_SAGLVL);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_LINECYC);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_WDIV);
+static IIO_DEV_ATTR_IRMS(S_IRUGO,
+ ade7753_read_24bit,
+ NULL,
+ ADE7753_IRMS);
+static IIO_DEV_ATTR_VRMS(S_IRUGO,
+ ade7753_read_24bit,
+ NULL,
+ ADE7753_VRMS);
+static IIO_DEV_ATTR_IRMSOS(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_IRMSOS);
+static IIO_DEV_ATTR_VRMSOS(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_VRMSOS);
+static IIO_DEV_ATTR_WGAIN(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_WGAIN);
+static IIO_DEV_ATTR_VAGAIN(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_VAGAIN);
+static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+ ade7753_read_16bit,
+ ade7753_write_16bit,
+ ADE7753_GAIN);
+static IIO_DEV_ATTR_IPKLVL(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_IPKLVL);
+static IIO_DEV_ATTR_VPKLVL(S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_VPKLVL);
+static IIO_DEV_ATTR_IPEAK(S_IRUGO,
+ ade7753_read_24bit,
+ NULL,
+ ADE7753_IPEAK);
+static IIO_DEV_ATTR_VPEAK(S_IRUGO,
+ ade7753_read_24bit,
+ NULL,
+ ADE7753_VPEAK);
+static IIO_DEV_ATTR_VPERIOD(S_IRUGO,
+ ade7753_read_16bit,
+ NULL,
+ ADE7753_PERIOD);
+static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_CH1OS);
+static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+ ade7753_read_8bit,
+ ade7753_write_8bit,
+ ADE7753_CH2OS);
+
+static int ade7753_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u8 irqen;
+ ret = ade7753_spi_read_reg_8(dev, ADE7753_IRQEN, &irqen);
+ if (ret)
+ goto error_ret;
+
+ if (enable)
+ irqen |= 1 << 3; /* enable an interrupt when data is
+ present in the waveform register */
+ else
+ irqen &= ~(1 << 3);
+
+ ret = ade7753_spi_write_reg_8(dev, ADE7753_IRQEN, irqen);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+int ade7753_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val;
+
+ ret = ade7753_spi_read_reg_16(dev,
+ ADE7753_MODE,
+ &val);
+ if (ret)
+ return ret;
+ val |= 1 << 4; /* AD converters can be turned off */
+ ret = ade7753_spi_write_reg_16(dev,
+ ADE7753_MODE,
+ val);
+
+ return ret;
+}
+
+static int ade7753_initial_setup(struct ade7753_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use low spi speed for init */
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = ade7753_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ ade7753_reset(dev);
+ msleep(ADE7753_STARTUP_DELAY);
+
+err_ret:
+ return ret;
+}
+
+static ssize_t ade7753_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+
+ /* the sample-rate (DTRT) bits live in the 16-bit MODE register */
+ ret = ade7753_spi_read_reg_16(dev,
+ ADE7753_MODE,
+ &t);
+ if (ret)
+ return ret;
+
+ /* DTRT divides the 27.9 kSPS base rate by 2^DTRT */
+ t = (t >> 11) & 0x3;
+ sps = 27900 >> t;
+
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t ade7753_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ unsigned long val;
+ int ret;
+ u16 reg, t;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ /*
+ * Choose DTRT so that 27900 >> t does not exceed the requested rate,
+ * clamping at the 3.5 kSPS minimum.
+ */
+ for (t = 0; t < 3; t++)
+ if ((27900 >> t) <= val)
+ break;
+
+ if (t > 1)
+ st->us->max_speed_hz = ADE7753_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADE7753_SPI_FAST;
+
+ ret = ade7753_spi_read_reg_16(dev,
+ ADE7753_MODE,
+ &reg);
+ if (ret)
+ goto out;
+
+ reg &= ~(3 << 11);
+ reg |= t << 11;
+
+ ret = ade7753_spi_write_reg_16(dev,
+ ADE7753_MODE,
+ reg);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "-25 C");
+static IIO_CONST_ATTR(temp_scale, "0.67 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ade7753_read_frequency,
+ ade7753_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7753_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
+
+static IIO_CONST_ATTR(name, "ade7753");
+
+static struct attribute *ade7753_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group ade7753_event_attribute_group = {
+ .attrs = ade7753_event_attributes,
+};
+
+static struct attribute *ade7753_attributes[] = {
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_phcal.dev_attr.attr,
+ &iio_dev_attr_cfden.dev_attr.attr,
+ &iio_dev_attr_aenergy.dev_attr.attr,
+ &iio_dev_attr_laenergy.dev_attr.attr,
+ &iio_dev_attr_vaenergy.dev_attr.attr,
+ &iio_dev_attr_lvaenergy.dev_attr.attr,
+ &iio_dev_attr_cfnum.dev_attr.attr,
+ &iio_dev_attr_apos.dev_attr.attr,
+ &iio_dev_attr_sagcyc.dev_attr.attr,
+ &iio_dev_attr_saglvl.dev_attr.attr,
+ &iio_dev_attr_linecyc.dev_attr.attr,
+ &iio_dev_attr_chksum.dev_attr.attr,
+ &iio_dev_attr_pga_gain.dev_attr.attr,
+ &iio_dev_attr_wgain.dev_attr.attr,
+ &iio_dev_attr_choff_1.dev_attr.attr,
+ &iio_dev_attr_choff_2.dev_attr.attr,
+ &iio_dev_attr_wdiv.dev_attr.attr,
+ &iio_dev_attr_irms.dev_attr.attr,
+ &iio_dev_attr_vrms.dev_attr.attr,
+ &iio_dev_attr_irmsos.dev_attr.attr,
+ &iio_dev_attr_vrmsos.dev_attr.attr,
+ &iio_dev_attr_vagain.dev_attr.attr,
+ &iio_dev_attr_ipklvl.dev_attr.attr,
+ &iio_dev_attr_vpklvl.dev_attr.attr,
+ &iio_dev_attr_ipeak.dev_attr.attr,
+ &iio_dev_attr_vpeak.dev_attr.attr,
+ &iio_dev_attr_vperiod.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7753_attribute_group = {
+ .attrs = ade7753_attributes,
+};
+
+static int __devinit ade7753_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct ade7753_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADE7753_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADE7753_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &ade7753_event_attribute_group;
+ st->indio_dev->attrs = &ade7753_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ade7753_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = ade7753_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ade7753");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = ade7753_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = ade7753_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ ade7753_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ ade7753_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ ade7753_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int ade7753_remove(struct spi_device *spi)
+{
+ int ret;
+ struct ade7753_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = ade7753_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ ade7753_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ ade7753_uninitialize_ring(indio_dev->ring);
+ ade7753_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver ade7753_driver = {
+ .driver = {
+ .name = "ade7753",
+ .owner = THIS_MODULE,
+ },
+ .probe = ade7753_probe,
+ .remove = __devexit_p(ade7753_remove),
+};
+
+static __init int ade7753_init(void)
+{
+ return spi_register_driver(&ade7753_driver);
+}
+module_init(ade7753_init);
+
+static __exit void ade7753_exit(void)
+{
+ spi_unregister_driver(&ade7753_driver);
+}
+module_exit(ade7753_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver");
+MODULE_LICENSE("GPL v2");
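
For context, binding this driver requires the board code to declare the SPI slave. A minimal sketch using the standard spi_board_info API follows; the bus number, chip select, speed and IRQ line are placeholders, not values taken from this patch.

/*
 * Illustrative only -- not part of this patch.  How a board file might
 * declare the SPI slave so the "ade7753" driver above binds to it.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

#define ADE7753_IRQ	0	/* placeholder: the board's DRDY/IRQ line */

static struct spi_board_info ade7753_board_info[] __initdata = {
	{
		.modalias	= "ade7753",
		.max_speed_hz	= 1000000,	/* placeholder */
		.bus_num	= 0,		/* placeholder */
		.chip_select	= 1,		/* placeholder */
		.mode		= SPI_MODE_3,
		.irq		= ADE7753_IRQ,
	},
};

/* typically called from the machine init code */
static int __init ade7753_board_register(void)
{
	return spi_register_board_info(ade7753_board_info,
				       ARRAY_SIZE(ade7753_board_info));
}
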
diff --git a/drivers/staging/iio/meter/ade7753.h b/drivers/staging/iio/meter/ade7753.h
new file mode 100644
index 000000000000..a3722b8c90fa
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7753.h
@@ -0,0 +1,140 @@
+#ifndef _ADE7753_H
+#define _ADE7753_H
+
+#define ADE7753_WAVEFORM 0x01
+#define ADE7753_AENERGY 0x02
+#define ADE7753_RAENERGY 0x03
+#define ADE7753_LAENERGY 0x04
+#define ADE7753_VAENERGY 0x05
+#define ADE7753_RVAENERGY 0x06
+#define ADE7753_LVAENERGY 0x07
+#define ADE7753_LVARENERGY 0x08
+#define ADE7753_MODE 0x09
+#define ADE7753_IRQEN 0x0A
+#define ADE7753_STATUS 0x0B
+#define ADE7753_RSTSTATUS 0x0C
+#define ADE7753_CH1OS 0x0D
+#define ADE7753_CH2OS 0x0E
+#define ADE7753_GAIN 0x0F
+#define ADE7753_PHCAL 0x10
+#define ADE7753_APOS 0x11
+#define ADE7753_WGAIN 0x12
+#define ADE7753_WDIV 0x13
+#define ADE7753_CFNUM 0x14
+#define ADE7753_CFDEN 0x15
+#define ADE7753_IRMS 0x16
+#define ADE7753_VRMS 0x17
+#define ADE7753_IRMSOS 0x18
+#define ADE7753_VRMSOS 0x19
+#define ADE7753_VAGAIN 0x1A
+#define ADE7753_VADIV 0x1B
+#define ADE7753_LINECYC 0x1C
+#define ADE7753_ZXTOUT 0x1D
+#define ADE7753_SAGCYC 0x1E
+#define ADE7753_SAGLVL 0x1F
+#define ADE7753_IPKLVL 0x20
+#define ADE7753_VPKLVL 0x21
+#define ADE7753_IPEAK 0x22
+#define ADE7753_RSTIPEAK 0x23
+#define ADE7753_VPEAK 0x24
+#define ADE7753_RSTVPEAK 0x25
+#define ADE7753_TEMP 0x26
+#define ADE7753_PERIOD 0x27
+#define ADE7753_TMODE 0x3D
+#define ADE7753_CHKSUM 0x3E
+#define ADE7753_DIEREV 0x3F
+
+#define ADE7753_READ_REG(a) a
+#define ADE7753_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7753_MAX_TX 4
+#define ADE7753_MAX_RX 4
+#define ADE7753_STARTUP_DELAY 1
+
+#define ADE7753_SPI_SLOW (u32)(300 * 1000)
+#define ADE7753_SPI_BURST (u32)(1000 * 1000)
+#define ADE7753_SPI_FAST (u32)(2000 * 1000)
+
+#define DRIVER_NAME "ade7753"
+
+/**
+ * struct ade7753_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7753_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7753_scan {
+ ADE7753_SCAN_ACTIVE_POWER,
+ ADE7753_SCAN_CH1,
+ ADE7753_SCAN_CH2,
+};
+
+void ade7753_remove_trigger(struct iio_dev *indio_dev);
+int ade7753_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7753_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int ade7753_configure_ring(struct iio_dev *indio_dev);
+void ade7753_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7753_initialize_ring(struct iio_ring_buffer *ring);
+void ade7753_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7753_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7753_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+ade7753_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int ade7753_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void ade7753_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7753_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+static inline void ade7753_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
new file mode 100644
index 000000000000..23dedfa7a270
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -0,0 +1,756 @@
+/*
+ * ADE7754 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7754.h"
+
+static int ade7754_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7754_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7754_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 3,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7754_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7754_spi_read_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7754_READ_REG(reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7754_spi_read_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7754_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7754_spi_read_reg_24(struct device *dev,
+ u8 reg_address,
+ u32 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 4,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7754_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
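
The three read helpers above differ only in transfer length and in how the received bytes are packed. As an aside, a single length-parameterised variant built on the same spi_sync() pattern could look like the sketch below; the function name is hypothetical and it is not part of this patch.

/* Illustrative only -- not part of this patch. len is 1, 2 or 3 payload
 * bytes; the device sends registers big-endian, as in the helpers above. */
static int ade7754_spi_read_reg(struct device *dev, u8 reg_address,
				u32 *val, unsigned int len)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
	unsigned int i;
	int ret;
	struct spi_transfer xfer = {
		.tx_buf = st->tx,
		.rx_buf = st->rx,
		.bits_per_word = 8,
		.len = len + 1,
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADE7754_READ_REG(reg_address);
	for (i = 1; i <= len; i++)
		st->tx[i] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev, "problem when reading register 0x%02X",
			reg_address);
		goto error_ret;
	}

	*val = 0;
	for (i = 1; i <= len; i++)
		*val = (*val << 8) | st->rx[i];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}
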
+
+static ssize_t ade7754_read_8bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7754_spi_read_reg_8(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7754_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7754_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7754_read_24bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7754_spi_read_reg_24(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7754_write_8bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7754_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7754_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7754_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int ade7754_reset(struct device *dev)
+{
+ int ret;
+ u8 val;
+
+ ret = ade7754_spi_read_reg_8(dev,
+ ADE7754_OPMODE,
+ &val);
+ if (ret)
+ return ret;
+
+ val |= 1 << 6; /* Software Chip Reset */
+ ret = ade7754_spi_write_reg_8(dev,
+ ADE7754_OPMODE,
+ val);
+
+ return ret;
+}
+
+
+static ssize_t ade7754_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return ade7754_reset(dev);
+ }
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
+static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
+static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
+static IIO_DEV_ATTR_LVAENERGY(ade7754_read_24bit, ADE7754_LVAENERGY);
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_CPHCAL);
+static IIO_DEV_ATTR_AAPOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_AAPOS);
+static IIO_DEV_ATTR_BAPOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_BAPOS);
+static IIO_DEV_ATTR_CAPOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CAPOS);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_WDIV);
+static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+ ade7754_read_8bit,
+ ade7754_write_8bit,
+ ADE7754_VADIV);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CFNUM);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CFDEN);
+static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_AAPGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_BAPGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CAPGAIN);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_CIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+ ade7754_read_24bit,
+ NULL,
+ ADE7754_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
+ ade7754_read_16bit,
+ ade7754_write_16bit,
+ ADE7754_CVRMSOS);
+
+static int ade7754_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 irqen;
+ ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen);
+ if (ret)
+ goto error_ret;
+
+ if (enable)
+ irqen |= 1 << 14; /* enable an interrupt when data is
+ present in the waveform register */
+ else
+ irqen &= ~(1 << 14);
+
+ ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+static int ade7754_stop_device(struct device *dev)
+{
+ int ret;
+ u8 val;
+
+ ret = ade7754_spi_read_reg_8(dev,
+ ADE7754_OPMODE,
+ &val);
+ if (ret)
+ return ret;
+
+ val |= 7 << 3; /* ADE7754 powered down */
+ ret = ade7754_spi_write_reg_8(dev,
+ ADE7754_OPMODE,
+ val);
+
+ return ret;
+}
+
+static int ade7754_initial_setup(struct ade7754_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* set up the SPI bus mode */
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = ade7754_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ ade7754_reset(dev);
+ msleep(ADE7754_STARTUP_DELAY);
+
+err_ret:
+ return ret;
+}
+
+static ssize_t ade7754_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u8 t;
+ int sps;
+ ret = ade7754_spi_read_reg_8(dev,
+ ADE7754_WAVMODE,
+ &t);
+ if (ret)
+ return ret;
+
+ t = (t >> 3) & 0x3;
+ sps = 26000 / (1 + t);
+
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t ade7754_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ unsigned long val;
+ int ret;
+ u8 reg, t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ /* map the requested rate onto the 2-bit field: 26000 / (1 + t) SPS */
+ for (t = 0; t < 3; t++)
+ if (26000 / (t + 2) < val)
+ break;
+
+ if (t > 1)
+ st->us->max_speed_hz = ADE7754_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADE7754_SPI_FAST;
+
+ ret = ade7754_spi_read_reg_8(dev,
+ ADE7754_WAVMODE,
+ &reg);
+ if (ret)
+ goto out;
+
+ reg &= ~(3 << 3);
+ reg |= t << 3;
+
+ ret = ade7754_spi_write_reg_8(dev,
+ ADE7754_WAVMODE,
+ reg);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "129 C");
+static IIO_CONST_ATTR(temp_scale, "4 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ade7754_read_frequency,
+ ade7754_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7754_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
+
+static IIO_CONST_ATTR(name, "ade7754");
+
+static struct attribute *ade7754_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group ade7754_event_attribute_group = {
+ .attrs = ade7754_event_attributes,
+};
+
+static struct attribute *ade7754_attributes[] = {
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_aenergy.dev_attr.attr,
+ &iio_dev_attr_laenergy.dev_attr.attr,
+ &iio_dev_attr_vaenergy.dev_attr.attr,
+ &iio_dev_attr_lvaenergy.dev_attr.attr,
+ &iio_dev_attr_vpeak.dev_attr.attr,
+ &iio_dev_attr_ipeak.dev_attr.attr,
+ &iio_dev_attr_aphcal.dev_attr.attr,
+ &iio_dev_attr_bphcal.dev_attr.attr,
+ &iio_dev_attr_cphcal.dev_attr.attr,
+ &iio_dev_attr_aapos.dev_attr.attr,
+ &iio_dev_attr_bapos.dev_attr.attr,
+ &iio_dev_attr_capos.dev_attr.attr,
+ &iio_dev_attr_wdiv.dev_attr.attr,
+ &iio_dev_attr_vadiv.dev_attr.attr,
+ &iio_dev_attr_cfnum.dev_attr.attr,
+ &iio_dev_attr_cfden.dev_attr.attr,
+ &iio_dev_attr_active_power_a_gain.dev_attr.attr,
+ &iio_dev_attr_active_power_b_gain.dev_attr.attr,
+ &iio_dev_attr_active_power_c_gain.dev_attr.attr,
+ &iio_dev_attr_airms.dev_attr.attr,
+ &iio_dev_attr_birms.dev_attr.attr,
+ &iio_dev_attr_cirms.dev_attr.attr,
+ &iio_dev_attr_avrms.dev_attr.attr,
+ &iio_dev_attr_bvrms.dev_attr.attr,
+ &iio_dev_attr_cvrms.dev_attr.attr,
+ &iio_dev_attr_airmsos.dev_attr.attr,
+ &iio_dev_attr_birmsos.dev_attr.attr,
+ &iio_dev_attr_cirmsos.dev_attr.attr,
+ &iio_dev_attr_avrmsos.dev_attr.attr,
+ &iio_dev_attr_bvrmsos.dev_attr.attr,
+ &iio_dev_attr_cvrmsos.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7754_attribute_group = {
+ .attrs = ade7754_attributes,
+};
+
+
+
+static int __devinit ade7754_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct ade7754_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADE7754_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADE7754_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &ade7754_event_attribute_group;
+ st->indio_dev->attrs = &ade7754_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ade7754_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = ade7754_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ade7754");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = ade7754_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = ade7754_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ ade7754_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ ade7754_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ ade7754_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int __devexit ade7754_remove(struct spi_device *spi)
+{
+ int ret;
+ struct ade7754_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = ade7754_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ ade7754_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ ade7754_uninitialize_ring(indio_dev->ring);
+ ade7754_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver ade7754_driver = {
+ .driver = {
+ .name = "ade7754",
+ .owner = THIS_MODULE,
+ },
+ .probe = ade7754_probe,
+ .remove = __devexit_p(ade7754_remove),
+};
+
+static __init int ade7754_init(void)
+{
+ return spi_register_driver(&ade7754_driver);
+}
+module_init(ade7754_init);
+
+static __exit void ade7754_exit(void)
+{
+ spi_unregister_driver(&ade7754_driver);
+}
+module_exit(ade7754_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
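
Once the module is loaded and the device registered, the attributes above appear as plain text sysfs files. A minimal user-space sketch that reads one of them follows; the device path depends on how the IIO core enumerates devices, so the path shown is only an example.

/* Illustrative user-space reader; the sysfs path below is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned int avrms;
	FILE *f = fopen("/sys/bus/iio/devices/device0/avrms", "r");

	if (!f) {
		perror("open avrms attribute");
		return 1;
	}
	if (fscanf(f, "%u", &avrms) == 1)
		printf("phase A voltage rms (raw): %u\n", avrms);
	fclose(f);
	return 0;
}
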
diff --git a/drivers/staging/iio/meter/ade7754.h b/drivers/staging/iio/meter/ade7754.h
new file mode 100644
index 000000000000..f6a3e4b926cf
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7754.h
@@ -0,0 +1,161 @@
+#ifndef _ADE7754_H
+#define _ADE7754_H
+
+#define ADE7754_AENERGY 0x01
+#define ADE7754_RAENERGY 0x02
+#define ADE7754_LAENERGY 0x03
+#define ADE7754_VAENERGY 0x04
+#define ADE7754_RVAENERGY 0x05
+#define ADE7754_LVAENERGY 0x06
+#define ADE7754_PERIOD 0x07
+#define ADE7754_TEMP 0x08
+#define ADE7754_WFORM 0x09
+#define ADE7754_OPMODE 0x0A
+#define ADE7754_MMODE 0x0B
+#define ADE7754_WAVMODE 0x0C
+#define ADE7754_WATMODE 0x0D
+#define ADE7754_VAMODE 0x0E
+#define ADE7754_IRQEN 0x0F
+#define ADE7754_STATUS 0x10
+#define ADE7754_RSTATUS 0x11
+#define ADE7754_ZXTOUT 0x12
+#define ADE7754_LINCYC 0x13
+#define ADE7754_SAGCYC 0x14
+#define ADE7754_SAGLVL 0x15
+#define ADE7754_VPEAK 0x16
+#define ADE7754_IPEAK 0x17
+#define ADE7754_GAIN 0x18
+#define ADE7754_AWG 0x19
+#define ADE7754_BWG 0x1A
+#define ADE7754_CWG 0x1B
+#define ADE7754_AVAG 0x1C
+#define ADE7754_BVAG 0x1D
+#define ADE7754_CVAG 0x1E
+#define ADE7754_APHCAL 0x1F
+#define ADE7754_BPHCAL 0x20
+#define ADE7754_CPHCAL 0x21
+#define ADE7754_AAPOS 0x22
+#define ADE7754_BAPOS 0x23
+#define ADE7754_CAPOS 0x24
+#define ADE7754_CFNUM 0x25
+#define ADE7754_CFDEN 0x26
+#define ADE7754_WDIV 0x27
+#define ADE7754_VADIV 0x28
+#define ADE7754_AIRMS 0x29
+#define ADE7754_BIRMS 0x2A
+#define ADE7754_CIRMS 0x2B
+#define ADE7754_AVRMS 0x2C
+#define ADE7754_BVRMS 0x2D
+#define ADE7754_CVRMS 0x2E
+#define ADE7754_AIRMSOS 0x2F
+#define ADE7754_BIRMSOS 0x30
+#define ADE7754_CIRMSOS 0x31
+#define ADE7754_AVRMSOS 0x32
+#define ADE7754_BVRMSOS 0x33
+#define ADE7754_CVRMSOS 0x34
+#define ADE7754_AAPGAIN 0x35
+#define ADE7754_BAPGAIN 0x36
+#define ADE7754_CAPGAIN 0x37
+#define ADE7754_AVGAIN 0x38
+#define ADE7754_BVGAIN 0x39
+#define ADE7754_CVGAIN 0x3A
+#define ADE7754_CHKSUM 0x3E
+#define ADE7754_VERSION 0x3F
+
+#define ADE7754_READ_REG(a) a
+#define ADE7754_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7754_MAX_TX 4
+#define ADE7754_MAX_RX 4
+#define ADE7754_STARTUP_DELAY 1
+
+#define ADE7754_SPI_SLOW (u32)(300 * 1000)
+#define ADE7754_SPI_BURST (u32)(1000 * 1000)
+#define ADE7754_SPI_FAST (u32)(2000 * 1000)
+
+#define DRIVER_NAME "ade7754"
+
+/**
+ * struct ade7754_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7754_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7754_scan {
+ ADE7754_SCAN_PHA_V,
+ ADE7754_SCAN_PHB_V,
+ ADE7754_SCAN_PHC_V,
+ ADE7754_SCAN_PHA_I,
+ ADE7754_SCAN_PHB_I,
+ ADE7754_SCAN_PHC_I,
+};
+
+void ade7754_remove_trigger(struct iio_dev *indio_dev);
+int ade7754_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7754_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int ade7754_configure_ring(struct iio_dev *indio_dev);
+void ade7754_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7754_initialize_ring(struct iio_ring_buffer *ring);
+void ade7754_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7754_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7754_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+ade7754_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int ade7754_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void ade7754_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7754_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+static inline void ade7754_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
new file mode 100644
index 000000000000..df5bb7ba5a0f
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -0,0 +1,171 @@
+#ifndef _ADE7758_H
+#define _ADE7758_H
+
+#define ADE7758_AWATTHR 0x01
+#define ADE7758_BWATTHR 0x02
+#define ADE7758_CWATTHR 0x03
+#define ADE7758_AVARHR 0x04
+#define ADE7758_BVARHR 0x05
+#define ADE7758_CVARHR 0x06
+#define ADE7758_AVAHR 0x07
+#define ADE7758_BVAHR 0x08
+#define ADE7758_CVAHR 0x09
+#define ADE7758_AIRMS 0x0A
+#define ADE7758_BIRMS 0x0B
+#define ADE7758_CIRMS 0x0C
+#define ADE7758_AVRMS 0x0D
+#define ADE7758_BVRMS 0x0E
+#define ADE7758_CVRMS 0x0F
+#define ADE7758_FREQ 0x10
+#define ADE7758_TEMP 0x11
+#define ADE7758_WFORM 0x12
+#define ADE7758_OPMODE 0x13
+#define ADE7758_MMODE 0x14
+#define ADE7758_WAVMODE 0x15
+#define ADE7758_COMPMODE 0x16
+#define ADE7758_LCYCMODE 0x17
+#define ADE7758_MASK 0x18
+#define ADE7758_STATUS 0x19
+#define ADE7758_RSTATUS 0x1A
+#define ADE7758_ZXTOUT 0x1B
+#define ADE7758_LINECYC 0x1C
+#define ADE7758_SAGCYC 0x1D
+#define ADE7758_SAGLVL 0x1E
+#define ADE7758_VPINTLVL 0x1F
+#define ADE7758_IPINTLVL 0x20
+#define ADE7758_VPEAK 0x21
+#define ADE7758_IPEAK 0x22
+#define ADE7758_GAIN 0x23
+#define ADE7758_AVRMSGAIN 0x24
+#define ADE7758_BVRMSGAIN 0x25
+#define ADE7758_CVRMSGAIN 0x26
+#define ADE7758_AIGAIN 0x27
+#define ADE7758_BIGAIN 0x28
+#define ADE7758_CIGAIN 0x29
+#define ADE7758_AWG 0x2A
+#define ADE7758_BWG 0x2B
+#define ADE7758_CWG 0x2C
+#define ADE7758_AVARG 0x2D
+#define ADE7758_BVARG 0x2E
+#define ADE7758_CVARG 0x2F
+#define ADE7758_AVAG 0x30
+#define ADE7758_BVAG 0x31
+#define ADE7758_CVAG 0x32
+#define ADE7758_AVRMSOS 0x33
+#define ADE7758_BVRMSOS 0x34
+#define ADE7758_CVRMSOS 0x35
+#define ADE7758_AIRMSOS 0x36
+#define ADE7758_BIRMSOS 0x37
+#define ADE7758_CIRMSOS 0x38
+#define ADE7758_AWAITOS 0x39
+#define ADE7758_BWAITOS 0x3A
+#define ADE7758_CWAITOS 0x3B
+#define ADE7758_AVAROS 0x3C
+#define ADE7758_BVAROS 0x3D
+#define ADE7758_CVAROS 0x3E
+#define ADE7758_APHCAL 0x3F
+#define ADE7758_BPHCAL 0x40
+#define ADE7758_CPHCAL 0x41
+#define ADE7758_WDIV 0x42
+#define ADE7758_VADIV 0x44
+#define ADE7758_VARDIV 0x43
+#define ADE7758_APCFNUM 0x45
+#define ADE7758_APCFDEN 0x46
+#define ADE7758_VARCFNUM 0x47
+#define ADE7758_VARCFDEN 0x48
+#define ADE7758_CHKSUM 0x7E
+#define ADE7758_VERSION 0x7F
+
+#define ADE7758_READ_REG(a) a
+#define ADE7758_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7758_MAX_TX 8
+#define ADE7758_MAX_RX 4
+#define ADE7758_STARTUP_DELAY 1
+
+#define ADE7758_SPI_SLOW (u32)(300 * 1000)
+#define ADE7758_SPI_BURST (u32)(1000 * 1000)
+#define ADE7758_SPI_FAST (u32)(2000 * 1000)
+
+#define DRIVER_NAME "ade7758"
+
+/**
+ * struct ade7758_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7758_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7758_scan {
+ ADE7758_SCAN_WFORM,
+};
+
+void ade7758_remove_trigger(struct iio_dev *indio_dev);
+int ade7758_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7758_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int ade7758_configure_ring(struct iio_dev *indio_dev);
+void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7758_initialize_ring(struct iio_ring_buffer *ring);
+void ade7758_uninitialize_ring(struct iio_ring_buffer *ring);
+int ade7758_set_irq(struct device *dev, bool enable);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+ade7758_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int ade7758_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+static inline void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
new file mode 100644
index 000000000000..b7634cb7aa4f
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -0,0 +1,866 @@
+/*
+ * ADE7758 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7758.h"
+
+int ade7758_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7758_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 3,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7758_spi_write_reg_24(struct device *dev,
+ u8 reg_address,
+ u32 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 4,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 16) & 0xFF;
+ st->tx[2] = (value >> 8) & 0xFF;
+ st->tx[3] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7758_spi_read_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_READ_REG(reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7758_spi_read_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7758_spi_read_reg_24(struct device *dev,
+ u8 reg_address,
+ u32 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 4,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t ade7758_read_8bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7758_spi_read_reg_8(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7758_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7758_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7758_read_24bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7758_spi_read_reg_24(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7758_write_8bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7758_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7758_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7758_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+int ade7758_reset(struct device *dev)
+{
+ int ret;
+ u8 val;
+
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_OPMODE,
+ &val);
+ if (ret)
+ return ret;
+
+ val |= 1 << 6; /* Software Chip Reset */
+ ret = ade7758_spi_write_reg_8(dev,
+ ADE7758_OPMODE,
+ val);
+
+ return ret;
+}
+
+static ssize_t ade7758_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return ade7758_reset(dev);
+ }
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_CPHCAL);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_WDIV);
+static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+ ade7758_read_8bit,
+ ade7758_write_8bit,
+ ADE7758_VADIV);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_CIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+ ade7758_read_24bit,
+ NULL,
+ ADE7758_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_CVRMSOS);
+static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_AIGAIN);
+static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_BIGAIN);
+static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_CIGAIN);
+static IIO_DEV_ATTR_AVRMSGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_AVRMSGAIN);
+static IIO_DEV_ATTR_BVRMSGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_BVRMSGAIN);
+static IIO_DEV_ATTR_CVRMSGAIN(S_IWUSR | S_IRUGO,
+ ade7758_read_16bit,
+ ade7758_write_16bit,
+ ADE7758_CVRMSGAIN);
+
+int ade7758_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u32 irqen;
+ ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen);
+ if (ret)
+ goto error_ret;
+
+ if (enable)
+ irqen |= 1 << 16; /* enable an interrupt when data is
+ present in the waveform register */
+ else
+ irqen &= ~(1 << 16);
+
+ ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+static int ade7758_stop_device(struct device *dev)
+{
+ int ret;
+ u8 val;
+
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_OPMODE,
+ &val);
+ if (ret)
+ return ret;
+
+ val |= 7 << 3; /* ADE7758 powered down */
+ ret = ade7758_spi_write_reg_8(dev,
+ ADE7758_OPMODE,
+ val);
+
+ return ret;
+}
+
+static int ade7758_initial_setup(struct ade7758_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* set up the SPI bus mode */
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = ade7758_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ ade7758_reset(dev);
+ msleep(ADE7758_STARTUP_DELAY);
+
+err_ret:
+ return ret;
+}
+
+static ssize_t ade7758_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u8 t;
+ int sps;
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_WAVMODE,
+ &t);
+ if (ret)
+ return ret;
+
+ t = (t >> 5) & 0x3;
+ sps = 26040 / (1 << t);
+
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
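
For reference, the 2-bit rate field decoded above selects 26040 / 2^t samples per second. A minimal table of the resulting rates, derived directly from the formula in the function above and shown only for illustration:

/* Illustrative only: DTRT field value -> output sample rate in SPS. */
static const int ade7758_sample_rates[4] = {
	26040,	/* t = 0 */
	13020,	/* t = 1 */
	6510,	/* t = 2 */
	3255,	/* t = 3 */
};
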
+
+static ssize_t ade7758_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ unsigned long val;
+ int ret;
+ u8 reg, t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ /* map the requested rate onto the 2-bit DTRT field: 26040 / 2^t SPS */
+ for (t = 0; t < 3; t++)
+ if ((26040 >> (t + 1)) < val)
+ break;
+
+ if (t > 1)
+ st->us->max_speed_hz = ADE7758_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADE7758_SPI_FAST;
+
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_WAVMODE,
+ &reg);
+ if (ret)
+ goto out;
+
+ reg &= ~(3 << 5);
+ reg |= t << 5;
+
+ ret = ade7758_spi_write_reg_8(dev,
+ ADE7758_WAVMODE,
+ reg);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ade7758_read_waveform_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u8 t;
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_WAVMODE,
+ &t);
+ if (ret)
+ return ret;
+
+ t = (t >> 2) & 0x7;
+
+ len = sprintf(buf, "%d\n", t);
+
+ return len;
+}
+
+static ssize_t ade7758_write_waveform_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+ u8 reg;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val > 4)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = ade7758_spi_read_reg_8(dev,
+ ADE7758_WAVMODE,
+ &reg);
+ if (ret)
+ goto out;
+
+ reg &= ~(7 << 2);
+ reg |= val << 2;
+
+ ret = ade7758_spi_write_reg_8(dev,
+ ADE7758_WAVMODE,
+ reg);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "129 C");
+static IIO_CONST_ATTR(temp_scale, "4 C");
+
+static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit,
+ ADE7758_AWATTHR);
+static IIO_DEV_ATTR_BWATTHR(ade7758_read_16bit,
+ ADE7758_BWATTHR);
+static IIO_DEV_ATTR_CWATTHR(ade7758_read_16bit,
+ ADE7758_CWATTHR);
+static IIO_DEV_ATTR_AVARHR(ade7758_read_16bit,
+ ADE7758_AVARHR);
+static IIO_DEV_ATTR_BVARHR(ade7758_read_16bit,
+ ADE7758_BVARHR);
+static IIO_DEV_ATTR_CVARHR(ade7758_read_16bit,
+ ADE7758_CVARHR);
+static IIO_DEV_ATTR_AVAHR(ade7758_read_16bit,
+ ADE7758_AVAHR);
+static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
+ ADE7758_BVAHR);
+static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
+ ADE7758_CVAHR);
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ade7758_read_frequency,
+ ade7758_write_frequency);
+
+/**
+ * IIO_DEV_ATTR_WAVEFORM_TYPE - set the type of waveform.
+ * @_mode: sysfs file mode/permissions
+ * @_show: output method for the attribute
+ * @_store: input method for the attribute
+ **/
+#define IIO_DEV_ATTR_WAVEFORM_TYPE(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(waveform_type, _mode, _show, _store, 0)
+
+static IIO_DEV_ATTR_WAVEFORM_TYPE(S_IWUSR | S_IRUGO,
+ ade7758_read_waveform_type,
+ ade7758_write_waveform_type);
+
+static IIO_DEV_ATTR_RESET(ade7758_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
+
+static IIO_CONST_ATTR(name, "ade7758");
+
+static struct attribute *ade7758_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group ade7758_event_attribute_group = {
+ .attrs = ade7758_event_attributes,
+};
+
+static struct attribute *ade7758_attributes[] = {
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_waveform_type.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_awatthr.dev_attr.attr,
+ &iio_dev_attr_bwatthr.dev_attr.attr,
+ &iio_dev_attr_cwatthr.dev_attr.attr,
+ &iio_dev_attr_avarhr.dev_attr.attr,
+ &iio_dev_attr_bvarhr.dev_attr.attr,
+ &iio_dev_attr_cvarhr.dev_attr.attr,
+ &iio_dev_attr_avahr.dev_attr.attr,
+ &iio_dev_attr_bvahr.dev_attr.attr,
+ &iio_dev_attr_cvahr.dev_attr.attr,
+ &iio_dev_attr_vpeak.dev_attr.attr,
+ &iio_dev_attr_ipeak.dev_attr.attr,
+ &iio_dev_attr_aphcal.dev_attr.attr,
+ &iio_dev_attr_bphcal.dev_attr.attr,
+ &iio_dev_attr_cphcal.dev_attr.attr,
+ &iio_dev_attr_wdiv.dev_attr.attr,
+ &iio_dev_attr_vadiv.dev_attr.attr,
+ &iio_dev_attr_airms.dev_attr.attr,
+ &iio_dev_attr_birms.dev_attr.attr,
+ &iio_dev_attr_cirms.dev_attr.attr,
+ &iio_dev_attr_avrms.dev_attr.attr,
+ &iio_dev_attr_bvrms.dev_attr.attr,
+ &iio_dev_attr_cvrms.dev_attr.attr,
+ &iio_dev_attr_aigain.dev_attr.attr,
+ &iio_dev_attr_bigain.dev_attr.attr,
+ &iio_dev_attr_cigain.dev_attr.attr,
+ &iio_dev_attr_avrmsgain.dev_attr.attr,
+ &iio_dev_attr_bvrmsgain.dev_attr.attr,
+ &iio_dev_attr_cvrmsgain.dev_attr.attr,
+ &iio_dev_attr_airmsos.dev_attr.attr,
+ &iio_dev_attr_birmsos.dev_attr.attr,
+ &iio_dev_attr_cirmsos.dev_attr.attr,
+ &iio_dev_attr_avrmsos.dev_attr.attr,
+ &iio_dev_attr_bvrmsos.dev_attr.attr,
+ &iio_dev_attr_cvrmsos.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7758_attribute_group = {
+ .attrs = ade7758_attributes,
+};
+
+
+
+static int __devinit ade7758_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct ade7758_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADE7758_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADE7758_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &ade7758_event_attribute_group;
+ st->indio_dev->attrs = &ade7758_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ade7758_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = ade7758_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ade7758");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = ade7758_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = ade7758_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ ade7758_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ ade7758_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ ade7758_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ade7758_remove(struct spi_device *spi)
+{
+ int ret;
+ struct ade7758_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = ade7758_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ ade7758_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ ade7758_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ ade7758_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver ade7758_driver = {
+ .driver = {
+ .name = "ade7758",
+ .owner = THIS_MODULE,
+ },
+ .probe = ade7758_probe,
+ .remove = __devexit_p(ade7758_remove),
+};
+
+static __init int ade7758_init(void)
+{
+ return spi_register_driver(&ade7758_driver);
+}
+module_init(ade7758_init);
+
+static __exit void ade7758_exit(void)
+{
+ spi_unregister_driver(&ade7758_driver);
+}
+module_exit(ade7758_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
new file mode 100644
index 000000000000..274b4a07808c
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -0,0 +1,212 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "ade7758.h"
+
+/**
+ * combine_8_to_32() utility function to munge three u8s into a u32
+ **/
+static inline u32 combine_8_to_32(u8 lower, u8 mid, u8 upper)
+{
+ u32 _lower = lower;
+ u32 _mid = mid;
+ u32 _upper = upper;
+
+ return _lower | (_mid << 8) | (_upper << 16);
+}
+
+static IIO_SCAN_EL_C(wform, ADE7758_SCAN_WFORM, ADE7758_WFORM, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(wform, s, 24, 32);
+static IIO_SCAN_EL_TIMESTAMP(1);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *ade7758_scan_el_attrs[] = {
+ &iio_scan_el_wform.dev_attr.attr,
+ &iio_const_attr_wform_index.dev_attr.attr,
+ &iio_const_attr_wform_type.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ &iio_const_attr_timestamp_index.dev_attr.attr,
+ &iio_const_attr_timestamp_type.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ade7758_scan_el_group = {
+ .attrs = ade7758_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * ade7758_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void ade7758_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = time;
+ schedule_work(&st->work_trigger_to_ring);
+ /* Indicate that this interrupt is being handled */
+
+ /* Technically this is trigger related, but without this
+ * handler running there is currently no way for the interrupt
+ * to clear.
+ */
+}
+
+/**
+ * ade7758_spi_read_burst() - read the status and waveform registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 4 bytes)
+ **/
+static int ade7758_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 4,
+ }, {
+ .tx_buf = st->tx + 4,
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 4,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+ st->tx[4] = ADE7758_READ_REG(ADE7758_WFORM);
+ st->tx[5] = 0;
+ st->tx[6] = 0;
+ st->tx[7] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when reading WFORM value\n");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
+ * specific to be rolled into the core.
+ */
+static void ade7758_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct ade7758_state *st
+ = container_of(work_s, struct ade7758_state,
+ work_trigger_to_ring);
+ struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+ int i = 0;
+ s32 *data;
+ size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (ring->scan_count)
+ if (ade7758_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < ring->scan_count; i++)
+ data[i] = combine_8_to_32(st->rx[i*2+2],
+ st->rx[i*2+1],
+ st->rx[i*2]);
+
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (ring->scan_timestamp)
+ *((s64 *)
+ (((unsigned long)data + 4 * ring->scan_count + 4) & ~0x7UL)) =
+ st->last_timestamp;
+
+ ring->access.store_to(ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int ade7758_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct ade7758_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, ade7758_trigger_bh_to_ring);
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->bpe = 4;
+ ring->scan_el_attrs = &ade7758_scan_el_group;
+ ring->scan_timestamp = true;
+ ring->preenable = &iio_sw_ring_preenable;
+ ring->postenable = &iio_triggered_ring_postenable;
+ ring->predisable = &iio_triggered_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ /* Set default scan mode */
+ iio_scan_mask_set(ring, iio_scan_el_wform.number);
+
+ ret = iio_alloc_pollfunc(indio_dev, NULL, &ade7758_poll_func_th);
+ if (ret)
+ goto error_iio_sw_rb_free;
+
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int ade7758_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
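The bottom half above packs one 32-bit value per enabled scan element and then stores the timestamp at the first 8-byte boundary at or after the end of the scan data. A minimal sketch of that layout; combine_8_to_32() is not defined in this hunk, so the helper below is only an assumption about how it concatenates its three byte arguments:

#include <linux/types.h>

/* Assumed behaviour of combine_8_to_32(); the real helper is not shown here. */
static inline s32 assumed_combine_8_to_32(u8 b2, u8 b1, u8 b0)
{
	return ((s32)b2 << 16) | ((s32)b1 << 8) | b0;
}

/*
 * Datum handed to ring->access.store_to() by ade7758_trigger_bh_to_ring():
 *
 *   s32 scan[scan_count];   one value per enabled scan element
 *   (pad to the next 8-byte boundary)
 *   s64 timestamp;          only present when ring->scan_timestamp is set
 */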
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
new file mode 100644
index 000000000000..60abca0c28ff
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -0,0 +1,125 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "ade7758.h"
+
+/**
+ * ade7758_data_rdy_trig_poll() - event handler for the data ready trigger
+ **/
+static int ade7758_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct ade7758_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ iio_trigger_poll(trig, timestamp);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &ade7758_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *ade7758_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7758_trigger_attr_group = {
+ .attrs = ade7758_trigger_attrs,
+};
+
+/**
+ * ade7758_data_rdy_trigger_set_state() - set the data ready interrupt state
+ **/
+static int ade7758_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct ade7758_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = ade7758_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ /* a possible quirk in the handler is currently worked around
+ * by ensuring the work queue is empty */
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * ade7758_trig_try_reen() - try re-enabling the irq for the data ready trigger
+ * @trig: the datardy trigger
+ **/
+static int ade7758_trig_try_reen(struct iio_trigger *trig)
+{
+ struct ade7758_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ /* irq reenabled so success! */
+ return 0;
+}
+
+int ade7758_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct ade7758_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ if (st->trig == NULL)
+ return -ENOMEM;
+ st->trig->name = kasprintf(GFP_KERNEL,
+ "ade7758-dev%d",
+ indio_dev->id);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &ade7758_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &ade7758_trig_try_reen;
+ st->trig->control_attrs = &ade7758_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void ade7758_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct ade7758_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
new file mode 100644
index 000000000000..fafc3c1e5aaa
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -0,0 +1,670 @@
+/*
+ * ADE7759 Active Energy Metering IC with di/dt Sensor Interface Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7759.h"
+
+int ade7759_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7759_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7759_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 3,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7759_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7759_spi_read_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7759_READ_REG(reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7759_spi_read_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7759_READ_REG(reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7759_spi_read_reg_40(struct device *dev,
+ u8 reg_address,
+ u64 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 6,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7759_READ_REG(reg_address);
+ memset(&st->tx[1], 0, 5);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev, "problem when reading 40 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) |
+ (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t ade7759_read_8bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7759_spi_read_reg_8(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7759_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7759_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7759_read_40bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u64 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = ade7759_spi_read_reg_40(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t ade7759_write_8bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7759_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7759_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = ade7759_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int ade7759_reset(struct device *dev)
+{
+ int ret;
+ u16 val;
+ ade7759_spi_read_reg_16(dev,
+ ADE7759_MODE,
+ &val);
+ val |= 1 << 6; /* Software Chip Reset */
+ ret = ade7759_spi_write_reg_16(dev,
+ ADE7759_MODE,
+ val);
+
+ return ret;
+}
+
+static ssize_t ade7759_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return ade7759_reset(dev);
+ }
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+ ade7759_read_16bit,
+ ade7759_write_16bit,
+ ADE7759_CFDEN);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_CFNUM);
+static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM);
+static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+ ade7759_read_16bit,
+ ade7759_write_16bit,
+ ADE7759_PHCAL);
+static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+ ade7759_read_16bit,
+ ade7759_write_16bit,
+ ADE7759_APOS);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_SAGCYC);
+static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_SAGLVL);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_LINECYC);
+static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY);
+static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_GAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(S_IWUSR | S_IRUGO,
+ ade7759_read_16bit,
+ ade7759_write_16bit,
+ ADE7759_APGAIN);
+static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_CH1OS);
+static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+ ade7759_read_8bit,
+ ade7759_write_8bit,
+ ADE7759_CH2OS);
+
+static int ade7759_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u8 irqen;
+ ret = ade7759_spi_read_reg_8(dev, ADE7759_IRQEN, &irqen);
+ if (ret)
+ goto error_ret;
+
+ if (enable)
+ irqen |= 1 << 3; /* Enable an interrupt when data is
+ present in the waveform register */
+ else
+ irqen &= ~(1 << 3);
+
+ ret = ade7759_spi_write_reg_8(dev, ADE7759_IRQEN, irqen);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+int ade7759_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val;
+ ade7759_spi_read_reg_16(dev,
+ ADE7759_MODE,
+ &val);
+ val |= 1 << 4; /* AD converters can be turned off */
+ ret = ade7759_spi_write_reg_16(dev,
+ ADE7759_MODE,
+ val);
+
+ return ret;
+}
+
+static int ade7759_initial_setup(struct ade7759_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use SPI mode 3 for this device */
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = ade7759_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed\n");
+ goto err_ret;
+ }
+
+ ade7759_reset(dev);
+ msleep(ADE7759_STARTUP_DELAY);
+
+err_ret:
+ return ret;
+}
+
+static ssize_t ade7759_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = ade7759_spi_read_reg_16(dev,
+ ADE7759_MODE,
+ &t);
+ if (ret)
+ return ret;
+
+ t = (t >> 3) & 0x3;
+ sps = 27900 / (1 + t);
+
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t ade7759_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u16 reg, t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (val == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ t = 27900 / val;
+ if (t > 0)
+ t--;
+
+ if (t > 1)
+ st->us->max_speed_hz = ADE7759_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADE7759_SPI_FAST;
+
+ ret = ade7759_spi_read_reg_16(dev,
+ ADE7759_MODE,
+ &reg);
+ if (ret)
+ goto out;
+
+ reg &= ~(3 << 13);
+ reg |= t << 13;
+
+ ret = ade7759_spi_write_reg_16(dev,
+ ADE7759_MODE,
+ reg);
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "70 C");
+static IIO_CONST_ATTR(temp_scale, "1 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ade7759_read_frequency,
+ ade7759_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7759_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
+
+static IIO_CONST_ATTR(name, "ade7759");
+
+static struct attribute *ade7759_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group ade7759_event_attribute_group = {
+ .attrs = ade7759_event_attributes,
+};
+
+static struct attribute *ade7759_attributes[] = {
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_phcal.dev_attr.attr,
+ &iio_dev_attr_cfden.dev_attr.attr,
+ &iio_dev_attr_aenergy.dev_attr.attr,
+ &iio_dev_attr_cfnum.dev_attr.attr,
+ &iio_dev_attr_apos.dev_attr.attr,
+ &iio_dev_attr_sagcyc.dev_attr.attr,
+ &iio_dev_attr_saglvl.dev_attr.attr,
+ &iio_dev_attr_linecyc.dev_attr.attr,
+ &iio_dev_attr_lenergy.dev_attr.attr,
+ &iio_dev_attr_chksum.dev_attr.attr,
+ &iio_dev_attr_pga_gain.dev_attr.attr,
+ &iio_dev_attr_active_power_gain.dev_attr.attr,
+ &iio_dev_attr_choff_1.dev_attr.attr,
+ &iio_dev_attr_choff_2.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7759_attribute_group = {
+ .attrs = ade7759_attributes,
+};
+
+static int __devinit ade7759_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct ade7759_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADE7759_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADE7759_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &ade7759_event_attribute_group;
+ st->indio_dev->attrs = &ade7759_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ade7759_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = ade7759_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ade7759");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = ade7759_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = ade7759_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ ade7759_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ ade7759_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ ade7759_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int ade7759_remove(struct spi_device *spi)
+{
+ int ret;
+ struct ade7759_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = ade7759_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ ade7759_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ ade7759_uninitialize_ring(indio_dev->ring);
+ ade7759_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver ade7759_driver = {
+ .driver = {
+ .name = "ade7759",
+ .owner = THIS_MODULE,
+ },
+ .probe = ade7759_probe,
+ .remove = __devexit_p(ade7759_remove),
+};
+
+static __init int ade7759_init(void)
+{
+ return spi_register_driver(&ade7759_driver);
+}
+module_init(ade7759_init);
+
+static __exit void ade7759_exit(void)
+{
+ spi_unregister_driver(&ade7759_driver);
+}
+module_exit(ade7759_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
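The register helpers above all use one command framing, encoded by the ADE7759_READ_REG()/ADE7759_WRITE_REG() macros in the header that follows: a read sends the raw register address followed by zero dummy bytes that clock the value back, while a write sets bit 7 of the address and appends the data MSB first. A minimal sketch of that framing; the helper names below are illustrative only and not part of the driver:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative helpers mirroring the framing used by the functions above. */
static inline void example_ade7759_write_frame(u8 *tx, u8 reg, u16 value)
{
	tx[0] = reg | 0x80;		/* ADE7759_WRITE_REG(reg) */
	tx[1] = (value >> 8) & 0xFF;	/* data, MSB first */
	tx[2] = value & 0xFF;
}

static inline void example_ade7759_read_frame(u8 *tx, u8 reg, unsigned int len)
{
	tx[0] = reg;			/* ADE7759_READ_REG(reg) */
	memset(&tx[1], 0, len);		/* dummy bytes clock the value back */
}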
diff --git a/drivers/staging/iio/meter/ade7759.h b/drivers/staging/iio/meter/ade7759.h
new file mode 100644
index 000000000000..813dea2676a9
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7759.h
@@ -0,0 +1,122 @@
+#ifndef _ADE7759_H
+#define _ADE7759_H
+
+#define ADE7759_WAVEFORM 0x01
+#define ADE7759_AENERGY 0x02
+#define ADE7759_RSTENERGY 0x03
+#define ADE7759_STATUS 0x04
+#define ADE7759_RSTSTATUS 0x05
+#define ADE7759_MODE 0x06
+#define ADE7759_CFDEN 0x07
+#define ADE7759_CH1OS 0x08
+#define ADE7759_CH2OS 0x09
+#define ADE7759_GAIN 0x0A
+#define ADE7759_APGAIN 0x0B
+#define ADE7759_PHCAL 0x0C
+#define ADE7759_APOS 0x0D
+#define ADE7759_ZXTOUT 0x0E
+#define ADE7759_SAGCYC 0x0F
+#define ADE7759_IRQEN 0x10
+#define ADE7759_SAGLVL 0x11
+#define ADE7759_TEMP 0x12
+#define ADE7759_LINECYC 0x13
+#define ADE7759_LENERGY 0x14
+#define ADE7759_CFNUM 0x15
+#define ADE7759_CHKSUM 0x1E
+#define ADE7759_DIEREV 0x1F
+
+#define ADE7759_READ_REG(a) a
+#define ADE7759_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7759_MAX_TX 6
+#define ADE7759_MAX_RX 6
+#define ADE7759_STARTUP_DELAY 1
+
+#define ADE7759_SPI_SLOW (u32)(300 * 1000)
+#define ADE7759_SPI_BURST (u32)(1000 * 1000)
+#define ADE7759_SPI_FAST (u32)(2000 * 1000)
+
+#define DRIVER_NAME "ade7759"
+
+/**
+ * struct ade7759_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passes the timestamp from the top half to the bottom half
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7759_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7759_scan {
+ ADE7759_SCAN_ACTIVE_POWER,
+ ADE7759_SCAN_CH1_CH2,
+ ADE7759_SCAN_CH1,
+ ADE7759_SCAN_CH2,
+};
+
+void ade7759_remove_trigger(struct iio_dev *indio_dev);
+int ade7759_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7759_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int ade7759_configure_ring(struct iio_dev *indio_dev);
+void ade7759_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7759_initialize_ring(struct iio_ring_buffer *ring);
+void ade7759_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7759_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7759_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+ade7759_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int ade7759_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void ade7759_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7759_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+static inline void ade7759_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
new file mode 100644
index 000000000000..4578e7b7f460
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -0,0 +1,272 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (I2C Bus)
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include "../iio.h"
+#include "ade7854.h"
+
+static int ade7854_i2c_write_reg_8(struct device *dev,
+ u16 reg_address,
+ u8 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+ st->tx[2] = value;
+
+ ret = i2c_master_send(st->i2c, st->tx, 3);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_i2c_write_reg_16(struct device *dev,
+ u16 reg_address,
+ u16 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+ st->tx[2] = (value >> 8) & 0xFF;
+ st->tx[3] = value & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 4);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_i2c_write_reg_24(struct device *dev,
+ u16 reg_address,
+ u32 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+ st->tx[2] = (value >> 16) & 0xFF;
+ st->tx[3] = (value >> 8) & 0xFF;
+ st->tx[4] = value & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 5);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_i2c_write_reg_32(struct device *dev,
+ u16 reg_address,
+ u32 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+ st->tx[2] = (value >> 24) & 0xFF;
+ st->tx[3] = (value >> 16) & 0xFF;
+ st->tx[4] = (value >> 8) & 0xFF;
+ st->tx[5] = value & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 6);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_i2c_read_reg_8(struct device *dev,
+ u16 reg_address,
+ u8 *val)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 2);
+ if (ret)
+ goto out;
+
+ ret = i2c_master_recv(st->i2c, st->rx, 1);
+ if (ret)
+ goto out;
+
+ *val = st->rx[0];
+out:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_i2c_read_reg_16(struct device *dev,
+ u16 reg_address,
+ u16 *val)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 2);
+ if (ret)
+ goto out;
+
+ ret = i2c_master_recv(st->i2c, st->rx, 2);
+ if (ret)
+ goto out;
+
+ *val = (st->rx[0] << 8) | st->rx[1];
+out:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_i2c_read_reg_24(struct device *dev,
+ u16 reg_address,
+ u32 *val)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 2);
+ if (ret)
+ goto out;
+
+ ret = i2c_master_recv(st->i2c, st->rx, 3);
+ if (ret)
+ goto out;
+
+ *val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
+out:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_i2c_read_reg_32(struct device *dev,
+ u16 reg_address,
+ u32 *val)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = (reg_address >> 8) & 0xFF;
+ st->tx[1] = reg_address & 0xFF;
+
+ ret = i2c_master_send(st->i2c, st->tx, 2);
+ if (ret)
+ goto out;
+
+ ret = i2c_master_recv(st->i2c, st->rx, 4);
+ if (ret)
+ goto out;
+
+ *val = (st->rx[0] << 24) | (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+out:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int __devinit ade7854_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ i2c_set_clientdata(client, st);
+ st->read_reg_8 = ade7854_i2c_read_reg_8;
+ st->read_reg_16 = ade7854_i2c_read_reg_16;
+ st->read_reg_24 = ade7854_i2c_read_reg_24;
+ st->read_reg_32 = ade7854_i2c_read_reg_32;
+ st->write_reg_8 = ade7854_i2c_write_reg_8;
+ st->write_reg_16 = ade7854_i2c_write_reg_16;
+ st->write_reg_24 = ade7854_i2c_write_reg_24;
+ st->write_reg_32 = ade7854_i2c_write_reg_32;
+ st->i2c = client;
+ st->irq = client->irq;
+
+ return ade7854_probe(st, &client->dev);
+}
+
+static int __devexit ade7854_i2c_remove(struct i2c_client *client)
+{
+ return ade7854_remove(i2c_get_clientdata(client));
+}
+
+static const struct i2c_device_id ade7854_id[] = {
+ { "ade7854", 0 },
+ { "ade7858", 0 },
+ { "ade7868", 0 },
+ { "ade7878", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ade7854_id);
+
+static struct i2c_driver ade7854_i2c_driver = {
+ .driver = {
+ .name = "ade7854",
+ },
+ .probe = ade7854_i2c_probe,
+ .remove = __devexit_p(ade7854_i2c_remove),
+ .id_table = ade7854_id,
+};
+
+static __init int ade7854_i2c_init(void)
+{
+ return i2c_add_driver(&ade7854_i2c_driver);
+}
+module_init(ade7854_i2c_init);
+
+static __exit void ade7854_i2c_exit(void)
+{
+ i2c_del_driver(&ade7854_i2c_driver);
+}
+module_exit(ade7854_i2c_exit);
+
+
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
+MODULE_LICENSE("GPL v2");
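The I2C helpers above issue every read as an address write followed by a separate i2c_master_recv(). Where the gap between those two transfers matters, the same read can be expressed as one combined transaction with a repeated start; a sketch using the standard i2c_transfer() API, shown for a 16-bit register (this helper is illustrative and not part of the patch):

#include <linux/i2c.h>

static int example_ade7854_read_reg_16(struct i2c_client *client,
				       u16 reg, u16 *val)
{
	u8 addr[2] = { (reg >> 8) & 0xFF, reg & 0xFF };
	u8 data[2];
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0, .len = 2, .buf = addr },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 2, .buf = data },
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, 2);
	if (ret < 0)
		return ret;

	*val = (data[0] << 8) | data[1];
	return 0;
}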
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
new file mode 100644
index 000000000000..fe58103ed4ca
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -0,0 +1,360 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (SPI Bus)
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+
+#include "../iio.h"
+#include "ade7854.h"
+
+static int ade7854_spi_write_reg_8(struct device *dev,
+ u16 reg_address,
+ u8 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 4,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7854_WRITE_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_spi_write_reg_16(struct device *dev,
+ u16 reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 5,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7854_WRITE_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = (value >> 8) & 0xFF;
+ st->tx[4] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_spi_write_reg_24(struct device *dev,
+ u16 reg_address,
+ u32 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 6,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7854_WRITE_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = (value >> 16) & 0xFF;
+ st->tx[4] = (value >> 8) & 0xFF;
+ st->tx[5] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_spi_write_reg_32(struct device *dev,
+ u16 reg_address,
+ u32 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 7,
+ }
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7854_WRITE_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = (value >> 24) & 0xFF;
+ st->tx[4] = (value >> 16) & 0xFF;
+ st->tx[5] = (value >> 8) & 0xFF;
+ st->tx[6] = value & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+static int ade7854_spi_read_reg_8(struct device *dev,
+ u16 reg_address,
+ u8 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 4,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+
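+ /* Full-duplex frame: command byte, 16-bit register address, then a
+ * dummy byte; the register value is clocked back into rx[3]. */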
+ st->tx[0] = ADE7854_READ_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ if (ret) {
+ dev_err(&st->spi->dev, "problem when reading 8 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = st->rx[3];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_spi_read_reg_16(struct device *dev,
+ u16 reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 5,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADE7854_READ_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = 0;
+ st->tx[4] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ if (ret) {
+ dev_err(&st->spi->dev, "problem when reading 16 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[3] << 8) | st->rx[4];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_spi_read_reg_24(struct device *dev,
+ u16 reg_address,
+ u32 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 6,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+
+ st->tx[0] = ADE7854_READ_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = 0;
+ st->tx[4] = 0;
+ st->tx[5] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ if (ret) {
+ dev_err(&st->spi->dev, "problem when reading 24 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int ade7854_spi_read_reg_32(struct device *dev,
+ u16 reg_address,
+ u32 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 7,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+
+ st->tx[0] = ADE7854_READ_REG;
+ st->tx[1] = (reg_address >> 8) & 0xFF;
+ st->tx[2] = reg_address & 0xFF;
+ st->tx[3] = 0;
+ st->tx[4] = 0;
+ st->tx[5] = 0;
+ st->tx[6] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(xfers, &msg);
+ ret = spi_sync(st->spi, &msg);
+ if (ret) {
+ dev_err(&st->spi->dev, "problem when reading 32 bit register 0x%02X",
+ reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[3] << 24) | (st->rx[4] << 16) | (st->rx[5] << 8) | st->rx[6];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static int __devinit ade7854_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ spi_set_drvdata(spi, st);
+ st->read_reg_8 = ade7854_spi_read_reg_8;
+ st->read_reg_16 = ade7854_spi_read_reg_16;
+ st->read_reg_24 = ade7854_spi_read_reg_24;
+ st->read_reg_32 = ade7854_spi_read_reg_32;
+ st->write_reg_8 = ade7854_spi_write_reg_8;
+ st->write_reg_16 = ade7854_spi_write_reg_16;
+ st->write_reg_24 = ade7854_spi_write_reg_24;
+ st->write_reg_32 = ade7854_spi_write_reg_32;
+ st->irq = spi->irq;
+ st->spi = spi;
+
+ return ade7854_probe(st, &spi->dev);
+}
+
+static int ade7854_spi_remove(struct spi_device *spi)
+{
+ ade7854_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static struct spi_driver ade7854_driver = {
+ .driver = {
+ .name = "ade7854",
+ .owner = THIS_MODULE,
+ },
+ .probe = ade7854_spi_probe,
+ .remove = __devexit_p(ade7854_spi_remove),
+};
+
+static __init int ade7854_init(void)
+{
+ return spi_register_driver(&ade7854_driver);
+}
+module_init(ade7854_init);
+
+static __exit void ade7854_exit(void)
+{
+ spi_unregister_driver(&ade7854_driver);
+}
+module_exit(ade7854_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC SPI Driver");
+MODULE_LICENSE("GPL v2");
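Both bus front ends follow the same contract with the shared core that comes next: allocate an ade7854_state, fill in the register accessors and the IRQ, then hand the state to ade7854_probe(), which frees it on failure. A minimal, hypothetical front-end skeleton showing only that wiring (the no-op accessors and names below are not part of the driver):

#include <linux/device.h>
#include <linux/slab.h>

#include "ade7854.h"

/* Hypothetical no-op accessors standing in for a real bus implementation. */
static int example_read_reg_32(struct device *dev, u16 reg_address, u32 *val)
{
	*val = 0;
	return 0;
}

static int example_write_reg_32(struct device *dev, u16 reg_address, u32 value)
{
	return 0;
}

static int example_front_end_attach(struct device *dev, int irq)
{
	struct ade7854_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;

	st->read_reg_32 = example_read_reg_32;
	st->write_reg_32 = example_write_reg_32;
	/* ... fill the remaining 8/16/24-bit accessors the same way ... */
	st->irq = irq;

	return ade7854_probe(st, dev);	/* ade7854_probe() frees st on failure */
}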
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
new file mode 100644
index 000000000000..a13d5048cf42
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -0,0 +1,680 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7854.h"
+
+static ssize_t ade7854_read_8bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = st->read_reg_8(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = st->read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_read_24bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 val = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = st->read_reg_24(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7854_read_32bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ ret = st->read_reg_32(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_write_8bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = st->write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = st->write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_24bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = st->write_reg_24(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_32bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = st->write_reg_32(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int ade7854_reset(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ u16 val;
+
+ st->read_reg_16(dev, ADE7854_CONFIG, &val);
+ val |= 1 << 7; /* Software Chip Reset */
+ ret = st->write_reg_16(dev, ADE7854_CONFIG, val);
+
+ return ret;
+}
+
+
+static ssize_t ade7854_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return ade7854_reset(dev);
+ }
+ return -EINVAL;
+}
+
+static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AIGAIN);
+static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BIGAIN);
+static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CIGAIN);
+static IIO_DEV_ATTR_NIGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_NIGAIN);
+static IIO_DEV_ATTR_AVGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AVGAIN);
+static IIO_DEV_ATTR_BVGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BVGAIN);
+static IIO_DEV_ATTR_CVGAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CVGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AVAGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BVAGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CVAGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AWATTOS);
+static IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BWATTOS);
+static IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CWATTOS);
+static IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_AVAROS);
+static IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_BVAROS);
+static IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+ ade7854_read_24bit,
+ ade7854_write_24bit,
+ ADE7854_CVAROS);
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+ ade7854_read_32bit,
+ ade7854_write_32bit,
+ ADE7854_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+ ade7854_read_32bit,
+ ade7854_write_32bit,
+ ADE7854_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CPHCAL);
+static IIO_DEV_ATTR_CF1DEN(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CF1DEN);
+static IIO_DEV_ATTR_CF2DEN(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CF2DEN);
+static IIO_DEV_ATTR_CF3DEN(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CF3DEN);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_LINECYC);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+ ade7854_read_8bit,
+ ade7854_write_8bit,
+ ADE7854_SAGCYC);
+static IIO_DEV_ATTR_CFCYC(S_IWUSR | S_IRUGO,
+ ade7854_read_8bit,
+ ade7854_write_8bit,
+ ADE7854_CFCYC);
+static IIO_DEV_ATTR_PEAKCYC(S_IWUSR | S_IRUGO,
+ ade7854_read_8bit,
+ ade7854_write_8bit,
+ ADE7854_PEAKCYC);
+static IIO_DEV_ATTR_CHKSUM(ade7854_read_24bit,
+ ADE7854_CHECKSUM);
+static IIO_DEV_ATTR_ANGLE0(ade7854_read_24bit,
+ ADE7854_ANGLE0);
+static IIO_DEV_ATTR_ANGLE1(ade7854_read_24bit,
+ ADE7854_ANGLE1);
+static IIO_DEV_ATTR_ANGLE2(ade7854_read_24bit,
+ ADE7854_ANGLE2);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_CIRMS);
+static IIO_DEV_ATTR_NIRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_NIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+ ade7854_read_24bit,
+ NULL,
+ ADE7854_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IRUGO,
+ ade7854_read_16bit,
+ ade7854_write_16bit,
+ ADE7854_CVRMSOS);
+static IIO_DEV_ATTR_VOLT_A(ade7854_read_24bit,
+ ADE7854_VAWV);
+static IIO_DEV_ATTR_VOLT_B(ade7854_read_24bit,
+ ADE7854_VBWV);
+static IIO_DEV_ATTR_VOLT_C(ade7854_read_24bit,
+ ADE7854_VCWV);
+static IIO_DEV_ATTR_CURRENT_A(ade7854_read_24bit,
+ ADE7854_IAWV);
+static IIO_DEV_ATTR_CURRENT_B(ade7854_read_24bit,
+ ADE7854_IBWV);
+static IIO_DEV_ATTR_CURRENT_C(ade7854_read_24bit,
+ ADE7854_ICWV);
+static IIO_DEV_ATTR_AWATTHR(ade7854_read_32bit,
+ ADE7854_AWATTHR);
+static IIO_DEV_ATTR_BWATTHR(ade7854_read_32bit,
+ ADE7854_BWATTHR);
+static IIO_DEV_ATTR_CWATTHR(ade7854_read_32bit,
+ ADE7854_CWATTHR);
+static IIO_DEV_ATTR_AFWATTHR(ade7854_read_32bit,
+ ADE7854_AFWATTHR);
+static IIO_DEV_ATTR_BFWATTHR(ade7854_read_32bit,
+ ADE7854_BFWATTHR);
+static IIO_DEV_ATTR_CFWATTHR(ade7854_read_32bit,
+ ADE7854_CFWATTHR);
+static IIO_DEV_ATTR_AVARHR(ade7854_read_32bit,
+ ADE7854_AVARHR);
+static IIO_DEV_ATTR_BVARHR(ade7854_read_32bit,
+ ADE7854_BVARHR);
+static IIO_DEV_ATTR_CVARHR(ade7854_read_32bit,
+ ADE7854_CVARHR);
+static IIO_DEV_ATTR_AVAHR(ade7854_read_32bit,
+ ADE7854_AVAHR);
+static IIO_DEV_ATTR_BVAHR(ade7854_read_32bit,
+ ADE7854_BVAHR);
+static IIO_DEV_ATTR_CVAHR(ade7854_read_32bit,
+ ADE7854_CVAHR);
+
+static int ade7854_set_irq(struct device *dev, bool enable)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+ int ret;
+ u32 irqen;
+
+ ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen);
+ if (ret)
+ goto error_ret;
+
+ if (enable)
+ irqen |= 1 << 17; /* enable the interrupt when all periodic
+ (8 kHz rate) DSP computations finish */
+ else
+ irqen &= ~(1 << 17);
+
+ ret = st->write_reg_32(dev, ADE7854_MASK0, irqen);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+static int ade7854_initial_setup(struct ade7854_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = ade7854_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed\n");
+ goto err_ret;
+ }
+
+ ade7854_reset(dev);
+ msleep(ADE7854_STARTUP_DELAY);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_RESET(ade7854_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("8000");
+
+static IIO_CONST_ATTR(name, "ade7854");
+
+static struct attribute *ade7854_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group ade7854_event_attribute_group = {
+ .attrs = ade7854_event_attributes,
+};
+
+static struct attribute *ade7854_attributes[] = {
+ &iio_dev_attr_aigain.dev_attr.attr,
+ &iio_dev_attr_bigain.dev_attr.attr,
+ &iio_dev_attr_cigain.dev_attr.attr,
+ &iio_dev_attr_nigain.dev_attr.attr,
+ &iio_dev_attr_avgain.dev_attr.attr,
+ &iio_dev_attr_bvgain.dev_attr.attr,
+ &iio_dev_attr_cvgain.dev_attr.attr,
+ &iio_dev_attr_linecyc.dev_attr.attr,
+ &iio_dev_attr_sagcyc.dev_attr.attr,
+ &iio_dev_attr_cfcyc.dev_attr.attr,
+ &iio_dev_attr_peakcyc.dev_attr.attr,
+ &iio_dev_attr_chksum.dev_attr.attr,
+ &iio_dev_attr_apparent_power_a_gain.dev_attr.attr,
+ &iio_dev_attr_apparent_power_b_gain.dev_attr.attr,
+ &iio_dev_attr_apparent_power_c_gain.dev_attr.attr,
+ &iio_dev_attr_active_power_a_offset.dev_attr.attr,
+ &iio_dev_attr_active_power_b_offset.dev_attr.attr,
+ &iio_dev_attr_active_power_c_offset.dev_attr.attr,
+ &iio_dev_attr_reactive_power_a_gain.dev_attr.attr,
+ &iio_dev_attr_reactive_power_b_gain.dev_attr.attr,
+ &iio_dev_attr_reactive_power_c_gain.dev_attr.attr,
+ &iio_dev_attr_reactive_power_a_offset.dev_attr.attr,
+ &iio_dev_attr_reactive_power_b_offset.dev_attr.attr,
+ &iio_dev_attr_reactive_power_c_offset.dev_attr.attr,
+ &iio_dev_attr_awatthr.dev_attr.attr,
+ &iio_dev_attr_bwatthr.dev_attr.attr,
+ &iio_dev_attr_cwatthr.dev_attr.attr,
+ &iio_dev_attr_afwatthr.dev_attr.attr,
+ &iio_dev_attr_bfwatthr.dev_attr.attr,
+ &iio_dev_attr_cfwatthr.dev_attr.attr,
+ &iio_dev_attr_avarhr.dev_attr.attr,
+ &iio_dev_attr_bvarhr.dev_attr.attr,
+ &iio_dev_attr_cvarhr.dev_attr.attr,
+ &iio_dev_attr_angle0.dev_attr.attr,
+ &iio_dev_attr_angle1.dev_attr.attr,
+ &iio_dev_attr_angle2.dev_attr.attr,
+ &iio_dev_attr_avahr.dev_attr.attr,
+ &iio_dev_attr_bvahr.dev_attr.attr,
+ &iio_dev_attr_cvahr.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_vpeak.dev_attr.attr,
+ &iio_dev_attr_ipeak.dev_attr.attr,
+ &iio_dev_attr_aphcal.dev_attr.attr,
+ &iio_dev_attr_bphcal.dev_attr.attr,
+ &iio_dev_attr_cphcal.dev_attr.attr,
+ &iio_dev_attr_cf1den.dev_attr.attr,
+ &iio_dev_attr_cf2den.dev_attr.attr,
+ &iio_dev_attr_cf3den.dev_attr.attr,
+ &iio_dev_attr_airms.dev_attr.attr,
+ &iio_dev_attr_birms.dev_attr.attr,
+ &iio_dev_attr_cirms.dev_attr.attr,
+ &iio_dev_attr_nirms.dev_attr.attr,
+ &iio_dev_attr_avrms.dev_attr.attr,
+ &iio_dev_attr_bvrms.dev_attr.attr,
+ &iio_dev_attr_cvrms.dev_attr.attr,
+ &iio_dev_attr_airmsos.dev_attr.attr,
+ &iio_dev_attr_birmsos.dev_attr.attr,
+ &iio_dev_attr_cirmsos.dev_attr.attr,
+ &iio_dev_attr_avrmsos.dev_attr.attr,
+ &iio_dev_attr_bvrmsos.dev_attr.attr,
+ &iio_dev_attr_cvrmsos.dev_attr.attr,
+ &iio_dev_attr_volt_a.dev_attr.attr,
+ &iio_dev_attr_volt_b.dev_attr.attr,
+ &iio_dev_attr_volt_c.dev_attr.attr,
+ &iio_dev_attr_current_a.dev_attr.attr,
+ &iio_dev_attr_current_b.dev_attr.attr,
+ &iio_dev_attr_current_c.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ade7854_attribute_group = {
+ .attrs = ade7854_attributes,
+};
+
+int ade7854_probe(struct ade7854_state *st, struct device *dev)
+{
+ int ret, regdone = 0;
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADE7854_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADE7854_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &ade7854_event_attribute_group;
+ st->indio_dev->attrs = &ade7854_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ade7854_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = ade7854_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (st->irq) {
+ ret = iio_register_interrupt_line(st->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_FALLING,
+ "ade7854");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = ade7854_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+ /* Get the device into a sane initial state */
+ ret = ade7854_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ ade7854_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ ade7854_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ ade7854_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+ return ret;
+
+}
+EXPORT_SYMBOL(ade7854_probe);
+
+int ade7854_remove(struct ade7854_state *st)
+{
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ ade7854_remove_trigger(indio_dev);
+ if (st->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ ade7854_uninitialize_ring(indio_dev->ring);
+ ade7854_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+EXPORT_SYMBOL(ade7854_remove);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
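ade7854_set_irq() above is one instance of a read-modify-write on the MASK0 interrupt-enable register through the bus-agnostic accessor pointers. A hedged generalisation of that pattern; the helper name is illustrative, not part of the driver, and it assumes the same includes and state as ade7854.c:

/* Illustrative only; relies on the headers already used by ade7854.c. */
static int example_ade7854_update_mask0_bit(struct device *dev,
					    unsigned int bit, bool enable)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
	u32 mask;
	int ret;

	ret = st->read_reg_32(dev, ADE7854_MASK0, &mask);
	if (ret)
		return ret;

	if (enable)
		mask |= 1u << bit;
	else
		mask &= ~(1u << bit);

	return st->write_reg_32(dev, ADE7854_MASK0, mask);
}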
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
new file mode 100644
index 000000000000..47690e521ec1
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -0,0 +1,245 @@
+#ifndef _ADE7854_H
+#define _ADE7854_H
+
+#define ADE7854_AIGAIN 0x4380
+#define ADE7854_AVGAIN 0x4381
+#define ADE7854_BIGAIN 0x4382
+#define ADE7854_BVGAIN 0x4383
+#define ADE7854_CIGAIN 0x4384
+#define ADE7854_CVGAIN 0x4385
+#define ADE7854_NIGAIN 0x4386
+#define ADE7854_AIRMSOS 0x4387
+#define ADE7854_AVRMSOS 0x4388
+#define ADE7854_BIRMSOS 0x4389
+#define ADE7854_BVRMSOS 0x438A
+#define ADE7854_CIRMSOS 0x438B
+#define ADE7854_CVRMSOS 0x438C
+#define ADE7854_NIRMSOS 0x438D
+#define ADE7854_AVAGAIN 0x438E
+#define ADE7854_BVAGAIN 0x438F
+#define ADE7854_CVAGAIN 0x4390
+#define ADE7854_AWGAIN 0x4391
+#define ADE7854_AWATTOS 0x4392
+#define ADE7854_BWGAIN 0x4393
+#define ADE7854_BWATTOS 0x4394
+#define ADE7854_CWGAIN 0x4395
+#define ADE7854_CWATTOS 0x4396
+#define ADE7854_AVARGAIN 0x4397
+#define ADE7854_AVAROS 0x4398
+#define ADE7854_BVARGAIN 0x4399
+#define ADE7854_BVAROS 0x439A
+#define ADE7854_CVARGAIN 0x439B
+#define ADE7854_CVAROS 0x439C
+#define ADE7854_AFWGAIN 0x439D
+#define ADE7854_AFWATTOS 0x439E
+#define ADE7854_BFWGAIN 0x439F
+#define ADE7854_BFWATTOS 0x43A0
+#define ADE7854_CFWGAIN 0x43A1
+#define ADE7854_CFWATTOS 0x43A2
+#define ADE7854_AFVARGAIN 0x43A3
+#define ADE7854_AFVAROS 0x43A4
+#define ADE7854_BFVARGAIN 0x43A5
+#define ADE7854_BFVAROS 0x43A6
+#define ADE7854_CFVARGAIN 0x43A7
+#define ADE7854_CFVAROS 0x43A8
+#define ADE7854_VATHR1 0x43A9
+#define ADE7854_VATHR0 0x43AA
+#define ADE7854_WTHR1 0x43AB
+#define ADE7854_WTHR0 0x43AC
+#define ADE7854_VARTHR1 0x43AD
+#define ADE7854_VARTHR0 0x43AE
+#define ADE7854_RSV 0x43AF
+#define ADE7854_VANOLOAD 0x43B0
+#define ADE7854_APNOLOAD 0x43B1
+#define ADE7854_VARNOLOAD 0x43B2
+#define ADE7854_VLEVEL 0x43B3
+#define ADE7854_DICOEFF 0x43B5
+#define ADE7854_HPFDIS 0x43B6
+#define ADE7854_ISUMLVL 0x43B8
+#define ADE7854_ISUM 0x43BF
+#define ADE7854_AIRMS 0x43C0
+#define ADE7854_AVRMS 0x43C1
+#define ADE7854_BIRMS 0x43C2
+#define ADE7854_BVRMS 0x43C3
+#define ADE7854_CIRMS 0x43C4
+#define ADE7854_CVRMS 0x43C5
+#define ADE7854_NIRMS 0x43C6
+#define ADE7854_RUN 0xE228
+#define ADE7854_AWATTHR 0xE400
+#define ADE7854_BWATTHR 0xE401
+#define ADE7854_CWATTHR 0xE402
+#define ADE7854_AFWATTHR 0xE403
+#define ADE7854_BFWATTHR 0xE404
+#define ADE7854_CFWATTHR 0xE405
+#define ADE7854_AVARHR 0xE406
+#define ADE7854_BVARHR 0xE407
+#define ADE7854_CVARHR 0xE408
+#define ADE7854_AFVARHR 0xE409
+#define ADE7854_BFVARHR 0xE40A
+#define ADE7854_CFVARHR 0xE40B
+#define ADE7854_AVAHR 0xE40C
+#define ADE7854_BVAHR 0xE40D
+#define ADE7854_CVAHR 0xE40E
+#define ADE7854_IPEAK 0xE500
+#define ADE7854_VPEAK 0xE501
+#define ADE7854_STATUS0 0xE502
+#define ADE7854_STATUS1 0xE503
+#define ADE7854_OILVL 0xE507
+#define ADE7854_OVLVL 0xE508
+#define ADE7854_SAGLVL 0xE509
+#define ADE7854_MASK0 0xE50A
+#define ADE7854_MASK1 0xE50B
+#define ADE7854_IAWV 0xE50C
+#define ADE7854_IBWV 0xE50D
+#define ADE7854_ICWV 0xE50E
+#define ADE7854_VAWV 0xE510
+#define ADE7854_VBWV 0xE511
+#define ADE7854_VCWV 0xE512
+#define ADE7854_AWATT 0xE513
+#define ADE7854_BWATT 0xE514
+#define ADE7854_CWATT 0xE515
+#define ADE7854_AVA 0xE519
+#define ADE7854_BVA 0xE51A
+#define ADE7854_CVA 0xE51B
+#define ADE7854_CHECKSUM 0xE51F
+#define ADE7854_VNOM 0xE520
+#define ADE7854_PHSTATUS 0xE600
+#define ADE7854_ANGLE0 0xE601
+#define ADE7854_ANGLE1 0xE602
+#define ADE7854_ANGLE2 0xE603
+#define ADE7854_PERIOD 0xE607
+#define ADE7854_PHNOLOAD 0xE608
+#define ADE7854_LINECYC 0xE60C
+#define ADE7854_ZXTOUT 0xE60D
+#define ADE7854_COMPMODE 0xE60E
+#define ADE7854_GAIN 0xE60F
+#define ADE7854_CFMODE 0xE610
+#define ADE7854_CF1DEN 0xE611
+#define ADE7854_CF2DEN 0xE612
+#define ADE7854_CF3DEN 0xE613
+#define ADE7854_APHCAL 0xE614
+#define ADE7854_BPHCAL 0xE615
+#define ADE7854_CPHCAL 0xE616
+#define ADE7854_PHSIGN 0xE617
+#define ADE7854_CONFIG 0xE618
+#define ADE7854_MMODE 0xE700
+#define ADE7854_ACCMODE 0xE701
+#define ADE7854_LCYCMODE 0xE702
+#define ADE7854_PEAKCYC 0xE703
+#define ADE7854_SAGCYC 0xE704
+#define ADE7854_CFCYC 0xE705
+#define ADE7854_HSDC_CFG 0xE706
+#define ADE7854_CONFIG2 0xEC01
+
+#define ADE7854_READ_REG 0x1
+#define ADE7854_WRITE_REG 0x0
+
+#define ADE7854_MAX_TX 7
+#define ADE7854_MAX_RX 7
+#define ADE7854_STARTUP_DELAY 1
+
+#define ADE7854_SPI_SLOW (u32)(300 * 1000)
+#define ADE7854_SPI_BURST (u32)(1000 * 1000)
+#define ADE7854_SPI_FAST (u32)(2000 * 1000)
+
+#define DRIVER_NAME "ade7854"
+
+/**
+ * struct ade7854_state - device instance specific data
+ * @spi: actual spi_device when the SPI bus is used
+ * @i2c: actual i2c_client when the I2C bus is used
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp: passes the timestamp from the top half to the bottom
+ *	half of the interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @irq: irq line assigned to the device, 0 if none
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7854_state {
+ struct spi_device *spi;
+ struct i2c_client *i2c;
+ struct work_struct work_trigger_to_ring;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ int (*read_reg_8) (struct device *, u16, u8 *);
+ int (*read_reg_16) (struct device *, u16, u16 *);
+ int (*read_reg_24) (struct device *, u16, u32 *);
+ int (*read_reg_32) (struct device *, u16, u32 *);
+ int (*write_reg_8) (struct device *, u16, u8);
+ int (*write_reg_16) (struct device *, u16, u16);
+ int (*write_reg_24) (struct device *, u16, u32);
+ int (*write_reg_32) (struct device *, u16, u32);
+ int irq;
+ struct mutex buf_lock;
+};
+
+extern int ade7854_probe(struct ade7854_state *st, struct device *dev);
+extern int ade7854_remove(struct ade7854_state *st);
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7854_scan {
+ ADE7854_SCAN_PHA_V,
+ ADE7854_SCAN_PHB_V,
+ ADE7854_SCAN_PHC_V,
+ ADE7854_SCAN_PHA_I,
+ ADE7854_SCAN_PHB_I,
+ ADE7854_SCAN_PHC_I,
+};
+
+void ade7854_remove_trigger(struct iio_dev *indio_dev);
+int ade7854_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7854_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int ade7854_configure_ring(struct iio_dev *indio_dev);
+void ade7854_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7854_initialize_ring(struct iio_ring_buffer *ring);
+void ade7854_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7854_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7854_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+ade7854_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int ade7854_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void ade7854_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7854_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+static inline void ade7854_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
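The read_reg_*/write_reg_* members of struct ade7854_state are what keep ade7854.c bus agnostic: a thin SPI or I2C front end allocates the state, fills in the accessors for its bus, and hands everything to ade7854_probe(). A minimal sketch of such a front end follows (not part of this patch); the helper names ade7854_spi_read_reg_8()/ade7854_spi_write_reg_8() are assumptions for illustration, and the remaining accessors would be wired up the same way.

/* sketch of a bus front end -- illustrative only, helpers are assumed */
static int __devinit ade7854_spi_probe(struct spi_device *spi)
{
	struct ade7854_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (st == NULL)
		return -ENOMEM;
	spi_set_drvdata(spi, st);

	/* bus specific register accessors (assumed helpers) */
	st->read_reg_8 = ade7854_spi_read_reg_8;
	st->write_reg_8 = ade7854_spi_write_reg_8;
	/* ...read_reg_16/24/32 and write_reg_16/24/32 likewise... */
	st->irq = spi->irq;
	st->spi = spi;

	/* the common core frees st itself on failure (error_free_st) */
	return ade7854_probe(st, &spi->dev);
}

An I2C front end would look the same, storing the client in st->i2c instead of st->spi.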
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
new file mode 100644
index 000000000000..142c50d71fda
--- /dev/null
+++ b/drivers/staging/iio/meter/meter.h
@@ -0,0 +1,396 @@
+#include "../sysfs.h"
+
+/* metering IC attribute types */
+
+#define IIO_DEV_ATTR_CURRENT_A_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_A_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(volt_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_B_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(volt_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_C_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(volt_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_A_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(current_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(apparent_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(apparent_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(apparent_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(active_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(reactive_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_A(_show, _addr) \
+ IIO_DEVICE_ATTR(current_a, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B(_show, _addr) \
+ IIO_DEVICE_ATTR(current_b, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C(_show, _addr) \
+ IIO_DEVICE_ATTR(current_c, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_A(_show, _addr) \
+ IIO_DEVICE_ATTR(volt_a, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_B(_show, _addr) \
+ IIO_DEVICE_ATTR(volt_b, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_C(_show, _addr) \
+ IIO_DEVICE_ATTR(volt_c, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(aenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(lenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_RAENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(raenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LAENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(laenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VAENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(vaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LVAENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(lvaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_RVAENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(rvaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LVARENERGY(_show, _addr) \
+ IIO_DEVICE_ATTR(lvarenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CHKSUM(_show, _addr) \
+ IIO_DEVICE_ATTR(chksum, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE0(_show, _addr) \
+ IIO_DEVICE_ATTR(angle0, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE1(_show, _addr) \
+ IIO_DEVICE_ATTR(angle1, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE2(_show, _addr) \
+ IIO_DEVICE_ATTR(angle2, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(awatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(bwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(cwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AFWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(afwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BFWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(bfwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CFWATTHR(_show, _addr) \
+ IIO_DEVICE_ATTR(cfwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AVARHR(_show, _addr) \
+ IIO_DEVICE_ATTR(avarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BVARHR(_show, _addr) \
+ IIO_DEVICE_ATTR(bvarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CVARHR(_show, _addr) \
+ IIO_DEVICE_ATTR(cvarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AVAHR(_show, _addr) \
+ IIO_DEVICE_ATTR(avahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BVAHR(_show, _addr) \
+ IIO_DEVICE_ATTR(bvahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CVAHR(_show, _addr) \
+ IIO_DEVICE_ATTR(cvahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_IOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(ios, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PHCAL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(phcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APHCAL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(aphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BPHCAL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CPHCAL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(apos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AAPOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(aapos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BAPOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bapos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CAPOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(capos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMSGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(avrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMSGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bvrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMSGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cvrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(aigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_NIGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(nigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(avgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bvgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cvgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_WGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(wgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_WDIV(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(wdiv, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFNUM(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cfnum, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFDEN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cfden, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF1DEN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cf1den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF2DEN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cf2den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF3DEN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cf3den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(irms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(airms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(birms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cirms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_NIRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(nirms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(avrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bvrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cvrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(irmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(airmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(birmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cirmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(avrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(bvrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMSOS(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cvrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VAGAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vagain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PGA_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(pga_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VADIV(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vadiv, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_LINECYC(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(linecyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_SAGCYC(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(sagcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFCYC(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(cfcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PEAKCYC(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(peakcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_SAGLVL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(saglvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IPKLVL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(ipklvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPKLVL(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vpklvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IPEAK(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(ipeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_RIPEAK(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(ripeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPEAK(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vpeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_RVPEAK(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(rvpeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPERIOD(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(vperiod, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CH_OFF(_num, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(choff_##_num, _mode, _show, _store, _addr)
+
+/* active energy register, AENERGY, is more than half full */
+#define IIO_EVENT_ATTR_AENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(aenergy_half_full, _evlist, _show, _store, _mask)
+
+/* a SAG on the line voltage */
+#define IIO_EVENT_ATTR_LINE_VOLT_SAG(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(line_volt_sag, _evlist, _show, _store, _mask)
+
+/*
+ * Indicates the end of energy accumulation over an integer number
+ * of half line cycles
+ */
+#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
+
+/* on the rising and falling edge of the voltage waveform */
+#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
+
+/* the active energy register has overflowed */
+#define IIO_EVENT_ATTR_AENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(aenergy_overflow, _evlist, _show, _store, _mask)
+
+/* the apparent energy register has overflowed */
+#define IIO_EVENT_ATTR_VAENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(vaenergy_overflow, _evlist, _show, _store, _mask)
+
+/* the apparent energy register, VAENERGY, is more than half full */
+#define IIO_EVENT_ATTR_VAENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(vaenergy_half_full, _evlist, _show, _store, _mask)
+
+/* the power has gone from negative to positive */
+#define IIO_EVENT_ATTR_PPOS(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ppos, _evlist, _show, _store, _mask)
+
+/* the power has gone from positive to negative */
+#define IIO_EVENT_ATTR_PNEG(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(pneg, _evlist, _show, _store, _mask)
+
+/* waveform sample from Channel 1 has exceeded the IPKLVL value */
+#define IIO_EVENT_ATTR_IPKLVL_EXC(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(ipklvl_exc, _evlist, _show, _store, _mask)
+
+/* waveform sample from Channel 2 has exceeded the VPKLVL value */
+#define IIO_EVENT_ATTR_VPKLVL_EXC(_evlist, _show, _store, _mask) \
+ IIO_EVENT_ATTR_SH(vpklvl_exc, _evlist, _show, _store, _mask)
+
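Each of these wrappers simply pins the sysfs name for one channel of one quantity, so a meter driver instantiates the macro with its show/store helpers and the matching register address, then lists the resulting iio_dev_attr in its attribute group. A hedged example of the expected usage; the ade7854_read_24bit()/ade7854_write_24bit() helpers named here are placeholders, not something defined in this patch.

/* usage sketch with assumed helper functions */
static IIO_DEV_ATTR_CURRENT_A_GAIN(S_IWUSR | S_IRUGO,
		ade7854_read_24bit,
		ade7854_write_24bit,
		ADE7854_AIGAIN);
static IIO_DEV_ATTR_AWATTHR(ade7854_read_32bit,
		ADE7854_AWATTHR);

static struct attribute *example_attrs[] = {
	&iio_dev_attr_current_a_gain.dev_attr.attr,
	&iio_dev_attr_awatthr.dev_attr.attr,
	NULL,
};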
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
new file mode 100644
index 000000000000..a4a363429355
--- /dev/null
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -0,0 +1,54 @@
+#
+# Resolver/Synchro drivers
+#
+comment "Resolver to digital converters"
+
+config AD2S90
+ tristate "Analog Devices ad2s90 driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Analog Devices ad2s90 SPI
+ resolver to digital converter. The driver provides direct access
+ via sysfs.
+
+config AD2S120X
+ tristate "Analog Devices ad2s120x driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Analog Devices ad2s1200 and
+ ad2s1205 SPI resolver to digital converters. The driver provides
+ direct access via sysfs.
+
+config AD2S1210
+ tristate "Analog Devices ad2s1210 driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Analog Devices ad2s1210 SPI
+ resolver to digital converter. The driver provides direct access
+ via sysfs.
+
+choice
+ prompt "Resolution Control"
+ depends on AD2S1210
+ default AD2S1210_GPIO_NONE
+ help
+ In normal mode, the resolution of the digital output is selected
+ using the RES0 and RES1 input pins. In configuration mode, the
+ resolution is selected by setting the RES0 and RES1 bits in the
+ control register. When switching between normal mode and
+ configuration mode, the options below select how the two settings
+ are kept consistent.
+
+config AD2S1210_GPIO_INPUT
+ bool "read resolution from gpio pins"
+ help
+ The RES0 and RES1 pins are sampled by GPIOs, so the driver reads
+ the resolution setting back from those GPIO pins.
+
+config AD2S1210_GPIO_OUTPUT
+ bool "set gpio pins to set resolution"
+ help
+ The RES0 and RES1 pins are driven by GPIOs, so the driver sets
+ the resolution by driving those GPIO pins.
+
+config AD2S1210_GPIO_NONE
+ bool "take the responsibility by user"
+
+endchoice
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
new file mode 100644
index 000000000000..0b84a89e6cac
--- /dev/null
+++ b/drivers/staging/iio/resolver/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Resolver/Synchro drivers
+#
+
+obj-$(CONFIG_AD2S90) += ad2s90.o
+obj-$(CONFIG_AD2S120X) += ad2s120x.o
+obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/iio/resolver/ad2s120x.c b/drivers/staging/iio/resolver/ad2s120x.c
new file mode 100644
index 000000000000..8f497a23976c
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s120x.c
@@ -0,0 +1,310 @@
+/*
+ * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s120x"
+
+/* the input pins SAMPLE and RDVEL are controlled by the driver */
+#define AD2S120X_PN 2
+
+/* input clock on serial interface */
+#define AD2S120X_HZ 8192000
+/* clock period in nanoseconds */
+#define AD2S120X_TSCLK (1000000000/AD2S120X_HZ)
+
+struct ad2s120x_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+ unsigned short sample;
+ unsigned short rdvel;
+ u8 rx[2];
+ u8 tx[2];
+};
+
+static ssize_t ad2s120x_show_pos_vel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero the unused transfer fields */
+ int ret = 0;
+ ssize_t len = 0;
+ u16 pos;
+ s16 vel;
+ u8 status;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s120x_state *st = idev->dev_data;
+
+ xfer.len = 1;
+ xfer.tx_buf = st->tx;
+ xfer.rx_buf = st->rx;
+ mutex_lock(&st->lock);
+
+ gpio_set_value(st->sample, 0);
+ /* delay (6 * AD2S120X_TSCLK + 20) nanoseconds */
+ udelay(1);
+ gpio_set_value(st->sample, 1);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ status = st->rx[1];
+ pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ len = sprintf(buf, "%d %c%c%c%c ", pos,
+ (status & 0x8) ? 'P' : 'V',
+ (status & 0x4) ? 'd' : '_',
+ (status & 0x2) ? 'l' : '_',
+ (status & 0x1) ? '1' : '0');
+
+ /* delay 18 ns */
+ /* ndelay(18); */
+
+ gpio_set_value(st->rdvel, 0);
+ /* ndelay(5);*/
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ status = st->rx[1];
+ vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
+ vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
+ (status & 0x8) ? 'P' : 'V',
+ (status & 0x4) ? 'd' : '_',
+ (status & 0x2) ? 'l' : '_',
+ (status & 0x1) ? '1' : '0');
+error_ret:
+ gpio_set_value(st->rdvel, 1);
+ /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad2s120x_show_pos(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero the unused transfer fields */
+ int ret = 0;
+ ssize_t len = 0;
+ u16 pos;
+ u8 status;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s120x_state *st = idev->dev_data;
+
+ xfer.len = 1;
+ xfer.tx_buf = st->tx;
+ xfer.rx_buf = st->rx;
+ mutex_lock(&st->lock);
+
+ gpio_set_value(st->sample, 0);
+ /* delay (6 * AD2S120X_TSCLK + 20) nanoseconds */
+ udelay(1);
+ gpio_set_value(st->sample, 1);
+ gpio_set_value(st->rdvel, 1);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ status = st->rx[1];
+ pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ len = sprintf(buf, "%d %c%c%c%c ", pos,
+ (status & 0x8) ? 'P' : 'V',
+ (status & 0x4) ? 'd' : '_',
+ (status & 0x2) ? 'l' : '_',
+ (status & 0x1) ? '1' : '0');
+error_ret:
+ /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad2s120x_show_vel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero the unused transfer fields */
+ int ret = 0;
+ ssize_t len = 0;
+ s16 vel;
+ u8 status;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s120x_state *st = idev->dev_data;
+
+ xfer.len = 1;
+ xfer.tx_buf = st->tx;
+ xfer.rx_buf = st->rx;
+ mutex_lock(&st->lock);
+
+ gpio_set_value(st->sample, 0);
+ /* delay (6 * AD2S120X_TSCLK + 20) nanoseconds */
+ udelay(1);
+ gpio_set_value(st->sample, 1);
+
+ gpio_set_value(st->rdvel, 0);
+ /* ndelay(5);*/
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ status = st->rx[1];
+ vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
+ vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
+ (status & 0x8) ? 'P' : 'V',
+ (status & 0x4) ? 'd' : '_',
+ (status & 0x2) ? 'l' : '_',
+ (status & 0x1) ? '1' : '0');
+error_ret:
+ gpio_set_value(st->rdvel, 1);
+ /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(description,
+ "12-Bit R/D Converter with Reference Oscillator");
+static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0);
+static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0);
+
+static struct attribute *ad2s120x_attributes[] = {
+ &iio_const_attr_description.dev_attr.attr,
+ &iio_dev_attr_pos_vel.dev_attr.attr,
+ &iio_dev_attr_pos.dev_attr.attr,
+ &iio_dev_attr_vel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad2s120x_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad2s120x_attributes,
+};
+
+static int __devinit ad2s120x_probe(struct spi_device *spi)
+{
+ struct ad2s120x_state *st;
+ int pn, ret = 0;
+ unsigned short *pins = spi->dev.platform_data;
+
+ for (pn = 0; pn < AD2S120X_PN; pn++) {
+ ret = gpio_request(pins[pn], DRV_NAME);
+ if (ret) {
+ pr_err("%s: request gpio pin %d failed\n",
+ DRV_NAME, pins[pn]);
+ goto error_ret;
+ }
+ gpio_direction_output(pins[pn], 1);
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+ st->sample = pins[0];
+ st->rdvel = pins[1];
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad2s120x_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+
+ spi->max_speed_hz = AD2S120X_HZ;
+ spi->mode = SPI_MODE_3;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ for (--pn; pn >= 0; pn--)
+ gpio_free(pins[pn]);
+ return ret;
+}
+
+static int __devexit ad2s120x_remove(struct spi_device *spi)
+{
+ struct ad2s120x_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad2s120x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad2s120x_probe,
+ .remove = __devexit_p(ad2s120x_remove),
+};
+
+static __init int ad2s120x_spi_init(void)
+{
+ return spi_register_driver(&ad2s120x_driver);
+}
+module_init(ad2s120x_spi_init);
+
+static __exit void ad2s120x_spi_exit(void)
+{
+ spi_unregister_driver(&ad2s120x_driver);
+}
+module_exit(ad2s120x_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
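ad2s120x_probe() pulls its two GPIO numbers out of spi->dev.platform_data, in the order SAMPLE then RDVEL, so board code has to supply an array of two unsigned shorts alongside the spi_board_info. A sketch of that wiring follows; the GPIO numbers, bus number and chip select are made-up values.

/* board file sketch -- GPIO numbers, bus and chip select are assumptions */
static unsigned short ad2s120x_platform_data[] = {
	35,	/* SAMPLE */
	36,	/* RDVEL */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias = "ad2s120x",
		.bus_num = 0,
		.chip_select = 1,
		.max_speed_hz = 8192000,	/* forced to AD2S120X_HZ by probe */
		.platform_data = ad2s120x_platform_data,
	},
};

The driver reprograms max_speed_hz and selects SPI_MODE_3 in its probe, so those board values only matter before the driver binds.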
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
new file mode 100644
index 000000000000..c12f64cc40df
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -0,0 +1,872 @@
+/*
+ * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s1210"
+
+#define DEF_CONTROL 0x7E
+
+#define MSB_IS_HIGH 0x80
+#define MSB_IS_LOW 0x7F
+#define PHASE_LOCK_RANGE_44 0x20
+#define ENABLE_HYSTERESIS 0x10
+#define SET_ENRES1 0x08
+#define SET_ENRES0 0x04
+#define SET_RES1 0x02
+#define SET_RES0 0x01
+
+#define SET_ENRESOLUTION (SET_ENRES1 | SET_ENRES0)
+#define SET_RESOLUTION (SET_RES1 | SET_RES0)
+
+#define REG_POSITION 0x80
+#define REG_VELOCITY 0x82
+#define REG_LOS_THRD 0x88
+#define REG_DOS_OVR_THRD 0x89
+#define REG_DOS_MIS_THRD 0x8A
+#define REG_DOS_RST_MAX_THRD 0x8B
+#define REG_DOS_RST_MIN_THRD 0x8C
+#define REG_LOT_HIGH_THRD 0x8D
+#define REG_LOT_LOW_THRD 0x8E
+#define REG_EXCIT_FREQ 0x91
+#define REG_CONTROL 0x92
+#define REG_SOFT_RESET 0xF0
+#define REG_FAULT 0xFF
+
+/* the pins SAMPLE, A0, A1, RES0 and RES1 are controlled by the driver */
+#define AD2S1210_SAA 3
+#if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+# define AD2S1210_RES 2
+#else
+# define AD2S1210_RES 0
+#endif
+#define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES)
+
+#define AD2S1210_MIN_CLKIN 6144000
+#define AD2S1210_MAX_CLKIN 10240000
+#define AD2S1210_MIN_EXCIT 2000
+#define AD2S1210_MAX_EXCIT 20000
+#define AD2S1210_MIN_FCW 0x4
+#define AD2S1210_MAX_FCW 0x50
+
+/* default input clock on serial interface */
+#define AD2S1210_DEF_CLKIN 8192000
+/* clock period in nano second */
+#define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN)
+#define AD2S1210_DEF_EXCIT 10000
+
+enum ad2s1210_mode {
+ MOD_POS = 0,
+ MOD_VEL,
+ MOD_RESERVED,
+ MOD_CONFIG,
+};
+
+enum ad2s1210_res {
+ RES_10 = 10,
+ RES_12 = 12,
+ RES_14 = 14,
+ RES_16 = 16,
+};
+
+static unsigned int resolution_value[] = {
+ RES_10, RES_12, RES_14, RES_16};
+
+struct ad2s1210_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+ struct spi_transfer xfer;
+ unsigned int hysteresis;
+ unsigned int old_data;
+ enum ad2s1210_mode mode;
+ enum ad2s1210_res resolution;
+ unsigned int fclkin;
+ unsigned int fexcit;
+ unsigned short sample;
+ unsigned short a0;
+ unsigned short a1;
+ unsigned short res0;
+ unsigned short res1;
+ u8 rx[3];
+ u8 tx[3];
+};
+
+static inline void start_sample(struct ad2s1210_state *st)
+{
+ gpio_set_value(st->sample, 0);
+}
+
+static inline void stop_sample(struct ad2s1210_state *st)
+{
+ gpio_set_value(st->sample, 1);
+}
+
+static inline void set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st)
+{
+ switch (mode) {
+ case MOD_POS:
+ gpio_set_value(st->a0, 0);
+ gpio_set_value(st->a1, 0);
+ break;
+ case MOD_VEL:
+ gpio_set_value(st->a0, 0);
+ gpio_set_value(st->a1, 1);
+ break;
+ case MOD_CONFIG:
+ gpio_set_value(st->a0, 1);
+ gpio_set_value(st->a1, 1);
+ break;
+ default:
+ /* set to reserved mode */
+ gpio_set_value(st->a0, 1);
+ gpio_set_value(st->a1, 0);
+ }
+ st->mode = mode;
+}
+
+/* write 1 byte (address or data) to the chip */
+static int config_write(struct ad2s1210_state *st,
+ unsigned char data)
+{
+ struct spi_message msg;
+ int ret = 0;
+
+ st->xfer.len = 1;
+ set_mode(MOD_CONFIG, st);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ st->tx[0] = data;
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ return ret;
+ st->old_data = 1;
+ return ret;
+}
+
+/* read value from one of the registers */
+static int config_read(struct ad2s1210_state *st,
+ unsigned char address,
+ unsigned char *data)
+{
+ struct spi_message msg;
+ int ret = 0;
+
+ st->xfer.len = 2;
+ set_mode(MOD_CONFIG, st);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ st->tx[0] = address | MSB_IS_HIGH;
+ st->tx[1] = REG_FAULT;
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ return ret;
+ *data = st->rx[1];
+ st->old_data = 1;
+ return ret;
+}
+
+static inline void update_frequency_control_word(struct ad2s1210_state *st)
+{
+ unsigned char fcw;
+ fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
+ if (fcw >= AD2S1210_MIN_FCW && fcw <= AD2S1210_MAX_FCW) {
+ config_write(st, REG_EXCIT_FREQ);
+ config_write(st, fcw);
+ } else
+ pr_err("ad2s1210: FCW out of range\n");
+}
+
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+static inline unsigned char read_resolution_pin(struct ad2s1210_state *st)
+{
+ unsigned int data;
+ data = (gpio_get_value(st->res0) << 1) |
+ gpio_get_value(st->res1);
+ return resolution_value[data];
+}
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+static inline void set_resolution_pin(struct ad2s1210_state *st)
+{
+ switch (st->resolution) {
+ case RES_10:
+ gpio_set_value(st->res0, 0);
+ gpio_set_value(st->res1, 0);
+ break;
+ case RES_12:
+ gpio_set_value(st->res0, 0);
+ gpio_set_value(st->res1, 1);
+ break;
+ case RES_14:
+ gpio_set_value(st->res0, 1);
+ gpio_set_value(st->res1, 0);
+ break;
+ case RES_16:
+ gpio_set_value(st->res0, 1);
+ gpio_set_value(st->res1, 1);
+ break;
+ }
+}
+#endif
+
+static inline void soft_reset(struct ad2s1210_state *st)
+{
+ config_write(st, REG_SOFT_RESET);
+ config_write(st, 0x0);
+}
+
+
+/* return the OLD DATA since last spi bus write */
+static ssize_t ad2s1210_show_raw(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (st->old_data) {
+ ret = sprintf(buf, "0x%x\n", st->rx[0]);
+ st->old_data = 0;
+ } else
+ ret = 0;
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ad2s1210_store_raw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned long udata;
+ unsigned char data;
+ int ret;
+
+ ret = strict_strtoul(buf, 16, &udata);
+ if (ret)
+ return -EINVAL;
+ data = udata & 0xff;
+ mutex_lock(&st->lock);
+ config_write(st, data);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static ssize_t ad2s1210_store_softreset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ mutex_lock(&st->lock);
+ soft_reset(st);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static ssize_t ad2s1210_show_fclkin(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ return sprintf(buf, "%d\n", st->fclkin);
+}
+
+static ssize_t ad2s1210_store_fclkin(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned long fclkin;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &fclkin);
+ if (!ret && fclkin >= AD2S1210_MIN_CLKIN &&
+ fclkin <= AD2S1210_MAX_CLKIN) {
+ mutex_lock(&st->lock);
+ st->fclkin = fclkin;
+ } else {
+ pr_err("ad2s1210: fclkin out of range\n");
+ return -EINVAL;
+ }
+ update_frequency_control_word(st);
+ soft_reset(st);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static ssize_t ad2s1210_show_fexcit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ return sprintf(buf, "%d\n", st->fexcit);
+}
+
+static ssize_t ad2s1210_store_fexcit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned long fexcit;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &fexcit);
+ if (!ret && fexcit >= AD2S1210_MIN_EXCIT &&
+ fexcit <= AD2S1210_MAX_EXCIT) {
+ mutex_lock(&st->lock);
+ st->fexcit = fexcit;
+ } else {
+ pr_err("ad2s1210: excitation frequency out of range\n");
+ return -EINVAL;
+ }
+ update_frequency_control_word(st);
+ soft_reset(st);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static ssize_t ad2s1210_show_control(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned char data;
+ mutex_lock(&st->lock);
+ config_read(st, REG_CONTROL, &data);
+ mutex_unlock(&st->lock);
+ return sprintf(buf, "0x%x\n", data);
+}
+
+static ssize_t ad2s1210_store_control(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned long udata;
+ unsigned char data;
+ int ret;
+
+ ret = strict_strtoul(buf, 16, &udata);
+ if (ret) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ mutex_lock(&st->lock);
+ config_write(st, REG_CONTROL);
+ data = udata & MSB_IS_LOW;
+ config_write(st, data);
+ config_read(st, REG_CONTROL, &data);
+ if (data & MSB_IS_HIGH) {
+ ret = -EIO;
+ pr_err("ad2s1210: write control register fail\n");
+ goto error_ret;
+ }
+ st->resolution = resolution_value[data & SET_RESOLUTION];
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+ data = read_resolution_pin(st);
+ if (data != st->resolution)
+ pr_warning("ad2s1210: resolution settings not match\n");
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+ set_resolution_pin(st);
+#endif
+ ret = len;
+ if (data & ENABLE_HYSTERESIS)
+ st->hysteresis = 1;
+ else
+ st->hysteresis = 0;
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ad2s1210_show_resolution(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ return sprintf(buf, "%d\n", st->resolution);
+}
+
+static ssize_t ad2s1210_store_resolution(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned char data;
+ unsigned long udata;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &udata);
+ if (ret || udata < RES_10 || udata > RES_16) {
+ pr_err("ad2s1210: resolution out of range\n");
+ return -EINVAL;
+ }
+ mutex_lock(&st->lock);
+ config_read(st, REG_CONTROL, &data);
+ data &= ~SET_RESOLUTION;
+ data |= (udata - RES_10) >> 1;
+ config_write(st, REG_CONTROL);
+ config_write(st, data & MSB_IS_LOW);
+ config_read(st, REG_CONTROL, &data);
+ if (data & MSB_IS_HIGH) {
+ ret = -EIO;
+ pr_err("ad2s1210: setting resolution fail\n");
+ goto error_ret;
+ }
+ st->resolution = resolution_value[data & SET_RESOLUTION];
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+ data = read_resolution_pin(st);
+ if (data != st->resolution)
+ pr_warning("ad2s1210: resolution settings not match\n");
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+ set_resolution_pin(st);
+#endif
+ ret = len;
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+/* read the fault register since last sample */
+static ssize_t ad2s1210_show_fault(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret = 0;
+ ssize_t len = 0;
+ unsigned char data;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+
+ mutex_lock(&st->lock);
+ ret = config_read(st, REG_FAULT, &data);
+
+ if (ret)
+ goto error_ret;
+ len = sprintf(buf, "0x%x\n", data);
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_clear_fault(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned char data;
+
+ mutex_lock(&st->lock);
+ start_sample(st);
+ /* delay (2 * tck + 20) nanoseconds */
+ udelay(1);
+ stop_sample(st);
+ config_read(st, REG_FAULT, &data);
+ start_sample(st);
+ stop_sample(st);
+ mutex_unlock(&st->lock);
+
+ return len;
+}
+
+static ssize_t ad2s1210_show_reg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned char data;
+ struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+
+ mutex_lock(&st->lock);
+ config_read(st, iattr->address, &data);
+ mutex_unlock(&st->lock);
+ return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t ad2s1210_store_reg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+ unsigned long data;
+ int ret;
+ struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+
+ ret = strict_strtoul(buf, 10, &data);
+ if (ret)
+ return -EINVAL;
+ mutex_lock(&st->lock);
+ config_write(st, iattr->address);
+ config_write(st, data & MSB_IS_LOW);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static ssize_t ad2s1210_show_pos(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ int ret = 0;
+ ssize_t len = 0;
+ u16 pos;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+
+ st->xfer.len = 2;
+ mutex_lock(&st->lock);
+ start_sample(st);
+ /* delay (6 * tck + 20) nanoseconds */
+ udelay(1);
+
+ set_mode(MOD_POS, st);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
+ if (st->hysteresis)
+ pos >>= 16 - st->resolution;
+ len = sprintf(buf, "%d\n", pos);
+error_ret:
+ stop_sample(st);
+ /* delay (2 * tck + 20) nanoseconds */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_show_vel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ unsigned short negative;
+ int ret = 0;
+ ssize_t len = 0;
+ s16 vel;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+
+ st->xfer.len = 2;
+ mutex_lock(&st->lock);
+ start_sample(st);
+ /* delay (6 * tck + 20) nanoseconds */
+ udelay(1);
+
+ set_mode(MOD_VEL, st);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ negative = st->rx[0] & 0x80;
+ vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
+ vel >>= 16 - st->resolution;
+ if (negative) {
+ negative = (0xffff >> st->resolution) << st->resolution;
+ vel |= negative;
+ }
+ len = sprintf(buf, "%d\n", vel);
+error_ret:
+ stop_sample(st);
+ /* delay (2 * tck + 20) nanoseconds */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_show_pos_vel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ unsigned short negative;
+ int ret = 0;
+ ssize_t len = 0;
+ u16 pos;
+ s16 vel;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s1210_state *st = idev->dev_data;
+
+ st->xfer.len = 2;
+ mutex_lock(&st->lock);
+ start_sample(st);
+ /* delay (6 * tck + 20) nanoseconds */
+ udelay(1);
+
+ set_mode(MOD_POS, st);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
+ if (st->hysteresis)
+ pos >>= 16 - st->resolution;
+ len = sprintf(buf, "%d ", pos);
+
+ st->xfer.len = 2;
+ set_mode(MOD_VEL, st);
+ spi_message_init(&msg);
+ spi_message_add_tail(&st->xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ negative = st->rx[0] & 0x80;
+ vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
+ vel >>= 16 - st->resolution;
+ if (negative) {
+ negative = (0xffff >> st->resolution) << st->resolution;
+ vel |= negative;
+ }
+ len += sprintf(buf + len, "%d\n", vel);
+error_ret:
+ stop_sample(st);
+ /* delay (2 * tck + 20) nanoseconds */
+ udelay(1);
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(description,
+ "Variable Resolution, 10-Bit to 16Bit R/D\n\
+Converter with Reference Oscillator");
+static IIO_DEVICE_ATTR(raw_io, S_IRUGO | S_IWUSR,
+ ad2s1210_show_raw, ad2s1210_store_raw, 0);
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+ NULL, ad2s1210_store_softreset, 0);
+static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
+ ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
+static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
+ ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
+static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
+ ad2s1210_show_control, ad2s1210_store_control, 0);
+static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
+ ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
+static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
+ ad2s1210_show_fault, ad2s1210_clear_fault, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO,
+ ad2s1210_show_pos, NULL, 0);
+static IIO_DEVICE_ATTR(vel, S_IRUGO,
+ ad2s1210_show_vel, NULL, 0);
+static IIO_DEVICE_ATTR(pos_vel, S_IRUGO,
+ ad2s1210_show_pos_vel, NULL, 0);
+static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_LOS_THRD);
+static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_OVR_THRD);
+static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_MIS_THRD);
+static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MAX_THRD);
+static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MIN_THRD);
+static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_HIGH_THRD);
+static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
+ ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_LOW_THRD);
+
+static struct attribute *ad2s1210_attributes[] = {
+ &iio_const_attr_description.dev_attr.attr,
+ &iio_dev_attr_raw_io.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_fclkin.dev_attr.attr,
+ &iio_dev_attr_fexcit.dev_attr.attr,
+ &iio_dev_attr_control.dev_attr.attr,
+ &iio_dev_attr_bits.dev_attr.attr,
+ &iio_dev_attr_fault.dev_attr.attr,
+ &iio_dev_attr_pos.dev_attr.attr,
+ &iio_dev_attr_vel.dev_attr.attr,
+ &iio_dev_attr_pos_vel.dev_attr.attr,
+ &iio_dev_attr_los_thrd.dev_attr.attr,
+ &iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
+ &iio_dev_attr_dos_mis_thrd.dev_attr.attr,
+ &iio_dev_attr_dos_rst_max_thrd.dev_attr.attr,
+ &iio_dev_attr_dos_rst_min_thrd.dev_attr.attr,
+ &iio_dev_attr_lot_high_thrd.dev_attr.attr,
+ &iio_dev_attr_lot_low_thrd.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad2s1210_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad2s1210_attributes,
+};
+
+static int __devinit ad2s1210_initial(struct ad2s1210_state *st)
+{
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&st->lock);
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+ st->resolution = read_resolution_pin(st);
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+ set_resolution_pin(st);
+#endif
+
+ config_write(st, REG_CONTROL);
+ data = DEF_CONTROL & ~(SET_RESOLUTION);
+ data |= (st->resolution - RES_10) >> 1;
+ config_write(st, data);
+ ret = config_read(st, REG_CONTROL, &data);
+ if (ret)
+ goto error_ret;
+
+ if (data & MSB_IS_HIGH) {
+ ret = -EIO;
+ goto error_ret;
+ }
+
+ update_frequency_control_word(st);
+ soft_reset(st);
+error_ret:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int __devinit ad2s1210_probe(struct spi_device *spi)
+{
+ struct ad2s1210_state *st;
+ int pn, ret = 0;
+ unsigned short *pins = spi->dev.platform_data;
+
+ for (pn = 0; pn < AD2S1210_PN; pn++) {
+ ret = gpio_request(pins[pn], DRV_NAME);
+ if (ret) {
+ pr_err("%s: request gpio pin %d failed\n",
+ DRV_NAME, pins[pn]);
+ goto error_ret;
+ }
+ if (pn < AD2S1210_SAA)
+ gpio_direction_output(pins[pn], 1);
+ else {
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+ gpio_direction_input(pins[pn]);
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+ gpio_direction_output(pins[pn], 1);
+#endif
+ }
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+ st->xfer.tx_buf = st->tx;
+ st->xfer.rx_buf = st->rx;
+ st->hysteresis = 1;
+ st->mode = MOD_CONFIG;
+ st->resolution = RES_12;
+ st->fclkin = AD2S1210_DEF_CLKIN;
+ st->fexcit = AD2S1210_DEF_EXCIT;
+ st->sample = pins[0];
+ st->a0 = pins[1];
+ st->a1 = pins[2];
+ st->res0 = pins[3];
+ st->res1 = pins[4];
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad2s1210_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+
+ if (spi->max_speed_hz != AD2S1210_DEF_CLKIN)
+ st->fclkin = spi->max_speed_hz;
+ spi->mode = SPI_MODE_3;
+ spi_setup(spi);
+
+ ad2s1210_initial(st);
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ for (--pn; pn >= 0; pn--)
+ gpio_free(pins[pn]);
+ return ret;
+}
+
+static int __devexit ad2s1210_remove(struct spi_device *spi)
+{
+ struct ad2s1210_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad2s1210_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad2s1210_probe,
+ .remove = __devexit_p(ad2s1210_remove),
+};
+
+static __init int ad2s1210_spi_init(void)
+{
+ return spi_register_driver(&ad2s1210_driver);
+}
+module_init(ad2s1210_spi_init);
+
+static __exit void ad2s1210_spi_exit(void)
+{
+ spi_unregister_driver(&ad2s1210_driver);
+}
+module_exit(ad2s1210_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
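For reference, update_frequency_control_word() computes fcw = fexcit * 2^15 / fclkin and rejects anything outside AD2S1210_MIN_FCW..AD2S1210_MAX_FCW (0x4..0x50). With the defaults above (fexcit = 10 kHz, fclkin = 8.192 MHz) this gives 10000 * 32768 / 8192000 = 40 = 0x28, comfortably inside the window. A standalone sketch of the same arithmetic, handy when choosing a non-default excitation frequency:

/* standalone check mirroring the driver's FCW arithmetic */
#include <stdio.h>

int main(void)
{
	unsigned int fclkin = 8192000;	/* AD2S1210_DEF_CLKIN */
	unsigned int fexcit = 10000;	/* AD2S1210_DEF_EXCIT */
	unsigned int fcw = fexcit * (1 << 15) / fclkin;

	printf("fcw = 0x%x (%u)\n", fcw, fcw);	/* 0x28 (40) for the defaults */
	return (fcw >= 0x4 && fcw <= 0x50) ? 0 : 1;
}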
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
new file mode 100644
index 000000000000..4143535242d9
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -0,0 +1,159 @@
+/*
+ * ad2s90.c simple support for the ADI Resolver to Digital Converters: AD2S90
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s90"
+
+struct ad2s90_state {
+ struct mutex lock;
+ struct iio_dev *idev;
+ struct spi_device *sdev;
+ u8 rx[2];
+ u8 tx[2];
+};
+
+static ssize_t ad2s90_show_angular(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer = { 0 }; /* zero the unused transfer fields */
+ int ret;
+ ssize_t len = 0;
+ u16 val;
+ struct iio_dev *idev = dev_get_drvdata(dev);
+ struct ad2s90_state *st = idev->dev_data;
+
+ xfer.len = 1;
+ xfer.tx_buf = st->tx;
+ xfer.rx_buf = st->rx;
+ mutex_lock(&st->lock);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+ ret = spi_sync(st->sdev, &msg);
+ if (ret)
+ goto error_ret;
+ val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ len = sprintf(buf, "%d\n", val);
+error_ret:
+ mutex_unlock(&st->lock);
+
+ return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_SIMPLE_RESOLVER(_show) \
+ IIO_DEVICE_ATTR(angular, S_IRUGO, _show, NULL, 0)
+
+static IIO_CONST_ATTR(description,
+ "Low Cost, Complete 12-Bit Resolver-to-Digital Converter");
+static IIO_DEV_ATTR_SIMPLE_RESOLVER(ad2s90_show_angular);
+
+static struct attribute *ad2s90_attributes[] = {
+ &iio_const_attr_description.dev_attr.attr,
+ &iio_dev_attr_angular.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad2s90_attribute_group = {
+ .name = DRV_NAME,
+ .attrs = ad2s90_attributes,
+};
+
+static int __devinit ad2s90_probe(struct spi_device *spi)
+{
+ struct ad2s90_state *st;
+ int ret = 0;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ spi_set_drvdata(spi, st);
+
+ mutex_init(&st->lock);
+ st->sdev = spi;
+
+ st->idev = iio_allocate_device();
+ if (st->idev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->idev->dev.parent = &spi->dev;
+ st->idev->num_interrupt_lines = 0;
+ st->idev->event_attrs = NULL;
+
+ st->idev->attrs = &ad2s90_attribute_group;
+ st->idev->dev_data = (void *)(st);
+ st->idev->driver_module = THIS_MODULE;
+ st->idev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->idev);
+ if (ret)
+ goto error_free_dev;
+
+ /* need 600ns between CS and the first falling edge of SCLK */
+ spi->max_speed_hz = 830000;
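+ /* assuming the 600 ns budget bounds half an SCLK period, the clock must
+  * stay under roughly 1 / (2 * 600 ns) = 833 kHz, hence the 830 kHz cap
+  */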
+ spi->mode = SPI_MODE_3;
+ spi_setup(spi);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(st->idev);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad2s90_remove(struct spi_device *spi)
+{
+ struct ad2s90_state *st = spi_get_drvdata(spi);
+
+ iio_device_unregister(st->idev);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver ad2s90_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad2s90_probe,
+ .remove = __devexit_p(ad2s90_remove),
+};
+
+static __init int ad2s90_spi_init(void)
+{
+ return spi_register_driver(&ad2s90_driver);
+}
+module_init(ad2s90_spi_init);
+
+static __exit void ad2s90_spi_exit(void)
+{
+ spi_unregister_driver(&ad2s90_driver);
+}
+module_exit(ad2s90_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index ee91a95a8b95..24b74ddcd083 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -108,6 +108,12 @@ struct iio_const_attr {
IIO_DEVICE_ATTR(name, S_IRUGO, _show, NULL, 0)
/**
+ * IIO_DEV_ATTR_RESET: resets the device
+ **/
+#define IIO_DEV_ATTR_RESET(_store) \
+ IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, _store, 0)
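+/*
+ * Illustrative use (store function name is hypothetical): a driver supplies
+ * a write-only store callback and exports the generated attribute, e.g.
+ *
+ *	static IIO_DEV_ATTR_RESET(ad2s1210_store_reset);
+ *	...
+ *	&iio_dev_attr_reset.dev_attr.attr,
+ */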
+
+/**
* IIO_CONST_ATTR_NAME - constant identifier
* @_string: the name
**/
diff --git a/drivers/staging/intel_sst/Kconfig b/drivers/staging/intel_sst/Kconfig
index b46bd9d1b324..82391077b384 100644
--- a/drivers/staging/intel_sst/Kconfig
+++ b/drivers/staging/intel_sst/Kconfig
@@ -8,6 +8,7 @@ config SND_INTEL_SST
config SND_INTELMID
tristate "Intel MID sound card driver"
+ depends on SOUND && SND
select SND_PCM
select SND_SEQUENCER
select SND_JACK
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
index 24d3928e7071..ce4a9f79ccd2 100644
--- a/drivers/staging/intel_sst/intel_sst.c
+++ b/drivers/staging/intel_sst/intel_sst.c
@@ -29,11 +29,14 @@
* This file contains all init functions
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
#include <asm/mrst.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
@@ -169,17 +172,17 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
{
int i, ret = 0;
- pr_debug("sst: Probe for DID %x\n", pci->device);
+ pr_debug("Probe for DID %x\n", pci->device);
mutex_lock(&drv_ctx_lock);
if (sst_drv_ctx) {
- pr_err("sst: Only one sst handle is supported\n");
+ pr_err("Only one sst handle is supported\n");
mutex_unlock(&drv_ctx_lock);
return -EBUSY;
}
sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
if (!sst_drv_ctx) {
- pr_err("sst: intel_sst malloc fail\n");
+ pr_err("malloc fail\n");
mutex_unlock(&drv_ctx_lock);
return -ENOMEM;
}
@@ -226,7 +229,7 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
spin_lock_init(&sst_drv_ctx->list_spin_lock);
sst_drv_ctx->max_streams = pci_id->driver_data;
- pr_debug("sst: Got drv data max stream %d\n",
+ pr_debug("Got drv data max stream %d\n",
sst_drv_ctx->max_streams);
for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
struct stream_info *stream = &sst_drv_ctx->streams[i];
@@ -241,18 +244,18 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
sst_drv_ctx->mmap_mem =
kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
if (sst_drv_ctx->mmap_mem) {
- pr_debug("sst: Got memory %p size 0x%x\n",
+ pr_debug("Got memory %p size 0x%x\n",
sst_drv_ctx->mmap_mem,
sst_drv_ctx->mmap_len);
break;
}
if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
- pr_err("sst: mem alloc fail...abort!!\n");
+ pr_err("mem alloc fail...abort!!\n");
ret = -ENOMEM;
goto free_process_reply_wq;
}
sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
- pr_debug("sst:mem alloc failed...trying %d\n",
+ pr_debug("mem alloc failed...trying %d\n",
sst_drv_ctx->mmap_len);
}
}
@@ -260,7 +263,7 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
/* Init the device */
ret = pci_enable_device(pci);
if (ret) {
- pr_err("sst: device cant be enabled\n");
+ pr_err("device cant be enabled\n");
goto do_free_mem;
}
sst_drv_ctx->pci = pci_dev_get(pci);
@@ -273,25 +276,25 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
if (!sst_drv_ctx->shim)
goto do_release_regions;
- pr_debug("sst: SST Shim Ptr %p\n", sst_drv_ctx->shim);
+ pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
/* Shared SRAM */
sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
if (!sst_drv_ctx->mailbox)
goto do_unmap_shim;
- pr_debug("sst: SRAM Ptr %p\n", sst_drv_ctx->mailbox);
+ pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
/* IRAM */
sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
if (!sst_drv_ctx->iram)
goto do_unmap_sram;
- pr_debug("sst:IRAM Ptr %p\n", sst_drv_ctx->iram);
+ pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
/* DRAM */
sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
if (!sst_drv_ctx->dram)
goto do_unmap_iram;
- pr_debug("sst: DRAM Ptr %p\n", sst_drv_ctx->dram);
+ pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_UN_INIT;
@@ -301,26 +304,31 @@ static int __devinit intel_sst_probe(struct pci_dev *pci,
IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
if (ret)
goto do_unmap_dram;
- pr_debug("sst: Registered IRQ 0x%x\n", pci->irq);
+ pr_debug("Registered IRQ 0x%x\n", pci->irq);
+
+ /*Register LPE Control as misc driver*/
+ ret = misc_register(&lpe_ctrl);
+ if (ret) {
+ pr_err("couldn't register control device\n");
+ goto do_free_irq;
+ }
if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
ret = misc_register(&lpe_dev);
if (ret) {
- pr_err("sst: couldn't register LPE device\n");
- goto do_free_irq;
- }
-
- /*Register LPE Control as misc driver*/
- ret = misc_register(&lpe_ctrl);
- if (ret) {
- pr_err("sst: couldn't register misc driver\n");
- goto do_free_irq;
- }
+ pr_err("couldn't register misc driver\n");
+ goto do_free_misc;
+ }
}
sst_drv_ctx->lpe_stalled = 0;
- pr_debug("sst: ...successfully done!!!\n");
+ pm_runtime_set_active(&pci->dev);
+ pm_runtime_enable(&pci->dev);
+ pm_runtime_allow(&pci->dev);
+ pr_debug("...successfully done!!!\n");
return ret;
+do_free_misc:
+ misc_deregister(&lpe_ctrl);
do_free_irq:
free_irq(pci->irq, sst_drv_ctx);
do_unmap_dram:
@@ -347,7 +355,7 @@ free_mad_wq:
destroy_workqueue(sst_drv_ctx->mad_wq);
do_free_drv_ctx:
kfree(sst_drv_ctx);
- pr_err("sst: Probe failed with 0x%x\n", ret);
+ pr_err("Probe failed with 0x%x\n", ret);
return ret;
}
@@ -365,10 +373,9 @@ static void __devexit intel_sst_remove(struct pci_dev *pci)
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_UN_INIT;
mutex_unlock(&sst_drv_ctx->sst_lock);
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
+ misc_deregister(&lpe_ctrl);
+ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
misc_deregister(&lpe_dev);
- misc_deregister(&lpe_ctrl);
- }
free_irq(pci->irq, sst_drv_ctx);
iounmap(sst_drv_ctx->dram);
iounmap(sst_drv_ctx->iram);
@@ -404,10 +411,12 @@ int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
{
union config_status_reg csr;
- pr_debug("sst: intel_sst_suspend called\n");
+ pr_debug("intel_sst_suspend called\n");
- if (sst_drv_ctx->pb_streams != 0 || sst_drv_ctx->cp_streams != 0)
- return -EPERM;
+ if (sst_drv_ctx->stream_cnt) {
+ pr_err("active streams,not able to suspend\n");
+ return -EBUSY;
+ }
/*Assert RESET on LPE Processor*/
csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
csr.full = csr.full | 0x2;
@@ -434,17 +443,17 @@ int intel_sst_resume(struct pci_dev *pci)
{
int ret = 0;
- pr_debug("sst: intel_sst_resume called\n");
+ pr_debug("intel_sst_resume called\n");
if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("sst: SST is not in suspended state\n");
- return -EPERM;
+ pr_err("SST is not in suspended state\n");
+ return 0;
}
sst_drv_ctx = pci_get_drvdata(pci);
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
ret = pci_enable_device(pci);
if (ret)
- pr_err("sst: device cant be enabled\n");
+ pr_err("device cant be enabled\n");
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_UN_INIT;
@@ -452,6 +461,34 @@ int intel_sst_resume(struct pci_dev *pci)
return 0;
}
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ pr_debug("runtime_suspend called\n");
+ return intel_sst_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ pr_debug("runtime_resume called\n");
+ return intel_sst_resume(pci_dev);
+}
+
+static int intel_sst_runtime_idle(struct device *dev)
+{
+ pr_debug("runtime_idle called\n");
+ if (sst_drv_ctx->stream_cnt == 0 && sst_drv_ctx->am_cnt == 0)
+ pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
+ return -EBUSY;
+}
+
+static const struct dev_pm_ops intel_sst_pm = {
+ .runtime_suspend = intel_sst_runtime_suspend,
+ .runtime_resume = intel_sst_runtime_resume,
+ .runtime_idle = intel_sst_runtime_idle,
+};
+
/* PCI Routines */
static struct pci_device_id intel_sst_ids[] = {
{ PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
@@ -468,6 +505,9 @@ static struct pci_driver driver = {
#ifdef CONFIG_PM
.suspend = intel_sst_suspend,
.resume = intel_sst_resume,
+ .driver = {
+ .pm = &intel_sst_pm,
+ },
#endif
};
@@ -482,14 +522,14 @@ static int __init intel_sst_init(void)
{
/* Init all variables, data structure etc....*/
int ret = 0;
- pr_debug("sst: INFO: ******** SST DRIVER loading.. Ver: %s\n",
+ pr_debug("INFO: ******** SST DRIVER loading.. Ver: %s\n",
SST_DRIVER_VERSION);
mutex_init(&drv_ctx_lock);
/* Register with PCI */
ret = pci_register_driver(&driver);
if (ret)
- pr_err("sst: PCI register failed\n");
+ pr_err("PCI register failed\n");
return ret;
}
@@ -504,7 +544,7 @@ static void __exit intel_sst_exit(void)
{
pci_unregister_driver(&driver);
- pr_debug("sst: driver unloaded\n");
+ pr_debug("driver unloaded\n");
return;
}
diff --git a/drivers/staging/intel_sst/intel_sst.h b/drivers/staging/intel_sst/intel_sst.h
index 1f19f0d1d316..cb03ff7d1a21 100644
--- a/drivers/staging/intel_sst/intel_sst.h
+++ b/drivers/staging/intel_sst/intel_sst.h
@@ -29,6 +29,7 @@
* and middleware.
* This file is shared between the SST and MAD drivers
*/
+#include "intel_sst_ioctl.h"
#define SST_CARD_NAMES "intel_mid_card"
@@ -107,10 +108,15 @@ struct snd_pmic_ops {
int (*power_down_pmic) (void);
};
+struct intel_sst_pcm_control {
+ int (*open) (struct snd_sst_params *str_param);
+ int (*device_control) (int cmd, void *arg);
+ int (*close) (unsigned int str_id);
+};
struct intel_sst_card_ops {
char *module_name;
unsigned int vendor_id;
- int (*control_set) (int control_element, void *value);
+ struct intel_sst_pcm_control *pcm_control;
struct snd_pmic_ops *scard_ops;
};
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 991440015e92..a367991d5600 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -27,12 +27,15 @@
* Upper layer interfaces (MAD driver, MMF) to SST driver
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/aio.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
#include <linux/ioctl.h>
#ifdef CONFIG_MRST_RAR_HANDLER
#include <linux/rar_register.h>
@@ -58,14 +61,14 @@ static int intel_sst_check_device(void)
{
int retval = 0;
if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
- pr_warn("sst: Sound card not availble\n ");
+ pr_warn("Sound card not available\n");
return -EIO;
}
if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- pr_debug("sst: Resuming from Suspended state\n");
+ pr_debug("Resuming from Suspended state\n");
retval = intel_sst_resume(sst_drv_ctx->pci);
if (retval) {
- pr_debug("sst: Resume Failed= %#x,abort\n", retval);
+ pr_debug("Resume Failed= %#x,abort\n", retval);
return retval;
}
}
@@ -97,15 +100,22 @@ static int intel_sst_check_device(void)
*/
int intel_sst_open(struct inode *i_node, struct file *file_ptr)
{
- int retval = intel_sst_check_device();
- if (retval)
- return retval;
+ unsigned int retval;
mutex_lock(&sst_drv_ctx->stream_lock);
+ pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
+ retval = intel_sst_check_device();
+ if (retval) {
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ return retval;
+ }
+
if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
struct ioctl_pvt_data *data =
kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
if (!data) {
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
mutex_unlock(&sst_drv_ctx->stream_lock);
return -ENOMEM;
}
@@ -115,9 +125,10 @@ int intel_sst_open(struct inode *i_node, struct file *file_ptr)
data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
data->str_id = 0;
file_ptr->private_data = (void *)data;
- pr_debug("sst: pvt_id handle = %d!\n", data->pvt_id);
+ pr_debug("pvt_id handle = %d!\n", data->pvt_id);
} else {
retval = -EUSERS;
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
mutex_unlock(&sst_drv_ctx->stream_lock);
}
return retval;
@@ -136,18 +147,26 @@ int intel_sst_open(struct inode *i_node, struct file *file_ptr)
*/
int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
{
- int retval = intel_sst_check_device();
- if (retval)
- return retval;
+ unsigned int retval;
/* audio manager open */
mutex_lock(&sst_drv_ctx->stream_lock);
+ pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
+ retval = intel_sst_check_device();
+ if (retval) {
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+ mutex_unlock(&sst_drv_ctx->stream_lock);
+ return retval;
+ }
+
if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
sst_drv_ctx->am_cnt++;
- pr_debug("sst: AM handle opened...\n");
+ pr_debug("AM handle opened...\n");
file_ptr->private_data = NULL;
- } else
+ } else {
retval = -EACCES;
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+ }
mutex_unlock(&sst_drv_ctx->stream_lock);
return retval;
@@ -166,10 +185,11 @@ int intel_sst_release(struct inode *i_node, struct file *file_ptr)
{
struct ioctl_pvt_data *data = file_ptr->private_data;
- pr_debug("sst: Release called, closing app handle\n");
+ pr_debug("Release called, closing app handle\n");
mutex_lock(&sst_drv_ctx->stream_lock);
sst_drv_ctx->encoded_cnt--;
sst_drv_ctx->stream_cnt--;
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
mutex_unlock(&sst_drv_ctx->stream_lock);
free_stream_context(data->str_id);
kfree(data);
@@ -181,8 +201,9 @@ int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
/* audio manager close */
mutex_lock(&sst_drv_ctx->stream_lock);
sst_drv_ctx->am_cnt--;
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("sst: AM handle closed\n");
+ pr_debug("AM handle closed\n");
return 0;
}
@@ -208,7 +229,7 @@ int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
return -EINVAL;
length = vma->vm_end - vma->vm_start;
- pr_debug("sst: called for stream %d length 0x%x\n", str_id, length);
+ pr_debug("called for stream %d length 0x%x\n", str_id, length);
if (length > sst_drv_ctx->mmap_len)
return -ENOMEM;
@@ -231,7 +252,7 @@ int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
else
sst_drv_ctx->streams[str_id].mmapped = true;
- pr_debug("sst: mmap ret 0x%x\n", retval);
+ pr_debug("mmap ret 0x%x\n", retval);
return retval;
}
@@ -245,7 +266,7 @@ static int intel_sst_mmap_play_capture(u32 str_id,
struct snd_sst_mmap_buff_entry *buf_entry;
struct snd_sst_mmap_buff_entry *tmp_buf;
- pr_debug("sst:called for str_id %d\n", str_id);
+ pr_debug("called for str_id %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return -EINVAL;
@@ -270,7 +291,7 @@ static int intel_sst_mmap_play_capture(u32 str_id,
goto out_free;
}
- pr_debug("sst:new buffers count %d status %d\n",
+ pr_debug("new buffers count %d status %d\n",
mmap_buf->entries, stream->status);
buf_entry = tmp_buf;
for (i = 0; i < mmap_buf->entries; i++) {
@@ -300,14 +321,14 @@ static int intel_sst_mmap_play_capture(u32 str_id,
stream->status = STREAM_RUNNING;
if (stream->ops == STREAM_OPS_PLAYBACK) {
if (sst_play_frame(str_id) < 0) {
- pr_warn("sst: play frames fail\n");
+ pr_warn("play frames fail\n");
mutex_unlock(&stream->lock);
retval = -EIO;
goto out_free;
}
} else if (stream->ops == STREAM_OPS_CAPTURE) {
if (sst_capture_frame(str_id) < 0) {
- pr_warn("sst: capture frame fail\n");
+ pr_warn("capture frame fail\n");
mutex_unlock(&stream->lock);
retval = -EIO;
goto out_free;
@@ -324,7 +345,7 @@ static int intel_sst_mmap_play_capture(u32 str_id,
if (retval >= 0)
retval = stream->cumm_bytes;
- pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+ pr_debug("end of play/rec ioctl bytes = %d!!\n", retval);
out_free:
kfree(tmp_buf);
@@ -349,7 +370,7 @@ static int intel_sst_play_capture(struct stream_info *stream, int str_id)
if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
/* stream is not started yet */
- pr_debug("sst: Stream isn't in started state %d, prev %d\n",
+ pr_debug("Stream isn't in started state %d, prev %d\n",
stream->status, stream->prev);
} else if ((stream->status == STREAM_RUNNING ||
stream->status == STREAM_PAUSED) &&
@@ -358,13 +379,13 @@ static int intel_sst_play_capture(struct stream_info *stream, int str_id)
if (stream->ops == STREAM_OPS_PLAYBACK ||
stream->ops == STREAM_OPS_PLAYBACK_DRM) {
if (sst_play_frame(str_id) < 0) {
- pr_warn("sst: play frames failed\n");
+ pr_warn("play frames failed\n");
mutex_unlock(&stream->lock);
return -EIO;
}
} else if (stream->ops == STREAM_OPS_CAPTURE) {
if (sst_capture_frame(str_id) < 0) {
- pr_warn("sst: capture frames failed\n ");
+ pr_warn("capture frames failed\n");
mutex_unlock(&stream->lock);
return -EIO;
}
@@ -379,7 +400,7 @@ static int intel_sst_play_capture(struct stream_info *stream, int str_id)
retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
if (retval) {
stream->status = STREAM_INIT;
- pr_debug("sst: wait returned error...\n");
+ pr_debug("wait returned error...\n");
}
return retval;
}
@@ -477,7 +498,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
if (((unsigned long)iovec[index].iov_base
+ iovec[index].iov_len) <
((unsigned long)iovec[index].iov_base)) {
- pr_debug("sst: Buffer overflows");
+ pr_debug("Buffer overflows\n");
kfree(stream_bufs);
return -EINVAL;
}
@@ -490,7 +511,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
}
copied_size += size;
- pr_debug("sst: copied_size - %lx\n", copied_size);
+ pr_debug("copied_size - %lx\n", copied_size);
if ((copied_size >= mmap_len) ||
(stream->sg_index == nr_segs)) {
add_to_list = 1;
@@ -520,7 +541,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
int retval = 0;
/* copy sent buffers */
- pr_debug("sst: capture stream copying to user now...\n");
+ pr_debug("capture stream copying to user now...\n");
list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
if (kbufs->in_use == true) {
/* copy to user */
@@ -538,7 +559,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
}
}
}
- pr_debug("sst: end of cap copy\n");
+ pr_debug("end of cap copy\n");
return retval;
}
@@ -590,7 +611,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
return -EINVAL;
stream = &sst_drv_ctx->streams[str_id];
if (stream->mmapped == true) {
- pr_warn("sst: user write and stream is mapped");
+ pr_warn("user write and stream is mapped\n");
return -EIO;
}
if (!count)
@@ -598,7 +619,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
stream->curr_bytes = 0;
stream->cumm_bytes = 0;
/* copy user buf details */
- pr_debug("sst: new buffers %p, copy size %d, status %d\n" ,
+ pr_debug("new buffers %p, copy size %d, status %d\n" ,
buf, (int) count, (int) stream->status);
stream->buf_type = SST_BUF_USER_STATIC;
@@ -618,7 +639,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
stream->cur_ptr = NULL;
if (retval >= 0)
retval = stream->cumm_bytes;
- pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+ pr_debug("end of play/rec bytes = %d!!\n", retval);
return retval;
}
@@ -639,7 +660,7 @@ int intel_sst_write(struct file *file_ptr, const char __user *buf,
int str_id = data->str_id;
struct stream_info *stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst: called for %d\n", str_id);
+ pr_debug("called for %d\n", str_id);
if (stream->status == STREAM_UN_INIT ||
stream->status == STREAM_DECODE) {
return -EBADRQC;
@@ -665,12 +686,12 @@ ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
int str_id = data->str_id;
struct stream_info *stream;
- pr_debug("sst: entry - %ld\n", nr_segs);
+ pr_debug("entry - %ld\n", nr_segs);
if (is_sync_kiocb(kiocb) == false)
return -EINVAL;
- pr_debug("sst: called for str_id %d\n", str_id);
+ pr_debug("called for str_id %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return -EINVAL;
@@ -683,7 +704,7 @@ ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
}
stream->curr_bytes = 0;
stream->cumm_bytes = 0;
- pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
+ pr_debug("new segs %ld, offset %d, status %d\n" ,
nr_segs, (int) offset, (int) stream->status);
stream->buf_type = SST_BUF_USER_STATIC;
do {
@@ -698,7 +719,7 @@ ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
stream->cur_ptr = NULL;
if (retval >= 0)
retval = stream->cumm_bytes;
- pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+ pr_debug("end of play/rec bytes = %d!!\n", retval);
return retval;
}
@@ -719,7 +740,7 @@ int intel_sst_read(struct file *file_ptr, char __user *buf,
int str_id = data->str_id;
struct stream_info *stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst: called for %d\n", str_id);
+ pr_debug("called for %d\n", str_id);
if (stream->status == STREAM_UN_INIT ||
stream->status == STREAM_DECODE)
return -EBADRQC;
@@ -744,14 +765,14 @@ ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
int str_id = data->str_id;
struct stream_info *stream;
- pr_debug("sst: entry - %ld\n", nr_segs);
+ pr_debug("entry - %ld\n", nr_segs);
if (is_sync_kiocb(kiocb) == false) {
- pr_debug("sst: aio_read from user space is not allowed\n");
+ pr_debug("aio_read from user space is not allowed\n");
return -EINVAL;
}
- pr_debug("sst: called for str_id %d\n", str_id);
+ pr_debug("called for str_id %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return -EINVAL;
@@ -764,7 +785,7 @@ ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
stream->curr_bytes = 0;
stream->cumm_bytes = 0;
- pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
+ pr_debug("new segs %ld, offset %d, status %d\n" ,
nr_segs, (int) offset, (int) stream->status);
stream->buf_type = SST_BUF_USER_STATIC;
do {
@@ -779,34 +800,169 @@ ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
stream->cur_ptr = NULL;
if (retval >= 0)
retval = stream->cumm_bytes;
- pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+ pr_debug("end of play/rec bytes = %d!!\n", retval);
return retval;
}
/* sst_print_stream_params - prints the stream parameters (debug fn)*/
static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
{
- pr_debug("sst: codec params:result =%d\n",
+ pr_debug("codec params:result = %d\n",
get_prm->codec_params.result);
- pr_debug("sst: codec params:stream = %d\n",
+ pr_debug("codec params:stream = %d\n",
get_prm->codec_params.stream_id);
- pr_debug("sst: codec params:codec = %d\n",
+ pr_debug("codec params:codec = %d\n",
get_prm->codec_params.codec);
- pr_debug("sst: codec params:ops = %d\n",
+ pr_debug("codec params:ops = %d\n",
get_prm->codec_params.ops);
- pr_debug("sst: codec params:stream_type= %d\n",
+ pr_debug("codec params:stream_type = %d\n",
get_prm->codec_params.stream_type);
- pr_debug("sst: pcmparams:sfreq= %d\n",
+ pr_debug("pcmparams:sfreq = %d\n",
get_prm->pcm_params.sfreq);
- pr_debug("sst: pcmparams:num_chan= %d\n",
+ pr_debug("pcmparams:num_chan = %d\n",
get_prm->pcm_params.num_chan);
- pr_debug("sst: pcmparams:pcm_wd_sz= %d\n",
+ pr_debug("pcmparams:pcm_wd_sz = %d\n",
get_prm->pcm_params.pcm_wd_sz);
return;
}
/**
- * intel_sst_ioctl - recieves the device ioctl's
+ * sst_create_algo_ipc - create ipc msg for algorithm parameters
+ *
+ * @algo_params: Algorithm parameters
+ * @msg: post msg pointer
+ *
+ * This function is called to create the IPC message carrying the algorithm parameters
+ */
+int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
+ struct ipc_post **msg)
+{
+ if (sst_create_large_msg(msg))
+ return -ENOMEM;
+ sst_fill_header(&(*msg)->header,
+ IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
+ (*msg)->header.part.data = sizeof(u32) +
+ sizeof(*algo_params) + algo_params->size;
+ memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
+ memcpy((*msg)->mailbox_data + sizeof(u32),
+ algo_params, sizeof(*algo_params));
+ return 0;
+}
+
+/**
+ * sst_send_algo_ipc - send ipc msg for algorithm parameters
+ *
+ * @msg: post msg pointer
+ *
+ * This function is called to send the IPC message and wait for the firmware response
+ */
+int sst_send_algo_ipc(struct ipc_post **msg)
+{
+ sst_drv_ctx->ppp_params_blk.condition = false;
+ sst_drv_ctx->ppp_params_blk.ret_code = 0;
+ sst_drv_ctx->ppp_params_blk.on = true;
+ sst_drv_ctx->ppp_params_blk.data = NULL;
+ spin_lock(&sst_drv_ctx->list_spin_lock);
+ list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list);
+ spin_unlock(&sst_drv_ctx->list_spin_lock);
+ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
+ return sst_wait_interruptible_timeout(sst_drv_ctx,
+ &sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT);
+}
+
+/**
+ * intel_sst_ioctl_dsp - receives the device DSP ioctls
+ *
+ * @cmd:Ioctl cmd
+ * @arg:data
+ *
+ * This function is called when a user space component
+ * sends a DSP ioctl to the SST driver
+ */
+long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg)
+{
+ int retval = 0;
+ struct snd_ppp_params algo_params;
+ struct snd_ppp_params *algo_params_copied;
+ struct ipc_post *msg;
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_SST_SET_ALGO):
+ if (copy_from_user(&algo_params, (void __user *)arg,
+ sizeof(algo_params)))
+ return -EFAULT;
+ if (algo_params.size > SST_MAILBOX_SIZE)
+ return -EMSGSIZE;
+
+ pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+ algo_params.algo_id, algo_params.str_id,
+ algo_params.enable, algo_params.size);
+ retval = sst_create_algo_ipc(&algo_params, &msg);
+ if (retval)
+ break;
+ algo_params.reserved = 0;
+ if (copy_from_user(msg->mailbox_data + sizeof(algo_params),
+ algo_params.params, algo_params.size))
+ return -EFAULT;
+
+ retval = sst_send_algo_ipc(&msg);
+ if (retval) {
+ pr_debug("Error in sst_set_algo = %d\n", retval);
+ retval = -EIO;
+ }
+ break;
+
+ case _IOC_NR(SNDRV_SST_GET_ALGO):
+ if (copy_from_user(&algo_params, (void __user *)arg,
+ sizeof(algo_params)))
+ return -EFAULT;
+ pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+ algo_params.algo_id, algo_params.str_id,
+ algo_params.enable, algo_params.size);
+ retval = sst_create_algo_ipc(&algo_params, &msg);
+ if (retval)
+ break;
+ algo_params.reserved = 1;
+ retval = sst_send_algo_ipc(&msg);
+ if (retval) {
+ pr_debug("Error in sst_get_algo = %d\n", retval);
+ retval = -EIO;
+ break;
+ }
+ algo_params_copied = (struct snd_ppp_params *)
+ sst_drv_ctx->ppp_params_blk.data;
+ if (algo_params_copied->size > algo_params.size) {
+ pr_debug("mem insufficient to copy\n");
+ retval = -EMSGSIZE;
+ goto free_mem;
+ } else {
+ char __user *tmp;
+
+ if (copy_to_user(algo_params.params,
+ algo_params_copied->params,
+ algo_params_copied->size)) {
+ retval = -EFAULT;
+ goto free_mem;
+ }
+ tmp = (char __user *)arg + offsetof(
+ struct snd_ppp_params, size);
+ if (copy_to_user(tmp, &algo_params_copied->size,
+ sizeof(__u32))) {
+ retval = -EFAULT;
+ goto free_mem;
+ }
+
+ }
+free_mem:
+ kfree(algo_params_copied->params);
+ kfree(algo_params_copied);
+ break;
+ }
+ return retval;
+}
+
+/**
+ * intel_sst_ioctl - receives the device ioctls
* @file_ptr:pointer to file
* @cmd:Ioctl cmd
* @arg:data
@@ -832,7 +988,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
switch (_IOC_NR(cmd)) {
case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
- pr_debug("sst: IOCTL_PAUSE recieved for %d!\n", str_id);
+ pr_debug("IOCTL_PAUSE received for %d!\n", str_id);
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -841,7 +997,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
break;
case _IOC_NR(SNDRV_SST_STREAM_RESUME):
- pr_debug("sst: SNDRV_SST_IOCTL_RESUME recieved!\n");
+ pr_debug("SNDRV_SST_IOCTL_RESUME received!\n");
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -852,7 +1008,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
struct snd_sst_params str_param;
- pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
+ pr_debug("IOCTL_SET_PARAMS received!\n");
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -884,7 +1040,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
retval = -EINVAL;
}
} else {
- pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
+ pr_debug("SET_STREAM_PARAMS received!\n");
/* allocated set params only */
retval = sst_set_stream_param(str_id, &str_param);
/* Block the call for reply */
@@ -907,14 +1063,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
if (copy_from_user(&set_vol, (void __user *)arg,
sizeof(set_vol))) {
- pr_debug("sst: copy failed\n");
+ pr_debug("copy failed\n");
retval = -EFAULT;
break;
}
- pr_debug("sst: SET_VOLUME recieved for %d!\n",
+ pr_debug("SET_VOLUME recieved for %d!\n",
set_vol.stream_id);
if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
- pr_debug("sst: invalid operation!\n");
+ pr_debug("invalid operation!\n");
retval = -EPERM;
break;
}
@@ -929,10 +1085,10 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
+ pr_debug("IOCTL_GET_VOLUME recieved for stream = %d!\n",
get_vol.stream_id);
if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
- pr_debug("sst: invalid operation!\n");
+ pr_debug("invalid operation!\n");
retval = -EPERM;
break;
}
@@ -941,7 +1097,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
retval = -EIO;
break;
}
- pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
+ pr_debug("id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
get_vol.stream_id, get_vol.volume,
get_vol.ramp_duration, get_vol.ramp_type);
if (copy_to_user((struct snd_sst_vol __user *)arg,
@@ -961,7 +1117,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
+ pr_debug("SNDRV_SST_SET_VOLUME recieved for %d!\n",
set_mute.stream_id);
if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
retval = -EPERM;
@@ -973,7 +1129,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
struct snd_sst_get_stream_params get_params;
- pr_debug("sst: IOCTL_GET_PARAMS recieved!\n");
+ pr_debug("IOCTL_GET_PARAMS received!\n");
if (minor != 0) {
retval = -EBADRQC;
break;
@@ -997,7 +1153,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
struct snd_sst_mmap_buffs mmap_buf;
- pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
+ pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -1011,7 +1167,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
break;
}
case _IOC_NR(SNDRV_SST_STREAM_DROP):
- pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
+ pr_debug("SNDRV_SST_IOCTL_DROP received!\n");
if (minor != STREAM_MODULE) {
retval = -EINVAL;
break;
@@ -1023,7 +1179,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
struct snd_sst_tstamp tstamp = {0};
unsigned long long time, freq, mod;
- pr_debug("sst: SNDRV_SST_STREAM_GET_TSTAMP recieved!\n");
+ pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n");
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -1044,7 +1200,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_STREAM_START):{
struct stream_info *stream;
- pr_debug("sst: SNDRV_SST_STREAM_START recieved!\n");
+ pr_debug("SNDRV_SST_STREAM_START received!\n");
if (minor != STREAM_MODULE) {
retval = -EINVAL;
break;
@@ -1083,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
struct snd_sst_target_device target_device;
- pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
+ pr_debug("SET_TARGET_DEVICE recieved!\n");
if (copy_from_user(&target_device, (void __user *)arg,
sizeof(target_device))) {
retval = -EFAULT;
@@ -1100,7 +1256,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
struct snd_sst_driver_info info;
- pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
+ pr_debug("SNDRV_SST_DRIVER_INFO recived\n");
info.version = SST_VERSION_NUM;
/* hard coding, shud get sumhow later */
info.active_pcm_streams = sst_drv_ctx->stream_cnt -
@@ -1122,7 +1278,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
char __user *dest;
- pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
+ pr_debug("SNDRV_SST_STREAM_DECODE received\n");
if (minor != STREAM_MODULE) {
retval = -EBADRQC;
break;
@@ -1197,7 +1353,7 @@ free_iobufs:
}
case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
- pr_debug("sst: SNDRV_SST_STREAM_DRAIN recived\n");
+ pr_debug("SNDRV_SST_STREAM_DRAIN received\n");
if (minor != STREAM_MODULE) {
retval = -EINVAL;
break;
@@ -1209,7 +1365,7 @@ free_iobufs:
unsigned long long __user *bytes = (unsigned long long __user *)arg;
struct snd_sst_tstamp tstamp = {0};
- pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
+ pr_debug("STREAM_BYTES_DECODED received!\n");
if (minor != STREAM_MODULE) {
retval = -EINVAL;
break;
@@ -1225,7 +1381,7 @@ free_iobufs:
case _IOC_NR(SNDRV_SST_FW_INFO): {
struct snd_sst_fw_info *fw_info;
- pr_debug("sst: SNDRV_SST_FW_INFO recived\n");
+ pr_debug("SNDRV_SST_FW_INFO received\n");
fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
if (!fw_info) {
@@ -1248,10 +1404,18 @@ free_iobufs:
kfree(fw_info);
break;
}
+ case _IOC_NR(SNDRV_SST_GET_ALGO):
+ case _IOC_NR(SNDRV_SST_SET_ALGO):
+ if (minor != AM_MODULE) {
+ retval = -EBADRQC;
+ break;
+ }
+ retval = intel_sst_ioctl_dsp(cmd, arg);
+ break;
default:
retval = -EINVAL;
}
- pr_debug("sst: intel_sst_ioctl:complete ret code = %d\n", retval);
+ pr_debug("intel_sst_ioctl:complete ret code = %d\n", retval);
return retval;
}
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index bf0ead78bfae..0a60e865b696 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -28,15 +28,15 @@
* Common private declarations for SST
*/
-#define SST_DRIVER_VERSION "1.2.05"
-#define SST_VERSION_NUM 0x1205
+#define SST_DRIVER_VERSION "1.2.09"
+#define SST_VERSION_NUM 0x1209
/* driver names */
#define SST_DRV_NAME "intel_sst_driver"
-#define SST_FW_FILENAME_MRST "fw_sst_080a.bin"
-#define SST_FW_FILENAME_MFLD "fw_sst_082f.bin"
#define SST_MRST_PCI_ID 0x080A
#define SST_MFLD_PCI_ID 0x082F
+#define PCI_ID_LENGTH 4
+#define SST_SUSPEND_DELAY 2000
enum sst_states {
SST_FW_LOADED = 1,
@@ -392,7 +392,7 @@ struct intel_sst_drv {
struct stream_info streams[MAX_NUM_STREAMS];
struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
- struct sst_block tgt_dev_blk, fw_info_blk,
+ struct sst_block tgt_dev_blk, fw_info_blk, ppp_params_blk,
vol_info_blk, mute_info_blk, hs_info_blk;
struct mutex list_lock;/* mutex for IPC list locking */
spinlock_t list_spin_lock; /* mutex for IPC list locking */
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
index 669e298016f2..ea8e251b5099 100644
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_drv_interface.c
@@ -26,10 +26,13 @@
* Upper layer interfaces (MAD driver, MMF) to SST driver
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
#include "intel_sst_fw_ipc.h"
@@ -45,17 +48,18 @@ int sst_download_fw(void)
{
int retval;
const struct firmware *fw_sst;
- const char *name;
+ char name[20];
+
if (sst_drv_ctx->sst_state != SST_UN_INIT)
return -EPERM;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- name = SST_FW_FILENAME_MRST;
- else
- name = SST_FW_FILENAME_MFLD;
- pr_debug("sst: Downloading %s FW now...\n", name);
+
+ snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
+ sst_drv_ctx->pci_id, ".bin");
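+ /* e.g. pci_id 0x080A yields "fw_sst_080a.bin", 0x082F yields "fw_sst_082f.bin" */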
+
+ pr_debug("Downloading %s FW now...\n", name);
retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
if (retval) {
- pr_err("sst: request fw failed %d\n", retval);
+ pr_err("request fw failed %d\n", retval);
return retval;
}
sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
@@ -66,7 +70,7 @@ int sst_download_fw(void)
retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
if (retval)
- pr_err("sst: fw download failed %d\n" , retval);
+ pr_err("fw download failed %d\n" , retval);
end_restore:
release_firmware(fw_sst);
sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
@@ -90,7 +94,7 @@ int sst_stalled(void)
retry--;
}
- pr_debug("sst: in Stalled State\n");
+ pr_debug("in Stalled State\n");
return retval;
}
@@ -138,23 +142,23 @@ int sst_get_stream_allocated(struct snd_sst_params *str_param,
retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
str_param->codec, str_param->device_type);
if (retval < 0) {
- pr_err("sst: sst_alloc_stream failed %d\n", retval);
+ pr_err("sst_alloc_stream failed %d\n", retval);
return retval;
}
- pr_debug("sst: Stream allocated %d\n", retval);
+ pr_debug("Stream allocated %d\n", retval);
str_id = retval;
str_info = &sst_drv_ctx->streams[str_id];
/* Block the call for reply */
retval = sst_wait_interruptible_timeout(sst_drv_ctx,
&str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
- pr_debug("sst: FW alloc failed retval %d, ret_code %d\n",
+ pr_debug("FW alloc failed retval %d, ret_code %d\n",
retval, str_info->ctrl_blk.ret_code);
str_id = -str_info->ctrl_blk.ret_code; /*return error*/
*lib_dnld = str_info->ctrl_blk.data;
sst_clean_stream(str_info);
} else
- pr_debug("sst: FW Stream allocated sucess\n");
+ pr_debug("FW Stream allocated success\n");
return str_id; /*will ret either error (in above if) or correct str id*/
}
@@ -171,9 +175,9 @@ static int sst_get_sfreq(struct snd_sst_params *str_param)
case SST_CODEC_TYPE_MP3:
return str_param->sparams.uc.mp3_params.sfreq;
case SST_CODEC_TYPE_AAC:
- return str_param->sparams.uc.aac_params.sfreq;;
+ return str_param->sparams.uc.aac_params.sfreq;
case SST_CODEC_TYPE_WMA9:
- return str_param->sparams.uc.wma_params.sfreq;;
+ return str_param->sparams.uc.wma_params.sfreq;
default:
return 0;
}
@@ -196,14 +200,14 @@ int sst_get_stream(struct snd_sst_params *str_param)
/* codec download is required */
struct snd_sst_alloc_response *response;
- pr_debug("sst: Codec is required.... trying that\n");
+ pr_debug("Codec is required.... trying that\n");
if (lib_dnld == NULL) {
- pr_err("sst: lib download null!!! abort\n");
+ pr_err("lib download null!!! abort\n");
return -EIO;
}
i = sst_get_block_stream(sst_drv_ctx);
response = sst_drv_ctx->alloc_block[i].ops_block.data;
- pr_debug("sst: alloc block allocated = %d\n", i);
+ pr_debug("alloc block allocated = %d\n", i);
if (i < 0) {
kfree(lib_dnld);
return -ENOMEM;
@@ -213,15 +217,15 @@ int sst_get_stream(struct snd_sst_params *str_param)
sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
if (!retval) {
- pr_debug("sst: codec was downloaded sucesfully\n");
+ pr_debug("codec was downloaded successfully\n");
retval = sst_get_stream_allocated(str_param, &lib_dnld);
if (retval <= 0)
goto err;
- pr_debug("sst: Alloc done stream id %d\n", retval);
+ pr_debug("Alloc done stream id %d\n", retval);
} else {
- pr_debug("sst: codec download failed\n");
+ pr_debug("codec download failed\n");
retval = -EIO;
goto err;
}
@@ -279,97 +283,138 @@ void sst_process_mad_ops(struct work_struct *work)
retval = sst_start_stream(mad_ops->stream_id);
break;
case SST_SND_STREAM_PROCESS:
- pr_debug("sst: play/capt frames...\n");
+ pr_debug("play/capt frames...\n");
break;
default:
- pr_err("sst: wrong control_ops reported\n");
+ pr_err(" wrong control_ops reported\n");
}
return;
}
+
+void send_intial_rx_timeslot(void)
+{
+ if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
+ sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
+ && sst_drv_ctx->pmic_vendor != SND_NC)
+ sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
+}
+
/*
- * sst_control_set - Set Control params
+ * sst_open_pcm_stream - Open PCM interface
*
- * @control_list: list of controls to be set
+ * @str_param: parameters of pcm stream
*
- * This function is called by MID sound card driver to set
- * SST/Sound card controls. This is registered with MID driver
+ * This function is called by MID sound card driver to open
+ * a new pcm interface
*/
-int sst_control_set(int control_element, void *value)
+int sst_open_pcm_stream(struct snd_sst_params *str_param)
{
- int retval = 0, str_id = 0;
- struct stream_info *stream;
+ struct stream_info *str_info;
+ int retval;
+
+ pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- /*LPE is suspended, resume it before proceding*/
- pr_debug("sst: Resuming from Suspended state\n");
+ /* LPE is suspended, resume it before proceeding */
+ pr_debug("Resuming from Suspended state\n");
retval = intel_sst_resume(sst_drv_ctx->pci);
if (retval) {
- pr_err("sst: Resume Failed = %#x, abort\n", retval);
+ pr_err("Resume Failed = %#x, abort\n", retval);
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
return retval;
}
}
if (sst_drv_ctx->sst_state == SST_UN_INIT) {
/* FW is not downloaded */
- pr_debug("sst: DSP Downloading FW now...\n");
+ pr_debug("DSP Downloading FW now...\n");
retval = sst_download_fw();
if (retval) {
- pr_err("sst: FW download fail %x, abort\n", retval);
+ pr_err("FW download fail %x, abort\n", retval);
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
return retval;
}
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
- sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(
- sst_drv_ctx->rx_time_slot_status);
+ send_intial_rx_timeslot();
}
- switch (control_element) {
- case SST_SND_ALLOC: {
- struct snd_sst_params *str_param;
- struct stream_info *str_info;
+ if (!str_param) {
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+ return -EINVAL;
+ }
- str_param = (struct snd_sst_params *)value;
- BUG_ON(!str_param);
- retval = sst_get_stream(str_param);
- if (retval >= 0)
- sst_drv_ctx->stream_cnt++;
+ retval = sst_get_stream(str_param);
+ if (retval > 0) {
+ sst_drv_ctx->stream_cnt++;
str_info = &sst_drv_ctx->streams[retval];
str_info->src = MAD_DRV;
- break;
- }
+ } else
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+
+ return retval;
+}
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface
+ */
+int sst_close_pcm_stream(unsigned int str_id)
+{
+ struct stream_info *stream;
+
+ pr_debug("sst: stream free called\n");
+ if (sst_validate_strid(str_id))
+ return -EINVAL;
+ stream = &sst_drv_ctx->streams[str_id];
+ free_stream_context(str_id);
+ stream->pcm_substream = NULL;
+ stream->status = STREAM_UN_INIT;
+ stream->period_elapsed = NULL;
+ sst_drv_ctx->stream_cnt--;
+ pr_debug("sst: will call runtime put now\n");
+ pm_runtime_put(&sst_drv_ctx->pci->dev);
+ return 0;
+}
+
+/*
+ * sst_device_control - Set Control params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to set
+ * SST/Sound card controls for an opened stream.
+ * This is registered with MID driver
+ */
+int sst_device_control(int cmd, void *arg)
+{
+ int retval = 0, str_id = 0;
+
+ switch (cmd) {
case SST_SND_PAUSE:
case SST_SND_RESUME:
case SST_SND_DROP:
case SST_SND_START:
- sst_drv_ctx->mad_ops.control_op = control_element;
- sst_drv_ctx->mad_ops.stream_id = *(int *)value;
+ sst_drv_ctx->mad_ops.control_op = cmd;
+ sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
break;
- case SST_SND_FREE:
- str_id = *(int *)value;
- stream = &sst_drv_ctx->streams[str_id];
- free_stream_context(str_id);
- stream->pcm_substream = NULL;
- stream->status = STREAM_UN_INIT;
- stream->period_elapsed = NULL;
- sst_drv_ctx->stream_cnt--;
- break;
-
case SST_SND_STREAM_INIT: {
struct pcm_stream_info *str_info;
struct stream_info *stream;
- pr_debug("sst: stream init called\n");
- str_info = (struct pcm_stream_info *)value;
+ pr_debug("stream init called\n");
+ str_info = (struct pcm_stream_info *)arg;
str_id = str_info->str_id;
retval = sst_validate_strid(str_id);
if (retval)
break;
stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst: setting the period ptrs\n");
+ pr_debug("setting the period ptrs\n");
stream->pcm_substream = str_info->mad_substream;
stream->period_elapsed = str_info->period_elapsed;
stream->sfreq = str_info->sfreq;
@@ -384,7 +429,7 @@ int sst_control_set(int control_element, void *value)
struct stream_info *stream;
- stream_info = (struct pcm_stream_info *)value;
+ stream_info = (struct pcm_stream_info *)arg;
str_id = stream_info->str_id;
retval = sst_validate_strid(str_id);
if (retval)
@@ -398,26 +443,26 @@ int sst_control_set(int control_element, void *value)
+(str_id * sizeof(fw_tstamp))),
sizeof(fw_tstamp));
- pr_debug("sst: Pointer Query on strid = %d ops %d\n",
+ pr_debug("Pointer Query on strid = %d ops %d\n",
str_id, stream->ops);
if (stream->ops == STREAM_OPS_PLAYBACK)
stream_info->buffer_ptr = fw_tstamp.samples_rendered;
else
stream_info->buffer_ptr = fw_tstamp.samples_processed;
- pr_debug("sst: Samples rendered = %llu, buffer ptr %llu\n",
+ pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
fw_tstamp.samples_rendered, stream_info->buffer_ptr);
break;
}
case SST_ENABLE_RX_TIME_SLOT: {
- int status = *(int *)value;
+ int status = *(int *)arg;
sst_drv_ctx->rx_time_slot_status = status ;
sst_enable_rx_timeslot(status);
break;
}
default:
/* Illegal case */
- pr_warn("sst: illegal req\n");
+ pr_warn("illegal req\n");
return -EINVAL;
}
@@ -425,8 +470,14 @@ int sst_control_set(int control_element, void *value)
}
+struct intel_sst_pcm_control pcm_ops = {
+ .open = sst_open_pcm_stream,
+ .device_control = sst_device_control,
+ .close = sst_close_pcm_stream,
+};
+
struct intel_sst_card_ops sst_pmic_ops = {
- .control_set = sst_control_set,
+ .pcm_control = &pcm_ops,
};
/*
@@ -439,12 +490,12 @@ struct intel_sst_card_ops sst_pmic_ops = {
int register_sst_card(struct intel_sst_card_ops *card)
{
if (!sst_drv_ctx) {
- pr_err("sst: No SST driver register card reject\n");
+ pr_err("No SST driver register card reject\n");
return -ENODEV;
}
if (!card || !card->module_name) {
- pr_err("sst: Null Pointer Passed\n");
+ pr_err("Null Pointer Passed\n");
return -EINVAL;
}
if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
@@ -456,17 +507,17 @@ int register_sst_card(struct intel_sst_card_ops *card)
sst_pmic_ops.module_name = card->module_name;
sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
- card->control_set = sst_pmic_ops.control_set;
+ card->pcm_control = sst_pmic_ops.pcm_control;
sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
return 0;
} else {
- pr_err("sst: strcmp fail %s\n", card->module_name);
+ pr_err("strcmp fail %s\n", card->module_name);
return -EINVAL;
}
} else {
/* already registered a driver */
- pr_err("sst: Repeat for registeration..denied\n");
+ pr_err("Repeat for registration..denied\n");
return -EBADRQC;
}
return 0;
@@ -482,11 +533,11 @@ EXPORT_SYMBOL_GPL(register_sst_card);
*/
void unregister_sst_card(struct intel_sst_card_ops *card)
{
- if (sst_pmic_ops.control_set == card->control_set) {
+ if (sst_pmic_ops.pcm_control == card->pcm_control) {
/* unreg */
sst_pmic_ops.module_name = "";
sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- pr_debug("sst: Unregistered %s\n", card->module_name);
+ pr_debug("Unregistered %s\n", card->module_name);
}
return;
}
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
index d80a6ee2deb8..6e5c9152da9f 100644
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ b/drivers/staging/intel_sst/intel_sst_dsp.c
@@ -29,6 +29,9 @@
* This file contains all dsp controlling functions like firmware download,
* setting/resetting dsp cores, etc
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
@@ -47,8 +50,9 @@ static int intel_sst_reset_dsp_mrst(void)
{
union config_status_reg csr;
- pr_debug("sst: Resetting the DSP in mrst\n");
- csr.full = 0x3a2;
+ pr_debug("Resetting the DSP in mrst\n");
+ csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+ csr.full |= 0x382;
sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
csr.part.strb_cntr_rst = 0;
@@ -68,7 +72,7 @@ static int intel_sst_reset_dsp_medfield(void)
{
union config_status_reg csr;
- pr_debug("sst: Resetting the DSP in medfield\n");
+ pr_debug("Resetting the DSP in medfield\n");
csr.full = 0x048303E2;
sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
@@ -90,7 +94,7 @@ static int sst_start_mrst(void)
csr.part.run_stall = 0;
csr.part.sst_reset = 0;
csr.part.strb_cntr_rst = 1;
- pr_debug("sst: Setting SST to execute_mrst 0x%x\n", csr.full);
+ pr_debug("Setting SST to execute_mrst 0x%x\n", csr.full);
sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
return 0;
@@ -111,7 +115,7 @@ static int sst_start_medfield(void)
sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
csr.full = 0x04830061;
sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- pr_debug("sst: Starting the DSP_medfld\n");
+ pr_debug("Starting the DSP_medfld\n");
return 0;
}
@@ -130,16 +134,16 @@ static int sst_parse_module(struct fw_module_header *module)
u32 count;
void __iomem *ram;
- pr_debug("sst: module sign %s size %x blocks %x type %x\n",
+ pr_debug("module sign %s size %x blocks %x type %x\n",
module->signature, module->mod_size,
module->blocks, module->type);
- pr_debug("sst: module entrypoint 0x%x\n", module->entry_point);
+ pr_debug("module entrypoint 0x%x\n", module->entry_point);
block = (void *)module + sizeof(*module);
for (count = 0; count < module->blocks; count++) {
if (block->size <= 0) {
- pr_err("sst: block size invalid\n");
+ pr_err("block size invalid\n");
return -EINVAL;
}
switch (block->type) {
@@ -150,7 +154,7 @@ static int sst_parse_module(struct fw_module_header *module)
ram = sst_drv_ctx->dram;
break;
default:
- pr_err("sst: wrong ram type0x%x in block0x%x\n",
+ pr_err("wrong ram type0x%x in block0x%x\n",
block->type, count);
return -EINVAL;
}
@@ -184,10 +188,10 @@ static int sst_parse_fw_image(const struct firmware *sst_fw)
if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
(sst_fw->size != header->file_size + sizeof(*header))) {
/* Invalid FW signature */
- pr_err("sst: InvalidFW sign/filesize mismatch\n");
+ pr_err("Invalid FW sign/filesize mismatch\n");
return -EINVAL;
}
- pr_debug("sst: header sign=%s size=%x modules=%x fmt=%x size=%x\n",
+ pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%x\n",
header->signature, header->file_size, header->modules,
header->file_format, sizeof(*header));
module = (void *)sst_fw->data + sizeof(*header);
@@ -214,7 +218,7 @@ int sst_load_fw(const struct firmware *fw, void *context)
{
int ret_val;
- pr_debug("sst: load_fw called\n");
+ pr_debug("load_fw called\n");
BUG_ON(!fw);
if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
@@ -239,7 +243,7 @@ int sst_load_fw(const struct firmware *fw, void *context)
if (ret_val)
return ret_val;
- pr_debug("sst: fw loaded successful!!!\n");
+ pr_debug("fw loaded successful!!!\n");
return ret_val;
}
@@ -261,7 +265,7 @@ static int sst_download_library(const struct firmware *fw_lib,
pvt_id = sst_assign_pvt_id(sst_drv_ctx);
i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("sst: alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
+ pr_debug("alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
if (i < 0) {
kfree(msg);
return -ENOMEM;
@@ -281,11 +285,11 @@ static int sst_download_library(const struct firmware *fw_lib,
if (retval) {
/* error */
sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_err("sst: Prep codec downloaded failed %d\n",
+ pr_err("Prep codec downloaded failed %d\n",
retval);
return -EIO;
}
- pr_debug("sst: FW responded, ready for download now...\n");
+ pr_debug("FW responded, ready for download now...\n");
/* downloading on success */
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_FW_LOADED;
@@ -325,7 +329,7 @@ static int sst_download_library(const struct firmware *fw_lib,
list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
spin_unlock(&sst_drv_ctx->list_spin_lock);
sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("sst: Waiting for FW response Download complete\n");
+ pr_debug("Waiting for FW response Download complete\n");
sst_drv_ctx->alloc_block[i].ops_block.condition = false;
retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
if (retval) {
@@ -337,7 +341,7 @@ static int sst_download_library(const struct firmware *fw_lib,
return -EIO;
}
- pr_debug("sst: FW sucess on Download complete\n");
+ pr_debug("FW success on Download complete\n");
sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_FW_RUNNING;
@@ -360,14 +364,14 @@ static int sst_validate_library(const struct firmware *fw_lib,
header = (struct fw_header *)fw_lib->data;
if (header->modules != 1) {
- pr_err("sst: Module no mismatch found\n ");
+ pr_err("Module no mismatch found\n");
err = -EINVAL;
goto exit;
}
module = (void *)fw_lib->data + sizeof(*header);
*entry_point = module->entry_point;
- pr_debug("sst: Module entry point 0x%x\n", *entry_point);
- pr_debug("sst: Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
+ pr_debug("Module entry point 0x%x\n", *entry_point);
+ pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
module->signature, module->mod_size,
module->blocks, module->type);
@@ -381,20 +385,20 @@ static int sst_validate_library(const struct firmware *fw_lib,
dsize += block->size;
break;
default:
- pr_err("sst: Invalid block type for 0x%x\n", n_blk);
+ pr_err("Invalid block type for 0x%x\n", n_blk);
err = -EINVAL;
goto exit;
}
block = (void *)block + sizeof(*block) + block->size;
}
if (isize > slot->iram_size || dsize > slot->dram_size) {
- pr_err("sst: library exceeds size allocated\n");
+ pr_err("library exceeds size allocated\n");
err = -EINVAL;
goto exit;
} else
- pr_debug("sst: Library is safe for download...\n");
+ pr_debug("Library is safe for download...\n");
- pr_debug("sst: iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
+ pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
isize, dsize, slot->iram_size, slot->dram_size);
exit:
return err;
@@ -414,15 +418,15 @@ int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
memset(buf, 0, sizeof(buf));
- pr_debug("sst: Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
+ pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
- pr_debug("sst: Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
+ pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
lib->lib_info.lib_version, lib->lib_info.lib_name,
lib->lib_info.lib_caps, lib->lib_info.media_type);
- pr_debug("sst: IRAM Size 0x%x, offset 0x%x\n",
+ pr_debug("IRAM Size 0x%x, offset 0x%x\n",
lib->slot_info.iram_size, lib->slot_info.iram_offset);
- pr_debug("sst: DRAM Size 0x%x, offset 0x%x\n",
+ pr_debug("DRAM Size 0x%x, offset 0x%x\n",
lib->slot_info.dram_size, lib->slot_info.dram_offset);
switch (lib->lib_info.lib_type) {
@@ -442,7 +446,7 @@ int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
type = "wma9_";
break;
default:
- pr_err("sst: Invalid codec type\n");
+ pr_err("Invalid codec type\n");
error = -EINVAL;
goto wake;
}
@@ -458,11 +462,11 @@ int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
lib->slot_info.slot_num);
len += snprintf(buf + len, sizeof(buf) - len, ".bin");
- pr_debug("sst: Requesting %s\n", buf);
+ pr_debug("Requesting %s\n", buf);
error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
if (error) {
- pr_err("sst: library load failed %d\n", error);
+ pr_err("library load failed %d\n", error);
goto wake;
}
error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
@@ -476,7 +480,7 @@ int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
goto wake_free;
/* lib is downloaded and init send alloc again */
- pr_debug("sst: Library is downloaded now...\n");
+ pr_debug("Library is downloaded now...\n");
wake_free:
/* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
release_firmware(fw_lib);
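
The "sst: " literals dropped throughout this file are made redundant by the pr_fmt() definitions this patch adds at the top of each source file (visible in the hunks for the files below): the pr_* helpers wrap every format string in pr_fmt(), so the module name is prepended automatically. A minimal sketch of the mechanism, assuming KBUILD_MODNAME expands to "intel_sst" for this driver:

	/* must be defined before the printk helpers are pulled in */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void pr_fmt_example(void)
	{
		/* logged as "intel_sst: fw loaded successful!!!" */
		pr_debug("fw loaded successful!!!\n");
	}
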
diff --git a/drivers/staging/intel_sst/intel_sst_fw_ipc.h b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
index 9d3c36807e07..8df313d10d2a 100644
--- a/drivers/staging/intel_sst/intel_sst_fw_ipc.h
+++ b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
@@ -31,6 +31,7 @@
*/
#define MAX_NUM_STREAMS_MRST 3
+#define MAX_NUM_STREAMS_MFLD 6
#define MAX_NUM_STREAMS 6
#define MAX_DBG_RW_BYTES 80
#define MAX_NUM_SCATTER_BUFFERS 8
@@ -67,6 +68,8 @@
#define IPC_IA_CAPT_VOICE 0x17
#define IPC_IA_DECODE_FRAMES 0x18
+#define IPC_IA_ALG_PARAMS 0x1A
+
/* I2L Stream config/control msgs */
#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
@@ -141,73 +144,87 @@ enum sst_error_codes {
/* Error code,response to msgId: Description */
/* Common error codes */
SST_SUCCESS = 0, /* Success */
- SST_ERR_INVALID_STREAM_ID, /* Invalid stream ID */
- SST_ERR_INVALID_MSG_ID, /* Invalid message ID */
- SST_ERR_INVALID_STREAM_OP, /* Invalid stream operation request */
- SST_ERR_INVALID_PARAMS, /* Invalid params */
- SST_ERR_INVALID_CODEC, /* Invalid codec type */
- SST_ERR_INVALID_MEDIA_TYPE, /* Invalid media type */
- SST_ERR_STREAM_ERR, /* ANY: Stream control or config or
- processing error */
+ SST_ERR_INVALID_STREAM_ID = 1,
+ SST_ERR_INVALID_MSG_ID = 2,
+ SST_ERR_INVALID_STREAM_OP = 3,
+ SST_ERR_INVALID_PARAMS = 4,
+ SST_ERR_INVALID_CODEC = 5,
+ SST_ERR_INVALID_MEDIA_TYPE = 6,
+ SST_ERR_STREAM_ERR = 7,
/* IPC specific error codes */
- SST_IPC_ERR_CALL_BACK_NOT_REGD, /* Call back for msg not regd */
- SST_IPC_ERR_STREAM_NOT_ALLOCATED, /* Stream is not allocated */
- SST_IPC_ERR_STREAM_ALLOC_FAILED, /* ALLOC:Stream alloc failed */
- SST_IPC_ERR_GET_STREAM_FAILED, /* ALLOC:Get stream id failed*/
- SST_ERR_MOD_NOT_AVAIL, /* SET/GET: Mod(AEC/AGC/ALC) not available */
- SST_ERR_MOD_DNLD_RQD, /* SET/GET: Mod(AEC/AGC/ALC) download required */
- SST_ERR_STREAM_STOPPED, /* ANY: Stream is in stopped state */
- SST_ERR_STREAM_IN_USE, /* ANY: Stream is already in use */
+ SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
+ SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
+ SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
+ SST_IPC_ERR_GET_STREAM_FAILED = 11,
+ SST_ERR_MOD_NOT_AVAIL = 12,
+ SST_ERR_MOD_DNLD_RQD = 13,
+ SST_ERR_STREAM_STOPPED = 14,
+ SST_ERR_STREAM_IN_USE = 15,
/* Capture specific error codes */
- SST_CAP_ERR_INCMPLTE_CAPTURE_MSG,/* ANY:Incomplete message */
- SST_CAP_ERR_CAPTURE_FAIL, /* ANY:Capture op failed */
- SST_CAP_ERR_GET_DDR_NEW_SGLIST,
- SST_CAP_ERR_UNDER_RUN, /* lack of input data */
- SST_CAP_ERR_OVERFLOW, /* lack of output space */
+ SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
+ SST_CAP_ERR_CAPTURE_FAIL = 17,
+ SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
+ SST_CAP_ERR_UNDER_RUN = 19,
+ SST_CAP_ERR_OVERFLOW = 20,
/* Playback specific error codes*/
- SST_PB_ERR_INCMPLTE_PLAY_MSG, /* ANY: Incomplete message */
- SST_PB_ERR_PLAY_FAIL, /* ANY: Playback operation failed */
- SST_PB_ERR_GET_DDR_NEW_SGLIST,
+ SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
+ SST_PB_ERR_PLAY_FAIL = 22,
+ SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
/* Codec manager specific error codes */
- SST_LIB_ERR_LIB_DNLD_REQUIRED, /* ALLOC: Codec download required */
- SST_LIB_ERR_LIB_NOT_SUPPORTED, /* Library is not supported */
+ SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
+ SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
/* Library manager specific error codes */
- SST_SCC_ERR_PREP_DNLD_FAILED, /* Failed to prepare for codec download */
- SST_SCC_ERR_LIB_DNLD_RES_FAILED, /* Lib download resume failed */
+ SST_SCC_ERR_PREP_DNLD_FAILED = 26,
+ SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
/* Scheduler specific error codes */
- SST_SCH_ERR_FAIL, /* REPORT: */
+ SST_SCH_ERR_FAIL = 28,
/* DMA specific error codes */
- SST_DMA_ERR_NO_CHNL_AVAILABLE, /* DMA Ch not available */
- SST_DMA_ERR_INVALID_INPUT_PARAMS, /* Invalid input params */
- SST_DMA_ERR_CHNL_ALREADY_SUSPENDED, /* Ch is suspended */
- SST_DMA_ERR_CHNL_ALREADY_STARTED, /* Ch already started */
- SST_DMA_ERR_CHNL_NOT_ENABLED, /* Ch not enabled */
- SST_DMA_ERR_TRANSFER_FAILED, /* Transfer failed */
- SST_SSP_ERR_ALREADY_ENABLED, /* REPORT: SSP already enabled */
- SST_SSP_ERR_ALREADY_DISABLED, /* REPORT: SSP already disabled */
- SST_SSP_ERR_NOT_INITIALIZED,
+ SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
+ SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
+ SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
+ SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
+ SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
+ SST_DMA_ERR_TRANSFER_FAILED = 34,
+
+ SST_SSP_ERR_ALREADY_ENABLED = 35,
+ SST_SSP_ERR_ALREADY_DISABLED = 36,
+ SST_SSP_ERR_NOT_INITIALIZED = 37,
+ SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
/* Other error codes */
- SST_ERR_MOD_INIT_FAIL, /* Firmware Module init failed */
+ SST_ERR_MOD_INIT_FAIL = 39,
/* FW init error codes */
- SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED,
- SST_RDR_ERR_ROUTE_ALREADY_STARTED,
- SST_RDR_PREP_CODEC_DNLD_FAILED,
+ SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
+ SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
+ SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
+ SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
/* Memory debug error codes */
- SST_ERR_DBG_MEM_READ_FAIL,
- SST_ERR_DBG_MEM_WRITE_FAIL,
-
- /* Decode error codes */
- SST_ERR_DEC_NEED_INPUT_BUF,
-
+ SST_ERR_DBG_MEM_READ_FAIL = 44,
+ SST_ERR_DBG_MEM_WRITE_FAIL = 45,
+ SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
+ SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
+
+ SST_ERR_BUFFER_NOT_AVAILABLE = 48,
+ SST_ERR_BUFFER_NOT_ALLOCATED = 49,
+ SST_ERR_INVALID_REGION_TYPE = 50,
+ SST_ERR_NULL_PTR = 51,
+ SST_ERR_INVALID_BUFFER_SIZE = 52,
+ SST_ERR_INVALID_BUFFER_INDEX = 53,
+
+ /* IIPC specific error codes */
+ SST_IIPC_QUEUE_FULL = 54,
+ SST_IIPC_ERR_MSG_SND_FAILED = 55,
+ SST_PB_ERR_UNDERRUN_OCCURED = 56,
+ SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
+ SST_INVALID_TIME_SLOTS = 58,
};
enum dbg_mem_data_type {
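
Replacing the implicit enumerators with explicit values pins the error-code ABI shared with the DSP firmware: the new SSP, IIPC and buffer-management codes above can be added without silently renumbering the existing ones. Purely as an illustration, not part of the patch, the fixed numbering could even be asserted at compile time:

	#include <linux/kernel.h>	/* BUILD_BUG_ON() */
	#include "intel_sst_fw_ipc.h"

	static inline void sst_check_error_abi(void)
	{
		/* values taken from the enum above; firmware replies depend on them */
		BUILD_BUG_ON(SST_ERR_STREAM_ERR != 7);
		BUILD_BUG_ON(SST_SSP_ERR_SRAM_NO_DMA_DATA != 38);
		BUILD_BUG_ON(SST_INVALID_TIME_SLOTS != 58);
	}
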
diff --git a/drivers/staging/intel_sst/intel_sst_ioctl.h b/drivers/staging/intel_sst/intel_sst_ioctl.h
index 03b931619a3e..bebc395a3c1f 100644
--- a/drivers/staging/intel_sst/intel_sst_ioctl.h
+++ b/drivers/staging/intel_sst/intel_sst_ioctl.h
@@ -190,21 +190,15 @@ struct snd_prp_params {
__u32 reserved; /* No pre-processing defined yet */
};
-struct snd_params_block {
- __u32 type; /*Type of the parameter*/
- __u32 size; /*size of the parameters in the block*/
- __u8 params[0]; /*Parameters of the algorithm*/
-};
-
/* Pre and post processing params structure */
struct snd_ppp_params {
- enum sst_algo_types algo_id;/* Post/Pre processing algorithm ID */
+ __u8 algo_id;/* Post/Pre processing algorithm ID */
__u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
__u8 enable; /* 0= disable, 1= enable*/
__u8 reserved;
__u32 size; /*Size of parameters for all blocks*/
- struct snd_params_block params[0];
-};
+ void *params;
+} __attribute__ ((packed));
struct snd_sst_postproc_info {
__u32 src_min; /* Supported SRC Min sampling freq */
@@ -431,5 +425,8 @@ struct snd_sst_dbufs {
#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
struct snd_sst_target_device *)
+/*DSP Ioctls on /dev/intel_sst_ctrl only*/
+#define SNDRV_SST_SET_ALGO _IOW('L', 0x30, struct snd_ppp_params *)
+#define SNDRV_SST_GET_ALGO _IOWR('L', 0x31, struct snd_ppp_params *)
#endif /* __INTEL_SST_IOCTL_H__ */
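
With snd_ppp_params reworked to carry a byte-sized algo_id, an explicit size and a plain params pointer, the two new ioctls give user space a direct path to the DSP's pre/post-processing algorithms. A hypothetical caller, sketched here only to show the shape of the interface (the algorithm ID and payload layout are firmware-defined placeholders):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include "intel_sst_ioctl.h"

	static int sst_set_algo_example(void *payload, __u32 payload_size)
	{
		struct snd_ppp_params ppp;
		int fd, ret;

		fd = open("/dev/intel_sst_ctrl", O_RDWR);
		if (fd < 0)
			return -1;

		memset(&ppp, 0, sizeof(ppp));
		ppp.algo_id = 1;		/* placeholder algorithm ID */
		ppp.str_id = 0;			/* stream 0 */
		ppp.enable = 1;
		ppp.size = payload_size;	/* bytes pointed to by params */
		ppp.params = payload;

		ret = ioctl(fd, SNDRV_SST_SET_ALGO, &ppp);
		close(fd);
		return ret;
	}
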
diff --git a/drivers/staging/intel_sst/intel_sst_ipc.c b/drivers/staging/intel_sst/intel_sst_ipc.c
index 39c67fa0bd0c..0742dde2685d 100644
--- a/drivers/staging/intel_sst/intel_sst_ipc.c
+++ b/drivers/staging/intel_sst/intel_sst_ipc.c
@@ -26,6 +26,8 @@
* This file defines all ipc functions
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
@@ -75,16 +77,16 @@ void sst_post_message(struct work_struct *work)
/*To check if LPE is in stalled state.*/
retval = sst_stalled();
if (retval < 0) {
- pr_err("sst: in stalled state\n");
+ pr_err("in stalled state\n");
return;
}
- pr_debug("sst: post message called\n");
+ pr_debug("post message called\n");
spin_lock(&sst_drv_ctx->list_spin_lock);
/* check list */
if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
/* list is empty, mask imr */
- pr_debug("sst: Empty msg queue... masking\n");
+ pr_debug("Empty msg queue... masking\n");
imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
imr.part.done_interrupt = 1;
/* dummy register for shim workaround */
@@ -97,7 +99,7 @@ void sst_post_message(struct work_struct *work)
header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
if (header.part.busy) {
/* busy, unmask */
- pr_debug("sst: Busy not free... unmasking\n");
+ pr_debug("Busy not free... unmasking\n");
imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
imr.part.done_interrupt = 0;
/* dummy register for shim workaround */
@@ -109,8 +111,8 @@ void sst_post_message(struct work_struct *work)
msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
struct ipc_post, node);
list_del(&msg->node);
- pr_debug("sst: Post message: header = %x\n", msg->header.full);
- pr_debug("sst: size: = %x\n", msg->header.part.data);
+ pr_debug("Post message: header = %x\n", msg->header.full);
+ pr_debug("size: = %x\n", msg->header.part.data);
if (msg->header.part.large)
memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
msg->mailbox_data, msg->header.part.data);
@@ -166,13 +168,13 @@ int process_fw_init(struct sst_ipc_msg_wq *msg)
(struct ipc_header_fw_init *)msg->mailbox;
int retval = 0;
- pr_debug("sst: *** FW Init msg came***\n");
+ pr_debug("*** FW Init msg came***\n");
if (init->result) {
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_ERROR;
mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("sst: FW Init failed, Error %x\n", init->result);
- pr_err("sst: FW Init failed, Error %x\n", init->result);
+ pr_debug("FW Init failed, Error %x\n", init->result);
+ pr_err("FW Init failed, Error %x\n", init->result);
retval = -init->result;
return retval;
}
@@ -180,12 +182,13 @@ int process_fw_init(struct sst_ipc_msg_wq *msg)
sst_send_sound_card_type();
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_FW_RUNNING;
+ sst_drv_ctx->lpe_stalled = 0;
mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("sst: FW Version %x.%x\n",
+ pr_debug("FW Version %x.%x\n",
init->fw_version.major, init->fw_version.minor);
- pr_debug("sst: Build No %x Type %x\n",
+ pr_debug("Build No %x Type %x\n",
init->fw_version.build, init->fw_version.type);
- pr_debug("sst: Build date %s Time %s\n",
+ pr_debug(" Build date %s Time %s\n",
init->build_info.date, init->build_info.time);
sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
return retval;
@@ -204,19 +207,19 @@ void sst_process_message(struct work_struct *work)
container_of(work, struct sst_ipc_msg_wq, wq);
int str_id = msg->header.part.str_id;
- pr_debug("sst: IPC process for %x\n", msg->header.full);
+ pr_debug("IPC process for %x\n", msg->header.full);
/* based on msg in list call respective handler */
switch (msg->header.part.msg_id) {
case IPC_SST_BUF_UNDER_RUN:
case IPC_SST_BUF_OVER_RUN:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
- pr_err("sst: Buffer under/overrun for%d\n",
+ pr_err("Buffer under/overrun for %d\n",
msg->header.part.str_id);
- pr_err("sst: Got Underrun & not to send data...ignore\n");
+ pr_err("Got Underrun & not to send data...ignore\n");
break;
case IPC_SST_GET_PLAY_FRAMES:
@@ -224,35 +227,35 @@ void sst_process_message(struct work_struct *work)
struct stream_info *stream ;
if (sst_validate_strid(str_id)) {
- pr_err("sst: strid %d invalid\n", str_id);
+ pr_err("strid %d invalid\n", str_id);
break;
}
/* call sst_play_frame */
stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst: sst_play_frames for %d\n",
+ pr_debug("sst_play_frames for %d\n",
msg->header.part.str_id);
mutex_lock(&sst_drv_ctx->streams[str_id].lock);
sst_play_frame(msg->header.part.str_id);
mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
break;
} else
- pr_err("sst: sst_play_frames for Penwell!!\n");
+ pr_err("sst_play_frames for Penwell!!\n");
case IPC_SST_GET_CAPT_FRAMES:
if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
struct stream_info *stream;
/* call sst_capture_frame */
if (sst_validate_strid(str_id)) {
- pr_err("sst: str id %d invalid\n", str_id);
+ pr_err("str id %d invalid\n", str_id);
break;
}
stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst: sst_capture_frames for %d\n",
+ pr_debug("sst_capture_frames for %d\n",
msg->header.part.str_id);
mutex_lock(&stream->lock);
if (stream->mmapped == false &&
stream->src == SST_DRV) {
- pr_debug("sst: waking up block for copy.\n");
+ pr_debug("waking up block for copy.\n");
stream->data_blk.ret_code = 0;
stream->data_blk.condition = true;
stream->data_blk.on = false;
@@ -261,11 +264,11 @@ void sst_process_message(struct work_struct *work)
sst_capture_frame(msg->header.part.str_id);
mutex_unlock(&stream->lock);
} else
- pr_err("sst: sst_play_frames for Penwell!!\n");
+ pr_err("sst_play_frames for Penwell!!\n");
break;
case IPC_IA_PRINT_STRING:
- pr_debug("sst: been asked to print something by fw\n");
+ pr_debug("been asked to print something by fw\n");
/* TBD */
break;
@@ -277,12 +280,12 @@ void sst_process_message(struct work_struct *work)
case IPC_SST_STREAM_PROCESS_FATAL_ERR:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
- pr_err("sst: codec fatal error %x stream %d...\n",
+ pr_err("codec fatal error %x stream %d...\n",
msg->header.full, msg->header.part.str_id);
- pr_err("sst: Dropping the stream\n");
+ pr_err("Dropping the stream\n");
sst_drop_stream(msg->header.part.str_id);
break;
case IPC_IA_LPE_GETTING_STALLED:
@@ -293,7 +296,7 @@ void sst_process_message(struct work_struct *work)
break;
default:
/* Illegal case */
- pr_err("sst: Unhandled msg %x header %x\n",
+ pr_err("Unhandled msg %x header %x\n",
msg->header.part.msg_id, msg->header.full);
}
sst_clear_interrupt();
@@ -322,7 +325,7 @@ void sst_process_reply(struct work_struct *work)
if (!msg->header.part.data) {
sst_drv_ctx->tgt_dev_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
sst_drv_ctx->tgt_dev_blk.ret_code =
-msg->header.part.data;
@@ -333,6 +336,55 @@ void sst_process_reply(struct work_struct *work)
wake_up(&sst_drv_ctx->wait_queue);
}
break;
+ case IPC_IA_ALG_PARAMS: {
+ pr_debug("sst:IPC_ALG_PARAMS response %x\n", msg->header.full);
+ pr_debug("sst: data value %x\n", msg->header.part.data);
+ pr_debug("sst: large value %x\n", msg->header.part.large);
+
+ if (!msg->header.part.large) {
+ if (!msg->header.part.data) {
+ pr_debug("sst: alg set success\n");
+ sst_drv_ctx->ppp_params_blk.ret_code = 0;
+ } else {
+ pr_debug("sst: alg set failed\n");
+ sst_drv_ctx->ppp_params_blk.ret_code =
+ -msg->header.part.data;
+ }
+
+ } else if (msg->header.part.data) {
+ struct snd_ppp_params *mailbox_params, *get_params;
+ char *params;
+
+ pr_debug("sst: alg get success\n");
+ mailbox_params = (struct snd_ppp_params *)msg->mailbox;
+ get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
+ if (get_params == NULL) {
+ pr_err("sst: out of memory for ALG PARAMS");
+ break;
+ }
+ memcpy_fromio(get_params, mailbox_params,
+ sizeof(*get_params));
+ get_params->params = kzalloc(mailbox_params->size,
+ GFP_KERNEL);
+ if (get_params->params == NULL) {
+ kfree(get_params);
+ pr_err("sst: out of memory for ALG PARAMS block");
+ break;
+ }
+ params = msg->mailbox;
+ params = params + sizeof(*mailbox_params) - sizeof(u32);
+ memcpy_fromio(get_params->params, params,
+ get_params->size);
+ sst_drv_ctx->ppp_params_blk.ret_code = 0;
+ sst_drv_ctx->ppp_params_blk.data = get_params;
+ }
+
+ if (sst_drv_ctx->ppp_params_blk.on == true) {
+ sst_drv_ctx->ppp_params_blk.condition = true;
+ wake_up(&sst_drv_ctx->wait_queue);
+ }
+ break;
+ }
case IPC_IA_GET_FW_INFO: {
struct snd_sst_fw_info *fw_info =
(struct snd_sst_fw_info *)msg->mailbox;
@@ -340,7 +392,7 @@ void sst_process_reply(struct work_struct *work)
int major = fw_info->fw_version.major;
int minor = fw_info->fw_version.minor;
int build = fw_info->fw_version.build;
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
major, minor, build);
@@ -349,13 +401,13 @@ void sst_process_reply(struct work_struct *work)
sizeof(struct snd_sst_fw_info));
sst_drv_ctx->fw_info_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
sst_drv_ctx->fw_info_blk.ret_code =
-msg->header.part.data;
}
if (sst_drv_ctx->fw_info_blk.on == true) {
- pr_debug("sst: Memcopy succedded\n");
+ pr_debug("Memcopy succeeded\n");
sst_drv_ctx->fw_info_blk.on = false;
sst_drv_ctx->fw_info_blk.condition = true;
wake_up(&sst_drv_ctx->wait_queue);
@@ -364,11 +416,11 @@ void sst_process_reply(struct work_struct *work)
}
case IPC_IA_SET_STREAM_MUTE:
if (!msg->header.part.data) {
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
sst_drv_ctx->mute_info_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
sst_drv_ctx->mute_info_blk.ret_code =
-msg->header.part.data;
@@ -382,11 +434,11 @@ void sst_process_reply(struct work_struct *work)
break;
case IPC_IA_SET_STREAM_VOL:
if (!msg->header.part.data) {
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
sst_drv_ctx->vol_info_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id,
msg->header.part.data);
sst_drv_ctx->vol_info_blk.ret_code =
@@ -402,15 +454,15 @@ void sst_process_reply(struct work_struct *work)
break;
case IPC_IA_GET_STREAM_VOL:
if (msg->header.part.large) {
- pr_debug("sst: Large Msg Received Successfully\n");
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Large Msg Received Successfully\n");
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
(void *) msg->mailbox,
sizeof(struct snd_sst_vol));
sst_drv_ctx->vol_info_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err("Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
sst_drv_ctx->vol_info_blk.ret_code =
-msg->header.part.data;
@@ -424,18 +476,18 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_GET_STREAM_PARAMS:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
str_info = &sst_drv_ctx->streams[str_id];
if (msg->header.part.large) {
- pr_debug("sst: Get stream large success\n");
+ pr_debug("Get stream large success\n");
memcpy_fromio(str_info->ctrl_blk.data,
((void *)(msg->mailbox)),
sizeof(struct snd_sst_fw_get_stream_params));
str_info->ctrl_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err("Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
str_info->ctrl_blk.ret_code = -msg->header.part.data;
}
@@ -447,19 +499,19 @@ void sst_process_reply(struct work_struct *work)
break;
case IPC_IA_DECODE_FRAMES:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
str_info = &sst_drv_ctx->streams[str_id];
if (msg->header.part.large) {
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
memcpy_fromio(str_info->data_blk.data,
((void *)(msg->mailbox)),
sizeof(struct snd_sst_decode_info));
str_info->data_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err("Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
str_info->data_blk.ret_code = -msg->header.part.data;
}
@@ -471,17 +523,17 @@ void sst_process_reply(struct work_struct *work)
break;
case IPC_IA_DRAIN_STREAM:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
str_info = &sst_drv_ctx->streams[str_id];
if (!msg->header.part.data) {
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
str_info->ctrl_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
str_info->ctrl_blk.ret_code = -msg->header.part.data;
@@ -496,7 +548,7 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_DROP_STREAM:
if (sst_validate_strid(str_id)) {
- pr_err("sst: str id %d invalid\n", str_id);
+ pr_err("str id %d invalid\n", str_id);
break;
}
str_info = &sst_drv_ctx->streams[str_id];
@@ -504,12 +556,12 @@ void sst_process_reply(struct work_struct *work)
struct snd_sst_drop_response *drop_resp =
(struct snd_sst_drop_response *)msg->mailbox;
- pr_debug("sst: Drop ret bytes %x\n", drop_resp->bytes);
+ pr_debug("Drop ret bytes %x\n", drop_resp->bytes);
str_info->curr_bytes = drop_resp->bytes;
str_info->ctrl_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id, msg->header.part.data);
str_info->ctrl_blk.ret_code = -msg->header.part.data;
}
@@ -521,10 +573,10 @@ void sst_process_reply(struct work_struct *work)
break;
case IPC_IA_ENABLE_RX_TIME_SLOT:
if (!msg->header.part.data) {
- pr_debug("sst: RX_TIME_SLOT success\n");
+ pr_debug("RX_TIME_SLOT success\n");
sst_drv_ctx->hs_info_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id,
msg->header.part.data);
sst_drv_ctx->hs_info_blk.ret_code =
@@ -541,17 +593,17 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_SET_STREAM_PARAMS:
str_info = &sst_drv_ctx->streams[str_id];
if (!msg->header.part.data) {
- pr_debug("sst: Msg succedded %x\n",
+ pr_debug("Msg succeeded %x\n",
msg->header.part.msg_id);
str_info->ctrl_blk.ret_code = 0;
} else {
- pr_err("sst: Msg %x reply error %x\n",
+ pr_err(" Msg %x reply error %x\n",
msg->header.part.msg_id,
msg->header.part.data);
str_info->ctrl_blk.ret_code = -msg->header.part.data;
}
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n", str_id);
+ pr_err(" stream id %d invalid\n", str_id);
break;
}
@@ -564,9 +616,9 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_FREE_STREAM:
if (!msg->header.part.data) {
- pr_debug("sst: Stream %d freed\n", str_id);
+ pr_debug("Stream %d freed\n", str_id);
} else {
- pr_err("sst: Free for %d ret error %x\n",
+ pr_err("Free for %d ret error %x\n",
str_id, msg->header.part.data);
}
break;
@@ -575,7 +627,7 @@ void sst_process_reply(struct work_struct *work)
struct snd_sst_alloc_response *resp =
(struct snd_sst_alloc_response *)msg->mailbox;
if (resp->str_type.result)
- pr_err("sst: error alloc stream = %x\n",
+ pr_err("error alloc stream = %x\n",
resp->str_type.result);
sst_alloc_stream_response(str_id, resp);
break;
@@ -584,21 +636,21 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_PLAY_FRAMES:
case IPC_IA_CAPT_FRAMES:
if (sst_validate_strid(str_id)) {
- pr_err("sst: stream id %d invalid\n" , str_id);
+ pr_err("stream id %d invalid\n", str_id);
break;
}
- pr_debug("sst: Ack for play/capt frames recived\n");
+ pr_debug("Ack for play/capt frames received\n");
break;
case IPC_IA_PREP_LIB_DNLD: {
struct snd_sst_str_type *str_type =
(struct snd_sst_str_type *)msg->mailbox;
- pr_debug("sst: Prep Lib download %x\n",
+ pr_debug("Prep Lib download %x\n",
msg->header.part.msg_id);
if (str_type->result)
- pr_err("sst: Prep lib download %x\n", str_type->result);
+ pr_err("Prep lib download %x\n", str_type->result);
else
- pr_debug("sst: Can download codec now...\n");
+ pr_debug("Can download codec now...\n");
sst_wake_up_alloc_block(sst_drv_ctx, str_id,
str_type->result, NULL);
break;
@@ -609,12 +661,12 @@ void sst_process_reply(struct work_struct *work)
(struct snd_sst_lib_download_info *)msg->mailbox;
int retval = resp->result;
- pr_debug("sst: Lib downloaded %x\n", msg->header.part.msg_id);
+ pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
if (resp->result) {
- pr_err("sst: err in lib dload %x\n", resp->result);
+ pr_err("err in lib dload %x\n", resp->result);
} else {
- pr_debug("sst: Codec download complete...\n");
- pr_debug("sst: codec Type %d Ver %d Built %s: %s\n",
+ pr_debug("Codec download complete...\n");
+ pr_debug("codec Type %d Ver %d Built %s: %s\n",
resp->dload_lib.lib_info.lib_type,
resp->dload_lib.lib_info.lib_version,
resp->dload_lib.lib_info.b_date,
@@ -639,17 +691,17 @@ void sst_process_reply(struct work_struct *work)
case IPC_IA_GET_FW_BUILD_INF: {
struct sst_fw_build_info *build =
(struct sst_fw_build_info *)msg->mailbox;
- pr_debug("sst: Build date:%sTime:%s", build->date, build->time);
+ pr_debug("Build date:%sTime:%s", build->date, build->time);
break;
}
case IPC_IA_SET_PMIC_TYPE:
break;
case IPC_IA_START_STREAM:
- pr_debug("sst: reply for START STREAM %x\n", msg->header.full);
+ pr_debug("reply for START STREAM %x\n", msg->header.full);
break;
default:
/* Illegal case */
- pr_err("sst: process reply:default = %x\n", msg->header.full);
+ pr_err("process reply:default = %x\n", msg->header.full);
}
sst_clear_interrupt();
return;
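
The new IPC_IA_ALG_PARAMS branch above assumes a GET reply mailbox laid out as the packed snd_ppp_params header followed immediately by the raw parameter bytes; the sizeof(*mailbox_params) - sizeof(u32) arithmetic steps over the header up to the params placeholder. As a sketch only, on the 32-bit Moorestown/Medfield builds this driver targets (where sizeof(void *) == sizeof(u32)) the same offset can be written with offsetof():

	#include <linux/stddef.h>	/* offsetof() */
	#include "intel_sst_ioctl.h"

	/* where the algorithm payload starts inside the reply mailbox */
	static void *alg_reply_payload(void *mailbox)
	{
		return (char *)mailbox + offsetof(struct snd_ppp_params, params);
	}
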
diff --git a/drivers/staging/intel_sst/intel_sst_pvt.c b/drivers/staging/intel_sst/intel_sst_pvt.c
index 6487e192bf93..01f8c3b1cf74 100644
--- a/drivers/staging/intel_sst/intel_sst_pvt.c
+++ b/drivers/staging/intel_sst/intel_sst_pvt.c
@@ -29,6 +29,8 @@
* This file contains all private functions
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
@@ -60,7 +62,7 @@ int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
}
}
if (i == MAX_ACTIVE_STREAM) {
- pr_err("sst: max alloc_stream reached");
+ pr_err("max alloc_stream reached\n");
i = -EBUSY; /* active stream limit reached */
}
return i;
@@ -84,14 +86,14 @@ int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
block->condition)) {
/* event wake */
if (block->ret_code < 0) {
- pr_err("sst: stream failed %d\n", block->ret_code);
+ pr_err("stream failed %d\n", block->ret_code);
retval = -EBUSY;
} else {
- pr_debug("sst: event up\n");
+ pr_debug("event up\n");
retval = 0;
}
} else {
- pr_err("sst: signal interrupted\n");
+ pr_err("signal interrupted\n");
retval = -EINTR;
}
return retval;
@@ -115,18 +117,18 @@ int sst_wait_interruptible_timeout(
{
int retval = 0;
- pr_debug("sst: sst_wait_interruptible_timeout - waiting....\n");
+ pr_debug("sst_wait_interruptible_timeout - waiting....\n");
if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
block->condition,
msecs_to_jiffies(timeout))) {
if (block->ret_code < 0)
- pr_err("sst: stream failed %d\n", block->ret_code);
+ pr_err("stream failed %d\n", block->ret_code);
else
- pr_debug("sst: event up\n");
+ pr_debug("event up\n");
retval = block->ret_code;
} else {
block->on = false;
- pr_err("sst: timeout occured...\n");
+ pr_err("timeout occurred...\n");
/*setting firmware state as uninit so that the
firmware will get re-downloaded on next request
this is because firmare not responding for 5 sec
@@ -156,18 +158,18 @@ int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
/* NOTE:
Observed that FW processes the alloc msg and replies even
before the alloc thread has finished execution */
- pr_debug("sst: waiting for %x, condition %x\n",
+ pr_debug("waiting for %x, condition %x\n",
block->sst_id, block->ops_block.condition);
if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
block->ops_block.condition,
msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
/* event wake */
- pr_debug("sst: Event wake %x\n", block->ops_block.condition);
- pr_debug("sst: message ret: %d\n", block->ops_block.ret_code);
+ pr_debug("Event wake %x\n", block->ops_block.condition);
+ pr_debug("message ret: %d\n", block->ops_block.ret_code);
retval = block->ops_block.ret_code;
} else {
block->ops_block.on = false;
- pr_err("sst: Wait timed-out %x\n", block->ops_block.condition);
+ pr_err("Wait timed-out %x\n", block->ops_block.condition);
/* settign firmware state as uninit so that the
firmware will get redownloaded on next request
this is because firmare not responding for 5 sec
@@ -192,14 +194,14 @@ int sst_create_large_msg(struct ipc_post **arg)
msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
if (!msg) {
- pr_err("sst: kzalloc msg failed\n");
+ pr_err("kzalloc msg failed\n");
return -ENOMEM;
}
msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
if (!msg->mailbox_data) {
kfree(msg);
- pr_err("sst: kzalloc mailbox_data failed");
+ pr_err("kzalloc mailbox_data failed");
return -ENOMEM;
};
*arg = msg;
@@ -219,7 +221,7 @@ int sst_create_short_msg(struct ipc_post **arg)
msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
if (!msg) {
- pr_err("sst: kzalloc msg failed\n");
+ pr_err("kzalloc msg failed\n");
return -ENOMEM;
}
msg->mailbox_data = NULL;
@@ -290,10 +292,10 @@ int sst_enable_rx_timeslot(int status)
struct ipc_post *msg = NULL;
if (sst_create_short_msg(&msg)) {
- pr_err("sst: mem allocation failed\n");
+ pr_err("mem allocation failed\n");
return -ENOMEM;
}
- pr_debug("sst: ipc message sending: ENABLE_RX_TIME_SLOT\n");
+ pr_debug("ipc message sending: ENABLE_RX_TIME_SLOT\n");
sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
msg->header.part.data = status;
sst_drv_ctx->hs_info_blk.condition = false;
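
sst_enable_rx_timeslot() above follows the request/reply pattern the wait helpers in this file exist for: arm the block, queue the IPC message, kick the dispatcher, then sleep until sst_process_reply() sets the condition and wakes the shared wait queue. A condensed sketch of that pattern, with field and helper names taken from the surrounding code and the list spinlock omitted for brevity:

	static int sst_send_and_wait_sketch(struct ipc_post *msg)
	{
		sst_drv_ctx->hs_info_blk.condition = false;	/* arm the block */
		sst_drv_ctx->hs_info_blk.ret_code = 0;
		sst_drv_ctx->hs_info_blk.on = true;

		list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
		sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);

		/* the reply handler sets condition = true and wakes wait_queue */
		return sst_wait_interruptible_timeout(sst_drv_ctx,
				&sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
	}
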
diff --git a/drivers/staging/intel_sst/intel_sst_stream.c b/drivers/staging/intel_sst/intel_sst_stream.c
index b2c4b7067da0..795e42ab7360 100644
--- a/drivers/staging/intel_sst/intel_sst_stream.c
+++ b/drivers/staging/intel_sst/intel_sst_stream.c
@@ -26,6 +26,8 @@
* This file contains the stream operations of SST driver
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
@@ -45,8 +47,8 @@
*/
int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
{
- if (device >= MAX_NUM_STREAMS) {
- pr_debug("sst: device type invalid %d\n", device);
+ if (device > MAX_NUM_STREAMS_MFLD) {
+ pr_debug("device type invalid %d\n", device);
return -EINVAL;
}
if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
@@ -71,15 +73,15 @@ int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
*pcm_slot = 0x0F;
else {
- pr_debug("sst: No condition satisfied.. ret err\n");
+ pr_debug("No condition satisfied.. ret err\n");
return -EINVAL;
}
} else {
- pr_debug("sst: this stream state is not uni-init, is %d\n",
+ pr_debug("this stream state is not uni-init, is %d\n",
sst_drv_ctx->streams[device].status);
return -EBADRQC;
}
- pr_debug("sst: returning slot %x\n", *pcm_slot);
+ pr_debug("returning slot %x\n", *pcm_slot);
return 0;
}
/**
@@ -96,7 +98,7 @@ static unsigned int get_mrst_stream_id(void)
if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
return i;
}
- pr_debug("sst: Didnt find empty stream for mrst\n");
+ pr_debug("Didnt find empty stream for mrst\n");
return -EBUSY;
}
@@ -305,7 +307,7 @@ int sst_pause_stream(int str_id)
if (str_info->prev == STREAM_UN_INIT)
return -EBADRQC;
if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path is in use\n ");
+ pr_err("SST ERR: control path is in use\n");
return -EINVAL;
}
if (sst_create_short_msg(&msg))
@@ -333,7 +335,7 @@ int sst_pause_stream(int str_id)
}
} else {
retval = -EBADRQC;
- pr_err("SST ERR:BADQRC for stream\n ");
+ pr_err("SST ERR: BADQRC for stream\n");
}
return retval;
@@ -468,7 +470,7 @@ int sst_drop_stream(int str_id)
}
} else {
retval = -EBADRQC;
- pr_err("SST ERR:BADQRC for stream\n");
+ pr_err("SST ERR: BADQRC for stream\n");
}
return retval;
}
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
index 5c455608b024..85789ba65186 100644
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
@@ -26,13 +26,15 @@
* This file contains the stream operations of SST driver
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <linux/firmware.h>
#include <linux/sched.h>
-#include <linux/rar_register.h>
#ifdef CONFIG_MRST_RAR_HANDLER
-#include "../../../drivers/staging/memrar/memrar.h"
+#include <linux/rar_register.h>
+#include "../memrar/memrar.h"
#endif
#include "intel_sst_ioctl.h"
#include "intel_sst.h"
@@ -53,7 +55,7 @@ int sst_get_stream_params(int str_id,
struct stream_info *str_info;
struct snd_sst_fw_get_stream_params *fw_params;
- pr_debug("sst: get_stream for %d\n", str_id);
+ pr_debug("get_stream for %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return retval;
@@ -61,16 +63,16 @@ int sst_get_stream_params(int str_id,
str_info = &sst_drv_ctx->streams[str_id];
if (str_info->status != STREAM_UN_INIT) {
if (str_info->ctrl_blk.on == true) {
- pr_err("sst: control path in use\n");
+ pr_err("control path in use\n");
return -EINVAL;
}
if (sst_create_short_msg(&msg)) {
- pr_err("sst: message creation failed\n");
+ pr_err("message creation failed\n");
return -ENOMEM;
}
fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
if (!fw_params) {
- pr_err("sst: mem allcoation failed\n ");
+ pr_err("mem allocation failed\n");
kfree(msg);
return -ENOMEM;
}
@@ -104,7 +106,7 @@ int sst_get_stream_params(int str_id,
get_params->codec_params.stream_type = str_info->str_type;
kfree(fw_params);
} else {
- pr_debug("sst: Stream is not in the init state\n");
+ pr_debug("Stream is not in the init state\n");
}
return retval;
}
@@ -125,17 +127,17 @@ int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
BUG_ON(!str_param);
if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
- pr_err("sst: Invalid operation\n");
+ pr_err("Invalid operation\n");
return -EINVAL;
}
retval = sst_validate_strid(str_id);
if (retval)
return retval;
- pr_debug("sst: set_stream for %d\n", str_id);
+ pr_debug("set_stream for %d\n", str_id);
str_info = &sst_drv_ctx->streams[str_id];
if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
if (str_info->ctrl_blk.on == true) {
- pr_err("sst: control path in use\n");
+ pr_err("control path in use\n");
return -EAGAIN;
}
if (sst_create_large_msg(&msg))
@@ -163,7 +165,7 @@ int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
}
} else {
retval = -EBADRQC;
- pr_err("sst: BADQRC for stream\n");
+ pr_err("BADQRC for stream\n");
}
return retval;
}
@@ -183,7 +185,7 @@ int sst_get_vol(struct snd_sst_vol *get_vol)
struct snd_sst_vol *fw_get_vol;
int str_id = get_vol->stream_id;
- pr_debug("sst: get vol called\n");
+ pr_debug("get vol called\n");
if (sst_create_short_msg(&msg))
return -ENOMEM;
@@ -195,7 +197,7 @@ int sst_get_vol(struct snd_sst_vol *get_vol)
sst_drv_ctx->vol_info_blk.on = true;
fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
if (!fw_get_vol) {
- pr_err("sst: mem allocation failed\n");
+ pr_err("mem allocation failed\n");
kfree(msg);
return -ENOMEM;
}
@@ -209,10 +211,10 @@ int sst_get_vol(struct snd_sst_vol *get_vol)
if (retval)
retval = -EIO;
else {
- pr_debug("sst: stream id %d\n", fw_get_vol->stream_id);
- pr_debug("sst: volume %d\n", fw_get_vol->volume);
- pr_debug("sst: ramp duration %d\n", fw_get_vol->ramp_duration);
- pr_debug("sst: ramp_type %d\n", fw_get_vol->ramp_type);
+ pr_debug("stream id %d\n", fw_get_vol->stream_id);
+ pr_debug("volume %d\n", fw_get_vol->volume);
+ pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
+ pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
}
return retval;
@@ -231,10 +233,10 @@ int sst_set_vol(struct snd_sst_vol *set_vol)
int retval = 0;
struct ipc_post *msg = NULL;
- pr_debug("sst: set vol called\n");
+ pr_debug("set vol called\n");
if (sst_create_large_msg(&msg)) {
- pr_err("sst: message creation failed\n");
+ pr_err("message creation failed\n");
return -ENOMEM;
}
sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
@@ -254,7 +256,7 @@ int sst_set_vol(struct snd_sst_vol *set_vol)
retval = sst_wait_interruptible_timeout(sst_drv_ctx,
&sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
if (retval) {
- pr_err("sst: error in set_vol = %d\n", retval);
+ pr_err("error in set_vol = %d\n", retval);
retval = -EIO;
}
return retval;
@@ -273,10 +275,10 @@ int sst_set_mute(struct snd_sst_mute *set_mute)
int retval = 0;
struct ipc_post *msg = NULL;
- pr_debug("sst: set mute called\n");
+ pr_debug("set mute called\n");
if (sst_create_large_msg(&msg)) {
- pr_err("sst: message creation failed\n");
+ pr_err("message creation failed\n");
return -ENOMEM;
}
sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
@@ -297,7 +299,7 @@ int sst_set_mute(struct snd_sst_mute *set_mute)
retval = sst_wait_interruptible_timeout(sst_drv_ctx,
&sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
if (retval) {
- pr_err("sst: error in set_mute = %d\n", retval);
+ pr_err("error in set_mute = %d\n", retval);
retval = -EIO;
}
return retval;
@@ -358,20 +360,20 @@ int sst_parse_target(struct snd_sst_slot_info *slot)
slot->device_type == SND_SST_DEVICE_PCM) {
retval = sst_activate_target(slot);
if (retval)
- pr_err("sst: SST_Activate_target_fail\n");
+ pr_err("SST_Activate_target_fail\n");
else
- pr_err("sst: SST_Activate_target_pass\n");
+ pr_err("SST_Activate_target_pass\n");
return retval;
} else if (slot->action == SND_SST_PORT_PREPARE &&
slot->device_type == SND_SST_DEVICE_PCM) {
retval = sst_prepare_target(slot);
if (retval)
- pr_err("sst: SST_prepare_target_fail\n");
+ pr_err("SST_prepare_target_fail\n");
else
- pr_err("sst: SST_prepare_target_pass\n");
+ pr_err("SST_prepare_target_pass\n");
return retval;
} else {
- pr_err("sst: slot_action : %d, device_type: %d\n",
+ pr_err("slot_action : %d, device_type: %d\n",
slot->action, slot->device_type);
return retval;
}
@@ -383,7 +385,7 @@ int sst_send_target(struct snd_sst_target_device *target)
struct ipc_post *msg;
if (sst_create_large_msg(&msg)) {
- pr_err("sst: message creation failed\n");
+ pr_err("message creation failed\n");
return -ENOMEM;
}
sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
@@ -399,11 +401,11 @@ int sst_send_target(struct snd_sst_target_device *target)
list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
spin_unlock(&sst_drv_ctx->list_spin_lock);
sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("sst: message sent- waiting\n");
+ pr_debug("message sent- waiting\n");
retval = sst_wait_interruptible_timeout(sst_drv_ctx,
&sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
if (retval)
- pr_err("sst: target device ipc failed = 0x%x\n", retval);
+ pr_err("target device ipc failed = 0x%x\n", retval);
return retval;
}
@@ -439,7 +441,7 @@ int sst_target_device_validate(struct snd_sst_target_device *target)
goto err;
} else {
err:
- pr_err("sst: i/p params incorrect\n");
+ pr_err("i/p params incorrect\n");
return -EINVAL;
}
}
@@ -460,15 +462,15 @@ int sst_target_device_select(struct snd_sst_target_device *target)
{
int retval, i, prepare_count = 0;
- pr_debug("sst: Target Device Select\n");
+ pr_debug("Target Device Select\n");
if (target->device_route < 0 || target->device_route > 2) {
- pr_err("sst: device route is invalid\n");
+ pr_err("device route is invalid\n");
return -EINVAL;
}
if (target->device_route != 0) {
- pr_err("sst: Unsupported config\n");
+ pr_err("Unsupported config\n");
return -EIO;
}
retval = sst_target_device_validate(target);
@@ -480,18 +482,18 @@ int sst_target_device_select(struct snd_sst_target_device *target)
return retval;
for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
- pr_debug("sst: activate called in %d\n", i);
+ pr_debug("activate called in %d\n", i);
retval = sst_parse_target(&target->devices[i]);
if (retval)
return retval;
} else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
- pr_debug("sst: PREPARE in %d, Forwading\n", i);
+ pr_debug("PREPARE in %d, Forwarding\n", i);
retval = sst_parse_target(&target->devices[i]);
if (retval) {
- pr_err("sst: Parse Target fail %d", retval);
+ pr_err("Parse Target fail %d\n", retval);
return retval;
}
- pr_debug("sst: Parse Target successful %d", retval);
+ pr_debug("Parse Target successful %d\n", retval);
if (target->devices[i].device_type ==
SND_SST_DEVICE_PCM)
prepare_count++;
@@ -512,11 +514,11 @@ static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
rar_status = rar_handle_to_bus(buffers, count);
if (count != rar_status) {
- pr_err("sst: The rar CALL Failed");
+ pr_err("The rar CALL Failed");
retval = -EIO;
}
if (buffers->info.type != RAR_TYPE_AUDIO) {
- pr_err("sst: Invalid RAR type\n");
+ pr_err("Invalid RAR type\n");
return -EINVAL;
}
return retval;
@@ -539,10 +541,10 @@ static int sst_create_sg_list(struct stream_info *stream,
if (kbufs->in_use == false) {
#ifdef CONFIG_MRST_RAR_HANDLER
if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- pr_debug("sst: DRM playback handling\n");
+ pr_debug("DRM playback handling\n");
rar_buffers.info.handle = (__u32)kbufs->addr;
rar_buffers.info.size = kbufs->size;
- pr_debug("sst: rar handle 0x%x size=0x%x",
+ pr_debug("rar handle 0x%x size=0x%x\n",
rar_buffers.info.handle,
rar_buffers.info.size);
retval = sst_get_RAR(&rar_buffers, 1);
@@ -552,7 +554,7 @@ static int sst_create_sg_list(struct stream_info *stream,
sg_list->addr[i].addr = rar_buffers.bus_address;
/* rar_buffers.info.size; */
sg_list->addr[i].size = (__u32)kbufs->size;
- pr_debug("sst: phyaddr[%d] 0x%x Size:0x%x\n"
+ pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
, i, sg_list->addr[i].addr,
sg_list->addr[i].size);
}
@@ -562,7 +564,7 @@ static int sst_create_sg_list(struct stream_info *stream,
virt_to_phys((void *)
kbufs->addr + kbufs->offset);
sg_list->addr[i].size = kbufs->size;
- pr_debug("sst: phyaddr[%d]:0x%x Size:0x%x\n"
+ pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
, i , sg_list->addr[i].addr, kbufs->size);
}
stream->curr_bytes += sg_list->addr[i].size;
@@ -574,7 +576,7 @@ static int sst_create_sg_list(struct stream_info *stream,
}
sg_list->num_entries = i;
- pr_debug("sst:sg list entries = %d\n", sg_list->num_entries);
+ pr_debug("sg list entries = %d\n", sg_list->num_entries);
return i;
}
@@ -595,7 +597,7 @@ int sst_play_frame(int str_id)
struct sst_stream_bufs *kbufs = NULL, *_kbufs;
struct stream_info *stream;
- pr_debug("sst: play frame for %d\n", str_id);
+ pr_debug("play frame for %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return retval;
@@ -615,14 +617,14 @@ int sst_play_frame(int str_id)
stream->curr_bytes = 0;
if (list_empty(&stream->bufs)) {
/* no user buffer available */
- pr_debug("sst: Null buffer stream status %d\n", stream->status);
+ pr_debug("Null buffer stream status %d\n", stream->status);
stream->prev = stream->status;
stream->status = STREAM_INIT;
- pr_debug("sst:new stream status = %d\n", stream->status);
+ pr_debug("new stream status = %d\n", stream->status);
if (stream->need_draining == true) {
- pr_debug("sst:draining stream\n");
+ pr_debug("draining stream\n");
if (sst_create_short_msg(&msg)) {
- pr_err("sst: mem alloc failed\n");
+ pr_err("mem allocation failed\n");
return -ENOMEM;
}
sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
@@ -633,7 +635,7 @@ int sst_play_frame(int str_id)
spin_unlock(&sst_drv_ctx->list_spin_lock);
sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
} else if (stream->data_blk.on == true) {
- pr_debug("sst:user list empty.. wake\n");
+ pr_debug("user list empty.. wake\n");
/* unblock */
stream->data_blk.ret_code = 0;
stream->data_blk.condition = true;
@@ -678,7 +680,7 @@ int sst_capture_frame(int str_id)
struct stream_info *stream;
- pr_debug("sst:capture frame for %d\n", str_id);
+ pr_debug("capture frame for %d\n", str_id);
retval = sst_validate_strid(str_id);
if (retval)
return retval;
@@ -688,19 +690,19 @@ int sst_capture_frame(int str_id)
if (kbufs->in_use == true) {
list_del(&kbufs->node);
kfree(kbufs);
- pr_debug("sst:del node\n");
+ pr_debug("del node\n");
}
}
if (list_empty(&stream->bufs)) {
/* no user buffer available */
- pr_debug("sst:Null buffer!!!!stream status %d\n",
+ pr_debug("Null buffer!!!!stream status %d\n",
stream->status);
stream->prev = stream->status;
stream->status = STREAM_INIT;
- pr_debug("sst:new stream status = %d\n",
+ pr_debug("new stream status = %d\n",
stream->status);
if (stream->data_blk.on == true) {
- pr_debug("sst:user list empty.. wake\n");
+ pr_debug("user list empty.. wake\n");
/* unblock */
stream->data_blk.ret_code = 0;
stream->data_blk.condition = true;
@@ -731,7 +733,7 @@ int sst_capture_frame(int str_id)
stream->cumm_bytes += stream->curr_bytes;
stream->curr_bytes = 0;
- pr_debug("sst:Cum bytes = %d\n", stream->cumm_bytes);
+ pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
return 0;
}
@@ -743,7 +745,7 @@ static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
if (bufs->buff_entry[i].size < min_val)
min_val = bufs->buff_entry[i].size;
}
- pr_debug("sst:min_val = %d\n", min_val);
+ pr_debug("min_val = %d\n", min_val);
return min_val;
}
@@ -754,7 +756,7 @@ static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
if (bufs->buff_entry[i].size > max_val)
max_val = bufs->buff_entry[i].size;
}
- pr_debug("sst:max_val = %d\n", max_val);
+ pr_debug("max_val = %d\n", max_val);
return max_val;
}
@@ -773,7 +775,7 @@ static int sst_allocate_decode_buf(struct stream_info *str_info,
if (dbufs->ibufs->entries == dbufs->obufs->entries)
return 0;
else {
- pr_err("sst: RAR entries dont match\n");
+ pr_err("RAR entries dont match\n");
return -EINVAL;
}
} else
@@ -783,26 +785,26 @@ static int sst_allocate_decode_buf(struct stream_info *str_info,
}
#endif
if (!str_info->decode_ibuf) {
- pr_debug("sst:no i/p buffers, trying full size\n");
+ pr_debug("no i/p buffers, trying full size\n");
str_info->decode_isize = cum_input_given;
str_info->decode_ibuf = kzalloc(str_info->decode_isize,
GFP_KERNEL);
str_info->idecode_alloc = str_info->decode_isize;
}
if (!str_info->decode_ibuf) {
- pr_debug("sst:buff alloc failed, try max size\n");
+ pr_debug("buff alloc failed, try max size\n");
str_info->decode_isize = calculate_max_size(dbufs->ibufs);
str_info->decode_ibuf = kzalloc(
str_info->decode_isize, GFP_KERNEL);
str_info->idecode_alloc = str_info->decode_isize;
}
if (!str_info->decode_ibuf) {
- pr_debug("sst:buff alloc failed, try min size\n");
+ pr_debug("buff alloc failed, try min size\n");
str_info->decode_isize = calculate_min_size(dbufs->ibufs);
str_info->decode_ibuf = kzalloc(str_info->decode_isize,
GFP_KERNEL);
if (!str_info->decode_ibuf) {
- pr_err("sst: mem allocation failed\n");
+ pr_err("mem allocation failed\n");
return -ENOMEM;
}
str_info->idecode_alloc = str_info->decode_isize;
@@ -820,7 +822,7 @@ static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
struct ipc_post *msg = NULL;
int retval = 0;
- pr_debug("SST DBGsst_set_mute:called\n");
+ pr_debug("SST DBG:sst_set_mute:called\n");
if (str_info->decode_ibuf_type == SST_BUF_RAR) {
#ifdef CONFIG_MRST_RAR_HANDLER
@@ -857,7 +859,7 @@ static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
dec_info->input_bytes_consumed = 0;
dec_info->output_bytes_produced = 0;
if (sst_create_large_msg(&msg)) {
- pr_err("sst: message creation failed\n");
+ pr_err("message creation failed\n");
return -ENOMEM;
}
@@ -878,13 +880,13 @@ static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
return retval;
}
+#ifdef CONFIG_MRST_RAR_HANDLER
static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
struct snd_sst_dbufs *dbufs,
int *input_index, int *in_copied,
int *input_index_valid_size, int *new_entry_flag)
{
int retval = 0;
-#ifdef CONFIG_MRST_RAR_HANDLER
int i;
if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
@@ -894,7 +896,7 @@ static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
dbufs->ibufs->buff_entry[i].buffer,
sizeof(__u32));
if (retval) {
- pr_err("sst:cpy from user fail\n");
+ pr_err("cpy from user fail\n");
return -EAGAIN;
}
rar_buffers.info.type = dbufs->ibufs->type;
@@ -919,9 +921,10 @@ static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
str_info->decode_ibuf_type = dbufs->ibufs->type;
*in_copied = str_info->decode_isize;
}
-#endif
return retval;
}
+#endif
+
/*This function is used to prepare the kernel input buffers with contents
before sending for decode*/
static int sst_prepare_input_buffers(struct stream_info *str_info,
@@ -931,7 +934,7 @@ static int sst_prepare_input_buffers(struct stream_info *str_info,
{
int i, cpy_size, retval = 0;
- pr_debug("sst:input_index = %d, input entries = %d\n",
+ pr_debug("input_index = %d, input entries = %d\n",
*input_index, dbufs->ibufs->entries);
for (i = *input_index; i < dbufs->ibufs->entries; i++) {
#ifdef CONFIG_MRST_RAR_HANDLER
@@ -939,7 +942,7 @@ static int sst_prepare_input_buffers(struct stream_info *str_info,
dbufs, input_index, in_copied,
input_index_valid_size, new_entry_flag);
if (retval) {
- pr_err("sst: In prepare input buffers for RAR\n");
+ pr_err("In prepare input buffers for RAR\n");
return -EIO;
}
#endif
@@ -947,10 +950,10 @@ static int sst_prepare_input_buffers(struct stream_info *str_info,
if (*input_index_valid_size == 0)
*input_index_valid_size =
dbufs->ibufs->buff_entry[i].size;
- pr_debug("sst:inout addr = %p, size = %d\n",
+ pr_debug("inout addr = %p, size = %d\n",
dbufs->ibufs->buff_entry[i].buffer,
*input_index_valid_size);
- pr_debug("sst:decode_isize = %d, in_copied %d\n",
+ pr_debug("decode_isize = %d, in_copied %d\n",
str_info->decode_isize, *in_copied);
if (*input_index_valid_size <=
(str_info->decode_isize - *in_copied))
@@ -958,12 +961,12 @@ static int sst_prepare_input_buffers(struct stream_info *str_info,
else
cpy_size = str_info->decode_isize - *in_copied;
- pr_debug("sst:cpy size = %d\n", cpy_size);
+ pr_debug("cpy size = %d\n", cpy_size);
if (!dbufs->ibufs->buff_entry[i].buffer) {
- pr_err("sst: i/p buffer is null\n");
+ pr_err("i/p buffer is null\n");
return -EINVAL;
}
- pr_debug("sst:Try copy To %p, From %p, size %d\n",
+ pr_debug("Try copy To %p, From %p, size %d\n",
str_info->decode_ibuf + *in_copied,
dbufs->ibufs->buff_entry[i].buffer, cpy_size);
@@ -972,22 +975,22 @@ static int sst_prepare_input_buffers(struct stream_info *str_info,
(void *) dbufs->ibufs->buff_entry[i].buffer,
cpy_size);
if (retval) {
- pr_err("sst: copy from user failed\n");
+ pr_err("copy from user failed\n");
return -EIO;
}
*in_copied += cpy_size;
*input_index_valid_size -= cpy_size;
- pr_debug("sst:in buff size = %d, in_copied = %d\n",
+ pr_debug("in buff size = %d, in_copied = %d\n",
*input_index_valid_size, *in_copied);
if (*input_index_valid_size != 0) {
- pr_debug("sst:more input buffers left\n");
+ pr_debug("more input buffers left\n");
dbufs->ibufs->buff_entry[i].buffer += cpy_size;
break;
}
if (*in_copied == str_info->decode_isize &&
*input_index_valid_size == 0 &&
(i+1) <= dbufs->ibufs->entries) {
- pr_debug("sst:all input buffers copied\n");
+ pr_debug("all input buffers copied\n");
*new_entry_flag = true;
*input_index = i + 1;
break;
@@ -1005,23 +1008,23 @@ static int sst_prepare_output_buffers(struct stream_info *str_info,
{
int i, cpy_size, retval = 0;
- pr_debug("sst:output_index = %d, output entries = %d\n",
+ pr_debug("output_index = %d, output entries = %d\n",
*output_index,
dbufs->obufs->entries);
for (i = *output_index; i < dbufs->obufs->entries; i++) {
*output_index = i;
- pr_debug("sst:output addr = %p, size = %d\n",
+ pr_debug("output addr = %p, size = %d\n",
dbufs->obufs->buff_entry[i].buffer,
dbufs->obufs->buff_entry[i].size);
- pr_debug("sst:output_size = %d, out_copied = %d\n",
+ pr_debug("output_size = %d, out_copied = %d\n",
output_size, *out_copied);
if (dbufs->obufs->buff_entry[i].size <
(output_size - *out_copied))
cpy_size = dbufs->obufs->buff_entry[i].size;
else
cpy_size = output_size - *out_copied;
- pr_debug("sst:cpy size = %d\n", cpy_size);
- pr_debug("sst:Try copy To: %p, From %p, size %d\n",
+ pr_debug("cpy size = %d\n", cpy_size);
+ pr_debug("Try copy To: %p, From %p, size %d\n",
dbufs->obufs->buff_entry[i].buffer,
sst_drv_ctx->mmap_mem + *out_copied,
cpy_size);
@@ -1029,13 +1032,13 @@ static int sst_prepare_output_buffers(struct stream_info *str_info,
sst_drv_ctx->mmap_mem + *out_copied,
cpy_size);
if (retval) {
- pr_err("sst: copy to user failed\n");
+ pr_err("copy to user failed\n");
return -EIO;
} else
- pr_debug("sst:copy to user passed\n");
+ pr_debug("copy to user passed\n");
*out_copied += cpy_size;
dbufs->obufs->buff_entry[i].size -= cpy_size;
- pr_debug("sst:o/p buff size %d, out_copied %d\n",
+ pr_debug("o/p buff size %d, out_copied %d\n",
dbufs->obufs->buff_entry[i].size, *out_copied);
if (dbufs->obufs->buff_entry[i].size != 0) {
*output_index = i;
@@ -1073,7 +1076,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
unsigned long long input_bytes, output_bytes;
sst_drv_ctx->scard_ops->power_down_pmic();
- pr_debug("sst: Powering_down_PMIC...\n");
+ pr_debug("Powering_down_PMIC...\n");
retval = sst_validate_strid(str_id);
if (retval)
@@ -1081,7 +1084,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
str_info = &sst_drv_ctx->streams[str_id];
if (str_info->status != STREAM_INIT) {
- pr_err("sst: invalid stream state = %d\n",
+ pr_err("invalid stream state = %d\n",
str_info->status);
return -EINVAL;
}
@@ -1098,7 +1101,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
retval = sst_allocate_decode_buf(str_info, dbufs,
cum_input_given, cum_output_given);
if (retval) {
- pr_err("sst: mem allocation failed, abort!!!\n");
+ pr_err("mem allocation failed, abort!!!\n");
retval = -ENOMEM;
goto finish;
}
@@ -1114,7 +1117,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
dbufs, &input_index, &in_copied,
&input_index_valid_size, &new_entry_flag);
if (retval) {
- pr_err("sst: prepare in buffers failed\n");
+ pr_err("prepare in buffers failed\n");
goto finish;
}
@@ -1145,8 +1148,8 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
str_info->decode_osize = dbufs->obufs->
buff_entry[output_index].size;
str_info->decode_obuf_type = dbufs->obufs->type;
- pr_debug("sst:DRM handling\n");
- pr_debug("o/p_add=0x%lu Size=0x%x",
+ pr_debug("DRM handling\n");
+	pr_debug("o/p_add=0x%lx Size=0x%x\n",
(unsigned long) str_info->decode_obuf,
str_info->decode_osize);
} else {
@@ -1160,7 +1163,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
if (str_info->decode_isize > in_copied) {
str_info->decode_isize = in_copied;
- pr_debug("sst:i/p size = %d\n",
+ pr_debug("i/p size = %d\n",
str_info->decode_isize);
}
}
@@ -1168,20 +1171,19 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
retval = sst_send_decode_mess(str_id, str_info, &dec_info);
if (retval || dec_info.input_bytes_consumed == 0) {
- pr_err(
- "SST ERR: mess failed or no input consumed\n");
+ pr_err("SST ERR: mess failed or no input consumed\n");
goto finish;
}
input_bytes = dec_info.input_bytes_consumed;
output_bytes = dec_info.output_bytes_produced;
- pr_debug("sst:in_copied=%d, con=%lld, prod=%lld\n",
+ pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
in_copied, input_bytes, output_bytes);
if (dbufs->obufs->type == SST_BUF_RAR) {
output_index += 1;
if (output_index == dbufs->obufs->entries) {
copy_in_done = true;
- pr_debug("sst:all i/p cpy done\n");
+ pr_debug("all i/p cpy done\n");
}
total_output += output_bytes;
} else {
@@ -1190,14 +1192,14 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
retval = sst_prepare_output_buffers(str_info, dbufs,
&output_index, output_size, &out_copied);
if (retval) {
- pr_err("sst:prep out buff fail\n");
+ pr_err("prep out buff fail\n");
goto finish;
}
if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
if (in_copied != input_bytes) {
int bytes_left = in_copied -
input_bytes;
- pr_debug("sst:bytes %d\n",
+				pr_debug("bytes left = %d\n",
bytes_left);
if (new_entry_flag == true)
input_index--;
@@ -1237,7 +1239,7 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
total_output += out_copied;
if (str_info->decode_osize != out_copied) {
str_info->decode_osize -= out_copied;
- pr_debug("sst:output size modified = %d\n",
+ pr_debug("output size modified = %d\n",
str_info->decode_osize);
}
}
@@ -1251,16 +1253,16 @@ int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
} else {
if (total_output == cum_output_given) {
copy_out_done = true;
- pr_debug("sst:all o/p cpy done\n");
+ pr_debug("all o/p cpy done\n");
}
if (total_input == cum_input_given) {
copy_in_done = true;
- pr_debug("sst:all i/p cpy done\n");
+ pr_debug("all i/p cpy done\n");
}
}
- pr_debug("sst:copy_out = %d, copy_in = %d\n",
+ pr_debug("copy_out = %d, copy_in = %d\n",
copy_out_done, copy_in_done);
}
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
index 4c0264ceaa88..fb2292186703 100644
--- a/drivers/staging/intel_sst/intelmid.c
+++ b/drivers/staging/intel_sst/intelmid.c
@@ -24,6 +24,9 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* ALSA driver for Intel MID sound card chipset
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -101,12 +104,10 @@ static struct snd_pcm_hardware snd_intelmad_stream = {
static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- int ret_val = 0;
+ int ret_val = 0, str_id;
struct snd_intelmad *intelmaddata;
struct mad_stream_pvt *stream;
- /*struct stream_buffer buffer_to_sst;*/
-
-
+ struct intel_sst_pcm_control *sst_ops;
WARN_ON(!substream);
@@ -115,38 +116,35 @@ static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
WARN_ON(!intelmaddata->sstdrv_ops);
WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
+ sst_ops = intelmaddata->sstdrv_ops->pcm_control;
+ str_id = stream->stream_info.str_id;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- pr_debug("sst: Trigger Start\n");
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_START,
- &stream->stream_info.str_id);
+ pr_debug("Trigger Start\n");
+ ret_val = sst_ops->device_control(SST_SND_START, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = RUNNING;
stream->substream = substream;
- stream->stream_status = RUNNING;
break;
case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("sst: in stop\n");
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
- &stream->stream_info.str_id);
+ pr_debug("in stop\n");
+ ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = DROPPED;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("sst: in pause\n");
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_PAUSE,
- &stream->stream_info.str_id);
+ pr_debug("in pause\n");
+ ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = PAUSED;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("sst: in pause release\n");
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_RESUME,
- &stream->stream_info.str_id);
+ pr_debug("in pause release\n");
+ ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = RUNNING;
@@ -170,19 +168,19 @@ static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
int ret_val = 0;
struct snd_intelmad *intelmaddata;
- pr_debug("sst: pcm_prepare called\n");
+ pr_debug("pcm_prepare called\n");
WARN_ON(!substream);
stream = substream->runtime->private_data;
intelmaddata = snd_pcm_substream_chip(substream);
- pr_debug("sst: pb cnt = %d cap cnt = %d\n",\
+	pr_debug("pb cnt = %d cap cnt = %d\n",
intelmaddata->playback_cnt,
intelmaddata->capture_cnt);
if (stream->stream_info.str_id) {
- pr_debug("sst: Prepare called for already set stream\n");
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
- &stream->stream_info.str_id);
+ pr_debug("Prepare called for already set stream\n");
+ ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+ SST_SND_DROP, &stream->stream_info.str_id);
return ret_val;
}
@@ -197,7 +195,7 @@ static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
/* return back the stream id */
snprintf(substream->pcm->id, sizeof(substream->pcm->id),
"%d", stream->stream_info.str_id);
- pr_debug("sst: stream id to user = %s\n",
+ pr_debug("stream id to user = %s\n",
substream->pcm->id);
ret_val = snd_intelmad_init_stream(substream);
@@ -212,7 +210,7 @@ static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
{
int ret_val;
- pr_debug("sst: snd_intelmad_hw_params called\n");
+ pr_debug("snd_intelmad_hw_params called\n");
ret_val = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
memset(substream->runtime->dma_area, 0,
@@ -223,7 +221,7 @@ static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
{
- pr_debug("sst: snd_intelmad_hw_free called\n");
+ pr_debug("snd_intelmad_hw_free called\n");
return snd_pcm_lib_free_pages(substream);
}
@@ -250,15 +248,15 @@ static snd_pcm_uframes_t snd_intelmad_pcm_pointer
if (stream->stream_status == INIT)
return 0;
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_BUFFER_POINTER,
- &stream->stream_info);
+ ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+ SST_SND_BUFFER_POINTER, &stream->stream_info);
if (ret_val) {
- pr_err("sst: error code = 0x%x\n", ret_val);
+ pr_err("error code = 0x%x\n", ret_val);
return ret_val;
}
- pr_debug("sst: samples reported out 0x%llx\n",
+ pr_debug("samples reported out 0x%llx\n",
stream->stream_info.buffer_ptr);
- pr_debug("sst: Frame bits:: %d period_count :: %d\n",
+ pr_debug("Frame bits:: %d period_count :: %d\n",
(int)substream->runtime->frame_bits,
(int)substream->runtime->period_size);
@@ -277,26 +275,26 @@ static int snd_intelmad_close(struct snd_pcm_substream *substream)
{
struct snd_intelmad *intelmaddata;
struct mad_stream_pvt *stream;
- int ret_val = 0;
+ int ret_val = 0, str_id;
WARN_ON(!substream);
stream = substream->runtime->private_data;
+ str_id = stream->stream_info.str_id;
- pr_debug("sst: snd_intelmad_close called\n");
+	pr_debug("snd_intelmad_close called for %d\n", str_id);
intelmaddata = snd_pcm_substream_chip(substream);
- pr_debug("sst: str id = %d\n", stream->stream_info.str_id);
+ pr_debug("str id = %d\n", stream->stream_info.str_id);
if (stream->stream_info.str_id) {
/* SST API to actually stop/free the stream */
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_FREE,
- &stream->stream_info.str_id);
+ ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
intelmaddata->playback_cnt--;
else
intelmaddata->capture_cnt--;
}
- pr_debug("sst: snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
+ pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
intelmaddata->playback_cnt, intelmaddata->capture_cnt);
kfree(substream->runtime->private_data);
return ret_val;
@@ -319,7 +317,7 @@ static int snd_intelmad_open(struct snd_pcm_substream *substream,
WARN_ON(!substream);
- pr_debug("sst: snd_intelmad_open called\n");
+ pr_debug("snd_intelmad_open called\n");
intelmaddata = snd_pcm_substream_chip(substream);
runtime = substream->runtime;
@@ -456,17 +454,17 @@ void sst_mad_send_jack_report(struct snd_jack *jack,
{
if (!jack) {
- pr_debug("sst: MAD error jack empty\n");
+ pr_debug("MAD error jack empty\n");
} else {
- pr_debug("sst: MAD send jack report for = %d!!!\n", status);
- pr_debug("sst: MAD send jack report %d\n", jack->type);
+ pr_debug("MAD send jack report for = %d!!!\n", status);
+ pr_debug("MAD send jack report %d\n", jack->type);
snd_jack_report(jack, status);
/*button pressed and released */
if (buttonpressevent)
snd_jack_report(jack, 0);
- pr_debug("sst: MAD sending jack report Done !!!\n");
+ pr_debug("MAD sending jack report Done !!!\n");
}
@@ -490,7 +488,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
if (intsts & 0x4) {
if (!(intelmid_audio_interrupt_enable)) {
- pr_debug("sst: Audio interrupt enable\n");
+ pr_debug("Audio interrupt enable\n");
sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
@@ -500,7 +498,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
}
/* send headphone detect */
- pr_debug("sst: MAD headphone %d\n", intsts & 0x4);
+ pr_debug("MAD headphone %d\n", intsts & 0x4);
jack = &intelmaddata->jack[0].jack;
present = !(intelmaddata->jack[0].jack_status);
intelmaddata->jack[0].jack_status = present;
@@ -510,7 +508,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
if (intsts & 0x2) {
/* send short push */
- pr_debug("sst: MAD short push %d\n", intsts & 0x2);
+ pr_debug("MAD short push %d\n", intsts & 0x2);
jack = &intelmaddata->jack[2].jack;
present = 1;
jack_event_flag = 1;
@@ -518,7 +516,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
}
if (intsts & 0x1) {
/* send long push */
- pr_debug("sst: MAD long push %d\n", intsts & 0x1);
+ pr_debug("MAD long push %d\n", intsts & 0x1);
jack = &intelmaddata->jack[3].jack;
present = 1;
jack_event_flag = 1;
@@ -526,7 +524,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
}
if (intsts & 0x8) {
if (!(intelmid_audio_interrupt_enable)) {
- pr_debug("sst: Audio interrupt enable\n");
+ pr_debug("Audio interrupt enable\n");
sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
@@ -535,7 +533,7 @@ void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
intelmaddata->jack[1].jack_status = 0;
}
/* send headset detect */
- pr_debug("sst: MAD headset = %d\n", intsts & 0x8);
+ pr_debug("MAD headset = %d\n", intsts & 0x8);
jack = &intelmaddata->jack[1].jack;
present = !(intelmaddata->jack[1].jack_status);
intelmaddata->jack[1].jack_status = present;
@@ -558,10 +556,10 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- pr_debug("sst: previous value: %x\n", intelmaddata->jack_prev_state);
+ pr_debug("previous value: %x\n", intelmaddata->jack_prev_state);
if (!(intelmid_audio_interrupt_enable)) {
- pr_debug("sst: Audio interrupt enable\n");
+ pr_debug("Audio interrupt enable\n");
intelmaddata->jack_prev_state = 0xC0;
intelmid_audio_interrupt_enable = 1;
}
@@ -572,12 +570,12 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
sc_access_read.reg_addr = 0x201;
sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
value = (sc_access_read.value);
- pr_debug("sst: value returned = 0x%x\n", value);
+ pr_debug("value returned = 0x%x\n", value);
}
if (jack_prev_state == 0xc0 && value == 0x40) {
/*headset detected. */
- pr_debug("sst: MAD headset inserted\n");
+ pr_debug("MAD headset inserted\n");
jack = &intelmaddata->jack[1].jack;
present = 1;
jack_event_flag = 1;
@@ -587,7 +585,7 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
if (jack_prev_state == 0xc0 && value == 0x00) {
/* headphone detected. */
- pr_debug("sst: MAD headphone inserted\n");
+ pr_debug("MAD headphone inserted\n");
jack = &intelmaddata->jack[0].jack;
present = 1;
jack_event_flag = 1;
@@ -596,9 +594,9 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
if (jack_prev_state == 0x40 && value == 0xc0) {
/*headset removed*/
- pr_debug("sst: Jack headset status %d\n",
+ pr_debug("Jack headset status %d\n",
intelmaddata->jack[1].jack_status);
- pr_debug("sst: MAD headset removed\n");
+ pr_debug("MAD headset removed\n");
jack = &intelmaddata->jack[1].jack;
present = 0;
jack_event_flag = 1;
@@ -607,9 +605,9 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
if (jack_prev_state == 0x00 && value == 0xc0) {
/* headphone detected. */
- pr_debug("sst: Jack headphone status %d\n",
+ pr_debug("Jack headphone status %d\n",
intelmaddata->jack[0].jack_status);
- pr_debug("sst: headphone removed\n");
+ pr_debug("headphone removed\n");
jack = &intelmaddata->jack[0].jack;
present = 0;
jack_event_flag = 1;
@@ -618,7 +616,7 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
if (jack_prev_state == 0x40 && value == 0x00) {
/*button pressed*/
do_gettimeofday(&intelmaddata->jack[1].buttonpressed);
- pr_debug("sst: MAD button press detected n");
+ pr_debug("MAD button press detected\n");
}
@@ -628,19 +626,19 @@ void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
do_gettimeofday(
&intelmaddata->jack[1].buttonreleased);
/*button pressed */
- pr_debug("sst: Button Released detected\n");
+ pr_debug("Button Released detected\n");
timediff = intelmaddata->jack[1].
buttonreleased.tv_sec - intelmaddata->
jack[1].buttonpressed.tv_sec;
buttonpressflag = 1;
if (timediff > 1) {
- pr_debug("sst: long press detected\n");
+ pr_debug("long press detected\n");
/* send headphone detect/undetect */
jack = &intelmaddata->jack[3].jack;
present = 1;
jack_event_flag = 1;
} else {
- pr_debug("sst: short press detected\n");
+ pr_debug("short press detected\n");
/* send headphone detect/undetect */
jack = &intelmaddata->jack[2].jack;
present = 1;
@@ -667,24 +665,24 @@ void sst_mad_jackdetection_nec(u8 intsts, struct snd_intelmad *intelmaddata)
sc_access_read.reg_addr = 0x132;
sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
value = (sc_access_read.value);
- pr_debug("sst: value returned = 0x%x\n", value);
+ pr_debug("value returned = 0x%x\n", value);
}
if (intsts & 0x1) {
- pr_debug("sst: headset detected\n");
+ pr_debug("headset detected\n");
/* send headset detect/undetect */
jack = &intelmaddata->jack[1].jack;
present = (value == 0x1) ? 1 : 0;
jack_event_flag = 1;
}
if (intsts & 0x2) {
- pr_debug("sst: headphone detected\n");
+ pr_debug("headphone detected\n");
/* send headphone detect/undetect */
jack = &intelmaddata->jack[0].jack;
present = (value == 0x2) ? 1 : 0;
jack_event_flag = 1;
}
if (intsts & 0x4) {
- pr_debug("sst: short push detected\n");
+ pr_debug("short push detected\n");
/* send short push */
jack = &intelmaddata->jack[2].jack;
present = 1;
@@ -692,7 +690,7 @@ void sst_mad_jackdetection_nec(u8 intsts, struct snd_intelmad *intelmaddata)
buttonpressflag = 1;
}
if (intsts & 0x8) {
- pr_debug("sst: long push detected\n");
+ pr_debug("long push detected\n");
/* send long push */
jack = &intelmaddata->jack[3].jack;
present = 1;
@@ -738,12 +736,12 @@ static int __devinit snd_intelmad_register_irq(
u32 regbase = AUDINT_BASE, regsize = 8;
char *drv_name;
- pr_debug("sst: irq reg done, regbase 0x%x, regsize 0x%x\n",
+ pr_debug("irq reg done, regbase 0x%x, regsize 0x%x\n",
regbase, regsize);
intelmaddata->int_base = ioremap_nocache(regbase, regsize);
if (!intelmaddata->int_base)
- pr_err("sst: Mapping of cache failed\n");
- pr_debug("sst: irq = 0x%x\n", intelmaddata->irq);
+ pr_err("Mapping of cache failed\n");
+ pr_debug("irq = 0x%x\n", intelmaddata->irq);
if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
drv_name = DRIVER_NAME_MFLD;
else
@@ -753,7 +751,7 @@ static int __devinit snd_intelmad_register_irq(
IRQF_SHARED, drv_name,
intelmaddata);
if (ret_val)
- pr_err("sst: cannot register IRQ\n");
+ pr_err("cannot register IRQ\n");
return ret_val;
}
@@ -775,10 +773,10 @@ static int __devinit snd_intelmad_sst_register(
if (ret_val)
return ret_val;
sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
- pr_debug("sst: orginal n extrated vendor id = 0x%x %d\n",
+	pr_debug("original and extracted vendor id = 0x%x %d\n",
vendor_addr.value, sst_card_vendor_id);
if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
- pr_err("sst: vendor card not supported!!\n");
+ pr_err("vendor card not supported!!\n");
return -EIO;
}
} else
@@ -801,7 +799,7 @@ static int __devinit snd_intelmad_sst_register(
/* registering with SST driver to get access to SST APIs to use */
ret_val = register_sst_card(intelmaddata->sstdrv_ops);
if (ret_val) {
- pr_err("sst: sst card registration failed\n");
+ pr_err("sst card registration failed\n");
return ret_val;
}
@@ -832,7 +830,7 @@ static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
char name[32] = INTEL_MAD;
struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
- pr_debug("sst: called for pb %d, cp %d, idx %d\n", pb, cap, index);
+ pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
if (ret_val)
return ret_val;
@@ -878,7 +876,7 @@ static int __devinit snd_intelmad_pcm(struct snd_card *card,
WARN_ON(!card);
WARN_ON(!intelmaddata);
- pr_debug("sst: snd_intelmad_pcm called\n");
+ pr_debug("snd_intelmad_pcm called\n");
ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
return ret_val;
@@ -903,7 +901,7 @@ static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
struct snd_jack *jack;
int retval;
- pr_debug("sst: snd_intelmad_jack called\n");
+ pr_debug("snd_intelmad_jack called\n");
jack = &intelmaddata->jack[0].jack;
retval = snd_jack_new(intelmaddata->card, "Headphone",
SND_JACK_HEADPHONE, &jack);
@@ -982,9 +980,9 @@ static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
ret_val = snd_ctl_add(card,
snd_ctl_new1(&controls[idx],
intelmaddata));
- pr_debug("sst: mixer[idx]=%d added\n", idx);
+ pr_debug("mixer[idx]=%d added\n", idx);
if (ret_val) {
- pr_err("sst: in adding of control index = %d\n", idx);
+			pr_err("failed to add control index = %d\n", idx);
break;
}
}
@@ -999,7 +997,7 @@ static int snd_intelmad_dev_free(struct snd_device *device)
intelmaddata = device->device_data;
- pr_debug("sst: snd_intelmad_dev_free called\n");
+ pr_debug("snd_intelmad_dev_free called\n");
snd_card_free(intelmaddata->card);
/*genl_unregister_family(&audio_event_genl_family);*/
unregister_sst_card(intelmaddata->sstdrv_ops);
@@ -1040,23 +1038,23 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
unsigned int cpu_id = (unsigned int)id->driver_data;
- pr_debug("sst: probe for %s cpu_id %d\n", pdev->name, cpu_id);
+ pr_debug("probe for %s cpu_id %d\n", pdev->name, cpu_id);
if (!strcmp(pdev->name, DRIVER_NAME_MRST))
- pr_debug("sst: detected MRST\n");
+ pr_debug("detected MRST\n");
else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
- pr_debug("sst: detected MFLD\n");
+ pr_debug("detected MFLD\n");
else {
- pr_err("sst: detected unknown device abort!!\n");
+ pr_err("detected unknown device abort!!\n");
return -EIO;
}
if ((cpu_id < CPU_CHIP_LINCROFT) || (cpu_id > CPU_CHIP_PENWELL)) {
- pr_err("sst: detected unknown cpu_id abort!!\n");
+ pr_err("detected unknown cpu_id abort!!\n");
return -EIO;
}
/* allocate memory for saving internal context and working */
intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
if (!intelmaddata) {
- pr_debug("sst: mem alloctn fail\n");
+ pr_debug("mem alloctn fail\n");
return -ENOMEM;
}
@@ -1064,7 +1062,7 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
GFP_KERNEL);
if (!intelmaddata->sstdrv_ops) {
- pr_err("sst: mem allocation for ops fail\n");
+ pr_err("mem allocation for ops fail\n");
kfree(intelmaddata);
return -ENOMEM;
}
@@ -1073,7 +1071,7 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
/* create a card instance with ALSA framework */
ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
if (ret_val) {
- pr_err("sst: snd_card_create fail\n");
+ pr_err("snd_card_create fail\n");
goto free_allocs;
}
@@ -1092,7 +1090,7 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
/* registering with LPE driver to get access to SST APIs to use */
ret_val = snd_intelmad_sst_register(intelmaddata);
if (ret_val) {
- pr_err("sst: snd_intelmad_sst_register failed\n");
+ pr_err("snd_intelmad_sst_register failed\n");
goto free_allocs;
}
@@ -1100,19 +1098,19 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
ret_val = snd_intelmad_pcm(card, intelmaddata);
if (ret_val) {
- pr_err("sst: snd_intelmad_pcm failed\n");
+ pr_err("snd_intelmad_pcm failed\n");
goto free_allocs;
}
ret_val = snd_intelmad_mixer(intelmaddata);
if (ret_val) {
- pr_err("sst: snd_intelmad_mixer failed\n");
+ pr_err("snd_intelmad_mixer failed\n");
goto free_allocs;
}
ret_val = snd_intelmad_jack(intelmaddata);
if (ret_val) {
- pr_err("sst: snd_intelmad_jack failed\n");
+ pr_err("snd_intelmad_jack failed\n");
goto free_allocs;
}
@@ -1126,31 +1124,31 @@ int __devinit snd_intelmad_probe(struct platform_device *pdev)
ret_val = snd_intelmad_register_irq(intelmaddata);
if (ret_val) {
- pr_err("sst: snd_intelmad_register_irq fail\n");
+ pr_err("snd_intelmad_register_irq fail\n");
goto free_allocs;
}
/* internal function call to register device with ALSA */
ret_val = snd_intelmad_create(intelmaddata, card);
if (ret_val) {
- pr_err("sst: snd_intelmad_create failed\n");
+ pr_err("snd_intelmad_create failed\n");
goto free_allocs;
}
card->private_data = &intelmaddata;
snd_card_set_dev(card, &pdev->dev);
ret_val = snd_card_register(card);
if (ret_val) {
- pr_err("sst: snd_card_register failed\n");
+ pr_err("snd_card_register failed\n");
goto free_allocs;
}
- pr_debug("sst:snd_intelmad_probe complete\n");
+ pr_debug("snd_intelmad_probe complete\n");
return ret_val;
free_mad_jack_wq:
destroy_workqueue(intelmaddata->mad_jack_wq);
free_allocs:
- pr_err("sst: probe failed\n");
+ pr_err("probe failed\n");
snd_card_free(card);
kfree(intelmaddata->sstdrv_ops);
kfree(intelmaddata);
@@ -1200,7 +1198,7 @@ static struct platform_driver snd_intelmad_driver = {
*/
static int __init alsa_card_intelmad_init(void)
{
- pr_debug("sst: mad_init called\n");
+ pr_debug("mad_init called\n");
return platform_driver_register(&snd_intelmad_driver);
}
@@ -1211,7 +1209,7 @@ static int __init alsa_card_intelmad_init(void)
*/
static void __exit alsa_card_intelmad_exit(void)
{
- pr_debug("sst:mad_exit called\n");
+ pr_debug("mad_exit called\n");
return platform_driver_unregister(&snd_intelmad_driver);
}
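
The pr_fmt() define added above is what makes dropping the hand-written "sst: " literals safe: printk.h expands pr_debug()/pr_err() as printk(KERN_* pr_fmt(fmt), ...) and only supplies a default pr_fmt when the including file has not defined one, so defining pr_fmt before the first include prefixes every message in that file with KBUILD_MODNAME. A minimal userspace sketch of the same macro pattern follows; the my_pr_* names and the hard-coded "intelmid" prefix are illustrative stand-ins, not the driver's code.

#include <stdio.h>

/* What the patch adds at the top of each .c file, before any include that
 * uses the logging macros (KBUILD_MODNAME is hard-coded for this demo). */
#define pr_fmt(fmt) "intelmid" ": " fmt

/* Header-side logic, mirroring printk.h: only supply a default pr_fmt if
 * the including file did not define one, then build the helpers on it. */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
#define my_pr_debug(fmt, ...) fprintf(stderr, "debug: " pr_fmt(fmt), ##__VA_ARGS__)
#define my_pr_err(fmt, ...)   fprintf(stderr, "err:   " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	my_pr_debug("Trigger Start\n");      /* prints "debug: intelmid: Trigger Start" */
	my_pr_err("cannot register IRQ\n");  /* prints "err:   intelmid: cannot register IRQ" */
	return 0;
}
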
diff --git a/drivers/staging/intel_sst/intelmid.h b/drivers/staging/intel_sst/intelmid.h
index 81e744816765..0ce103185848 100644
--- a/drivers/staging/intel_sst/intelmid.h
+++ b/drivers/staging/intel_sst/intelmid.h
@@ -178,9 +178,4 @@ extern struct snd_kcontrol_new snd_intelmad_controls_mrst[];
extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
extern struct snd_pmic_ops *intelmad_vendor_ops[];
-/* This is an enabler hook as the platform detection logic isn't yet
- present and depends on some firmware and DMI support to detect AAVA
- devices. It will vanish once the AAVA platform support is merged */
-#define is_aava() 0
-
#endif /* __INTELMID_H */
diff --git a/drivers/staging/intel_sst/intelmid_ctrl.c b/drivers/staging/intel_sst/intelmid_ctrl.c
index 03b4ece02f91..69af0704ce94 100644
--- a/drivers/staging/intel_sst/intelmid_ctrl.c
+++ b/drivers/staging/intel_sst/intelmid_ctrl.c
@@ -24,6 +24,9 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* ALSA driver handling mixer controls for Intel MAD chipset
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <sound/core.h>
#include <sound/control.h>
#include "jack.h"
@@ -216,7 +219,7 @@ static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
- pr_debug("sst: snd_intelmad_volume_get called\n");
+ pr_debug("snd_intelmad_volume_get called\n");
WARN_ON(!uval);
WARN_ON(!kcontrol);
@@ -273,7 +276,7 @@ static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
- pr_debug("sst: Mute_get called\n");
+ pr_debug("Mute_get called\n");
WARN_ON(!uval);
WARN_ON(!kcontrol);
@@ -332,7 +335,7 @@ static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
- pr_debug("sst: volume set called:%ld %ld\n",
+ pr_debug("volume set called:%ld %ld\n",
uval->value.integer.value[0],
uval->value.integer.value[1]);
@@ -387,7 +390,7 @@ static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
- pr_debug("sst: snd_intelmad_mute_set called\n");
+ pr_debug("snd_intelmad_mute_set called\n");
WARN_ON(!uval);
WARN_ON(!kcontrol);
@@ -455,7 +458,7 @@ static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
{
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
- pr_debug("sst: device_get called\n");
+ pr_debug("device_get called\n");
WARN_ON(!uval);
WARN_ON(!kcontrol);
@@ -491,8 +494,9 @@ static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
struct snd_intelmad *intelmaddata;
struct snd_pmic_ops *scard_ops;
int ret_val = 0, vendor, status;
+ struct intel_sst_pcm_control *pcm_control;
- pr_debug("sst: snd_intelmad_device_set called\n");
+ pr_debug("snd_intelmad_device_set called\n");
WARN_ON(!uval);
WARN_ON(!kcontrol);
@@ -518,15 +522,13 @@ static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
case INPUT_SEL:
vendor = intelmaddata->sstdrv_ops->vendor_id;
if ((vendor == SND_MX) || (vendor == SND_FS)) {
- if (uval->value.enumerated.item[0] == HS_MIC) {
+ pcm_control = intelmaddata->sstdrv_ops->pcm_control;
+ if (uval->value.enumerated.item[0] == HS_MIC)
status = 1;
- intelmaddata->sstdrv_ops->
- control_set(SST_ENABLE_RX_TIME_SLOT, &status);
- } else {
+ else
status = 0;
- intelmaddata->sstdrv_ops->
- control_set(SST_ENABLE_RX_TIME_SLOT, &status);
- }
+ pcm_control->device_control(
+ SST_ENABLE_RX_TIME_SLOT, &status);
}
ret_val = scard_ops->set_input_dev(
uval->value.enumerated.item[0]);
diff --git a/drivers/staging/intel_sst/intelmid_msic_control.c b/drivers/staging/intel_sst/intelmid_msic_control.c
index 4d1755efceef..da093ed0cd88 100644
--- a/drivers/staging/intel_sst/intelmid_msic_control.c
+++ b/drivers/staging/intel_sst/intelmid_msic_control.c
@@ -24,6 +24,8 @@
* This file contains the control operations of msic vendors
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/file.h>
#include "intel_sst.h"
@@ -83,7 +85,7 @@ static int msic_init_card(void)
snd_msic_ops.cap_on = 0;
snd_msic_ops.input_dev_id = DMIC; /*def dev*/
snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
- pr_debug("sst: msic init complete!!\n");
+ pr_debug("msic init complete!!\n");
return 0;
}
@@ -173,7 +175,7 @@ static int msic_power_up_pb(unsigned int device)
return retval;
}
- pr_debug("sst: powering up pb.... Device %d\n", device);
+ pr_debug("powering up pb.... Device %d\n", device);
sst_sc_reg_access(sc_access1, PMIC_WRITE, 4);
switch (device) {
case SND_SST_DEVICE_HEADSET:
@@ -205,7 +207,7 @@ static int msic_power_up_pb(unsigned int device)
break;
default:
- pr_warn("sst: Wrong Device %d, selected %d\n",
+ pr_warn("Wrong Device %d, selected %d\n",
device, snd_msic_ops.output_dev_id);
}
return sst_sc_reg_access(sc_access_pcm2, PMIC_READ_MODIFY, 1);
@@ -268,7 +270,7 @@ static int msic_power_up_cp(unsigned int device)
return retval;
}
- pr_debug("sst: powering up cp....%d\n", snd_msic_ops.input_dev_id);
+ pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
sst_sc_reg_access(sc_access2, PMIC_READ_MODIFY, 1);
snd_msic_ops.cap_on = 1;
if (snd_msic_ops.input_dev_id == AMIC)
@@ -283,7 +285,7 @@ static int msic_power_down(void)
{
int retval = 0;
- pr_debug("sst: powering dn msic\n");
+ pr_debug("powering dn msic\n");
snd_msic_ops.pb_on = 0;
snd_msic_ops.cap_on = 0;
return retval;
@@ -293,7 +295,7 @@ static int msic_power_down_pb(void)
{
int retval = 0;
- pr_debug("sst: powering dn pb....\n");
+ pr_debug("powering dn pb....\n");
snd_msic_ops.pb_on = 0;
return retval;
}
@@ -302,7 +304,7 @@ static int msic_power_down_cp(void)
{
int retval = 0;
- pr_debug("sst: powering dn cp....\n");
+ pr_debug("powering dn cp....\n");
snd_msic_ops.cap_on = 0;
return retval;
}
@@ -311,7 +313,7 @@ static int msic_set_selected_output_dev(u8 value)
{
int retval = 0;
- pr_debug("sst: msic set selected output:%d\n", value);
+ pr_debug("msic set selected output:%d\n", value);
snd_msic_ops.output_dev_id = value;
if (snd_msic_ops.pb_on)
msic_power_up_pb(SND_SST_DEVICE_HEADSET);
@@ -330,15 +332,15 @@ static int msic_set_selected_input_dev(u8 value)
};
int retval = 0;
- pr_debug("sst: msic_set_selected_input_dev:%d\n", value);
+ pr_debug("msic_set_selected_input_dev:%d\n", value);
snd_msic_ops.input_dev_id = value;
switch (value) {
case AMIC:
- pr_debug("sst: Selecting AMIC1\n");
+ pr_debug("Selecting AMIC1\n");
retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
break;
case DMIC:
- pr_debug("sst: Selecting DMIC1\n");
+ pr_debug("Selecting DMIC1\n");
retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
break;
default:
diff --git a/drivers/staging/intel_sst/intelmid_pvt.c b/drivers/staging/intel_sst/intelmid_pvt.c
index 9ed9475ccc7b..3ba9daf67526 100644
--- a/drivers/staging/intel_sst/intelmid_pvt.c
+++ b/drivers/staging/intel_sst/intelmid_pvt.c
@@ -23,6 +23,9 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* ALSA driver for Intel MID sound card chipset - holding private functions
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/io.h>
#include <asm/intel_scu_ipc.h>
#include <sound/core.h>
@@ -50,7 +53,7 @@ void period_elapsed(void *mad_substream)
if (stream->stream_status != RUNNING)
return;
- pr_debug("sst: calling period elapsed\n");
+ pr_debug("calling period elapsed\n");
snd_pcm_period_elapsed(substream);
return;
}
@@ -76,8 +79,8 @@ int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
param.uc.pcm_params.period_count = substream->runtime->period_size;
param.uc.pcm_params.ring_buffer_addr =
virt_to_phys(substream->runtime->dma_area);
- pr_debug("sst: period_cnt = %d\n", param.uc.pcm_params.period_count);
- pr_debug("sst: sfreq= %d, wd_sz = %d\n",
+ pr_debug("period_cnt = %d\n", param.uc.pcm_params.period_count);
+ pr_debug("sfreq= %d, wd_sz = %d\n",
param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
str_params.sparams = param;
@@ -85,24 +88,22 @@ int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
str_params.ops = STREAM_OPS_PLAYBACK;
- pr_debug("sst: Playbck stream,Device %d\n", stream->device);
+ pr_debug("Playbck stream,Device %d\n", stream->device);
} else {
str_params.ops = STREAM_OPS_CAPTURE;
stream->device = SND_SST_DEVICE_CAPTURE;
- pr_debug("sst: Capture stream,Device %d\n", stream->device);
+ pr_debug("Capture stream,Device %d\n", stream->device);
}
str_params.device_type = stream->device;
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_ALLOC,
- &str_params);
- pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n",
- ret_val);
+ ret_val = intelmaddata->sstdrv_ops->pcm_control->open(&str_params);
+	pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
if (ret_val < 0)
return ret_val;
stream->stream_info.str_id = ret_val;
stream->stream_status = INIT;
stream->stream_info.buffer_ptr = 0;
- pr_debug("sst: str id : %d\n", stream->stream_info.str_id);
+ pr_debug("str id : %d\n", stream->stream_info.str_id);
return ret_val;
}
@@ -113,15 +114,15 @@ int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
int ret_val;
- pr_debug("sst: setting buffer ptr param\n");
+ pr_debug("setting buffer ptr param\n");
stream->stream_info.period_elapsed = period_elapsed;
stream->stream_info.mad_substream = substream;
stream->stream_info.buffer_ptr = 0;
stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_STREAM_INIT,
- &stream->stream_info);
+ ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+ SST_SND_STREAM_INIT, &stream->stream_info);
if (ret_val)
- pr_err("sst: control_set ret error %d\n", ret_val);
+		pr_err("device_control ret error %d\n", ret_val);
return ret_val;
}
@@ -145,30 +146,29 @@ int sst_sc_reg_access(struct sc_reg_access *sc_access,
for (i = 0; i < num_val; i++) {
retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
sc_access[i].value);
- if (retval) {
- pr_err("sst: IPC write failed!!! %d\n", retval);
- return retval;
- }
+ if (retval)
+ goto err;
}
} else if (type == PMIC_READ) {
for (i = 0; i < num_val; i++) {
retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
&(sc_access[i].value));
- if (retval) {
- pr_err("sst: IPC read failed!!!!!%d\n", retval);
- return retval;
- }
+ if (retval)
+ goto err;
}
} else {
for (i = 0; i < num_val; i++) {
retval = intel_scu_ipc_update_register(
sc_access[i].reg_addr, sc_access[i].value,
sc_access[i].mask);
- if (retval) {
- pr_err("sst: IPC Modify failed!!!%d\n", retval);
- return retval;
- }
+ if (retval)
+ goto err;
}
}
- return retval;
+ return 0;
+err:
+	pr_err("IPC failed for cmd %d, error %d\n", type, retval);
+	pr_err("reg addr:0x%02x value:0x%02x\n",
+		sc_access[i].reg_addr, sc_access[i].value);
+ return retval;
}
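
Besides the logging cleanup, the intelmid.c and intelmid_pvt.c hunks above move the ALSA glue from the old sstdrv_ops->control_set(SST_SND_*, ...) entry point to the pcm_control ops table with open(), close() and device_control() hooks. The sketch below shows that function-pointer indirection in isolation; the pcm_control_sketch type, demo_* callbacks and command values are illustrative stand-ins, not the layout of the real intel_sst_pcm_control from intel_sst.h.

#include <stdio.h>

/* Simplified stand-in for the SST ops table the ALSA glue now calls through. */
enum { SKETCH_SND_START, SKETCH_SND_DROP };

struct pcm_control_sketch {
	int (*open)(const char *params);            /* allocates a stream, returns its id */
	int (*close)(int str_id);                   /* frees the stream */
	int (*device_control)(int cmd, void *arg);  /* start/drop/pause/resume, pointer query */
};

static int demo_open(const char *params)
{
	printf("open(%s) -> stream 1\n", params);
	return 1;
}

static int demo_close(int str_id)
{
	printf("close(%d)\n", str_id);
	return 0;
}

static int demo_control(int cmd, void *arg)
{
	printf("device_control(cmd %d, stream %d)\n", cmd, *(int *)arg);
	return 0;
}

int main(void)
{
	struct pcm_control_sketch ops = { demo_open, demo_close, demo_control };
	int str_id = ops.open("48kHz/16bit playback");

	/* What used to be control_set(SST_SND_START, &str_id) and friends. */
	ops.device_control(SKETCH_SND_START, &str_id);
	ops.device_control(SKETCH_SND_DROP, &str_id);
	ops.close(str_id);
	return 0;
}
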
diff --git a/drivers/staging/intel_sst/intelmid_v0_control.c b/drivers/staging/intel_sst/intelmid_v0_control.c
index f586d62ac9af..7859225e3d60 100644
--- a/drivers/staging/intel_sst/intelmid_v0_control.c
+++ b/drivers/staging/intel_sst/intelmid_v0_control.c
@@ -26,6 +26,8 @@
* This file contains the control operations of vendor 1
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/file.h>
#include "intel_sst.h"
@@ -151,7 +153,7 @@ static int fs_power_up_pb(unsigned int port)
if (retval)
return retval;
- pr_debug("sst: in fs power up pb\n");
+ pr_debug("in fs power up pb\n");
return fs_enable_audiodac(UNMUTE);
}
@@ -173,7 +175,7 @@ static int fs_power_down_pb(void)
if (retval)
return retval;
- pr_debug("sst: in fsl power down pb\n");
+ pr_debug("in fsl power down pb\n");
return fs_enable_audiodac(UNMUTE);
}
@@ -380,7 +382,7 @@ static int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
}
- pr_debug("sst: sfreq:%d,Register value = %x\n", sfreq, config1);
+ pr_debug("sfreq:%d,Register value = %x\n", sfreq, config1);
if (word_size == 24) {
sc_access[0].reg_addr = AUDIOPORT1;
@@ -438,18 +440,18 @@ static int fs_set_selected_input_dev(u8 value)
switch (value) {
case AMIC:
- pr_debug("sst: Selecting amic not supported in mono cfg\n");
+ pr_debug("Selecting amic not supported in mono cfg\n");
return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
break;
case HS_MIC:
- pr_debug("sst: Selecting hsmic\n");
+ pr_debug("Selecting hsmic\n");
return sst_sc_reg_access(sc_access_hsmic,
PMIC_READ_MODIFY, 2);
break;
case DMIC:
- pr_debug("sst: Selecting dmic\n");
+ pr_debug("Selecting dmic\n");
return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
break;
@@ -505,7 +507,7 @@ static int fs_set_mute(int dev_id, u8 value)
return retval;
- pr_debug("sst: dev_id:0x%x value:0x%x\n", dev_id, value);
+ pr_debug("dev_id:0x%x value:0x%x\n", dev_id, value);
switch (dev_id) {
case PMIC_SND_DMIC_MUTE:
sc_access[0].reg_addr = MICCTRL;
@@ -606,7 +608,7 @@ static int fs_set_vol(int dev_id, int value)
switch (dev_id) {
case PMIC_SND_LEFT_PB_VOL:
- pr_debug("sst: PMIC_SND_LEFT_PB_VOL:%d\n", value);
+ pr_debug("PMIC_SND_LEFT_PB_VOL:%d\n", value);
sc_access[0].value = sc_access[1].value = value;
sc_access[0].reg_addr = AUD16;
sc_access[1].reg_addr = AUD15;
@@ -616,7 +618,7 @@ static int fs_set_vol(int dev_id, int value)
break;
case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("sst: PMIC_SND_RIGHT_PB_VOL:%d\n", value);
+ pr_debug("PMIC_SND_RIGHT_PB_VOL:%d\n", value);
sc_access[0].value = sc_access[1].value = value;
sc_access[0].reg_addr = AUD17;
sc_access[1].reg_addr = AUD15;
@@ -629,7 +631,7 @@ static int fs_set_vol(int dev_id, int value)
reg_num = 2;
break;
case PMIC_SND_CAPTURE_VOL:
- pr_debug("sst: PMIC_SND_CAPTURE_VOL:%d\n", value);
+ pr_debug("PMIC_SND_CAPTURE_VOL:%d\n", value);
sc_access[0].reg_addr = MICLICTRL1;
sc_access[1].reg_addr = MICLICTRL2;
sc_access[2].reg_addr = DMICCTRL1;
@@ -726,17 +728,17 @@ static int fs_get_vol(int dev_id, int *value)
switch (dev_id) {
case PMIC_SND_CAPTURE_VOL:
- pr_debug("sst: PMIC_SND_CAPTURE_VOL\n");
+ pr_debug("PMIC_SND_CAPTURE_VOL\n");
sc_access.reg_addr = MICLICTRL1;
mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
break;
case PMIC_SND_LEFT_PB_VOL:
- pr_debug("sst: PMIC_SND_LEFT_PB_VOL\n");
+ pr_debug("PMIC_SND_LEFT_PB_VOL\n");
sc_access.reg_addr = AUD16;
mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
break;
case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("sst: PMIC_SND_RT_PB_VOL\n");
+ pr_debug("PMIC_SND_RT_PB_VOL\n");
sc_access.reg_addr = AUD17;
mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
break;
@@ -745,9 +747,9 @@ static int fs_get_vol(int dev_id, int *value)
}
retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("sst: value read = 0x%x\n", sc_access.value);
+ pr_debug("value read = 0x%x\n", sc_access.value);
*value = (int) (sc_access.value & mask);
- pr_debug("sst: value returned = 0x%x\n", *value);
+ pr_debug("value returned = 0x%x\n", *value);
return retval;
}
diff --git a/drivers/staging/intel_sst/intelmid_v1_control.c b/drivers/staging/intel_sst/intelmid_v1_control.c
index 9de86b2f6b08..478cfecdefd7 100644
--- a/drivers/staging/intel_sst/intelmid_v1_control.c
+++ b/drivers/staging/intel_sst/intelmid_v1_control.c
@@ -25,6 +25,8 @@
* This file contains the control operations of vendor 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/file.h>
#include <asm/mrst.h>
@@ -132,56 +134,6 @@ static int mx_init_card(void)
return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
}
-static int mx_init_capture_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x206, 0x5a, 0x0},
- {0x207, 0xbe, 0x0},
- {0x208, 0x90, 0x0},
- {0x209, 0x32, 0x0},
- {0x20e, 0x22, 0x0},
- {0x210, 0x84, 0x0},
- {0x223, 0x20, 0x0},
- {0x226, 0xC0, 0x0},
- };
-
- int retval = 0;
-
- retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 8);
- if (0 != retval) {
- /* pmic communication fails */
- pr_debug("sst: pmic commn failed\n");
- return retval;
- }
-
- pr_debug("sst: Capture configuration complete!!\n");
- return 0;
-}
-
-static int mx_init_playback_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x206, 0x00, 0x0},
- {0x207, 0x00, 0x0},
- {0x208, 0x00, 0x0},
- {0x209, 0x51, 0x0},
- {0x20e, 0x51, 0x0},
- {0x210, 0x21, 0x0},
- {0x223, 0x01, 0x0},
- };
- int retval = 0;
-
- retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 9);
- if (0 != retval) {
- /* pmic communication fails */
- pr_debug("sst: pmic commn failed\n");
- return retval;
- }
-
- pr_debug("sst: Playback configuration complete!!\n");
- return 0;
-}
-
static int mx_enable_audiodac(int value)
{
struct sc_reg_access sc_access[3];
@@ -204,7 +156,7 @@ static int mx_enable_audiodac(int value)
retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
if (retval)
return retval;
- pr_debug("sst: mute status = %d", snd_pmic_ops_mx.mute_status);
+ pr_debug("mute status = %d\n", snd_pmic_ops_mx.mute_status);
if (snd_pmic_ops_mx.mute_status == MUTE ||
snd_pmic_ops_mx.master_mute == MUTE)
return retval;
@@ -412,7 +364,7 @@ static int mx_set_pcm_voice_params(void)
if (retval)
return retval;
}
- pr_debug("sst: SST DBG mx_set_pcm_voice_params called\n");
+	pr_debug("SST DBG: mx_set_pcm_voice_params called\n");
return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
}
@@ -529,7 +481,7 @@ static int mx_set_selected_output_dev(u8 dev_id)
return retval;
}
- pr_debug("sst: mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
+ pr_debug("mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
snd_pmic_ops_mx.output_dev_id = dev_id;
switch (dev_id) {
case STEREO_HEADPHONE:
@@ -549,7 +501,7 @@ static int mx_set_selected_output_dev(u8 dev_id)
num_reg = 1;
break;
case RECEIVER:
- pr_debug("sst: RECEIVER Koski selected\n");
+ pr_debug("RECEIVER Koski selected\n");
/* configuration - AS enable, receiver enable */
sc_access[0].reg_addr = 0xFF;
@@ -559,7 +511,7 @@ static int mx_set_selected_output_dev(u8 dev_id)
num_reg = 1;
break;
default:
- pr_err("sst: Not a valid output dev\n");
+ pr_err("Not a valid output dev\n");
return 0;
}
return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
@@ -598,7 +550,7 @@ static int mx_set_selected_input_dev(u8 dev_id)
return retval;
}
snd_pmic_ops_mx.input_dev_id = dev_id;
- pr_debug("sst: mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
+ pr_debug("mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
switch (dev_id) {
case AMIC:
@@ -646,7 +598,7 @@ static int mx_set_mute(int dev_id, u8 value)
}
- pr_debug("sst: set_mute dev_id:0x%x , value:%d\n", dev_id, value);
+	pr_debug("set_mute dev_id:0x%x, value:%d\n", dev_id, value);
switch (dev_id) {
case PMIC_SND_DMIC_MUTE:
@@ -760,7 +712,7 @@ static int mx_set_vol(int dev_id, int value)
if (retval)
return retval;
}
- pr_debug("sst: set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
+	pr_debug("set_vol dev_id:0x%x, value:%d\n", dev_id, value);
switch (dev_id) {
case PMIC_SND_RECEIVER_VOL:
return 0;
@@ -875,7 +827,7 @@ static int mx_get_vol(int dev_id, int *value)
if (retval)
return retval;
*value = -(sc_access.value & mask);
- pr_debug("sst: get volume value extracted %d\n", *value);
+ pr_debug("get volume value extracted %d\n", *value);
return retval;
}
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index 3a7de769842a..e2f6d6a3c850 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -26,6 +26,8 @@
* This file contains the control operations of vendor 3
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/pci.h>
#include <linux/file.h>
#include "intel_sst.h"
@@ -120,7 +122,7 @@ static int nc_init_card(void)
snd_pmic_ops_nc.master_mute = UNMUTE;
snd_pmic_ops_nc.mute_status = UNMUTE;
sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
- pr_debug("sst: init complete!!\n");
+ pr_debug("init complete!!\n");
return 0;
}
@@ -169,7 +171,7 @@ static int nc_power_up_pb(unsigned int port)
nc_enable_audiodac(MUTE);
msleep(30);
- pr_debug("sst: powering up pb....\n");
+ pr_debug("powering up pb....\n");
sc_access[0].reg_addr = VAUDIOCNT;
sc_access[0].value = 0x27;
@@ -222,7 +224,7 @@ static int nc_power_up_cp(unsigned int port)
return retval;
- pr_debug("sst: powering up cp....\n");
+ pr_debug("powering up cp....\n");
if (port == 0xFF)
return 0;
@@ -275,7 +277,7 @@ static int nc_power_down(void)
nc_enable_audiodac(MUTE);
- pr_debug("sst: powering dn nc_power_down ....\n");
+ pr_debug("powering dn nc_power_down ....\n");
msleep(30);
@@ -324,7 +326,7 @@ static int nc_power_down_pb(void)
if (retval)
return retval;
- pr_debug("sst: powering dn pb....\n");
+ pr_debug("powering dn pb....\n");
nc_enable_audiodac(MUTE);
@@ -370,7 +372,7 @@ static int nc_power_down_cp(void)
if (retval)
return retval;
- pr_debug("sst: powering dn cp....\n");
+ pr_debug("powering dn cp....\n");
return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
}
@@ -400,7 +402,7 @@ static int nc_set_pcm_voice_params(void)
return retval;
sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
- pr_debug("sst: Voice parameters set successfully!!\n");
+ pr_debug("Voice parameters set successfully!!\n");
return 0;
}
@@ -451,20 +453,20 @@ static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
sc_access.value = 0x07;
sc_access.reg_addr = RMUTE;
- pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_access.value);
+		pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
sc_access.mask = MASK2;
sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
} else {
sc_access.value = 0x00;
sc_access.reg_addr = RMUTE;
- pr_debug("sst: RIGHT_HP_MUTE value %d\n", sc_access.value);
+ pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
sc_access.mask = MASK2;
sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
}
- pr_debug("sst: word_size = %d\n", word_size);
+ pr_debug("word_size = %d\n", word_size);
if (word_size == 24) {
sc_access.reg_addr = AUDIOPORT2;
@@ -477,7 +479,7 @@ static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
}
sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
- pr_debug("sst: word_size = %d\n", word_size);
+ pr_debug("word_size = %d\n", word_size);
sc_access.reg_addr = AUDIOPORT1;
sc_access.mask = MASK5|MASK4|MASK1|MASK0;
if (word_size == 16)
@@ -508,7 +510,7 @@ static int nc_set_selected_output_dev(u8 value)
retval = nc_init_card();
if (retval)
return retval;
- pr_debug("sst: nc set selected output:%d\n", value);
+ pr_debug("nc set selected output:%d\n", value);
switch (value) {
case STEREO_HEADPHONE:
retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
@@ -517,7 +519,7 @@ static int nc_set_selected_output_dev(u8 value)
retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 2);
break;
default:
- pr_err("sst: rcvd illegal request: %d\n", value);
+ pr_err("rcvd illegal request: %d\n", value);
return -EINVAL;
}
return retval;
@@ -541,7 +543,7 @@ static int nc_audio_init(void)
};
sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
- pr_debug("sst: Audio Init successfully!!\n");
+ pr_debug("Audio Init successfully!!\n");
/*set output device */
nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
@@ -549,13 +551,13 @@ static int nc_audio_init(void)
if (snd_pmic_ops_nc.num_channel == 1) {
sc_acces.value = 0x07;
sc_acces.reg_addr = RMUTE;
- pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
+		pr_debug("RIGHT_HP_MUTE value %d\n", sc_acces.value);
sc_acces.mask = MASK2;
sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
} else {
sc_acces.value = 0x00;
sc_acces.reg_addr = RMUTE;
- pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
+		pr_debug("RIGHT_HP_MUTE value %d\n", sc_acces.value);
sc_acces.mask = MASK2;
sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
}
@@ -629,11 +631,11 @@ static int nc_set_mute(int dev_id, u8 value)
if (retval)
return retval;
- pr_debug("sst: set device id::%d, value %d\n", dev_id, value);
+ pr_debug("set device id::%d, value %d\n", dev_id, value);
switch (dev_id) {
case PMIC_SND_MUTE_ALL:
- pr_debug("sst: PMIC_SND_MUTE_ALL value %d\n", value);
+ pr_debug("PMIC_SND_MUTE_ALL value %d\n", value);
snd_pmic_ops_nc.mute_status = value;
snd_pmic_ops_nc.master_mute = value;
if (value == UNMUTE) {
@@ -669,7 +671,7 @@ static int nc_set_mute(int dev_id, u8 value)
}
break;
case PMIC_SND_HP_MIC_MUTE:
- pr_debug("sst: PMIC_SND_HPMIC_MUTE value %d\n", value);
+ pr_debug("PMIC_SND_HPMIC_MUTE value %d\n", value);
if (value == UNMUTE) {
/* unmute the system, set the 6th bit to one */
sc_access[0].value = 0x00;
@@ -682,7 +684,7 @@ static int nc_set_mute(int dev_id, u8 value)
retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
break;
case PMIC_SND_AMIC_MUTE:
- pr_debug("sst: PMIC_SND_AMIC_MUTE value %d\n", value);
+ pr_debug("PMIC_SND_AMIC_MUTE value %d\n", value);
if (value == UNMUTE) {
/* unmute the system, set the 6th bit to one */
sc_access[0].value = 0x00;
@@ -696,7 +698,7 @@ static int nc_set_mute(int dev_id, u8 value)
break;
case PMIC_SND_DMIC_MUTE:
- pr_debug("sst: INPUT_MUTE_DMIC value%d\n", value);
+		pr_debug("INPUT_MUTE_DMIC value %d\n", value);
if (value == UNMUTE) {
/* unmute the system, set the 6th bit to one */
sc_access[1].value = 0x00;
@@ -724,13 +726,13 @@ static int nc_set_mute(int dev_id, u8 value)
if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
sc_access[0].reg_addr = LMUTE;
- pr_debug("sst: LEFT_HP_MUTE value %d\n",
+ pr_debug("LEFT_HP_MUTE value %d\n",
sc_access[0].value);
} else {
if (snd_pmic_ops_nc.num_channel == 1)
sc_access[0].value = 0x04;
sc_access[0].reg_addr = RMUTE;
- pr_debug("sst: RIGHT_HP_MUTE value %d\n",
+ pr_debug("RIGHT_HP_MUTE value %d\n",
sc_access[0].value);
}
sc_access[0].mask = MASK2;
@@ -743,7 +745,7 @@ static int nc_set_mute(int dev_id, u8 value)
else
sc_access[0].value = 0x03;
sc_access[0].reg_addr = LMUTE;
- pr_debug("sst: SPEAKER_MUTE %d\n", sc_access[0].value);
+ pr_debug("SPEAKER_MUTE %d\n", sc_access[0].value);
sc_access[0].mask = MASK1;
retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
break;
@@ -764,10 +766,10 @@ static int nc_set_vol(int dev_id, int value)
if (retval)
return retval;
- pr_debug("sst: set volume:%d\n", dev_id);
+ pr_debug("set volume:%d\n", dev_id);
switch (dev_id) {
case PMIC_SND_CAPTURE_VOL:
- pr_debug("sst: PMIC_SND_CAPTURE_VOL:value::%d\n", value);
+ pr_debug("PMIC_SND_CAPTURE_VOL:value::%d\n", value);
sc_access[0].value = sc_access[1].value =
sc_access[2].value = -value;
sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
@@ -779,7 +781,7 @@ static int nc_set_vol(int dev_id, int value)
break;
case PMIC_SND_LEFT_PB_VOL:
- pr_debug("sst: PMIC_SND_LEFT_HP_VOL %d\n", value);
+ pr_debug("PMIC_SND_LEFT_HP_VOL %d\n", value);
sc_access[0].value = -value;
sc_access[0].reg_addr = AUDIOLVOL;
sc_access[0].mask =
@@ -788,7 +790,7 @@ static int nc_set_vol(int dev_id, int value)
break;
case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("sst: PMIC_SND_RIGHT_HP_VOL value %d\n", value);
+ pr_debug("PMIC_SND_RIGHT_HP_VOL value %d\n", value);
if (snd_pmic_ops_nc.num_channel == 1) {
sc_access[0].value = 0x04;
sc_access[0].reg_addr = RMUTE;
@@ -821,11 +823,11 @@ static int nc_set_selected_input_dev(u8 value)
return retval;
snd_pmic_ops_nc.input_dev_id = value;
- pr_debug("sst: nc set selected input:%d\n", value);
+ pr_debug("nc set selected input:%d\n", value);
switch (value) {
case AMIC:
- pr_debug("sst: Selecting AMIC\n");
+ pr_debug("Selecting AMIC\n");
sc_access[0].reg_addr = 0x107;
sc_access[0].value = 0x40;
sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
@@ -842,7 +844,7 @@ static int nc_set_selected_input_dev(u8 value)
break;
case HS_MIC:
- pr_debug("sst: Selecting HS_MIC\n");
+ pr_debug("Selecting HS_MIC\n");
sc_access[0].reg_addr = 0x107;
sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
sc_access[0].value = 0x10;
@@ -859,7 +861,7 @@ static int nc_set_selected_input_dev(u8 value)
break;
case DMIC:
- pr_debug("sst: DMIC\n");
+ pr_debug("DMIC\n");
sc_access[0].reg_addr = 0x107;
sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
sc_access[0].value = 0x0B;
@@ -871,8 +873,11 @@ static int nc_set_selected_input_dev(u8 value)
sc_access[2].mask = MASK6;
sc_access[3].reg_addr = 0x109;
sc_access[3].mask = MASK6;
- sc_access[3].value = 0x40;
- num_val = 4;
+ sc_access[3].value = 0x00;
+ sc_access[4].reg_addr = 0x104;
+ sc_access[4].value = 0x3C;
+ sc_access[4].mask = 0xff;
+ num_val = 5;
break;
default:
return -EINVAL;
@@ -890,23 +895,23 @@ static int nc_get_mute(int dev_id, u8 *value)
if (retval)
return retval;
- pr_debug("sst: get mute::%d\n", dev_id);
+ pr_debug("get mute::%d\n", dev_id);
switch (dev_id) {
case PMIC_SND_AMIC_MUTE:
- pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC1\n");
+ pr_debug("PMIC_SND_INPUT_MUTE_MIC1\n");
sc_access.reg_addr = LILSEL;
mask = MASK6;
break;
case PMIC_SND_HP_MIC_MUTE:
- pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC2\n");
+ pr_debug("PMIC_SND_INPUT_MUTE_MIC2\n");
sc_access.reg_addr = LIRSEL;
mask = MASK6;
break;
case PMIC_SND_LEFT_HP_MUTE:
case PMIC_SND_RIGHT_HP_MUTE:
mask = MASK2;
- pr_debug("sst: PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
+		pr_debug("PMIC_SND_LEFT/RIGHT_HP_MUTE\n");
if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
sc_access.reg_addr = RMUTE;
else
@@ -914,12 +919,12 @@ static int nc_get_mute(int dev_id, u8 *value)
break;
case PMIC_SND_LEFT_SPEAKER_MUTE:
- pr_debug("sst: PMIC_MONO_EARPIECE_MUTE\n");
+ pr_debug("PMIC_MONO_EARPIECE_MUTE\n");
sc_access.reg_addr = RMUTE;
mask = MASK1;
break;
case PMIC_SND_DMIC_MUTE:
- pr_debug("sst: PMIC_SND_INPUT_MUTE_DMIC\n");
+ pr_debug("PMIC_SND_INPUT_MUTE_DMIC\n");
sc_access.reg_addr = 0x105;
mask = MASK6;
break;
@@ -928,16 +933,16 @@ static int nc_get_mute(int dev_id, u8 *value)
}
retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("sst: reg value = %d\n", sc_access.value);
+ pr_debug("reg value = %d\n", sc_access.value);
if (retval)
return retval;
*value = (sc_access.value) & mask;
- pr_debug("sst: masked value = %d\n", *value);
+ pr_debug("masked value = %d\n", *value);
if (*value)
*value = 0;
else
*value = 1;
- pr_debug("sst: value returned = 0x%x\n", *value);
+ pr_debug("value returned = 0x%x\n", *value);
return retval;
}
@@ -953,19 +958,19 @@ static int nc_get_vol(int dev_id, int *value)
switch (dev_id) {
case PMIC_SND_CAPTURE_VOL:
- pr_debug("sst: PMIC_SND_INPUT_CAPTURE_VOL\n");
+ pr_debug("PMIC_SND_INPUT_CAPTURE_VOL\n");
sc_access.reg_addr = LILSEL;
mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
break;
case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("sst: GET_VOLUME_PMIC_LEFT_HP_VOL\n");
+ pr_debug("GET_VOLUME_PMIC_LEFT_HP_VOL\n");
sc_access.reg_addr = AUDIOLVOL;
mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
break;
case PMIC_SND_LEFT_PB_VOL:
- pr_debug("sst: GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
+ pr_debug("GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
sc_access.reg_addr = AUDIORVOL;
mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
break;
@@ -975,9 +980,9 @@ static int nc_get_vol(int dev_id, int *value)
}
retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("sst: value read = 0x%x\n", sc_access.value);
+ pr_debug("value read = 0x%x\n", sc_access.value);
*value = -((sc_access.value) & mask);
- pr_debug("sst: get vol value returned = %d\n", *value);
+ pr_debug("get vol value returned = %d\n", *value);
return retval;
}
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 978bf87ff13d..515e448852a0 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -11,43 +11,37 @@
#include "transport.h"
#include "init.h"
-BYTE IsSSFDCCompliance;
-BYTE IsXDCompliance;
-extern DWORD MediaChange;
-extern int Check_D_MediaFmt(struct us_data *);
+BYTE IsSSFDCCompliance;
+BYTE IsXDCompliance;
-//----- ENE_InitMedia() ----------------------------------------
+/*
+ * ENE_InitMedia():
+ */
int ENE_InitMedia(struct us_data *us)
{
int result;
BYTE MiscReg03 = 0;
- printk("--- Init Media ---\n");
+ printk(KERN_INFO "--- Init Media ---\n");
result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Read register fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Read register fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- printk("MiscReg03 = %x\n", MiscReg03);
+ printk(KERN_INFO "MiscReg03 = %x\n", MiscReg03);
- if (MiscReg03 & 0x01)
- {
- if (!us->SD_Status.Ready)
- {
+ if (MiscReg03 & 0x01) {
+ if (!us->SD_Status.Ready) {
result = ENE_SDInit(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
}
- if (MiscReg03 & 0x02)
- {
- if (!us->SM_Status.Ready && !us->MS_Status.Ready)
- {
+ if (MiscReg03 & 0x02) {
+ if (!us->SM_Status.Ready && !us->MS_Status.Ready) {
result = ENE_SMInit(us);
- if (result != USB_STOR_XFER_GOOD)
- {
+ if (result != USB_STOR_XFER_GOOD) {
result = ENE_MSInit(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -58,7 +52,9 @@ int ENE_InitMedia(struct us_data *us)
return result;
}
-//----- ENE_Read_BYTE() ----------------------------------------
+/*
+ * ENE_Read_BYTE() :
+ */
int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
@@ -76,19 +72,20 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
return result;
}
-//----- ENE_SDInit() ---------------------
+/*
+ * ENE_SDInit():
+ */
int ENE_SDInit(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
BYTE buf[0x200];
- printk("transport --- ENE_SDInit\n");
- // SD Init Part-1
+ printk(KERN_INFO "transport --- ENE_SDInit\n");
+ /* SD Init Part-1 */
result = ENE_LoadBinCode(us, SD_INIT1_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Load SD Init Code Part-1 Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Load SD Init Code Part-1 Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -98,17 +95,15 @@ int ENE_SDInit(struct us_data *us)
bcb->CDB[0] = 0xF2;
result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Exection SD Init Code Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Exection SD Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- // SD Init Part-2
+ /* SD Init Part-2 */
result = ENE_LoadBinCode(us, SD_INIT2_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Load SD Init Code Part-2 Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Load SD Init Code Part-2 Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -119,45 +114,41 @@ int ENE_SDInit(struct us_data *us)
bcb->CDB[0] = 0xF1;
result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Exection SD Init Code Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Exection SD Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
us->SD_Status = *(PSD_STATUS)&buf[0];
- if (us->SD_Status.Insert && us->SD_Status.Ready)
- {
+ if (us->SD_Status.Insert && us->SD_Status.Ready) {
ENE_ReadSDReg(us, (PBYTE)&buf);
- printk("Insert = %x\n", us->SD_Status.Insert);
- printk("Ready = %x\n", us->SD_Status.Ready);
- printk("IsMMC = %x\n", us->SD_Status.IsMMC);
- printk("HiCapacity = %x\n", us->SD_Status.HiCapacity);
- printk("HiSpeed = %x\n", us->SD_Status.HiSpeed);
- printk("WtP = %x\n", us->SD_Status.WtP);
- }
- else
- {
- printk("SD Card Not Ready --- %x\n", buf[0]);
+ printk(KERN_INFO "Insert = %x\n", us->SD_Status.Insert);
+ printk(KERN_INFO "Ready = %x\n", us->SD_Status.Ready);
+ printk(KERN_INFO "IsMMC = %x\n", us->SD_Status.IsMMC);
+ printk(KERN_INFO "HiCapacity = %x\n", us->SD_Status.HiCapacity);
+ printk(KERN_INFO "HiSpeed = %x\n", us->SD_Status.HiSpeed);
+ printk(KERN_INFO "WtP = %x\n", us->SD_Status.WtP);
+ } else {
+ printk(KERN_ERR "SD Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_MSInit() ----------------------------------------
+/*
+ * ENE_MSInit():
+ */
int ENE_MSInit(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
BYTE buf[0x200];
WORD MSP_BlockSize, MSP_UserAreaBlocks;
-
- printk("transport --- ENE_MSInit\n");
+ printk(KERN_INFO "transport --- ENE_MSInit\n");
result = ENE_LoadBinCode(us, MS_INIT_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Load MS Init Code Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Load MS Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -169,53 +160,49 @@ int ENE_MSInit(struct us_data *us)
bcb->CDB[1] = 0x01;
result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Exection MS Init Code Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Exection MS Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
us->MS_Status = *(PMS_STATUS)&buf[0];
- if (us->MS_Status.Insert && us->MS_Status.Ready)
- {
- printk("Insert = %x\n", us->MS_Status.Insert);
- printk("Ready = %x\n", us->MS_Status.Ready);
- printk("IsMSPro = %x\n", us->MS_Status.IsMSPro);
- printk("IsMSPHG = %x\n", us->MS_Status.IsMSPHG);
- printk("WtP = %x\n", us->MS_Status.WtP);
- if (us->MS_Status.IsMSPro)
- {
- MSP_BlockSize = (buf[6] <<8) | buf[7];
- MSP_UserAreaBlocks = (buf[10]<<8) | buf[11];
+ if (us->MS_Status.Insert && us->MS_Status.Ready) {
+ printk(KERN_INFO "Insert = %x\n", us->MS_Status.Insert);
+ printk(KERN_INFO "Ready = %x\n", us->MS_Status.Ready);
+ printk(KERN_INFO "IsMSPro = %x\n", us->MS_Status.IsMSPro);
+ printk(KERN_INFO "IsMSPHG = %x\n", us->MS_Status.IsMSPHG);
+ printk(KERN_INFO "WtP = %x\n", us->MS_Status.WtP);
+ if (us->MS_Status.IsMSPro) {
+ MSP_BlockSize = (buf[6] << 8) | buf[7];
+ MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
us->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
- }
- else
+ } else {
MS_CardInit(us);
- printk("MS Init Code OK !!\n");
- }
- else
- {
- printk("MS Card Not Ready --- %x\n", buf[0]);
+ }
+ printk(KERN_INFO "MS Init Code OK !!\n");
+ } else {
+ printk(KERN_INFO "MS Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_SMInit() ----------------------------------------
+/*
+ * ENE_SMInit()
+ */
int ENE_SMInit(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
BYTE buf[0x200];
- printk("transport --- ENE_SMInit\n");
+ printk(KERN_INFO "transport --- ENE_SMInit\n");
result = ENE_LoadBinCode(us, SM_INIT_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Load SM Init Code Fail !!\n");
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_INFO "Load SM Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -227,9 +214,9 @@ int ENE_SMInit(struct us_data *us)
bcb->CDB[1] = 0x01;
result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("Exection SM Init Code Fail !! result = %x\n", result);
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR
+ "Exection SM Init Code Fail !! result = %x\n", result);
return USB_STOR_TRANSPORT_ERROR;
}
@@ -238,32 +225,31 @@ int ENE_SMInit(struct us_data *us)
us->SM_DeviceID = buf[1];
us->SM_CardID = buf[2];
- if (us->SM_Status.Insert && us->SM_Status.Ready)
- {
- printk("Insert = %x\n", us->SM_Status.Insert);
- printk("Ready = %x\n", us->SM_Status.Ready);
- printk("WtP = %x\n", us->SM_Status.WtP);
- printk("DeviceID = %x\n", us->SM_DeviceID);
- printk("CardID = %x\n", us->SM_CardID);
+ if (us->SM_Status.Insert && us->SM_Status.Ready) {
+ printk(KERN_INFO "Insert = %x\n", us->SM_Status.Insert);
+ printk(KERN_INFO "Ready = %x\n", us->SM_Status.Ready);
+ printk(KERN_INFO "WtP = %x\n", us->SM_Status.WtP);
+ printk(KERN_INFO "DeviceID = %x\n", us->SM_DeviceID);
+ printk(KERN_INFO "CardID = %x\n", us->SM_CardID);
MediaChange = 1;
Check_D_MediaFmt(us);
- }
- else
- {
- printk("SM Card Not Ready --- %x\n", buf[0]);
+ } else {
+ printk(KERN_ERR "SM Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_ReadSDReg() ----------------------------------------------
+/*
+ * ENE_ReadSDReg()
+ */
int ENE_ReadSDReg(struct us_data *us, u8 *RdBuf)
{
WORD tmpreg;
DWORD reg4b;
-
- //printk("transport --- ENE_ReadSDReg\n");
+
+ /* printk(KERN_INFO "transport --- ENE_ReadSDReg\n"); */
reg4b = *(PDWORD)&RdBuf[0x18];
us->SD_READ_BL_LEN = (BYTE)((reg4b >> 8) & 0x0f);
@@ -277,74 +263,75 @@ int ENE_ReadSDReg(struct us_data *us, u8 *RdBuf)
if (us->SD_Status.HiCapacity && us->SD_Status.IsMMC)
us->HC_C_SIZE = *(PDWORD)(&RdBuf[0x100]);
- if (us->SD_READ_BL_LEN > SD_BLOCK_LEN)
- {
- us->SD_Block_Mult = 1 << (us->SD_READ_BL_LEN - SD_BLOCK_LEN); us->SD_READ_BL_LEN = SD_BLOCK_LEN;
- }
- else
- { us->SD_Block_Mult = 1;
+ if (us->SD_READ_BL_LEN > SD_BLOCK_LEN) {
+ us->SD_Block_Mult =
+ 1 << (us->SD_READ_BL_LEN - SD_BLOCK_LEN);
+ us->SD_READ_BL_LEN = SD_BLOCK_LEN;
+ } else {
+ us->SD_Block_Mult = 1;
}
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_LoadBinCode() ---------------------
+/*
+ * ENE_LoadBinCode()
+ */
int ENE_LoadBinCode(struct us_data *us, BYTE flag)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- //void *buf;
+ /* void *buf; */
PBYTE buf;
- //printk("transport --- ENE_LoadBinCode\n");
+ /* printk(KERN_INFO "transport --- ENE_LoadBinCode\n"); */
if (us->BIN_FLAG == flag)
return USB_STOR_TRANSPORT_GOOD;
buf = kmalloc(0x800, GFP_KERNEL);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
- switch ( flag )
- {
- // For SD
- case SD_INIT1_PATTERN:
- printk("SD_INIT1_PATTERN\n");
- memcpy(buf, SD_Init1, 0x800);
+ switch (flag) {
+ /* For SD */
+ case SD_INIT1_PATTERN:
+ printk(KERN_INFO "SD_INIT1_PATTERN\n");
+ memcpy(buf, SD_Init1, 0x800);
break;
- case SD_INIT2_PATTERN:
- printk("SD_INIT2_PATTERN\n");
- memcpy(buf, SD_Init2, 0x800);
+ case SD_INIT2_PATTERN:
+ printk(KERN_INFO "SD_INIT2_PATTERN\n");
+ memcpy(buf, SD_Init2, 0x800);
break;
- case SD_RW_PATTERN:
- printk("SD_RW_PATTERN\n");
- memcpy(buf, SD_Rdwr, 0x800);
+ case SD_RW_PATTERN:
+ printk(KERN_INFO "SD_RW_PATTERN\n");
+ memcpy(buf, SD_Rdwr, 0x800);
break;
- // For MS
- case MS_INIT_PATTERN:
- printk("MS_INIT_PATTERN\n");
- memcpy(buf, MS_Init, 0x800);
+ /* For MS */
+ case MS_INIT_PATTERN:
+ printk(KERN_INFO "MS_INIT_PATTERN\n");
+ memcpy(buf, MS_Init, 0x800);
break;
- case MSP_RW_PATTERN:
- printk("MSP_RW_PATTERN\n");
- memcpy(buf, MSP_Rdwr, 0x800);
+ case MSP_RW_PATTERN:
+ printk(KERN_INFO "MSP_RW_PATTERN\n");
+ memcpy(buf, MSP_Rdwr, 0x800);
break;
- case MS_RW_PATTERN:
- printk("MS_RW_PATTERN\n");
- memcpy(buf, MS_Rdwr, 0x800);
+ case MS_RW_PATTERN:
+ printk(KERN_INFO "MS_RW_PATTERN\n");
+ memcpy(buf, MS_Rdwr, 0x800);
break;
- // For SS
- case SM_INIT_PATTERN:
- printk("SM_INIT_PATTERN\n");
- memcpy(buf, SM_Init, 0x800);
+ /* For SS */
+ case SM_INIT_PATTERN:
+ printk(KERN_INFO "SM_INIT_PATTERN\n");
+ memcpy(buf, SM_Init, 0x800);
break;
- case SM_RW_PATTERN:
- printk("SM_RW_PATTERN\n");
- memcpy(buf, SM_Rdwr, 0x800);
+ case SM_RW_PATTERN:
+ printk(KERN_INFO "SM_RW_PATTERN\n");
+ memcpy(buf, SM_Rdwr, 0x800);
break;
}
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x800;
- bcb->Flags =0x00;
+ bcb->Flags = 0x00;
bcb->CDB[0] = 0xEF;
result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
@@ -354,54 +341,63 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
return result;
}
-//----- ENE_SendScsiCmd() ---------------------
+/*
+ * ENE_SendScsiCmd():
+ */
int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
int result;
- unsigned int transfer_length=bcb->DataTransferLength, cswlen=0, partial=0;
+ unsigned int transfer_length = bcb->DataTransferLength,
+ cswlen = 0, partial = 0;
unsigned int residue;
- //printk("transport --- ENE_SendScsiCmd\n");
- // send cmd to out endpoint
- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("send cmd to out endpoint fail ---\n");
+ /* printk(KERN_INFO "transport --- ENE_SendScsiCmd\n"); */
+ /* send cmd to out endpoint */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+ bcb, US_BULK_CB_WRAP_LEN, NULL);
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "send cmd to out endpoint fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
- if (buf)
- {
- unsigned int pipe = fDir == FDIR_READ ? us->recv_bulk_pipe : us->send_bulk_pipe;
- // Bulk
+ if (buf) {
+ unsigned int pipe = fDir;
+
+ if (fDir == FDIR_READ)
+ pipe = us->recv_bulk_pipe;
+ else
+ pipe = us->send_bulk_pipe;
+
+ /* Bulk */
if (use_sg)
result = usb_stor_bulk_srb(us, pipe, us->srb);
else
- result = usb_stor_bulk_transfer_sg(us, pipe, buf, transfer_length, 0, &partial);
- if (result != USB_STOR_XFER_GOOD)
- {
- printk("data transfer fail ---\n");
+ result = usb_stor_bulk_transfer_sg(us, pipe, buf,
+ transfer_length, 0, &partial);
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "data transfer fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
}
- // Get CSW for device status
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+ /* Get CSW for device status */
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+ US_BULK_CS_WRAP_LEN, &cswlen);
- if (result == USB_STOR_XFER_SHORT && cswlen == 0)
- {
- printk("Received 0-length CSW; retrying...\n");
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+ if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
+ printk(KERN_WARNING "Received 0-length CSW; retrying...\n");
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+ bcs, US_BULK_CS_WRAP_LEN, &cswlen);
}
- if (result == USB_STOR_XFER_STALLED)
- {
+ if (result == USB_STOR_XFER_STALLED) {
/* get the status again */
- printk("Attempting to get CSW (2nd try)...\n");
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+ printk(KERN_WARNING "Attempting to get CSW (2nd try)...\n");
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+ bcs, US_BULK_CS_WRAP_LEN, NULL);
}
if (result != USB_STOR_XFER_GOOD)
@@ -410,12 +406,14 @@ int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
/* check bulk status */
residue = le32_to_cpu(bcs->Residue);
- /* try to compute the actual residue, based on how much data
- * was really transferred and what the device tells us */
- if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE))
- {
+ /*
+ * try to compute the actual residue, based on how much data
+ * was really transferred and what the device tells us
+ */
+ if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
residue = min(residue, transfer_length);
- scsi_set_resid(us->srb, max(scsi_get_resid(us->srb), (int) residue));
+ scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
+ (int) residue));
}
if (bcs->Status != US_BULK_STAT_OK)
@@ -424,35 +422,40 @@ int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_Read_Data() ---------------------
+/*
+ * ENE_Read_Data()
+ */
int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
int result;
- //printk("transport --- ENE_Read_Data\n");
- // set up the command wrapper
+ /* printk(KERN_INFO "transport --- ENE_Read_Data\n"); */
+ /* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = length;
- bcb->Flags =0x80;
+ bcb->Flags = 0x80;
bcb->CDB[0] = 0xED;
bcb->CDB[2] = 0xFF;
bcb->CDB[3] = 0x81;
- // send cmd to out endpoint
- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
+ /* send cmd to out endpoint */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
+ US_BULK_CB_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // R/W data
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, buf, length, NULL);
+ /* R/W data */
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+ buf, length, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // Get CSW for device status
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+ /* Get CSW for device status */
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+ US_BULK_CS_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (bcs->Status != US_BULK_STAT_OK)
@@ -461,35 +464,40 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- ENE_Write_Data() ---------------------
+/*
+ * ENE_Write_Data():
+ */
int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
int result;
- //printk("transport --- ENE_Write_Data\n");
- // set up the command wrapper
+ /* printk("transport --- ENE_Write_Data\n"); */
+ /* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = length;
- bcb->Flags =0x00;
+ bcb->Flags = 0x00;
bcb->CDB[0] = 0xEE;
bcb->CDB[2] = 0xFF;
bcb->CDB[3] = 0x81;
- // send cmd to out endpoint
- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
+ /* send cmd to out endpoint */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
+ US_BULK_CB_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // R/W data
- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, buf, length, NULL);
+ /* R/W data */
+ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+ buf, length, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- // Get CSW for device status
- result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+ /* Get CSW for device status */
+ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+ US_BULK_CS_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (bcs->Status != US_BULK_STAT_OK)
@@ -498,42 +506,52 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
return USB_STOR_TRANSPORT_GOOD;
}
-//----- usb_stor_print_cmd() ---------------------
+/*
+ * usb_stor_print_cmd():
+ */
void usb_stor_print_cmd(struct scsi_cmnd *srb)
{
PBYTE Cdb = srb->cmnd;
DWORD cmd = Cdb[0];
- DWORD bn = ((Cdb[2]<<24) & 0xff000000) | ((Cdb[3]<<16) & 0x00ff0000) |
- ((Cdb[4]<< 8) & 0x0000ff00) | ((Cdb[5]<< 0) & 0x000000ff);
- WORD blen = ((Cdb[7]<< 8) & 0xff00) | ((Cdb[8]<< 0) & 0x00ff);
+ DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ ((Cdb[3] << 16) & 0x00ff0000) |
+ ((Cdb[4] << 8) & 0x0000ff00) |
+ ((Cdb[5] << 0) & 0x000000ff);
+ WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
switch (cmd) {
case TEST_UNIT_READY:
- //printk("scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd);
+ /* printk(KERN_INFO
+ "scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd); */
break;
case INQUIRY:
- printk("scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
+ printk(KERN_INFO "scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
break;
case MODE_SENSE:
- printk("scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
+ printk(KERN_INFO "scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
break;
case START_STOP:
- printk("scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
+ printk(KERN_INFO "scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
break;
case READ_CAPACITY:
- printk("scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
+ printk(KERN_INFO "scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
break;
case READ_10:
- //printk("scsi cmd %X --- SCSIOP_READ, bn = %X, blen = %X\n", cmd, bn, blen);
+ /* printk(KERN_INFO
+ "scsi cmd %X --- SCSIOP_READ,bn = %X, blen = %X\n"
+ ,cmd, bn, blen); */
break;
case WRITE_10:
- //printk("scsi cmd %X --- SCSIOP_WRITE, bn = %X, blen = %X\n", cmd, bn, blen);
+ /* printk(KERN_INFO
+ "scsi cmd %X --- SCSIOP_WRITE,
+ bn = %X, blen = %X\n" , cmd, bn, blen); */
break;
case ALLOW_MEDIUM_REMOVAL:
- printk("scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
+ printk(KERN_INFO
+ "scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
break;
default:
- printk("scsi cmd %X --- Other cmd\n", cmd);
+ printk(KERN_INFO "scsi cmd %X --- Other cmd\n", cmd);
break;
}
bn = 0;
diff --git a/drivers/staging/keucr/init.h b/drivers/staging/keucr/init.h
index cd199fc1e6d5..5223132a6c24 100644
--- a/drivers/staging/keucr/init.h
+++ b/drivers/staging/keucr/init.h
@@ -1,5 +1,8 @@
#include "common.h"
+extern DWORD MediaChange;
+extern int Check_D_MediaFmt(struct us_data *);
+
BYTE SD_Init1[] = {
0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90,
0xFF, 0x23, 0x74, 0x80, 0xF0, 0x90, 0xFF, 0x09,
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index 9a3fdb4e4fe4..452ea8f54f67 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -347,7 +347,7 @@ int MS_LibProcessBootBlock(struct us_data *us, WORD PhyBlock, BYTE *PageData)
BYTE *PageBuffer;
MS_LibTypeExtdat ExtraData;
- if ((PageBuffer = (BYTE *)kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL))==NULL)
+ if ((PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL))==NULL)
return (DWORD)-1;
result = (DWORD)-1;
@@ -480,8 +480,8 @@ int MS_LibAllocLogicalMap(struct us_data *us)
DWORD i;
- us->MS_Lib.Phy2LogMap = (WORD *)kmalloc(us->MS_Lib.NumberOfPhyBlock * sizeof(WORD), GFP_KERNEL);
- us->MS_Lib.Log2PhyMap = (WORD *)kmalloc(us->MS_Lib.NumberOfLogBlock * sizeof(WORD), GFP_KERNEL);
+ us->MS_Lib.Phy2LogMap = kmalloc(us->MS_Lib.NumberOfPhyBlock * sizeof(WORD), GFP_KERNEL);
+ us->MS_Lib.Log2PhyMap = kmalloc(us->MS_Lib.NumberOfLogBlock * sizeof(WORD), GFP_KERNEL);
if ((us->MS_Lib.Phy2LogMap == NULL) || (us->MS_Lib.Log2PhyMap == NULL))
{
@@ -610,8 +610,8 @@ int MS_LibAllocWriteBuf(struct us_data *us)
{
us->MS_Lib.wrtblk = (WORD)-1;
- us->MS_Lib.blkpag = (BYTE *)kmalloc(us->MS_Lib.PagesPerBlock * us->MS_Lib.BytesPerSector, GFP_KERNEL);
- us->MS_Lib.blkext = (MS_LibTypeExtdat *)kmalloc(us->MS_Lib.PagesPerBlock * sizeof(MS_LibTypeExtdat), GFP_KERNEL);
+ us->MS_Lib.blkpag = kmalloc(us->MS_Lib.PagesPerBlock * us->MS_Lib.BytesPerSector, GFP_KERNEL);
+ us->MS_Lib.blkext = kmalloc(us->MS_Lib.PagesPerBlock * sizeof(MS_LibTypeExtdat), GFP_KERNEL);
if ((us->MS_Lib.blkpag == NULL) || (us->MS_Lib.blkext == NULL))
{
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index bdfbf76f8df9..2cbe9f897eef 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -153,9 +153,9 @@ int Media_D_ReadSector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
WORD len, bn;
//if (Check_D_MediaPower()) ; ¦b 6250 don't care
- // return(ErrCode); ;
+ // return(ErrCode);
//if (Check_D_MediaFmt(fdoExt)) ;
- // return(ErrCode); ;
+ // return(ErrCode);
if (Conv_D_MediaAddr(us, start))
return(ErrCode);
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 1b52535a388f..ce10cf215f51 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -763,8 +763,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
bcb->CDB[7] = (BYTE)addr;
bcb->CDB[6] = (BYTE)(addr/0x0100);
bcb->CDB[5] = Media.Zone/2;
- bcb->CDB[8] = *(redundant+REDT_ADDR1H);;
- bcb->CDB[9] = *(redundant+REDT_ADDR1L);;
+ bcb->CDB[8] = *(redundant+REDT_ADDR1H);
+ bcb->CDB[9] = *(redundant+REDT_ADDR1L);
result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
if (result != USB_STOR_XFER_GOOD)
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 1e3bb1406331..9647154a4923 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -147,7 +147,7 @@ void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
int frames = fsize / bytes_per_frame;
- if (runtime == 0)
+ if (runtime == NULL)
return;
if (line6pcm->pos_in_done + frames > runtime->buffer_size) {
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index ab67e889d2c4..e554a2da643a 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -127,7 +127,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb == 0) {
+ if (urb == NULL) {
dev_err(line6->ifcdev, "Out of memory\n");
return -ENOMEM;
}
@@ -137,7 +137,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
transfer_buffer = kmalloc(length, GFP_ATOMIC);
- if (transfer_buffer == 0) {
+ if (transfer_buffer == NULL) {
usb_free_urb(urb);
dev_err(line6->ifcdev, "Out of memory\n");
return -ENOMEM;
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 29940fd1671b..10c543836583 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -246,7 +246,7 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
change_volume(urb_out, line6pcm->volume_playback, bytes_per_frame);
- if (line6pcm->prev_fbuf != 0) {
+ if (line6pcm->prev_fbuf != NULL) {
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
if (line6pcm->flags & MASK_PCM_IMPULSE) {
create_impulse_test_signal(line6pcm, urb_out,
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/lirc/TODO.lirc_zilog
index 6aa312df4018..2d0263f07937 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/lirc/TODO.lirc_zilog
@@ -1,13 +1,37 @@
-The binding between hdpvr and lirc_zilog is currently disabled,
+1. Both ir-kbd-i2c and lirc_zilog provide support for RX events.
+The 'tx_only' lirc_zilog module parameter will allow ir-kbd-i2c
+and lirc_zilog to coexist in the kernel, if the user requires such a set-up.
+However, the IR unit will not work well without coordination between the
+two modules. A shared mutex for transceiver access locking needs to be
+supplied by bridge drivers, via struct IR_i2c_init_data, to both ir-kbd-i2c
+and lirc_zilog before they will coexist usefully (see the sketch after this
+hunk). This should be fixed before moving out of staging.
+
+2. References and locking need careful examination. For cx18 and ivtv PCI
+cards, which are not easily "hot unplugged", the imperfect state of reference
+counting and locking is acceptable, if not correct. For USB-connected units
+like the HD PVR, PVR USB2, HVR-1900, and HVR-1950, the likelihood of an Oops
+on unplug is high. Proper reference counting and locking needs to be
+implemented before this module is moved out of staging.
+
+3. The binding between hdpvr and lirc_zilog is currently disabled,
due to an OOPS reported a few years ago when both the hdpvr and cx18
drivers were loaded in his system. More details can be seen at:
http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html
More tests need to be done, in order to fix the reported issue.
-There's a conflict between ir-kbd-i2c: Both provide support for RX events.
-Such conflict needs to be fixed, before moving it out of staging.
+4. In addition to providing a shared mutex for transceiver access
+locking, bridge drivers, if able, should provide a chip reset() callback
+to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
+to perform Z8 chip resets via GPIO manipulations. This will allow lirc_zilog
+to bring the chip back to normal when it hangs, in the same places the
+original lirc_pvr150 driver code does. This is not strictly needed, so it
+is not required to move lirc_zilog out of staging.
+
+5. Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
+and installed on Hauppauge products. When working on either module, developers
+must consider at least the following bridge drivers which mention an IR Rx unit
+at address 0x71 (indicative of a Z8):
-The way I2C probe works, it will try to register the driver twice, one
-for RX and another for TX. The logic needs to be fixed to avoid such
-issue.
+ ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
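To make TODO items 1 and 4 above concrete, here is a minimal, hypothetical C sketch of what a bridge driver could hand to both ir-kbd-i2c and lirc_zilog: one shared transceiver mutex plus an optional Z8 reset hook. None of this is part of the patch; the struct and member names (z8_shared_init_data, tx_rx_lock, reset, bridge_priv) are assumptions for illustration, and the real change would extend struct IR_i2c_init_data in <media/ir-kbd-i2c.h> rather than add a new type.

#include <linux/mutex.h>

/* Hypothetical container for the data the TODO asks bridge drivers to share */
struct z8_shared_init_data {
	struct mutex *tx_rx_lock;		/* serializes all Z8 Rx/Tx i2c access */
	int (*reset)(void *bridge_priv);	/* optional GPIO-based Z8 reset */
	void *bridge_priv;			/* bridge context passed to reset() */
};

/* Defined once per Z8 transceiver by the bridge driver (cx18, ivtv, ...) */
static DEFINE_MUTEX(bridge_z8_lock);

/* Bridge-specific reset routine; cx18/ivtv would do their GPIO manipulation here */
static int bridge_z8_reset(void *bridge_priv)
{
	return 0;
}

static struct z8_shared_init_data bridge_z8_init_data = {
	.tx_rx_lock	= &bridge_z8_lock,
	.reset		= bridge_z8_reset,
	.bridge_priv	= NULL,
};

With init data like this, ir-kbd-i2c and lirc_zilog would both take ->tx_rx_lock around every i2c transaction with the chip instead of each holding only a private lock, which is the coordination item 1 says is missing.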
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 0da6b9518af9..235cab0eb087 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -447,6 +447,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
exit:
mutex_unlock(&context->ctx_lock);
+ kfree(data_buf);
return (!retval) ? n_bytes : retval;
}
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 929ae5795467..5938616f3e8f 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -232,6 +232,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
i++;
}
terminate_send(tx_buf[i - 1]);
+ kfree(tx_buf);
return n;
}
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index dfd2c447e67d..3a9c09881b2b 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -376,6 +376,7 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
unsigned long flags;
int counttimer;
int *wbuf;
+ ssize_t ret;
if (!is_claimed)
return -EBUSY;
@@ -393,8 +394,10 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
if (timer == 0) {
/* try again if device is ready */
timer = init_lirc_timer();
- if (timer == 0)
- return -EIO;
+ if (timer == 0) {
+ ret = -EIO;
+ goto out;
+ }
}
/* adjust values from usecs */
@@ -420,7 +423,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
if (check_pselecd && (in(1) & LP_PSELECD)) {
lirc_off();
local_irq_restore(flags);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
} while (counttimer < wbuf[i]);
i++;
@@ -436,7 +440,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
level = newlevel;
if (check_pselecd && (in(1) & LP_PSELECD)) {
local_irq_restore(flags);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
} while (counttimer < wbuf[i]);
i++;
@@ -445,7 +450,11 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
#else
/* place code that handles write without external timer here */
#endif
- return n;
+ ret = n;
+out:
+ kfree(wbuf);
+
+ return ret;
}
static unsigned int lirc_poll(struct file *file, poll_table *wait)
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 998485ebdbce..925eabe14854 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -448,6 +448,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
exit:
mutex_unlock(&context->ctx_lock);
+ kfree(data_buf);
return (!retval) ? n_bytes : retval;
}
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 9bcf149c4260..1c3099b388e0 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -966,7 +966,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
if (n % sizeof(int) || count % 2 == 0)
return -EINVAL;
wbuf = memdup_user(buf, n);
- if (PTR_ERR(wbuf))
+ if (IS_ERR(wbuf))
return PTR_ERR(wbuf);
spin_lock_irqsave(&hardware[type].lock, flags);
if (type == LIRC_IRDEO) {
@@ -981,6 +981,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
}
off();
spin_unlock_irqrestore(&hardware[type].lock, flags);
+ kfree(wbuf);
return n;
}
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index c553ab626238..76be7b8c6209 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -330,6 +330,7 @@ static ssize_t lirc_write(struct file *file, const char *buf, size_t n,
/* enable receiver */
Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE;
#endif
+ kfree(tx_buf);
return count;
}
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index ad29bb1275ab..0aad0d7a74a3 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -20,6 +20,9 @@
*
* parts are cut&pasted from the lirc_i2c.c driver
*
+ * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
+ * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -60,38 +63,44 @@
#include <media/lirc_dev.h>
#include <media/lirc.h>
-struct IR {
- struct lirc_driver l;
-
- /* Device info */
- struct mutex ir_lock;
- int open;
- bool is_hdpvr;
-
+struct IR_rx {
/* RX device */
- struct i2c_client c_rx;
- int have_rx;
+ struct i2c_client *c;
/* RX device buffer & lock */
struct lirc_buffer buf;
struct mutex buf_lock;
/* RX polling thread data */
- struct completion *t_notify;
- struct completion *t_notify2;
- int shutdown;
struct task_struct *task;
/* RX read data */
unsigned char b[3];
+ bool hdpvr_data_fmt;
+};
+struct IR_tx {
/* TX device */
- struct i2c_client c_tx;
+ struct i2c_client *c;
+
+ /* TX additional actions needed */
int need_boot;
- int have_tx;
+ bool post_tx_ready_poll;
+};
+
+struct IR {
+ struct lirc_driver l;
+
+ struct mutex ir_lock;
+ int open;
+
+ struct i2c_adapter *adapter;
+ struct IR_rx *rx;
+ struct IR_tx *tx;
};
/* Minor -> data mapping */
+static struct mutex ir_devices_lock;
static struct IR *ir_devices[MAX_IRCTL_DEVICES];
/* Block size for IR transmitter */
@@ -124,14 +133,11 @@ static struct mutex tx_data_lock;
#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
## args)
#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
-#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
+#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static int debug; /* debug output */
-static int disable_rx; /* disable RX device */
-static int disable_tx; /* disable TX device */
+static int tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
@@ -150,8 +156,12 @@ static int add_to_buf(struct IR *ir)
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
+ struct IR_rx *rx = ir->rx;
- if (lirc_buffer_full(&ir->buf)) {
+ if (rx == NULL)
+ return -ENXIO;
+
+ if (lirc_buffer_full(&rx->buf)) {
dprintk("buffer overflow\n");
return -EOVERFLOW;
}
@@ -161,17 +171,25 @@ static int add_to_buf(struct IR *ir)
* data and we have space
*/
do {
+ if (kthread_should_stop())
+ return -ENODATA;
+
/*
* Lock i2c bus for the duration. RX/TX chips interfere so
* this is worth it
*/
mutex_lock(&ir->ir_lock);
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
+
/*
* Send random "poll command" (?) Windows driver does this
* and it is a good point to detect chip failure.
*/
- ret = i2c_master_send(&ir->c_rx, sendbuf, 1);
+ ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
if (failures >= 3) {
@@ -186,45 +204,53 @@ static int add_to_buf(struct IR *ir)
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
schedule_timeout((100 * HZ + 999) / 1000);
- ir->need_boot = 1;
+ ir->tx->need_boot = 1;
++failures;
mutex_unlock(&ir->ir_lock);
continue;
}
- ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf));
+ if (kthread_should_stop()) {
+ mutex_unlock(&ir->ir_lock);
+ return -ENODATA;
+ }
+ ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
zilog_error("i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
- ir->b[0] = keybuf[3];
- ir->b[1] = keybuf[4];
- ir->b[2] = keybuf[5];
- dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]);
+ rx->b[0] = keybuf[3];
+ rx->b[1] = keybuf[4];
+ rx->b[2] = keybuf[5];
+ dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
}
/* key pressed ? */
- if (ir->is_hdpvr) {
+ if (rx->hdpvr_data_fmt) {
if (got_data && (keybuf[0] == 0x80))
return 0;
else if (got_data && (keybuf[0] == 0x00))
return -ENODATA;
- } else if ((ir->b[0] & 0x80) == 0)
+ } else if ((rx->b[0] & 0x80) == 0)
return got_data ? 0 : -ENODATA;
/* look what we have */
- code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2);
+ code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
codes[0] = (code >> 8) & 0xff;
codes[1] = code & 0xff;
/* return it */
- lirc_buffer_write(&ir->buf, codes);
+ lirc_buffer_write(&rx->buf, codes);
++got_data;
- } while (!lirc_buffer_full(&ir->buf));
+ } while (!lirc_buffer_full(&rx->buf));
return 0;
}
@@ -242,46 +268,35 @@ static int add_to_buf(struct IR *ir)
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
-
- if (ir->t_notify != NULL)
- complete(ir->t_notify);
+ struct IR_rx *rx = ir->rx;
dprintk("poll thread started\n");
- do {
- if (ir->open) {
- set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- /*
- * This is ~113*2 + 24 + jitter (2*repeat gap +
- * code length). We use this interval as the chip
- * resets every time you poll it (bad!). This is
- * therefore just sufficient to catch all of the
- * button presses. It makes the remote much more
- * responsive. You can see the difference by
- * running irw and holding down a button. With
- * 100ms, the old polling interval, you'll notice
- * breaks in the repeat sequence corresponding to
- * lost keypresses.
- */
- schedule_timeout((260 * HZ) / 1000);
- if (ir->shutdown)
- break;
- if (!add_to_buf(ir))
- wake_up_interruptible(&ir->buf.wait_poll);
- } else {
- /* if device not opened so we can sleep half a second */
- set_current_state(TASK_INTERRUPTIBLE);
+ /* if device not opened, we can sleep half a second */
+ if (!ir->open) {
schedule_timeout(HZ/2);
+ continue;
}
- } while (!ir->shutdown);
-
- if (ir->t_notify2 != NULL)
- wait_for_completion(ir->t_notify2);
- ir->task = NULL;
- if (ir->t_notify != NULL)
- complete(ir->t_notify);
+ /*
+ * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
+ * We use this interval as the chip resets every time you poll
+ * it (bad!). This is therefore just sufficient to catch all
+ * of the button presses. It makes the remote much more
+ * responsive. You can see the difference by running irw and
+ * holding down a button. With 100ms, the old polling
+ * interval, you'll notice breaks in the repeat sequence
+ * corresponding to lost keypresses.
+ */
+ schedule_timeout((260 * HZ) / 1000);
+ if (kthread_should_stop())
+ break;
+ if (!add_to_buf(ir))
+ wake_up_interruptible(&rx->buf.wait_poll);
+ }
dprintk("poll thread ended\n");
return 0;
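A quick arithmetic note on the polling interval justified in the comment above: 2 x 113 ms (two repeat gaps) + 24 ms (code length) = 250 ms, and the remaining 10 ms is the jitter margin, which is where the 260 ms figure comes from. The open-coded (260 * HZ) / 1000 is just a milliseconds-to-jiffies conversion; a small sketch (not part of this patch, helper name hypothetical) using the kernel's msecs_to_jiffies() would be:

#include <linux/jiffies.h>	/* msecs_to_jiffies() */
#include <linux/sched.h>	/* set_current_state(), schedule_timeout() */

/* Hypothetical helper: the same 260 ms interruptible sleep as the loop above */
static void zilog_poll_sleep(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(260));	/* 2*113 + 24 + ~10 ms margin */
}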
@@ -299,10 +314,10 @@ static int set_use_inc(void *data)
* this is completely broken code. lirc_unregister_driver()
* must be possible even when the device is open
*/
- if (ir->c_rx.addr)
- i2c_use_client(&ir->c_rx);
- if (ir->c_tx.addr)
- i2c_use_client(&ir->c_tx);
+ if (ir->rx != NULL)
+ i2c_use_client(ir->rx->c);
+ if (ir->tx != NULL)
+ i2c_use_client(ir->tx->c);
return 0;
}
@@ -311,10 +326,10 @@ static void set_use_dec(void *data)
{
struct IR *ir = data;
- if (ir->c_rx.addr)
- i2c_release_client(&ir->c_rx);
- if (ir->c_tx.addr)
- i2c_release_client(&ir->c_tx);
+ if (ir->rx)
+ i2c_release_client(ir->rx->c);
+ if (ir->tx)
+ i2c_release_client(ir->tx->c);
if (ir->l.owner != NULL)
module_put(ir->l.owner);
}
@@ -453,7 +468,7 @@ corrupt:
}
/* send a block of data to the IR TX device */
-static int send_data_block(struct IR *ir, unsigned char *data_block)
+static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
{
int i, j, ret;
unsigned char buf[5];
@@ -467,7 +482,7 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
buf[1 + j] = data_block[i + j];
dprintk("%02x %02x %02x %02x %02x",
buf[0], buf[1], buf[2], buf[3], buf[4]);
- ret = i2c_master_send(&ir->c_tx, buf, tosend + 1);
+ ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -478,38 +493,50 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
}
/* send boot data to the IR TX device */
-static int send_boot_data(struct IR *ir)
+static int send_boot_data(struct IR_tx *tx)
{
- int ret;
+ int ret, i;
unsigned char buf[4];
/* send the boot block */
- ret = send_data_block(ir, tx_data->boot_data);
+ ret = send_data_block(tx, tx_data->boot_data);
if (ret != 0)
return ret;
- /* kick it off? */
+ /* Hit the go button to activate the new boot data */
buf[0] = 0x00;
buf[1] = 0x20;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+ /*
+ * Wait for zilog to settle after hitting go post boot block upload.
+ * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
+ * upon attempting to get firmware revision, and tx probe thus fails.
+ */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
- ret = i2c_master_recv(&ir->c_tx, buf, 4);
+ ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
}
- if (buf[0] != 0x80) {
- zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+ if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+ zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
return 0;
}
zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -543,7 +570,7 @@ static void fw_unload(void)
}
/* load "firmware" for the IR TX device */
-static int fw_load(struct IR *ir)
+static int fw_load(struct IR_tx *tx)
{
int ret;
unsigned int i;
@@ -558,7 +585,7 @@ static int fw_load(struct IR *ir)
}
/* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev);
+ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &tx->c->dev);
if (ret != 0) {
zilog_error("firmware haup-ir-blaster.bin not available "
"(%d)\n", ret);
@@ -685,20 +712,20 @@ out:
}
/* initialise the IR TX device */
-static int tx_init(struct IR *ir)
+static int tx_init(struct IR_tx *tx)
{
int ret;
/* Load 'firmware' */
- ret = fw_load(ir);
+ ret = fw_load(tx);
if (ret != 0)
return ret;
/* Send boot block */
- ret = send_boot_data(ir);
+ ret = send_boot_data(tx);
if (ret != 0)
return ret;
- ir->need_boot = 0;
+ tx->need_boot = 0;
/* Looks good */
return 0;
@@ -714,20 +741,20 @@ static loff_t lseek(struct file *filep, loff_t offset, int orig)
static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
{
struct IR *ir = filep->private_data;
- unsigned char buf[ir->buf.chunk_size];
+ struct IR_rx *rx = ir->rx;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
dprintk("read called\n");
- if (ir->c_rx.addr == 0)
+ if (rx == NULL)
return -ENODEV;
- if (mutex_lock_interruptible(&ir->buf_lock))
+ if (mutex_lock_interruptible(&rx->buf_lock))
return -ERESTARTSYS;
- if (n % ir->buf.chunk_size) {
+ if (n % rx->buf.chunk_size) {
dprintk("read result = -EINVAL\n");
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
return -EINVAL;
}
@@ -736,7 +763,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
* to avoid losing scan code (in case when queue is awaken somewhere
* between while condition checking and scheduling)
*/
- add_wait_queue(&ir->buf.wait_poll, &wait);
+ add_wait_queue(&rx->buf.wait_poll, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/*
@@ -744,7 +771,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
* mode and 'copy_to_user' is happy, wait for data.
*/
while (written < n && ret == 0) {
- if (lirc_buffer_empty(&ir->buf)) {
+ if (lirc_buffer_empty(&rx->buf)) {
/*
* According to the read(2) man page, 'written' can be
* returned as less than 'n', instead of blocking
@@ -764,16 +791,17 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
schedule();
set_current_state(TASK_INTERRUPTIBLE);
} else {
- lirc_buffer_read(&ir->buf, buf);
+ unsigned char buf[rx->buf.chunk_size];
+ lirc_buffer_read(&rx->buf, buf);
ret = copy_to_user((void *)outbuf+written, buf,
- ir->buf.chunk_size);
- written += ir->buf.chunk_size;
+ rx->buf.chunk_size);
+ written += rx->buf.chunk_size;
}
}
- remove_wait_queue(&ir->buf.wait_poll, &wait);
+ remove_wait_queue(&rx->buf.wait_poll, &wait);
set_current_state(TASK_RUNNING);
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
dprintk("read result = %s (%d)\n",
ret ? "-EFAULT" : "OK", ret);
@@ -782,7 +810,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
}
/* send a keypress to the IR TX device */
-static int send_code(struct IR *ir, unsigned int code, unsigned int key)
+static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
{
unsigned char data_block[TX_BLOCK_SIZE];
unsigned char buf[2];
@@ -799,26 +827,34 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
return ret;
/* Send the data block */
- ret = send_data_block(ir, data_block);
+ ret = send_data_block(tx, data_block);
if (ret != 0)
return ret;
/* Send data block length? */
buf[0] = 0x00;
buf[1] = 0x40;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+ /* Give the z8 a moment to process data block */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
- ret = i2c_master_recv(&ir->c_tx, buf, 1);
+ ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -832,7 +868,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
/* Send prepare command? */
buf[0] = 0x00;
buf[1] = 0x80;
- ret = i2c_master_send(&ir->c_tx, buf, 2);
+ ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -843,7 +879,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
* last i2c_master_recv always fails with a -5, so for now, we're
* going to skip this whole mess and say we're done on the HD PVR
*/
- if (ir->is_hdpvr) {
+ if (!tx->post_tx_ready_poll) {
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
@@ -857,7 +893,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
for (i = 0; i < 20; ++i) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((50 * HZ + 999) / 1000);
- ret = i2c_master_send(&ir->c_tx, buf, 1);
+ ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
dprintk("NAK expected: i2c_master_send "
@@ -870,7 +906,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
}
/* Seems to be an 'ok' response */
- i = i2c_master_recv(&ir->c_tx, buf, 1);
+ i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return -EFAULT;
@@ -895,10 +931,11 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
loff_t *ppos)
{
struct IR *ir = filep->private_data;
+ struct IR_tx *tx = ir->tx;
size_t i;
int failures = 0;
- if (ir->c_tx.addr == 0)
+ if (tx == NULL)
return -ENODEV;
/* Validate user parameters */
@@ -919,15 +956,15 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
}
/* Send boot data first if required */
- if (ir->need_boot == 1) {
- ret = send_boot_data(ir);
+ if (tx->need_boot == 1) {
+ ret = send_boot_data(tx);
if (ret == 0)
- ir->need_boot = 0;
+ tx->need_boot = 0;
}
/* Send the code */
if (ret == 0) {
- ret = send_code(ir, (unsigned)command >> 16,
+ ret = send_code(tx, (unsigned)command >> 16,
(unsigned)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
@@ -952,7 +989,7 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((100 * HZ + 999) / 1000);
- ir->need_boot = 1;
+ tx->need_boot = 1;
++failures;
} else
i += sizeof(int);
@@ -969,22 +1006,23 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
static unsigned int poll(struct file *filep, poll_table *wait)
{
struct IR *ir = filep->private_data;
+ struct IR_rx *rx = ir->rx;
unsigned int ret;
dprintk("poll called\n");
- if (ir->c_rx.addr == 0)
+ if (rx == NULL)
return -ENODEV;
- mutex_lock(&ir->buf_lock);
+ mutex_lock(&rx->buf_lock);
- poll_wait(filep, &ir->buf.wait_poll, wait);
+ poll_wait(filep, &rx->buf.wait_poll, wait);
dprintk("poll result = %s\n",
- lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM");
+ lirc_buffer_empty(&rx->buf) ? "0" : "POLLIN|POLLRDNORM");
- ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM);
+ ret = lirc_buffer_empty(&rx->buf) ? 0 : (POLLIN|POLLRDNORM);
- mutex_unlock(&ir->buf_lock);
+ mutex_unlock(&rx->buf_lock);
return ret;
}
@@ -994,10 +1032,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int result;
unsigned long mode, features = 0;
- if (ir->c_rx.addr != 0)
+ features |= LIRC_CAN_SEND_PULSE;
+ if (ir->rx != NULL)
features |= LIRC_CAN_REC_LIRCCODE;
- if (ir->c_tx.addr != 0)
- features |= LIRC_CAN_SEND_PULSE;
switch (cmd) {
case LIRC_GET_LENGTH:
@@ -1024,15 +1061,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
result = -EINVAL;
break;
case LIRC_GET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
break;
case LIRC_SET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
- return -ENOSYS;
-
result = get_user(mode, (unsigned long *) arg);
if (!result && mode != LIRC_MODE_PULSE)
return -EINVAL;
@@ -1043,6 +1074,15 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
}
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_minor(unsigned int minor)
+{
+ if (minor >= MAX_IRCTL_DEVICES)
+ return NULL;
+
+ return ir_devices[minor];
+}
+
/*
* Open the IR device. Get hold of our IR structure and
* stash it in private_data for the file
@@ -1051,15 +1091,15 @@ static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
int ret;
+ unsigned int minor = MINOR(node->i_rdev);
/* find our IR struct */
- unsigned minor = MINOR(node->i_rdev);
- if (minor >= MAX_IRCTL_DEVICES) {
- dprintk("minor %d: open result = -ENODEV\n",
- minor);
+ mutex_lock(&ir_devices_lock);
+ ir = find_ir_device_by_minor(minor);
+ mutex_unlock(&ir_devices_lock);
+
+ if (ir == NULL)
return -ENODEV;
- }
- ir = ir_devices[minor];
/* increment in use count */
mutex_lock(&ir->ir_lock);
@@ -1106,7 +1146,6 @@ static struct lirc_driver lirc_template = {
static int ir_remove(struct i2c_client *client);
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
#define ID_FLAG_TX 0x01
#define ID_FLAG_HDPVR 0x02
@@ -1126,7 +1165,6 @@ static struct i2c_driver driver = {
},
.probe = ir_probe,
.remove = ir_remove,
- .command = ir_command,
.id_table = ir_transceiver_id,
};
@@ -1144,214 +1182,253 @@ static const struct file_operations lirc_fops = {
.release = close
};
-static int ir_remove(struct i2c_client *client)
+static void destroy_rx_kthread(struct IR_rx *rx)
{
- struct IR *ir = i2c_get_clientdata(client);
+ /* end up polling thread */
+ if (rx != NULL && !IS_ERR_OR_NULL(rx->task)) {
+ kthread_stop(rx->task);
+ rx->task = NULL;
+ }
+}
- mutex_lock(&ir->ir_lock);
+/* ir_devices_lock must be held */
+static int add_ir_device(struct IR *ir)
+{
+ int i;
- if (ir->have_rx || ir->have_tx) {
- DECLARE_COMPLETION(tn);
- DECLARE_COMPLETION(tn2);
-
- /* end up polling thread */
- if (ir->task && !IS_ERR(ir->task)) {
- ir->t_notify = &tn;
- ir->t_notify2 = &tn2;
- ir->shutdown = 1;
- wake_up_process(ir->task);
- complete(&tn2);
- wait_for_completion(&tn);
- ir->t_notify = NULL;
- ir->t_notify2 = NULL;
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] == NULL) {
+ ir_devices[i] = ir;
+ break;
}
- } else {
- mutex_unlock(&ir->ir_lock);
- zilog_error("%s: detached from something we didn't "
- "attach to\n", __func__);
- return -ENODEV;
+ return i == MAX_IRCTL_DEVICES ? -ENOMEM : i;
+}
+
+/* ir_devices_lock must be held */
+static void del_ir_device(struct IR *ir)
+{
+ int i;
+
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] == ir) {
+ ir_devices[i] = NULL;
+ break;
+ }
+}
+
+static int ir_remove(struct i2c_client *client)
+{
+ struct IR *ir = i2c_get_clientdata(client);
+
+ mutex_lock(&ir_devices_lock);
+
+ if (ir == NULL) {
+ /* We destroyed everything when the first client came through */
+ mutex_unlock(&ir_devices_lock);
+ return 0;
}
- /* unregister lirc driver */
- if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
- lirc_unregister_driver(ir->l.minor);
- ir_devices[ir->l.minor] = NULL;
+ /* Good-bye LIRC */
+ lirc_unregister_driver(ir->l.minor);
+
+ /* Good-bye Rx */
+ destroy_rx_kthread(ir->rx);
+ if (ir->rx != NULL) {
+ if (ir->rx->buf.fifo_initialized)
+ lirc_buffer_free(&ir->rx->buf);
+ i2c_set_clientdata(ir->rx->c, NULL);
+ kfree(ir->rx);
}
- /* free memory */
- lirc_buffer_free(&ir->buf);
- mutex_unlock(&ir->ir_lock);
+ /* Good-bye Tx */
+ i2c_set_clientdata(ir->tx->c, NULL);
+ kfree(ir->tx);
+
+ /* Good-bye IR */
+ del_ir_device(ir);
kfree(ir);
+ mutex_unlock(&ir_devices_lock);
return 0;
}
-static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_adapter(struct i2c_adapter *adapter)
{
+ int i;
struct IR *ir = NULL;
+
+ for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+ if (ir_devices[i] != NULL &&
+ ir_devices[i]->adapter == adapter) {
+ ir = ir_devices[i];
+ break;
+ }
+
+ return ir;
+}
+
+static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct IR *ir;
struct i2c_adapter *adap = client->adapter;
- char buf;
int ret;
- int have_rx = 0, have_tx = 0;
+ bool tx_probe = false;
- dprintk("%s: adapter name (%s) nr %d, i2c_device_id name (%s), "
- "client addr=0x%02x\n",
- __func__, adap->name, adap->nr, id->name, client->addr);
+ dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+ __func__, id->name, adap->nr, adap->name, client->addr);
/*
- * FIXME - This probe function probes both the Tx and Rx
- * addresses of the IR microcontroller.
- *
- * However, the I2C subsystem is passing along one I2C client at a
- * time, based on matches to the ir_transceiver_id[] table above.
- * The expectation is that each i2c_client address will be probed
- * individually by drivers so the I2C subsystem can mark all client
- * addresses as claimed or not.
- *
- * This probe routine causes only one of the client addresses, TX or RX,
- * to be claimed. This will cause a problem if the I2C subsystem is
- * subsequently triggered to probe unclaimed clients again.
+ * The IR receiver is at i2c address 0x71.
+ * The IR transmitter is at i2c address 0x70.
*/
- /*
- * The external IR receiver is at i2c address 0x71.
- * The IR transmitter is at 0x70.
- */
- client->addr = 0x70;
- if (!disable_tx) {
- if (i2c_master_recv(client, &buf, 1) == 1)
- have_tx = 1;
- dprintk("probe 0x70 @ %s: %s\n",
- adap->name, have_tx ? "success" : "failed");
- }
+ if (id->driver_data & ID_FLAG_TX)
+ tx_probe = true;
+ else if (tx_only) /* module option */
+ return -ENXIO;
- if (!disable_rx) {
- client->addr = 0x71;
- if (i2c_master_recv(client, &buf, 1) == 1)
- have_rx = 1;
- dprintk("probe 0x71 @ %s: %s\n",
- adap->name, have_rx ? "success" : "failed");
- }
+ zilog_info("probing IR %s on %s (i2c-%d)\n",
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
- if (!(have_rx || have_tx)) {
- zilog_error("%s: no devices found\n", adap->name);
- goto out_nodev;
- }
+ mutex_lock(&ir_devices_lock);
- printk(KERN_INFO "lirc_zilog: chip found with %s\n",
- have_rx && have_tx ? "RX and TX" :
- have_rx ? "RX only" : "TX only");
+ /* Use a single struct IR instance for both the Rx and Tx functions */
+ ir = find_ir_device_by_adapter(adap);
+ if (ir == NULL) {
+ ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+ if (ir == NULL) {
+ ret = -ENOMEM;
+ goto out_no_ir;
+ }
+ /* store for use in ir_probe() again, and open() later on */
+ ret = add_ir_device(ir);
+ if (ret)
+ goto out_free_ir;
+
+ ir->adapter = adap;
+ mutex_init(&ir->ir_lock);
+
+ /* set lirc_dev stuff */
+ memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l.minor = minor; /* module option */
+ ir->l.code_length = 13;
+ ir->l.rbuf = NULL;
+ ir->l.fops = &lirc_fops;
+ ir->l.data = ir;
+ ir->l.dev = &adap->dev;
+ ir->l.sample_rate = 0;
+ }
- ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+ if (tx_probe) {
+ /* Set up a struct IR_tx instance */
+ ir->tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
+ if (ir->tx == NULL) {
+ ret = -ENOMEM;
+ goto out_free_xx;
+ }
- if (!ir)
- goto out_nomem;
+ ir->tx->c = client;
+ ir->tx->need_boot = 1;
+ ir->tx->post_tx_ready_poll =
+ (id->driver_data & ID_FLAG_HDPVR) ? false : true;
+ } else {
+ /* Set up a struct IR_rx instance */
+ ir->rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
+ if (ir->rx == NULL) {
+ ret = -ENOMEM;
+ goto out_free_xx;
+ }
- ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2);
- if (ret)
- goto out_nomem;
+ ret = lirc_buffer_init(&ir->rx->buf, 2, BUFLEN / 2);
+ if (ret)
+ goto out_free_xx;
- mutex_init(&ir->ir_lock);
- mutex_init(&ir->buf_lock);
- ir->need_boot = 1;
- ir->is_hdpvr = (id->driver_data & ID_FLAG_HDPVR) ? true : false;
+ mutex_init(&ir->rx->buf_lock);
+ ir->rx->c = client;
+ ir->rx->hdpvr_data_fmt =
+ (id->driver_data & ID_FLAG_HDPVR) ? true : false;
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
- ir->l.minor = -1;
+ /* set lirc_dev stuff */
+ ir->l.rbuf = &ir->rx->buf;
+ }
- /* I2C attach to device */
i2c_set_clientdata(client, ir);
- /* initialise RX device */
- if (have_rx) {
- DECLARE_COMPLETION(tn);
- memcpy(&ir->c_rx, client, sizeof(struct i2c_client));
-
- ir->c_rx.addr = 0x71;
- strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME,
- I2C_NAME_SIZE);
+ /* Proceed only if we have the required Tx and Rx clients ready to go */
+ if (ir->tx == NULL ||
+ (ir->rx == NULL && !tx_only)) {
+ zilog_info("probe of IR %s on %s (i2c-%d) done. Waiting on "
+ "IR %s.\n", tx_probe ? "Tx" : "Rx", adap->name,
+ adap->nr, tx_probe ? "Rx" : "Tx");
+ goto out_ok;
+ }
+ /* initialise RX device */
+ if (ir->rx != NULL) {
/* try to fire up polling thread */
- ir->t_notify = &tn;
- ir->task = kthread_run(lirc_thread, ir, "lirc_zilog");
- if (IS_ERR(ir->task)) {
- ret = PTR_ERR(ir->task);
- zilog_error("lirc_register_driver: cannot run "
- "poll thread %d\n", ret);
- goto err;
+ ir->rx->task = kthread_run(lirc_thread, ir,
+ "zilog-rx-i2c-%d", adap->nr);
+ if (IS_ERR(ir->rx->task)) {
+ ret = PTR_ERR(ir->rx->task);
+ zilog_error("%s: could not start IR Rx polling thread"
+ "\n", __func__);
+ goto out_free_xx;
}
- wait_for_completion(&tn);
- ir->t_notify = NULL;
- ir->have_rx = 1;
}
- /* initialise TX device */
- if (have_tx) {
- memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
- ir->c_tx.addr = 0x70;
- strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
- I2C_NAME_SIZE);
- ir->have_tx = 1;
- }
-
- /* set lirc_dev stuff */
- ir->l.code_length = 13;
- ir->l.rbuf = &ir->buf;
- ir->l.fops = &lirc_fops;
- ir->l.data = ir;
- ir->l.minor = minor;
- ir->l.dev = &adap->dev;
- ir->l.sample_rate = 0;
-
/* register with lirc */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
- zilog_error("ir_attach: \"minor\" must be between 0 and %d "
- "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor);
+ zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+ __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
- goto err;
+ goto out_free_thread;
}
- /* store this for getting back in open() later on */
- ir_devices[ir->l.minor] = ir;
-
/*
* if we have the tx device, load the 'firmware'. We do this
* after registering with lirc as otherwise hotplug seems to take
* 10s to create the lirc device.
*/
- if (have_tx) {
- /* Special TX init */
- ret = tx_init(ir);
- if (ret != 0)
- goto err;
- }
+ ret = tx_init(ir->tx);
+ if (ret != 0)
+ goto out_unregister;
+ zilog_info("probe of IR %s on %s (i2c-%d) done. IR unit ready.\n",
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+out_ok:
+ mutex_unlock(&ir_devices_lock);
return 0;
-err:
- /* undo everything, hopefully... */
- if (ir->c_rx.addr)
- ir_remove(&ir->c_rx);
- if (ir->c_tx.addr)
- ir_remove(&ir->c_tx);
- return ret;
-
-out_nodev:
- zilog_error("no device found\n");
- return -ENODEV;
-
-out_nomem:
- zilog_error("memory allocation failure\n");
+out_unregister:
+ lirc_unregister_driver(ir->l.minor);
+out_free_thread:
+ destroy_rx_kthread(ir->rx);
+out_free_xx:
+ if (ir->rx != NULL) {
+ if (ir->rx->buf.fifo_initialized)
+ lirc_buffer_free(&ir->rx->buf);
+ if (ir->rx->c != NULL)
+ i2c_set_clientdata(ir->rx->c, NULL);
+ kfree(ir->rx);
+ }
+ if (ir->tx != NULL) {
+ if (ir->tx->c != NULL)
+ i2c_set_clientdata(ir->tx->c, NULL);
+ kfree(ir->tx);
+ }
+out_free_ir:
+ del_ir_device(ir);
kfree(ir);
- return -ENOMEM;
-}
-
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
- /* nothing */
- return 0;
+out_no_ir:
+ zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+ __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
+ ret);
+ mutex_unlock(&ir_devices_lock);
+ return ret;
}
static int __init zilog_init(void)
@@ -1361,6 +1438,7 @@ static int __init zilog_init(void)
zilog_notify("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
+ mutex_init(&ir_devices_lock);
request_module("firmware_class");
@@ -1386,7 +1464,8 @@ module_exit(zilog_exit);
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
- "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver");
+ "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
+ "Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
@@ -1397,8 +1476,5 @@ MODULE_PARM_DESC(minor, "Preferred minor device number");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
-module_param(disable_rx, bool, 0644);
-MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device");
-
-module_param(disable_tx, bool, 0644);
-MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device");
+module_param(tx_only, bool, 0644);
+MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
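The reworked ir_probe() above replaces the old probe-both-addresses logic with a two-phase scheme: each i2c client (Tx at 0x70, Rx at 0x71) finds or creates a struct IR shared per adapter under ir_devices_lock, and registration with lirc plus tx_init() are deferred until the required Tx (and, unless tx_only is set, Rx) clients have both probed. A minimal standalone sketch of that find-or-create pattern, with illustrative names and pthread locking standing in for the kernel mutex (build with cc -pthread):

/* Illustrative sketch only; not the lirc_zilog API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEVICES 4

struct unit {
        int adapter_nr;         /* key: which adapter this unit belongs to */
        int have_tx, have_rx;   /* which halves have probed so far */
};

static struct unit *units[MAX_DEVICES];
static pthread_mutex_t units_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find the unit for an adapter, or create it on first use. */
static struct unit *get_unit(int adapter_nr)
{
        int i, free_slot = -1;

        for (i = 0; i < MAX_DEVICES; i++) {
                if (units[i] && units[i]->adapter_nr == adapter_nr)
                        return units[i];
                if (!units[i] && free_slot < 0)
                        free_slot = i;
        }
        if (free_slot < 0)
                return NULL;
        units[free_slot] = calloc(1, sizeof(struct unit));
        if (units[free_slot])
                units[free_slot]->adapter_nr = adapter_nr;
        return units[free_slot];
}

/* One call per half; the unit goes live only when both halves are present. */
static int probe_half(int adapter_nr, int is_tx)
{
        struct unit *u;
        int ready;

        pthread_mutex_lock(&units_lock);
        u = get_unit(adapter_nr);
        if (!u) {
                pthread_mutex_unlock(&units_lock);
                return -1;
        }
        if (is_tx)
                u->have_tx = 1;
        else
                u->have_rx = 1;
        ready = u->have_tx && u->have_rx;
        pthread_mutex_unlock(&units_lock);

        printf("adapter %d: %s probed, unit %s\n", adapter_nr,
               is_tx ? "Tx" : "Rx",
               ready ? "ready" : "waiting for other half");
        return 0;
}

int main(void)
{
        probe_half(0, 1);       /* Tx client probes first ...            */
        probe_half(0, 0);       /* ... Rx client completes the same unit */
        return 0;
}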
diff --git a/drivers/staging/memrar/memrar.h b/drivers/staging/memrar/memrar.h
index 0b735b827c09..0feb73b94c91 100644
--- a/drivers/staging/memrar/memrar.h
+++ b/drivers/staging/memrar/memrar.h
@@ -95,6 +95,7 @@ struct RAR_buffer {
dma_addr_t bus_address;
};
+#if defined(CONFIG_MRST_RAR_HANDLER)
/**
* rar_reserve() - reserve RAR buffers
* @buffers: array of RAR_buffers where type and size of buffers to
@@ -149,7 +150,25 @@ extern size_t rar_release(struct RAR_buffer *buffers,
extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
size_t count);
+#else
+extern inline size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
+{
+ return 0;
+}
+
+extern inline size_t rar_release(struct RAR_buffer *buffers, size_t count)
+{
+ return 0;
+}
+
+extern inline size_t rar_handle_to_bus(struct RAR_buffer *buffers,
+ size_t count)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MRST_RAR_HANDLER */
#endif /* __KERNEL__ */
#endif /* _MEMRAR_H */
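The hunk above wraps the real RAR prototypes in CONFIG_MRST_RAR_HANDLER and provides inline stubs that return 0 otherwise, so callers can invoke the helpers without sprinkling #ifdefs at every call site. A toy reduction of that pattern; the names are invented for illustration and static inline is used so the sketch builds on its own:

#include <stdio.h>
#include <stddef.h>

struct buffer { size_t size; };

#ifdef HAVE_FEATURE
size_t feature_reserve(struct buffer *bufs, size_t count); /* real code elsewhere */
#else
/* Feature configured out: a no-op stub keeps callers compiling and linking. */
static inline size_t feature_reserve(struct buffer *bufs, size_t count)
{
        (void)bufs;
        (void)count;
        return 0;       /* nothing reserved */
}
#endif

int main(void)
{
        struct buffer bufs[2] = { { 4096 }, { 4096 } };

        /* No #ifdef needed here; 0 simply means "nothing was reserved". */
        size_t got = feature_reserve(bufs, 2);

        printf("reserved %zu of 2 buffers\n", got);
        return 0;
}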
diff --git a/drivers/staging/msm/Makefile b/drivers/staging/msm/Makefile
index bb3606faf20e..07a89ecfcc2b 100644
--- a/drivers/staging/msm/Makefile
+++ b/drivers/staging/msm/Makefile
@@ -41,11 +41,11 @@ obj-$(CONFIG_FB_MSM_EBI2) += ebi2_lcd.o
obj-$(CONFIG_FB_MSM_LCDC) += lcdc.o
# MDDI
-msm_mddi-objs := mddi.o mddihost.o mddihosti.o
+msm_mddi-y := mddi.o mddihost.o mddihosti.o
obj-$(CONFIG_FB_MSM_MDDI) += msm_mddi.o
# External MDDI
-msm_mddi_ext-objs := mddihost_e.o mddi_ext.o
+msm_mddi_ext-y := mddihost_e.o mddi_ext.o
obj-$(CONFIG_FB_MSM_EXTMDDI) += msm_mddi_ext.o
# TVEnc
diff --git a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
index 864d7c18913d..edba78a3afcf 100644
--- a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
+++ b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
@@ -77,7 +77,7 @@ static void toshiba_spi_write(char cmd, uint32 data, int num)
/* followed by parameter bytes */
if (num) {
- bp = (char *)&data;;
+ bp = (char *)&data;
bp += (num - 1);
while (num) {
toshiba_spi_write_byte(1, *bp);
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
index 23fa049b51f2..a2f29d464051 100644
--- a/drivers/staging/msm/msm_fb.c
+++ b/drivers/staging/msm/msm_fb.c
@@ -347,7 +347,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
- acquire_console_sem();
+ console_lock();
fb_set_suspend(mfd->fbi, 1);
ret = msm_fb_suspend_sub(mfd);
@@ -358,7 +358,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
pdev->dev.power.power_state = state;
}
- release_console_sem();
+ console_unlock();
return ret;
}
#else
@@ -431,11 +431,11 @@ static int msm_fb_resume(struct platform_device *pdev)
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
- acquire_console_sem();
+ console_lock();
ret = msm_fb_resume_sub(mfd);
pdev->dev.power.power_state = PMSG_ON;
fb_set_suspend(mfd->fbi, 1);
- release_console_sem();
+ console_unlock();
return ret;
}
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
index 033fc9486e01..2a8077511fc0 100644
--- a/drivers/staging/msm/msm_fb_bl.c
+++ b/drivers/staging/msm/msm_fb_bl.c
@@ -42,7 +42,7 @@ static int msm_fb_bl_update_status(struct backlight_device *pbd)
return 0;
}
-static struct backlight_ops msm_fb_bl_ops = {
+static const struct backlight_ops msm_fb_bl_ops = {
.get_brightness = msm_fb_bl_get_brightness,
.update_status = msm_fb_bl_update_status,
};
diff --git a/drivers/staging/msm/tvenc.c b/drivers/staging/msm/tvenc.c
index f41c5ac22f25..4fbb77b253d3 100644
--- a/drivers/staging/msm/tvenc.c
+++ b/drivers/staging/msm/tvenc.c
@@ -279,12 +279,13 @@ static int __init tvenc_driver_init(void)
if (IS_ERR(tvenc_clk)) {
printk(KERN_ERR "error: can't get tvenc_clk!\n");
- return IS_ERR(tvenc_clk);
+ return PTR_ERR(tvenc_clk);
}
if (IS_ERR(tvdac_clk)) {
printk(KERN_ERR "error: can't get tvdac_clk!\n");
- return IS_ERR(tvdac_clk);
+ clk_put(tvenc_clk);
+ return PTR_ERR(tvdac_clk);
}
// pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc",
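The tvenc fix above propagates PTR_ERR() instead of IS_ERR() and drops the tvenc_clk reference before bailing out on a bad tvdac_clk. IS_ERR() only answers whether a pointer encodes an error, while PTR_ERR() recovers the negative errno that callers expect. The helpers below are simplified stand-ins for <linux/err.h>, written only to show the difference:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline long IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Pretend clock getter that can fail, like clk_get(). */
static void *clk_get_demo(int fail)
{
        static int real_clock;

        return fail ? ERR_PTR(-ENOENT) : (void *)&real_clock;
}

int main(void)
{
        void *clk = clk_get_demo(1);

        if (IS_ERR(clk)) {
                printf("IS_ERR(clk)  = %ld  (just a yes/no, useless as an error code)\n",
                       IS_ERR(clk));
                printf("PTR_ERR(clk) = %ld  (the real -ENOENT to propagate)\n",
                       PTR_ERR(clk));
                return 1;
        }
        return 0;
}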
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index ac2d3d023715..35f9cda7be11 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,6 +1,5 @@
TODO:
- checkpatch.pl cleanups
- - port geode gpio calls to newer cs5535 API
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- allow simultaneous XO-1 and XO-1.5 support
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 4ca45ec7fd84..56a283d1a74d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -27,7 +27,6 @@
#include <asm/uaccess.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
-#include <linux/gpio.h>
#include <asm/tsc.h>
#include <asm/olpc.h>
@@ -49,7 +48,7 @@ struct dcon_platform_data {
int (*init)(void);
void (*bus_stabilize_wiggle)(void);
void (*set_dconload)(int);
- int (*read_status)(void);
+ u8 (*read_status)(void);
};
static struct dcon_platform_data *pdata;
@@ -374,17 +373,17 @@ static void dcon_source_switch(struct work_struct *work)
*
* For now, we just hope..
*/
- acquire_console_sem();
+ console_lock();
ignore_fb_events = 1;
if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) {
ignore_fb_events = 0;
- release_console_sem();
+ console_unlock();
printk(KERN_ERR "olpc-dcon: Failed to enter CPU mode\n");
dcon_pending = DCON_SOURCE_DCON;
return;
}
ignore_fb_events = 0;
- release_console_sem();
+ console_unlock();
/* And turn off the DCON */
pdata->set_dconload(1);
@@ -436,12 +435,12 @@ static void dcon_source_switch(struct work_struct *work)
}
}
- acquire_console_sem();
+ console_lock();
ignore_fb_events = 1;
if (fb_blank(fbinfo, FB_BLANK_POWERDOWN))
printk(KERN_ERR "olpc-dcon: couldn't blank fb!\n");
ignore_fb_events = 0;
- release_console_sem();
+ console_unlock();
printk(KERN_INFO "olpc-dcon: The DCON has control\n");
break;
@@ -615,7 +614,7 @@ static struct device_attribute dcon_device_files[] = {
__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
};
-static struct backlight_ops dcon_bl_ops = {
+static const struct backlight_ops dcon_bl_ops = {
.get_brightness = dconbl_get,
.update_status = dconbl_set
};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 6453ca4ba0ee..e566d213da2a 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -29,26 +29,6 @@
#define DCON_REG_SCAN_INT 9
#define DCON_REG_BRIGHT 10
-/* GPIO registers (CS5536) */
-
-#define MSR_LBAR_GPIO 0x5140000C
-
-#define GPIOx_OUT_VAL 0x00
-#define GPIOx_OUT_EN 0x04
-#define GPIOx_IN_EN 0x20
-#define GPIOx_INV_EN 0x24
-#define GPIOx_IN_FLTR_EN 0x28
-#define GPIOx_EVNTCNT_EN 0x2C
-#define GPIOx_READ_BACK 0x30
-#define GPIOx_EVNT_EN 0x38
-#define GPIOx_NEGEDGE_EN 0x44
-#define GPIOx_NEGEDGE_STS 0x4C
-#define GPIO_FLT7_AMNT 0xD8
-#define GPIO_MAP_X 0xE0
-#define GPIO_MAP_Y 0xE4
-#define GPIO_FE7_SEL 0xF7
-
-
/* Status values */
#define DCONSTAT_SCANINT 0
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 779fb7d7b30c..043198dc6ff7 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -10,54 +10,70 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
#include <asm/olpc.h>
#include "olpc_dcon.h"
-/* Base address of the GPIO registers */
-static unsigned long gpio_base;
-
-/*
- * List of GPIOs that we care about:
- * (in) GPIO12 -- DCONBLANK
- * (in) GPIO[56] -- DCONSTAT[01]
- * (out) GPIO11 -- DCONLOAD
- */
-
-#define IN_GPIOS ((1<<5) | (1<<6) | (1<<7) | (1<<12))
-#define OUT_GPIOS (1<<11)
-
static int dcon_init_xo_1(void)
{
- unsigned long lo, hi;
unsigned char lob;
- rdmsr(MSR_LBAR_GPIO, lo, hi);
-
- /* Check the mask and whether GPIO is enabled (sanity check) */
- if (hi != 0x0000f001) {
- printk(KERN_ERR "GPIO not enabled -- cannot use DCON\n");
- return -ENODEV;
+ if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+ printk(KERN_ERR "olpc-dcon: failed to request STAT0 GPIO\n");
+ return -EIO;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+ printk(KERN_ERR "olpc-dcon: failed to request STAT1 GPIO\n");
+ goto err_gp_stat1;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+ printk(KERN_ERR "olpc-dcon: failed to request IRQ GPIO\n");
+ goto err_gp_irq;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+ printk(KERN_ERR "olpc-dcon: failed to request LOAD GPIO\n");
+ goto err_gp_load;
+ }
+ if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+ printk(KERN_ERR "olpc-dcon: failed to request BLANK GPIO\n");
+ goto err_gp_blank;
}
-
- /* Mask off the IO base address */
- gpio_base = lo & 0x0000ff00;
/* Turn off the event enable for GPIO7 just to be safe */
- outl(1 << (16+7), gpio_base + GPIOx_EVNT_EN);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+ /*
+ * Determine the current state by reading the GPIO bit; earlier
+ * stages of the boot process have established the state.
+ *
+ * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+ * this is because OFW will disable input for the pin and set a value.
+ * READ_BACK will only contain a valid value if input is enabled and
+ * then a value is set. So, future readings of the pin can use
+ * READ_BACK, but the first one cannot. Awesome, huh?
+ */
+ dcon_source = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+ ? DCON_SOURCE_CPU
+ : DCON_SOURCE_DCON;
+ dcon_pending = dcon_source;
/* Set the directions for the GPIO pins */
- outl(OUT_GPIOS | (IN_GPIOS << 16), gpio_base + GPIOx_OUT_EN);
- outl(IN_GPIOS | (OUT_GPIOS << 16), gpio_base + GPIOx_IN_EN);
+ gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+ gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+ gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+ gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+ gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+ dcon_source == DCON_SOURCE_CPU);
/* Set up the interrupt mappings */
/* Set the IRQ to pair 2 */
- geode_gpio_event_irq(OLPC_GPIO_DCON_IRQ, 2);
+ cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
/* Enable group 2 to trigger the DCON interrupt */
- geode_gpio_set_irq(2, DCON_IRQ);
+ cs5535_gpio_set_irq(2, DCON_IRQ);
/* Select edge level for interrupt (in PIC) */
lob = inb(0x4d0);
@@ -65,52 +81,61 @@ static int dcon_init_xo_1(void)
outb(lob, 0x4d0);
/* Register the interupt handler */
- if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver))
- return -EIO;
+ if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) {
+ printk(KERN_ERR "olpc-dcon: failed to request DCON's irq\n");
+ goto err_req_irq;
+ }
/* Clear INV_EN for GPIO7 (DCONIRQ) */
- outl((1<<(16+7)), gpio_base + GPIOx_INV_EN);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
/* Enable filter for GPIO12 (DCONBLANK) */
- outl(1<<(12), gpio_base + GPIOx_IN_FLTR_EN);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
/* Disable filter for GPIO7 */
- outl(1<<(16+7), gpio_base + GPIOx_IN_FLTR_EN);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-
- outl(1<<(16+7), gpio_base + GPIOx_EVNTCNT_EN);
- outl(1<<(16+12), gpio_base + GPIOx_EVNTCNT_EN);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
/* Add GPIO12 to the Filter Event Pair #7 */
- outb(12, gpio_base + GPIO_FE7_SEL);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
/* Turn off negative Edge Enable for GPIO12 */
- outl(1<<(16+12), gpio_base + GPIOx_NEGEDGE_EN);
+ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
/* Enable negative Edge Enable for GPIO7 */
- outl(1<<7, gpio_base + GPIOx_NEGEDGE_EN);
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
/* Zero the filter amount for Filter Event Pair #7 */
- outw(0, gpio_base + GPIO_FLT7_AMNT);
+ cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
/* Clear the negative edge status for GPIO7 and GPIO12 */
- outl((1<<7) | (1<<12), gpio_base+0x4c);
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
/* FIXME: Clear the posiitive status as well, just to be sure */
- outl((1<<7) | (1<<12), gpio_base+0x48);
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
- outl((1<<(7))|(1<<12), gpio_base + GPIOx_EVNT_EN);
-
- /* Determine the current state by reading the GPIO bit */
- /* Earlier stages of the boot process have established the state */
- dcon_source = inl(gpio_base + GPIOx_OUT_VAL) & (1<<11)
- ? DCON_SOURCE_CPU
- : DCON_SOURCE_DCON;
- dcon_pending = dcon_source;
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
return 0;
+
+err_req_irq:
+ gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+ gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+ gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+ gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+ gpio_free(OLPC_GPIO_DCON_STAT0);
+ return -EIO;
}
static void dcon_wiggle_xo_1(void)
@@ -128,37 +153,44 @@ static void dcon_wiggle_xo_1(void)
* simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
* GPIO15.
*/
- geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
- geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
- geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
- geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
- geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
for (x = 0; x < 16; x++) {
udelay(5);
- geode_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
udelay(5);
- geode_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
}
udelay(5);
- geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
- geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+ cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
}
static void dcon_set_dconload_1(int val)
{
- if (val)
- outl(1<<11, gpio_base + GPIOx_OUT_VAL);
- else
- outl(1<<(11 + 16), gpio_base + GPIOx_OUT_VAL);
+ gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
-static int dcon_read_status_xo_1(void)
+static u8 dcon_read_status_xo_1(void)
{
- int status = inl(gpio_base + GPIOx_READ_BACK) >> 5;
-
+ u8 status;
+
+ status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+ status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
/* Clear the negative edge status for GPIO7 */
- outl(1 << 7, gpio_base + GPIOx_NEGEDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
return status;
}
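dcon_init_xo_1() now claims its GPIOs one by one and, on any failure, unwinds with a goto ladder that frees everything acquired so far in reverse order. A compact standalone sketch of that acquire/unwind structure; claim() and release() are stand-ins for gpio_request() and gpio_free():

#include <stdio.h>

static int claim(const char *name)    { printf("claim %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int init_unit(void)
{
        if (claim("stat0"))
                return -1;
        if (claim("stat1"))
                goto err_stat1;
        if (claim("irq"))
                goto err_irq;
        if (claim("load"))
                goto err_load;

        return 0;       /* everything acquired */

        /* Each label releases what was acquired before the failing step,
         * falling through so earlier resources are freed too. */
err_load:
        release("irq");
err_irq:
        release("stat1");
err_stat1:
        release("stat0");
        return -1;
}

int main(void)
{
        return init_unit() ? 1 : 0;
}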
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index cca6a235ef96..4f56098bb366 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -195,9 +195,9 @@ static void dcon_set_dconload_xo_1_5(int val)
}
}
-static int dcon_read_status_xo_1_5(void)
+static u8 dcon_read_status_xo_1_5(void)
{
- int status;
+ u8 status;
if (!dcon_was_irq())
return -1;
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 677152044f45..683657cb21f5 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -69,7 +69,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static const struct pci_device_id phison_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
{ PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
index 2fdb3e01460d..6540864216c8 100644
--- a/drivers/staging/pohmelfs/crypto.c
+++ b/drivers/staging/pohmelfs/crypto.c
@@ -130,10 +130,8 @@ err_out_exit:
void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e)
{
- if (e->hash)
- crypto_free_hash(e->hash);
- if (e->cipher)
- crypto_free_ablkcipher(e->cipher);
+ crypto_free_hash(e->hash);
+ crypto_free_ablkcipher(e->cipher);
kfree(e->data);
}
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
index 9279897ff161..b2e918622088 100644
--- a/drivers/staging/pohmelfs/net.c
+++ b/drivers/staging/pohmelfs/net.c
@@ -413,7 +413,7 @@ static int pohmelfs_readdir_response(struct netfs_state *st)
if (dentry) {
alias = d_materialise_unique(dentry, &npi->vfs_inode);
if (alias)
- dput(dentry);
+ dput(alias);
}
dput(dentry);
diff --git a/drivers/staging/rt2860/chip/mac_pci.h b/drivers/staging/rt2860/chip/mac_pci.h
index 9f25ef047f59..b8868a5b9e04 100644
--- a/drivers/staging/rt2860/chip/mac_pci.h
+++ b/drivers/staging/rt2860/chip/mac_pci.h
@@ -30,7 +30,8 @@
Abstract:
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix some typos
--------- ---------- ----------------------------------------------
*/
@@ -45,7 +46,7 @@
/* */
/* Device ID & Vendor ID related definitions, */
-/* NOTE: you should not add the new VendorID/DeviceID here unless you not sure it belongs to what chip. */
+/* NOTE: you should not add the new VendorID/DeviceID here unless you know for sure what chip it belongs to. */
/* */
#define NIC_PCI_VENDOR_ID 0x1814
#define PCIBUS_INTEL_VENDOR 0x8086
@@ -83,7 +84,7 @@ struct PACKED rt_txd {
u32 SDPtr1;
/*Word3 */
u32 rsv2:24;
- u32 WIV:1; /* Wireless Info Valid. 1 if Driver already fill WI, o if DMA needs to copy WI to correctposition */
+ u32 WIV:1; /* Wireless Info Valid. 1 if driver has already filled in WI, 0 if DMA needs to copy WI to the correct position */
u32 QSEL:2; /* select on-chip FIFO ID for 2nd-stage output scheduler.0:MGMT, 1:HCCA 2:EDCA */
u32 rsv:2;
u32 TCO:1; /* */
diff --git a/drivers/staging/rt2860/chip/mac_usb.h b/drivers/staging/rt2860/chip/mac_usb.h
index ed0c0b43b05e..e8158fb58648 100644
--- a/drivers/staging/rt2860/chip/mac_usb.h
+++ b/drivers/staging/rt2860/chip/mac_usb.h
@@ -30,7 +30,8 @@
Abstract:
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix a typo
--------- ---------- ----------------------------------------------
*/
@@ -93,7 +94,7 @@ struct rt_txinfo {
/* Word 0 */
u32 USBDMATxPktLen:16; /*used ONLY in USB bulk Aggregation, Total byte counts of all sub-frame. */
u32 rsv:8;
- u32 WIV:1; /* Wireless Info Valid. 1 if Driver already fill WI, o if DMA needs to copy WI to correctposition */
+ u32 WIV:1; /* Wireless Info Valid. 1 if driver has already filled in WI, 0 if DMA needs to copy WI to the correct position */
u32 QSEL:2; /* select on-chip FIFO ID for 2nd-stage output scheduler.0:MGMT, 1:HCCA 2:EDCA */
u32 SwUseLastRound:1; /* Software use. */
u32 rsv2:2; /* Software use. */
diff --git a/drivers/staging/rt2860/chip/rtmp_mac.h b/drivers/staging/rt2860/chip/rtmp_mac.h
index e8f7172ce42a..3d1e4915b956 100644
--- a/drivers/staging/rt2860/chip/rtmp_mac.h
+++ b/drivers/staging/rt2860/chip/rtmp_mac.h
@@ -32,6 +32,7 @@
Revision History:
Who When What
+ Justin P. Mattock 11/07/2010 Fix comments and typos
-------- ---------- ----------------------------------------------
*/
@@ -43,7 +44,7 @@
/* ================================================================================= */
/* the first 24-byte in TXD is called TXINFO and will be DMAed to MAC block through TXFIFO. */
-/* MAC block use this TXINFO to control the transmission behavior of this frame. */
+/* MAC block uses this TXINFO to control the transmission behavior of this frame. */
#define FIFO_MGMT 0
#define FIFO_HCCA 1
#define FIFO_EDCA 2
@@ -458,8 +459,8 @@ typedef union _BBP_CSR_CFG_STRUC {
/* */
typedef union _RF_CSR_CFG0_STRUC {
struct {
- u32 RegIdAndContent:24; /* Register value to program into BBP */
- u32 bitwidth:5; /* Selected BBP register */
+ u32 RegIdAndContent:24; /* Register value to program into BBP */
+ u32 bitwidth:5; /* Selected BBP register */
u32 StandbyMode:1; /* 0: high when stand by 1: low when standby */
u32 Sel:1; /* 0:RF_LE0 activate 1:RF_LE1 activate */
u32 Busy:1; /* 0: idle 1: 8busy */
@@ -469,7 +470,7 @@ typedef union _RF_CSR_CFG0_STRUC {
#define RF_CSR_CFG1 0x1024
typedef union _RF_CSR_CFG1_STRUC {
struct {
- u32 RegIdAndContent:24; /* Register value to program into BBP */
+ u32 RegIdAndContent:24; /* Register value to program into BBP */
u32 RFGap:5; /* Gap between BB_CONTROL_RF and RF_LE. 0: 3 system clock cycle (37.5usec) 1: 5 system clock cycle (62.5usec) */
u32 rsv:7; /* 0: idle 1: 8busy */
} field;
@@ -478,7 +479,7 @@ typedef union _RF_CSR_CFG1_STRUC {
#define RF_CSR_CFG2 0x1028 /* */
typedef union _RF_CSR_CFG2_STRUC {
struct {
- u32 RegIdAndContent:24; /* Register value to program into BBP */
+ u32 RegIdAndContent:24; /* Register value to program into BBP */
u32 rsv:8; /* 0: idle 1: 8busy */
} field;
u32 word;
@@ -490,7 +491,7 @@ typedef union _LED_CFG_STRUC {
u32 OffPeriod:8; /* blinking off period unit 1ms */
u32 SlowBlinkPeriod:6; /* slow blinking period. unit:1ms */
u32 rsv:2;
- u32 RLedMode:2; /* red Led Mode 0: off1: blinking upon TX2: periodic slow blinking3: always on */
+ u32 RLedMode:2; /* red LED mode: 0: off, 1: blinking upon TX, 2: periodic slow blinking, 3: always on */
u32 GLedMode:2; /* green Led Mode */
u32 YLedMode:2; /* yellow Led Mode */
u32 LedPolar:1; /* Led Polarity. 0: active low1: active high */
@@ -621,9 +622,9 @@ typedef union _TX_TIMEOUT_CFG_STRUC {
#define TX_RTY_CFG 0x134c
typedef union PACKED _TX_RTY_CFG_STRUC {
struct {
- u32 ShortRtyLimit:8; /* short retry limit */
- u32 LongRtyLimit:8; /*long retry limit */
- u32 LongRtyThre:12; /* Long retry threshoold */
+ u32 ShortRtyLimit:8; /* short retry limit */
+ u32 LongRtyLimit:8; /* long retry limit */
+ u32 LongRtyThre:12; /* Long retry threshold */
u32 NonAggRtyMode:1; /* Non-Aggregate MPDU retry mode. 0:expired by retry limit, 1: expired by mpdu life timer */
u32 AggRtyMode:1; /* Aggregate MPDU retry mode. 0:expired by retry limit, 1: expired by mpdu life timer */
u32 TxautoFBEnable:1; /* Tx retry PHY rate auto fallback enable */
diff --git a/drivers/staging/rt2860/chip/rtmp_phy.h b/drivers/staging/rt2860/chip/rtmp_phy.h
index 9f924ea6ca35..98454df30a22 100644
--- a/drivers/staging/rt2860/chip/rtmp_phy.h
+++ b/drivers/staging/rt2860/chip/rtmp_phy.h
@@ -247,7 +247,7 @@
} \
} \
if (BbpCsr.field.Busy == BUSY) { \
- DBGPRINT_ERR(("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID)); \
+ DBGPRINT_ERR("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID); \
*(_pV) = (_pAd)->BbpWriteLatch[_bbpID]; \
if ((_bViaMCU) == TRUE) { \
RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word); \
@@ -336,11 +336,11 @@
} \
} \
} else { \
- DBGPRINT_ERR((" , brt30xxBanMcuCmd = %d, Read BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I))); \
+ DBGPRINT_ERR(" , brt30xxBanMcuCmd = %d, Read BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I)); \
*(_pV) = (_A)->BbpWriteLatch[_I]; \
} \
if ((BbpCsr.field.Busy == BUSY) || ((_A)->bPCIclkOff == TRUE)) { \
- DBGPRINT_ERR(("BBP read R%d=0x%x fail\n", _I, BbpCsr.word)); \
+ DBGPRINT_ERR("BBP read R%d=0x%x fail\n", _I, BbpCsr.word); \
*(_pV) = (_A)->BbpWriteLatch[_I]; \
} \
}
@@ -378,7 +378,7 @@
break; \
} \
if (_busyCnt == MAX_BUSY_COUNT) { \
- DBGPRINT_ERR(("BBP write R%d fail\n", _bbpID)); \
+ DBGPRINT_ERR("BBP write R%d fail\n", _bbpID); \
if ((_bViaMCU) == TRUE) { \
RTMP_IO_READ32(_pAd, H2M_BBP_AGENT, &BbpCsr.word); \
BbpCsr.field.Busy = 0; \
@@ -459,15 +459,15 @@
break; \
} \
} else { \
- DBGPRINT_ERR((" brt30xxBanMcuCmd = %d. Write BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I))); \
+ DBGPRINT_ERR(" brt30xxBanMcuCmd = %d. Write BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I)); \
} \
if ((BusyCnt == MAX_BUSY_COUNT) || ((_A)->bPCIclkOff == TRUE)) { \
if (BusyCnt == MAX_BUSY_COUNT) \
(_A)->AccessBBPFailCount++; \
- DBGPRINT_ERR(("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff)); \
+ DBGPRINT_ERR("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff); \
} \
} else { \
- DBGPRINT_ERR(("****** BBP_Write_Latch Buffer exceeds max boundry ****** \n")); \
+ DBGPRINT_ERR("****** BBP_Write_Latch Buffer exceeds max boundary ****** \n"); \
} \
}
#endif /* RTMP_MAC_PCI // */
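The DBGPRINT_ERR((...)) to DBGPRINT_ERR(...) conversions in this and the following rt2860 files suggest the macro now takes a variadic argument list; the old double parentheses existed only to pass a whole printf-style argument list through a single macro parameter. A minimal illustration with generic macro names (not the rt2860 headers); ##__VA_ARGS__ is the GNU extension the kernel relies on:

#include <stdio.h>

/* Old trick: the entire argument list rides inside one macro parameter. */
#define DBG_OLD(args)           printf args
/* New style: a variadic macro, so the extra parentheses can go away. */
#define DBG_NEW(fmt, ...)       printf(fmt, ##__VA_ARGS__)

int main(void)
{
        int x = 42;

        DBG_OLD(("old style: x=%d\n", x));      /* double parentheses required */
        DBG_NEW("new style: x=%d\n", x);        /* plain call, as in the hunks above */
        DBG_NEW("works with no arguments too\n"); /* ## swallows the trailing comma */
        return 0;
}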
diff --git a/drivers/staging/rt2860/chips/rt3090.c b/drivers/staging/rt2860/chips/rt3090.c
index c2933c69bc04..334720ee1345 100644
--- a/drivers/staging/rt2860/chips/rt3090.c
+++ b/drivers/staging/rt2860/chips/rt3090.c
@@ -28,10 +28,11 @@
rt3090.c
Abstract:
- Specific funcitons and variables for RT3070
+ Specific functions and variables for RT3070
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix a typo
-------- ---------- ----------------------------------------------
*/
@@ -51,7 +52,8 @@ void NICInitRT3090RFRegisters(struct rt_rtmp_adapter *pAd)
if (IS_RT3090(pAd)) {
/* Init RF calibration */
/* Driver should toggle RF R30 bit7 before init RF registers */
- u32 RfReg = 0, data;
+ u8 RfReg;
+ u32 data;
RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
RfReg |= 0x80;
diff --git a/drivers/staging/rt2860/chips/rt30xx.c b/drivers/staging/rt2860/chips/rt30xx.c
index 4367a196aeff..354debfe1477 100644
--- a/drivers/staging/rt2860/chips/rt30xx.c
+++ b/drivers/staging/rt2860/chips/rt30xx.c
@@ -28,10 +28,11 @@
rt30xx.c
Abstract:
- Specific funcitons and variables for RT30xx.
+ Specific functions and variables for RT30xx.
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix some typos
-------- ---------- ----------------------------------------------
*/
@@ -53,7 +54,7 @@ struct rt_reg_pair RT30xx_RFRegTable[] = {
,
{RF_R06, 0x02}
,
- {RF_R07, 0x70}
+ {RF_R07, 0x60}
,
{RF_R09, 0x0F}
,
@@ -89,7 +90,7 @@ struct rt_reg_pair RT30xx_RFRegTable[] = {
u8 NUM_RF_REG_PARMS = (sizeof(RT30xx_RFRegTable) / sizeof(struct rt_reg_pair));
-/* Antenna divesity use GPIO3 and EESK pin for control */
+/* Antenna diversity use GPIO3 and EESK pin for control */
/* Antenna and EEPROM access are both using EESK pin, */
/* Therefor we should avoid accessing EESK at the same time */
/* Then restore antenna after EEPROM access */
@@ -243,7 +244,7 @@ void RTMPFilterCalibration(struct rt_rtmp_adapter *pAd)
break;
}
- /* prevent infinite loop cause driver hang. */
+ /* prevent an infinite loop that would hang the driver */
if (loopcnt++ > 100) {
DBGPRINT(RT_DEBUG_ERROR,
("RTMPFilterCalibration - can't find a valid value, loopcnt=%d stop calibrating",
@@ -441,7 +442,7 @@ void RT30xxReverseRFSleepModeSetup(struct rt_rtmp_adapter *pAd)
/* VCO_IC, RF R7 register Bit 4 & Bit 5 to 1 */
RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
- RFValue |= 0x30;
+ RFValue |= 0x20;
RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
/* Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 1 */
diff --git a/drivers/staging/rt2860/common/ba_action.c b/drivers/staging/rt2860/common/ba_action.c
index 8eef82d92621..b046c2b814c5 100644
--- a/drivers/staging/rt2860/common/ba_action.c
+++ b/drivers/staging/rt2860/common/ba_action.c
@@ -799,8 +799,8 @@ void BAOriSessionTearDown(struct rt_rtmp_adapter *pAd,
/* force send specified TID DelBA */
struct rt_mlme_delba_req DelbaReq;
struct rt_mlme_queue_elem *Elem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
- MEM_ALLOC_FLAG);
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
+ MEM_ALLOC_FLAG);
if (Elem != NULL) {
NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -839,8 +839,8 @@ void BAOriSessionTearDown(struct rt_rtmp_adapter *pAd,
&& (pBAEntry->ORI_BA_Status == Originator_Done)) {
struct rt_mlme_delba_req DelbaReq;
struct rt_mlme_queue_elem *Elem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
- MEM_ALLOC_FLAG);
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
+ MEM_ALLOC_FLAG);
if (Elem != NULL) {
NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -908,8 +908,8 @@ void BARecSessionTearDown(struct rt_rtmp_adapter *pAd,
/* */
if (bPassive == FALSE) {
struct rt_mlme_queue_elem *Elem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
- MEM_ALLOC_FLAG);
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
+ MEM_ALLOC_FLAG);
if (Elem != NULL) {
NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -1270,13 +1270,13 @@ BOOLEAN CntlEnqueueForRecv(struct rt_rtmp_adapter *pAd,
/* First check the size, it MUST not exceed the mlme queue size */
if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
- DBGPRINT_ERR(("CntlEnqueueForRecv: frame too large, size = %ld \n", MsgLen));
+ DBGPRINT_ERR("CntlEnqueueForRecv: frame too large, size = %ld \n", MsgLen);
return FALSE;
} else if (MsgLen != sizeof(struct rt_frame_ba_req)) {
- DBGPRINT_ERR(("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen));
+ DBGPRINT_ERR("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen);
return FALSE;
} else if (MsgLen != sizeof(struct rt_frame_ba_req)) {
- DBGPRINT_ERR(("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen));
+ DBGPRINT_ERR("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen);
return FALSE;
}
diff --git a/drivers/staging/rt2860/common/cmm_data.c b/drivers/staging/rt2860/common/cmm_data.c
index 93a53479d766..2204c2bda386 100644
--- a/drivers/staging/rt2860/common/cmm_data.c
+++ b/drivers/staging/rt2860/common/cmm_data.c
@@ -1366,7 +1366,7 @@ void RTMPResumeMsduTransmission(struct rt_rtmp_adapter *pAd)
/* R66 should not be 0 */
if (pAd->BbpTuning.R66CurrentValue == 0) {
pAd->BbpTuning.R66CurrentValue = 0x38;
- DBGPRINT_ERR(("RTMPResumeMsduTransmission, R66CurrentValue=0...\n"));
+ DBGPRINT_ERR("RTMPResumeMsduTransmission, R66CurrentValue=0...\n");
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66,
diff --git a/drivers/staging/rt2860/common/cmm_data_pci.c b/drivers/staging/rt2860/common/cmm_data_pci.c
index 43d73a05c8eb..7af59ff9e220 100644
--- a/drivers/staging/rt2860/common/cmm_data_pci.c
+++ b/drivers/staging/rt2860/common/cmm_data_pci.c
@@ -137,7 +137,7 @@ u16 RtmpPCI_WriteSingleTxResource(struct rt_rtmp_adapter *pAd,
pTxD->SDPtr0 = BufBasePaLow;
pTxD->SDLen0 = TXINFO_SIZE + TXWI_SIZE + hwHeaderLen; /* include padding */
- pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);;
+ pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);
pTxD->SDLen1 = pTxBlk->SrcBufLen;
pTxD->LastSec0 = 0;
pTxD->LastSec1 = (bIsLast) ? 1 : 0;
@@ -215,7 +215,7 @@ u16 RtmpPCI_WriteMultiTxResource(struct rt_rtmp_adapter *pAd,
pTxD->SDPtr0 = BufBasePaLow;
pTxD->SDLen0 = firstDMALen; /* include padding */
- pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);;
+ pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);
pTxD->SDLen1 = pTxBlk->SrcBufLen;
pTxD->LastSec0 = 0;
pTxD->LastSec1 = (bIsLast) ? 1 : 0;
diff --git a/drivers/staging/rt2860/common/cmm_mac_pci.c b/drivers/staging/rt2860/common/cmm_mac_pci.c
index e26ba4942877..850f0fbc6d90 100644
--- a/drivers/staging/rt2860/common/cmm_mac_pci.c
+++ b/drivers/staging/rt2860/common/cmm_mac_pci.c
@@ -89,7 +89,7 @@ int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
if (pAd->TxDescRing[num].AllocVa == NULL) {
ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
- DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+ DBGPRINT_ERR("Failed to allocate a big buffer\n");
Status = NDIS_STATUS_RESOURCES;
break;
}
@@ -121,7 +121,7 @@ int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
if (pAd->TxBufSpace[num].AllocVa == NULL) {
ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
- DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+ DBGPRINT_ERR("Failed to allocate a big buffer\n");
Status = NDIS_STATUS_RESOURCES;
break;
}
@@ -197,7 +197,7 @@ int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
if (pAd->MgmtDescRing.AllocVa == NULL) {
ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
- DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+ DBGPRINT_ERR("Failed to allocate a big buffer\n");
Status = NDIS_STATUS_RESOURCES;
break;
}
@@ -251,7 +251,7 @@ int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
if (pAd->RxDescRing.AllocVa == NULL) {
ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
- DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+ DBGPRINT_ERR("Failed to allocate a big buffer\n");
Status = NDIS_STATUS_RESOURCES;
break;
}
@@ -304,7 +304,7 @@ int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd)
/* Error handling */
if (pDmaBuf->AllocVa == NULL) {
ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
- DBGPRINT_ERR(("Failed to allocate RxRing's 1st buffer\n"));
+ DBGPRINT_ERR("Failed to allocate RxRing's 1st buffer\n");
Status = NDIS_STATUS_RESOURCES;
break;
}
diff --git a/drivers/staging/rt2860/common/cmm_mac_usb.c b/drivers/staging/rt2860/common/cmm_mac_usb.c
index 72731cbb81df..64a65a460c29 100644
--- a/drivers/staging/rt2860/common/cmm_mac_usb.c
+++ b/drivers/staging/rt2860/common/cmm_mac_usb.c
@@ -236,7 +236,7 @@ int NICInitTransmit(struct rt_rtmp_adapter *pAd)
os_alloc_mem(pAd, (u8 **) (&pAd->MgmtDescRing.AllocVa),
pAd->MgmtDescRing.AllocSize);
if (pAd->MgmtDescRing.AllocVa == NULL) {
- DBGPRINT_ERR(("Failed to allocate a big buffer for MgmtDescRing!\n"));
+ DBGPRINT_ERR("Failed to allocate a big buffer for MgmtDescRing!\n");
Status = NDIS_STATUS_RESOURCES;
goto out1;
}
diff --git a/drivers/staging/rt2860/common/cmm_wpa.c b/drivers/staging/rt2860/common/cmm_wpa.c
index e37b64b6a608..0040f45b629b 100644
--- a/drivers/staging/rt2860/common/cmm_wpa.c
+++ b/drivers/staging/rt2860/common/cmm_wpa.c
@@ -2794,7 +2794,7 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
/* Check length */
if ((len <= 0) || (pEid->Len != len)) {
- DBGPRINT_ERR(("%s : The length is invalid\n", __func__));
+ DBGPRINT_ERR("%s : The length is invalid\n", __func__);
return NULL;
}
/* Check WPA or WPA2 */
@@ -2803,14 +2803,13 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
u16 ucount;
if (len < sizeof(struct rt_rsnie)) {
- DBGPRINT_ERR(("%s : The length is too short for WPA\n",
- __func__));
+ DBGPRINT_ERR("%s : The length is too short for WPA\n", __func__);
return NULL;
}
/* Get the count of pairwise cipher */
ucount = cpu2le16(pRsnie->ucount);
if (ucount > 2) {
- DBGPRINT_ERR(("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount));
+ DBGPRINT_ERR("%s : The count(%d) of pairwise cipher is invalid\n", __func__, ucount);
return NULL;
}
/* Get the group cipher */
@@ -2836,14 +2835,13 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
isWPA2 = TRUE;
if (len < sizeof(struct rt_rsnie2)) {
- DBGPRINT_ERR(("%s : The length is too short for WPA2\n",
- __func__));
+ DBGPRINT_ERR("%s : The length is too short for WPA2\n", __func__);
return NULL;
}
/* Get the count of pairwise cipher */
ucount = cpu2le16(pRsnie->ucount);
if (ucount > 2) {
- DBGPRINT_ERR(("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount));
+ DBGPRINT_ERR("%s : The count(%d) of pairwise cipher is invalid\n", __func__, ucount);
return NULL;
}
/* Get the group cipher */
@@ -2863,7 +2861,7 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
offset = sizeof(struct rt_rsnie2) + (4 * (ucount - 1));
} else {
- DBGPRINT_ERR(("%s : Unknown IE (%d)\n", __func__, pEid->Eid));
+ DBGPRINT_ERR("%s : Unknown IE (%d)\n", __func__, pEid->Eid);
return NULL;
}
@@ -2872,8 +2870,7 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
len -= offset;
if (len < sizeof(struct rt_rsnie_auth)) {
- DBGPRINT_ERR(("%s : The length of RSNIE is too short\n",
- __func__));
+ DBGPRINT_ERR("%s : The length of RSNIE is too short\n", __func__);
return NULL;
}
/* pointer to AKM count */
@@ -2882,8 +2879,7 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
/* Get the count of pairwise cipher */
acount = cpu2le16(pAkm->acount);
if (acount > 2) {
- DBGPRINT_ERR(("%s : The count(%d) of AKM is invlaid\n",
- __func__, acount));
+ DBGPRINT_ERR("%s : The count(%d) of AKM is invalid\n", __func__, acount);
return NULL;
}
/* Get the AKM suite */
@@ -2910,7 +2906,7 @@ u8 *GetSuiteFromRSNIE(u8 *rsnie,
return pBuf;
}
} else {
- DBGPRINT_ERR(("%s : it can't get any more information beyond AKM \n", __func__));
+ DBGPRINT_ERR("%s : it can't get any more information beyond AKM \n", __func__);
return NULL;
}
diff --git a/drivers/staging/rt2860/common/ee_efuse.c b/drivers/staging/rt2860/common/ee_efuse.c
index 03412f5bc990..fed0ba452271 100644
--- a/drivers/staging/rt2860/common/ee_efuse.c
+++ b/drivers/staging/rt2860/common/ee_efuse.c
@@ -264,7 +264,7 @@ int set_eFuseGetFreeBlockCount_Proc(struct rt_rtmp_adapter *pAd, char *arg)
if (i == EFUSE_USAGE_MAP_END)
efusefreenum = 0;
}
- printk("efuseFreeNumber is %d\n", efusefreenum);
+ printk(KERN_DEBUG "efuseFreeNumber is %d\n", efusefreenum);
return TRUE;
}
@@ -274,16 +274,23 @@ int set_eFusedump_Proc(struct rt_rtmp_adapter *pAd, char *arg)
int i = 0;
if (!pAd->bUseEfuse)
return FALSE;
+
+ printk(KERN_DEBUG "Block 0: ");
+
for (i = 0; i < EFUSE_USAGE_MAP_END / 2; i++) {
InBuf[0] = 2 * i;
InBuf[1] = 2;
InBuf[2] = 0x0;
eFuseReadPhysical(pAd, &InBuf[0], 4, &InBuf[2], 2);
- if (i % 4 == 0)
- printk("\nBlock %x:", i / 8);
- printk("%04x ", InBuf[2]);
+ if (i && i % 4 == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "Block %x:", i / 8);
+ }
+ printk(KERN_CONT "%04x ", InBuf[2]);
}
+ printk(KERN_CONT "\n");
+
return TRUE;
}
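The reworked eFuse dump loop prints the first block header once, then starts each new output line as a fresh KERN_DEBUG record and appends the words with KERN_CONT. A quick userspace rendering of the resulting layout, with printf standing in for printk and the usage map shortened to 16 words:

#include <stdio.h>

int main(void)
{
        unsigned short word = 0;        /* stands in for the eFuse readback */
        int i;

        printf("Block 0: ");
        for (i = 0; i < 16; i++) {
                if (i && i % 4 == 0) {
                        printf("\n");
                        printf("Block %x:", i / 8);
                }
                printf("%04x ", word++);
        }
        printf("\n");
        return 0;
}

Each "block" spans eight words, so its header appears on two consecutive four-word lines, exactly as the i / 8 computation in the kernel loop produces.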
diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c
index 7300c6e9c175..d9c3fd5c2166 100644
--- a/drivers/staging/rt2860/common/mlme.c
+++ b/drivers/staging/rt2860/common/mlme.c
@@ -550,7 +550,7 @@ void MlmeHandler(struct rt_rtmp_adapter *pAd)
Elem->MsgLen = 0;
} else {
- DBGPRINT_ERR(("MlmeHandler: MlmeQueue empty\n"));
+ DBGPRINT_ERR("MlmeHandler: MlmeQueue empty\n");
}
}
@@ -4698,8 +4698,7 @@ BOOLEAN MlmeEnqueue(struct rt_rtmp_adapter *pAd,
/* First check the size, it MUST not exceed the mlme queue size */
if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
- DBGPRINT_ERR(("MlmeEnqueue: msg too large, size = %ld \n",
- MsgLen));
+ DBGPRINT_ERR("MlmeEnqueue: msg too large, size = %ld \n", MsgLen);
return FALSE;
}
@@ -4762,12 +4761,12 @@ BOOLEAN MlmeEnqueueForRecv(struct rt_rtmp_adapter *pAd,
if (RTMP_TEST_FLAG
(pAd,
fRTMP_ADAPTER_HALT_IN_PROGRESS | fRTMP_ADAPTER_NIC_NOT_EXIST)) {
- DBGPRINT_ERR(("MlmeEnqueueForRecv: fRTMP_ADAPTER_HALT_IN_PROGRESS\n"));
+ DBGPRINT_ERR("MlmeEnqueueForRecv: fRTMP_ADAPTER_HALT_IN_PROGRESS\n");
return FALSE;
}
/* First check the size, it MUST not exceed the mlme queue size */
if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
- DBGPRINT_ERR(("MlmeEnqueueForRecv: frame too large, size = %ld \n", MsgLen));
+ DBGPRINT_ERR("MlmeEnqueueForRecv: frame too large, size = %ld \n", MsgLen);
return FALSE;
}
@@ -4777,7 +4776,7 @@ BOOLEAN MlmeEnqueueForRecv(struct rt_rtmp_adapter *pAd,
{
if (!MsgTypeSubst(pAd, pFrame, &Machine, &MsgType)) {
- DBGPRINT_ERR(("MlmeEnqueueForRecv: un-recongnized mgmt->subtype=%d\n", pFrame->Hdr.FC.SubType));
+ DBGPRINT_ERR("MlmeEnqueueForRecv: unrecognized mgmt->subtype=%d\n", pFrame->Hdr.FC.SubType);
return FALSE;
}
}
@@ -4867,7 +4866,7 @@ void MlmeRestartStateMachine(struct rt_rtmp_adapter *pAd)
Elem->MsgLen = 0;
} else {
- DBGPRINT_ERR(("MlmeRestartStateMachine: MlmeQueue empty\n"));
+ DBGPRINT_ERR("MlmeRestartStateMachine: MlmeQueue empty\n");
}
}
#endif /* RTMP_MAC_PCI // */
diff --git a/drivers/staging/rt2860/common/rt_rf.c b/drivers/staging/rt2860/common/rt_rf.c
index 519121d81040..2895447ffc48 100644
--- a/drivers/staging/rt2860/common/rt_rf.c
+++ b/drivers/staging/rt2860/common/rt_rf.c
@@ -131,8 +131,7 @@ int RT30xxReadRFRegister(struct rt_rtmp_adapter *pAd,
}
}
if (rfcsr.field.RF_CSR_KICK == BUSY) {
- DBGPRINT_ERR(("RF read R%d=0x%x fail, i[%d], k[%d]\n", regID,
- rfcsr.word, i, k));
+ DBGPRINT_ERR("RF read R%d=0x%x fail, i[%d], k[%d]\n", regID, rfcsr.word, i, k);
return STATUS_UNSUCCESSFUL;
}
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 3628e85802e3..d359a14f5d4d 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -169,14 +169,14 @@ int RTMPAllocAdapterBlock(void *handle,
pBeaconBuf = kmalloc(MAX_BEACON_SIZE, MEM_ALLOC_FLAG);
if (pBeaconBuf == NULL) {
Status = NDIS_STATUS_FAILURE;
- DBGPRINT_ERR(("Failed to allocate memory - BeaconBuf!\n"));
+ DBGPRINT_ERR("Failed to allocate memory - BeaconBuf!\n");
break;
}
NdisZeroMemory(pBeaconBuf, MAX_BEACON_SIZE);
Status = AdapterBlockAllocateMemory(handle, (void **) & pAd);
if (Status != NDIS_STATUS_SUCCESS) {
- DBGPRINT_ERR(("Failed to allocate memory - ADAPTER\n"));
+ DBGPRINT_ERR("Failed to allocate memory - ADAPTER\n");
break;
}
pAd->BeaconBuf = pBeaconBuf;
@@ -785,8 +785,7 @@ void NICReadEEPROMParameters(struct rt_rtmp_adapter *pAd, u8 *mac_addr)
Version.field.Version, Version.field.FaeReleaseNumber));
if (Version.field.Version > VALID_EEPROM_VERSION) {
- DBGPRINT_ERR(("E2PROM: WRONG VERSION 0x%x, should be %d\n",
- Version.field.Version, VALID_EEPROM_VERSION));
+ DBGPRINT_ERR("E2PROM: WRONG VERSION 0x%x, should be %d\n", Version.field.Version, VALID_EEPROM_VERSION);
/*pAd->SystemErrorBitmap |= 0x00000001;
// hard-code default value when no proper E2PROM installed
@@ -2911,7 +2910,7 @@ void RTMPSetTimer(struct rt_ralink_timer *pTimer, unsigned long Value)
RTMP_OS_Add_Timer(&pTimer->TimerObj, Value);
}
} else {
- DBGPRINT_ERR(("RTMPSetTimer failed, Timer hasn't been initialize!\n"));
+ DBGPRINT_ERR("RTMPSetTimer failed, Timer hasn't been initialized!\n");
}
}
@@ -2947,7 +2946,7 @@ void RTMPModTimer(struct rt_ralink_timer *pTimer, unsigned long Value)
RTMP_OS_Mod_Timer(&pTimer->TimerObj, Value);
}
} else {
- DBGPRINT_ERR(("RTMPModTimer failed, Timer hasn't been initialize!\n"));
+ DBGPRINT_ERR("RTMPModTimer failed, Timer hasn't been initialized!\n");
}
}
@@ -2989,7 +2988,7 @@ void RTMPCancelTimer(struct rt_ralink_timer *pTimer, OUT BOOLEAN * pCancelled)
RtmpTimerQRemove(pTimer->pAd, pTimer);
#endif /* RTMP_TIMER_TASK_SUPPORT // */
} else {
- DBGPRINT_ERR(("RTMPCancelTimer failed, Timer hasn't been initialize!\n"));
+ DBGPRINT_ERR("RTMPCancelTimer failed, Timer hasn't been initialized!\n");
}
}
@@ -3251,8 +3250,7 @@ int rt28xx_init(struct rt_rtmp_adapter *pAd,
/* Load 8051 firmware */
Status = NICLoadFirmware(pAd);
if (Status != NDIS_STATUS_SUCCESS) {
- DBGPRINT_ERR(("NICLoadFirmware failed, Status[=0x%08x]\n",
- Status));
+ DBGPRINT_ERR("NICLoadFirmware failed, Status[=0x%08x]\n", Status);
goto err1;
}
@@ -3268,8 +3266,7 @@ int rt28xx_init(struct rt_rtmp_adapter *pAd,
Status = RTMPAllocTxRxRingMemory(pAd);
if (Status != NDIS_STATUS_SUCCESS) {
- DBGPRINT_ERR(("RTMPAllocDMAMemory failed, Status[=0x%08x]\n",
- Status));
+ DBGPRINT_ERR("RTMPAllocDMAMemory failed, Status[=0x%08x]\n", Status);
goto err1;
}
@@ -3284,7 +3281,7 @@ int rt28xx_init(struct rt_rtmp_adapter *pAd,
Status = MlmeInit(pAd);
if (Status != NDIS_STATUS_SUCCESS) {
- DBGPRINT_ERR(("MlmeInit failed, Status[=0x%08x]\n", Status));
+ DBGPRINT_ERR("MlmeInit failed, Status[=0x%08x]\n", Status);
goto err2;
}
/* Initialize pAd->StaCfg, pAd->ApCfg, pAd->CommonCfg to manufacture default */
@@ -3309,8 +3306,7 @@ int rt28xx_init(struct rt_rtmp_adapter *pAd,
/* */
Status = NICInitializeAdapter(pAd, TRUE);
if (Status != NDIS_STATUS_SUCCESS) {
- DBGPRINT_ERR(("NICInitializeAdapter failed, Status[=0x%08x]\n",
- Status));
+ DBGPRINT_ERR("NICInitializeAdapter failed, Status[=0x%08x]\n", Status);
if (Status != NDIS_STATUS_SUCCESS)
goto err3;
}
diff --git a/drivers/staging/rt2860/common/rtmp_mcu.c b/drivers/staging/rt2860/common/rtmp_mcu.c
index 844d4b987b78..80fa4160ed62 100644
--- a/drivers/staging/rt2860/common/rtmp_mcu.c
+++ b/drivers/staging/rt2860/common/rtmp_mcu.c
@@ -267,7 +267,7 @@ int RtmpAsicSendCommandToMcu(struct rt_rtmp_adapter *pAd,
} while (i++ < 100);
if (i > 100) {
- DBGPRINT_ERR(("H2M_MAILBOX still hold by MCU. command fail\n"));
+ DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
return FALSE;
}
@@ -296,7 +296,7 @@ int RtmpAsicSendCommandToMcu(struct rt_rtmp_adapter *pAd,
#ifdef RTMP_MAC_PCI
#endif /* RTMP_MAC_PCI // */
{
- DBGPRINT_ERR(("H2M_MAILBOX still hold by MCU. command fail\n"));
+ DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
}
return FALSE;
}
diff --git a/drivers/staging/rt2860/common/spectrum.c b/drivers/staging/rt2860/common/spectrum.c
index 2d5f847e6cc6..1dfb802aab9a 100644
--- a/drivers/staging/rt2860/common/spectrum.c
+++ b/drivers/staging/rt2860/common/spectrum.c
@@ -1837,7 +1837,7 @@ static void PeerChSwAnnAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_
}
if (index >= pAd->ChannelListNum) {
- DBGPRINT_ERR(("&&&&&&&&&&&&&&&&&&&&&&&&&&PeerChSwAnnAction(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum));
+ DBGPRINT_ERR("&&&&&&&&&&&&&&&&&&&&&&&&&&PeerChSwAnnAction(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum);
}
}
}
diff --git a/drivers/staging/rt2860/mlme.h b/drivers/staging/rt2860/mlme.h
index 01414c3b4889..cd1ee3d7a91d 100644
--- a/drivers/staging/rt2860/mlme.h
+++ b/drivers/staging/rt2860/mlme.h
@@ -32,8 +32,9 @@
Revision History:
Who When What
-------- ---------- ------------------------------
- John Chang 2003-08-28 Created
- John Chang 2004-09-06 modified for RT2600
+ John Chang 2003-08-28 Created
+ John Chang 2004-09-06 modified for RT2600
+ Justin P. Mattock 11/07/2010 Fix typos in comments
*/
#ifndef __MLME_H__
@@ -41,7 +42,7 @@
#include "rtmp_dot11.h"
-/* maximum supported capability information - */
+/* maximum supported capability information */
/* ESS, IBSS, Privacy, Short Preamble, Spectrum mgmt, Short Slot */
#define SUPPORTED_CAPABILITY_INFO 0x0533
@@ -77,7 +78,7 @@
#define CW_MAX_IN_BITS 10 /* actual CwMax = 2^CW_MAX_IN_BITS - 1 */
/* Note: RSSI_TO_DBM_OFFSET has been changed to variable for new RF (2004-0720). */
-/* SHould not refer to this constant anymore */
+/* Should not refer to this constant anymore */
/*#define RSSI_TO_DBM_OFFSET 120 // for RT2530 RSSI-115 = dBm */
#define RSSI_FOR_MID_TX_POWER -55 /* -55 db is considered mid-distance */
#define RSSI_FOR_LOW_TX_POWER -45 /* -45 db is considered very short distance and */
@@ -123,7 +124,7 @@
#define TID_MAC_HASH_INDEX(Addr, TID) (TID_MAC_HASH(Addr, TID) % HASH_TABLE_SIZE)
/* LED Control */
-/* assoiation ON. one LED ON. another blinking when TX, OFF when idle */
+/* association ON. one LED ON. another blinking when TX, OFF when idle */
/* no association, both LED off */
#define ASIC_LED_ACT_ON(pAd) RTMP_IO_WRITE32(pAd, MAC_CSR14, 0x00031e46)
#define ASIC_LED_ACT_OFF(pAd) RTMP_IO_WRITE32(pAd, MAC_CSR14, 0x00001e46)
@@ -284,8 +285,8 @@ struct PACKED rt_ht_capability_ie {
/* 802.11n draft3 related structure definitions. */
/* 7.3.2.60 */
-#define dot11OBSSScanPassiveDwell 20 /* in TU. min amount of time that the STA continously scans each channel when performing an active OBSS scan. */
-#define dot11OBSSScanActiveDwell 10 /* in TU.min amount of time that the STA continously scans each channel when performing an passive OBSS scan. */
+#define dot11OBSSScanPassiveDwell 20 /* in TU. min amount of time that the STA continuously scans each channel when performing an active OBSS scan. */
+#define dot11OBSSScanActiveDwell 10 /* in TU. min amount of time that the STA continuously scans each channel when performing a passive OBSS scan. */
#define dot11BSSWidthTriggerScanInterval 300 /* in sec. max interval between scan operations to be performed to detect BSS channel width trigger events. */
#define dot11OBSSScanPassiveTotalPerChannel 200 /* in TU. min total amount of time that the STA scans each channel when performing a passive OBSS scan. */
#define dot11OBSSScanActiveTotalPerChannel 20 /*in TU. min total amount of time that the STA scans each channel when performing a active OBSS scan */
@@ -325,7 +326,7 @@ struct rt_trigger_eventa {
};
/* 20/40 trigger event table */
-/* If one Event A delete or created, or if Event B is detected or not detected, STA should send 2040BSSCoexistence to AP. */
+/* If one Event (A) is deleted or created, or if Event (B) is detected or not detected, STA should send 2040BSSCoexistence to AP. */
#define MAX_TRIGGER_EVENT 64
struct rt_trigger_event_tab {
u8 EventANo;
@@ -357,14 +358,14 @@ struct PACKED rt_bss_2040_intolerant_ch_report {
u8 ChList[0];
};
-/* The structure for channel switch annoucement IE. This is in 802.11n D3.03 */
+/* The structure for channel switch announcement IE. This is in 802.11n D3.03 */
struct PACKED rt_cha_switch_announce_ie {
u8 SwitchMode; /*channel switch mode */
u8 NewChannel; /* */
u8 SwitchCount; /* */
};
-/* The structure for channel switch annoucement IE. This is in 802.11n D3.03 */
+/* The structure for channel switch announcement IE. This is in 802.11n D3.03 */
struct PACKED rt_sec_cha_offset_ie {
u8 SecondaryChannelOffset; /* 1: Secondary above, 3: Secondary below, 0: no Secondary */
};
@@ -377,7 +378,7 @@ struct rt_ht_phy_info {
u8 MCSSet[16];
};
-/*This structure substracts ralink supports from all 802.11n-related features. */
+/*This structure subtracts ralink supports from all 802.11n-related features. */
/*Features not listed here but contained in 802.11n spec are not supported in rt2860. */
struct rt_ht_capability {
u16 ChannelWidth:1;
@@ -387,14 +388,14 @@ struct rt_ht_capability {
u16 ShortGIfor40:1; /*for40MHz */
u16 TxSTBC:1;
u16 RxSTBC:2; /* 2 bits */
- u16 AmsduEnable:1; /* Enable to transmit A-MSDU. Suggest disable. We should use A-MPDU to gain best benifit of 802.11n */
+ u16 AmsduEnable:1; /* Enable to transmit A-MSDU. Suggest disable. We should use A-MPDU to gain best benefit of 802.11n */
u16 AmsduSize:1; /* Max receiving A-MSDU size */
u16 rsv:5;
/*Substract from Addiont HT INFO IE */
u8 MaxRAmpduFactor:2;
u8 MpduDensity:3;
- u8 ExtChanOffset:2; /* Please not the difference with following u8 NewExtChannelOffset; from 802.11n */
+ u8 ExtChanOffset:2; /* Please note the difference with following u8 NewExtChannelOffset; from 802.11n */
u8 RecomWidth:1;
u16 OperaionMode:2;
@@ -481,7 +482,7 @@ struct PACKED rt_ba_parm {
u16 AMSDUSupported:1; /* 0: not permitted 1: permitted */
u16 BAPolicy:1; /* 1: immediately BA 0:delayed BA */
u16 TID:4; /* value of TC os TS */
- u16 BufSize:10; /* number of buffe of size 2304 octetsr */
+ u16 BufSize:10; /* number of buffers of size 2304 octets */
};
/* 2-byte BA Starting Seq CONTROL field */
@@ -551,7 +552,7 @@ struct PACKED rt_frame_mtba_req {
BASEQ_CONTROL BAStartingSeq;
};
-/* Compressed format is mandantory in HT STA */
+/* Compressed format is mandatory in HT STA */
struct PACKED rt_frame_mtba {
struct rt_frame_control FC;
u16 Duration;
@@ -647,7 +648,7 @@ struct PACKED rt_frame_ba {
u8 bitmask[8];
};
-/* Radio Measuement Request Frame Format */
+/* Radio Measurement Request Frame Format */
struct PACKED rt_frame_rm_req_action {
struct rt_header_802_11 Hdr;
u8 Category;
@@ -709,7 +710,7 @@ struct rt_edca_parm {
u8 Cwmin[4];
u8 Cwmax[4];
u16 Txop[4]; /* in unit of 32-us */
- BOOLEAN bACM[4]; /* 1: Admission Control of AC_BK is mandattory */
+ BOOLEAN bACM[4]; /* 1: Admission Control of AC_BK is mandatory */
};
/* QBSS LOAD information from QAP's BEACON/ProbeRsp */
@@ -757,7 +758,7 @@ struct rt_wpa_ie {
struct rt_bss_entry {
u8 Bssid[MAC_ADDR_LEN];
u8 Channel;
- u8 CentralChannel; /*Store the wide-band central channel for 40MHz. .used in 40MHz AP. Or this is the same as Channel. */
+ u8 CentralChannel; /* Store the wide-band central channel for 40MHz. Used in 40MHz AP. Or this is the same as Channel. */
u8 BssType;
u16 AtimWin;
u16 BeaconPeriod;
@@ -855,7 +856,7 @@ struct rt_state_machine {
STATE_MACHINE_FUNC *TransFunc;
};
-/* MLME AUX data structure that hold temporarliy settings during a connection attempt. */
+/* MLME AUX data structure that holds temporary settings during a connection attempt. */
/* Once this attemp succeeds, all settings will be copy to pAd->StaActive. */
/* A connection attempt (user set OID, roaming, CCX fast roaming,..) consists of */
/* several steps (JOIN, AUTH, ASSOC or REASSOC) and may fail at any step. We purposely */
@@ -996,7 +997,7 @@ struct PACKED rt_rtmp_tx_rate_switch {
#define MAC_TABLE_ASSOC_TIMEOUT 5 /* unit: sec */
#define MAC_TABLE_FULL(Tab) ((Tab).size == MAX_LEN_OF_MAC_TABLE)
-/* AP shall drop the sta if contine Tx fail count reach it. */
+/* AP shall drop the sta if the continuous Tx fail count reaches it. */
#define MAC_ENTRY_LIFE_CHECK_CNT 20 /* packet cnt. */
/* Value domain of pMacEntry->Sst */
diff --git a/drivers/staging/rt2860/oid.h b/drivers/staging/rt2860/oid.h
index 1704c27b2736..5a25f0d3cfeb 100644
--- a/drivers/staging/rt2860/oid.h
+++ b/drivers/staging/rt2860/oid.h
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- Name Date Modification logs
+ Name Date Modification logs
+ Justin P. Mattock 11/07/2010 Fix typos in comments
*/
#ifndef _OID_H_
#define _OID_H_
@@ -78,7 +79,7 @@
#define NDIS_802_11_LENGTH_RATES 8
#define NDIS_802_11_LENGTH_RATES_EX 16
#define MAC_ADDR_LENGTH 6
-/*#define MAX_NUM_OF_CHS 49 // 14 channels @2.4G + 12@UNII + 4 @MMAC + 11 @HiperLAN2 + 7 @Japan + 1 as NULL terminationc */
+/*#define MAX_NUM_OF_CHS 49 // 14 channels @2.4G + 12@UNII + 4 @MMAC + 11 @HiperLAN2 + 7 @Japan + 1 as NULL termination */
#define MAX_NUM_OF_CHS 54 /* 14 channels @2.4G + 12@UNII(lower/middle) + 16@HiperLAN2 + 11@UNII(upper) + 0 @Japan + 1 as NULL termination */
#define MAX_NUMBER_OF_EVENT 10 /* entry # in EVENT table */
#define MAX_NUMBER_OF_MAC 32 /* if MAX_MBSSID_NUM is 8, this value can't be larger than 211 */
@@ -87,7 +88,7 @@
#define MAX_NUMBER_OF_DLS_ENTRY 4
#define RT_QUERY_SIGNAL_CONTEXT 0x0402
-#define RT_SET_IAPP_PID 0x0404
+#define RT_SET_IAPP_PID 0x0404
#define RT_SET_APD_PID 0x0405
#define RT_SET_DEL_MAC_ENTRY 0x0406
#define RT_QUERY_EVENT_TABLE 0x0407
@@ -610,7 +611,7 @@ struct rt_802_11_event_log {
struct rt_802_11_event_table {
unsigned long Num;
- unsigned long Rsv; /* to align Log[] at LARGE_INEGER boundary */
+ unsigned long Rsv; /* to align Log[] at LARGE_INTEGER boundary */
struct rt_802_11_event_log Log[MAX_NUMBER_OF_EVENT];
};
@@ -721,9 +722,9 @@ struct rt_802_11_tx_rates {
#define AUTH_FAIL 0x4 /* Open authentication fail */
#define AUTH_FAIL_KEYS 0x5 /* Shared authentication fail */
#define ASSOC_FAIL 0x6 /* Association failed */
-#define EAP_MIC_FAILURE 0x7 /* Deauthencation because MIC failure */
-#define EAP_4WAY_TIMEOUT 0x8 /* Deauthencation on 4-way handshake timeout */
-#define EAP_GROUP_KEY_TIMEOUT 0x9 /* Deauthencation on group key handshake timeout */
+#define EAP_MIC_FAILURE 0x7 /* Deauthentication because of MIC failure */
+#define EAP_4WAY_TIMEOUT 0x8 /* Deauthentication on 4-way handshake timeout */
+#define EAP_GROUP_KEY_TIMEOUT 0x9 /* Deauthentication on group key handshake timeout */
#define EAP_SUCCESS 0xa /* EAP succeed */
#define DETECT_RADAR_SIGNAL 0xb /* Radar signal occur in current channel */
#define EXTRA_INFO_MAX 0xb /* Indicate Last OID */
diff --git a/drivers/staging/rt2860/pci_main_dev.c b/drivers/staging/rt2860/pci_main_dev.c
index 321facd6b0ab..25fbb1880ff2 100644
--- a/drivers/staging/rt2860/pci_main_dev.c
+++ b/drivers/staging/rt2860/pci_main_dev.c
@@ -31,7 +31,8 @@
Create and register network interface for PCI based chipsets in Linux platform.
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix typos in some comments
-------- ---------- ----------------------------------------------
*/
@@ -40,8 +41,8 @@
#include <linux/slab.h>
/* Following information will be show when you run 'modinfo' */
-/* *** If you have a solution for the bug in current version of driver, please mail to me. */
-/* Otherwise post to forum in ralinktech's web site(www.ralinktech.com) and let all users help you. *** */
+/* If you have a solution for a bug in the current version of the driver, please e-mail me. */
+/* Otherwise post to forum in ralinktech's web site(www.ralinktech.com) and let all users help you. */
MODULE_AUTHOR("Jett Chen <jett_chen@ralinktech.com>");
MODULE_DESCRIPTION("RT2860/RT3090 Wireless Lan Linux Driver");
MODULE_LICENSE("GPL");
@@ -50,9 +51,6 @@ MODULE_ALIAS("rt3090sta");
/* */
/* Function declarations */
/* */
-extern int rt28xx_close(IN struct net_device *net_dev);
-extern int rt28xx_open(struct net_device *net_dev);
-
static void __devexit rt2860_remove_one(struct pci_dev *pci_dev);
static int __devinit rt2860_probe(struct pci_dev *pci_dev,
const struct pci_device_id *ent);
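An aside on the hunk above: it removes the file-local extern prototypes for rt28xx_open()/rt28xx_close(). The usual alternative is to declare such shared functions once in a header included by both the definitions and their callers. A minimal sketch of that header follows; the file name rt28xx_netdev.h is hypothetical and not something this patch adds.

/* rt28xx_netdev.h -- hypothetical shared header, shown for illustration only */
#ifndef RT28XX_NETDEV_H
#define RT28XX_NETDEV_H

struct net_device;	/* a forward declaration is enough for prototypes */

int rt28xx_open(struct net_device *net_dev);
int rt28xx_close(struct net_device *net_dev);

#endif /* RT28XX_NETDEV_H */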
@@ -205,7 +203,7 @@ static int rt2860_resume(struct pci_dev *pci_dev)
/* initialize device before it's used by a driver */
if (pci_enable_device(pci_dev)) {
- printk("pci enable fail!\n");
+ printk(KERN_ERR "rt2860: pci enable fail!\n");
return 0;
}
@@ -599,7 +597,7 @@ void RTMPInitPCIeLinkCtrlValue(struct rt_rtmp_adapter *pAd)
DBGPRINT_RAW(RT_DEBUG_ERROR,
(" AUX_CTRL = 0x%32x\n", MacValue));
- /* for RT30xx F and after, PCIe infterface, and for power solution 3 */
+ /* for RT30xx F and after, PCIe interface, and for power solution 3 */
if ((IS_VERSION_AFTER_F(pAd))
&& (pAd->StaCfg.PSControl.field.rt30xxPowerMode >= 2)
&& (pAd->StaCfg.PSControl.field.rt30xxPowerMode <= 3)) {
@@ -902,7 +900,7 @@ void RTMPPCIeLinkCtrlValueRestore(struct rt_rtmp_adapter *pAd, u8 Level)
Configuration);
if ((Configuration != 0) && (Configuration != 0xFFFF)) {
Configuration &= 0xfefc;
- /* If call from interface down, restore to orginial setting. */
+ /* If call from interface down, restore to original setting. */
if (Level == RESTORE_CLOSE)
Configuration |= pAd->HostLnkCtrlConfiguration;
else
@@ -924,7 +922,7 @@ void RTMPPCIeLinkCtrlValueRestore(struct rt_rtmp_adapter *pAd, u8 Level)
Configuration);
if ((Configuration != 0) && (Configuration != 0xFFFF)) {
Configuration &= 0xfefc;
- /* If call from interface down, restore to orginial setting. */
+ /* If call from interface down, restore to original setting. */
if (Level == RESTORE_CLOSE)
Configuration |= pAd->RLnkCtrlConfiguration;
else
@@ -1106,12 +1104,12 @@ void RTMPrt3xSetPCIePowerLinkCtrl(struct rt_rtmp_adapter *pAd)
if (pos != 0)
pAd->HostLnkCtrlOffset = pos + PCI_EXP_LNKCTL;
- /* If configurared to turn on L1. */
+ /* If configured to turn on L1. */
HostConfiguration = 0;
if (pAd->StaCfg.PSControl.field.rt30xxForceASPMTest == 1) {
DBGPRINT(RT_DEBUG_TRACE, ("Enter,PSM : Force ASPM\n"));
- /* Skip non-exist deice right away */
+ /* Skip non-exist device right away */
if ((pAd->HostLnkCtrlOffset != 0)) {
PCI_REG_READ_WORD(pObj->parent_pci_dev,
pAd->HostLnkCtrlOffset,
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index abfeea11721d..728864e18a18 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -321,7 +321,7 @@ int RTMPCloneNdisPacket(struct rt_rtmp_adapter *pAd,
RTMP_SET_PACKET_SOURCE(OSPKT_TO_RTPKT(pkt), PKTSRC_NDIS);
- printk("###Clone###\n");
+ printk(KERN_DEBUG "###Clone###\n");
return NDIS_STATUS_SUCCESS;
}
@@ -343,9 +343,8 @@ int RTMPAllocateNdisPacket(struct rt_rtmp_adapter *pAd,
RTMP_PKT_TAIL_PADDING);
if (pPacket == NULL) {
*ppPacket = NULL;
-#ifdef DEBUG
- printk("RTMPAllocateNdisPacket Fail\n");
-#endif
+ pr_devel("RTMPAllocateNdisPacket Fail\n");
+
return NDIS_STATUS_FAILURE;
}
/* 2. clone the frame content */
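An aside on the pr_devel() change above: pr_devel() prints at KERN_DEBUG when DEBUG is defined and compiles away otherwise, which is exactly what the removed #ifdef DEBUG/printk pair did by hand. A rough userspace sketch of the same pattern, with illustrative names (fprintf stands in for printk):

#include <stdio.h>

#ifdef DEBUG
#define devel_printf(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define devel_printf(fmt, ...) do { } while (0)
#endif

int main(void)
{
	/* emits output only when built with -DDEBUG */
	devel_printf("RTMPAllocateNdisPacket Fail\n");
	return 0;
}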
@@ -601,15 +600,15 @@ void hex_dump(char *str, unsigned char *pSrcBufVA, unsigned int SrcBufLen)
return;
pt = pSrcBufVA;
- printk("%s: %p, len = %d\n", str, pSrcBufVA, SrcBufLen);
+ printk(KERN_DEBUG "%s: %p, len = %d\n", str, pSrcBufVA, SrcBufLen);
for (x = 0; x < SrcBufLen; x++) {
if (x % 16 == 0)
- printk("0x%04x : ", x);
- printk("%02x ", ((unsigned char)pt[x]));
+ printk(KERN_DEBUG "0x%04x : ", x);
+ printk(KERN_DEBUG "%02x ", ((unsigned char)pt[x]));
if (x % 16 == 15)
- printk("\n");
+ printk(KERN_DEBUG "\n");
}
- printk("\n");
+ printk(KERN_DEBUG "\n");
}
/*
@@ -767,13 +766,13 @@ void send_monitor_packets(struct rt_rtmp_adapter *pAd, struct rt_rx_blk *pRxBlk)
/* QOS */
if (pRxBlk->pHeader->FC.SubType & 0x08) {
header_len += 2;
- /* Data skip QOS contorl field */
+ /* Data skip QOS control field */
pRxBlk->DataSize -= 2;
}
/* Order bit: A-Ralink or HTC+ */
if (pRxBlk->pHeader->FC.Order) {
header_len += 4;
- /* Data skip HTC contorl field */
+ /* Data skip HTC control field */
pRxBlk->DataSize -= 4;
}
/* Copy Header */
@@ -854,7 +853,7 @@ void send_monitor_packets(struct rt_rtmp_adapter *pAd, struct rt_rx_blk *pRxBlk)
RSSI1,
RSSI_1),
ConvertToRssi(pAd, pRxBlk->pRxWI->RSSI2,
- RSSI_2));;
+ RSSI_2));
ph->signal.did = DIDmsg_lnxind_wlansniffrm_signal;
ph->signal.status = 0;
@@ -926,7 +925,7 @@ int RtmpOSIRQRequest(struct net_device *pNetDev)
request_irq(_pObj->pci_dev->irq, rt2860_interrupt, SA_SHIRQ,
(net_dev)->name, (net_dev));
if (retval != 0)
- printk("RT2860: request_irq ERROR(%d)\n", retval);
+ printk(KERN_ERR "rt2860: request_irq ERROR(%d)\n", retval);
}
return retval;
@@ -1022,7 +1021,7 @@ int RtmpOSTaskKill(struct rt_rtmp_os_task *pTask)
}
#else
CHECK_PID_LEGALITY(pTask->taskPID) {
- printk("Terminate the task(%s) with pid(%d)!\n",
+ printk(KERN_INFO "Terminate the task(%s) with pid(%d)!\n",
pTask->taskName, GET_PID_NUMBER(pTask->taskPID));
mb();
pTask->task_killed = 1;
@@ -1175,7 +1174,7 @@ int RtmpOSNetDevAddrSet(struct net_device *pNetDev, u8 *pMacAddr)
net_dev = pNetDev;
GET_PAD_FROM_NET_DEV(pAd, net_dev);
- /* work-around for the SuSE due to it has it's own interface name management system. */
+ /* work-around for SuSE, which has its own interface name management system. */
{
NdisZeroMemory(pAd->StaCfg.dev_name, 16);
NdisMoveMemory(pAd->StaCfg.dev_name, net_dev->name,
@@ -1300,7 +1299,7 @@ int RtmpOSNetDevAttach(struct net_device *pNetDev,
int ret, rtnl_locked = FALSE;
DBGPRINT(RT_DEBUG_TRACE, ("RtmpOSNetDevAttach()--->\n"));
- /* If we need hook some callback function to the net device structrue, now do it. */
+ /* If we need to hook some callback functions to the net device structure, do it now. */
if (pDevOpHook) {
struct rt_rtmp_adapter *pAd = NULL;
@@ -1351,10 +1350,10 @@ struct net_device *RtmpOSNetDevCreate(struct rt_rtmp_adapter *pAd,
return NULL;
}
- /* find a available interface name, max 32 interfaces */
+ /* find an available interface name, max 32 interfaces */
status = RtmpOSNetDevRequestName(pAd, pNetDev, pNamePrefix, devNum);
if (status != NDIS_STATUS_SUCCESS) {
- /* error! no any available ra name can be used! */
+ /* error! no available ra name can be used! */
DBGPRINT(RT_DEBUG_ERROR,
("Assign interface name (%s with suffix 0~32) failed...\n",
pNamePrefix));
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
index 5acedf18b1ab..92ff5438e777 100644
--- a/drivers/staging/rt2860/rt_linux.h
+++ b/drivers/staging/rt2860/rt_linux.h
@@ -30,7 +30,8 @@
Abstract:
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix typo in a comment
--------- ---------- ----------------------------------------------
*/
@@ -422,11 +423,7 @@ do{ \
#define DBGPRINT(Level, Fmt) DBGPRINT_RAW(Level, Fmt)
-#define DBGPRINT_ERR(Fmt) \
-{ \
- printk("ERROR! "); \
- printk Fmt; \
-}
+#define DBGPRINT_ERR(fmt, args...) printk(KERN_ERR fmt, ##args)
#define DBGPRINT_S(Status, Fmt) \
{ \
@@ -726,7 +723,7 @@ void linux_pci_unmap_single(struct rt_rtmp_adapter *pAd, dma_addr_t dma_addr,
#define RTMP_GET_PACKET_MOREDATA(_p) (RTPKT_TO_OSPKT(_p)->cb[CB_OFF+7])
/* */
-/* Sepcific Pakcet Type definition */
+/* Specific Packet Type definition */
/* */
#define RTMP_PACKET_SPECIFIC_CB_OFFSET 11
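An aside on the DBGPRINT_ERR rework above: the new definition is a GNU-style variadic macro that forwards the format and arguments in a single statement, so call sites pass a normal argument list instead of the old double-parenthesised form (the sta/ hunks further down adjust those call sites). A standalone sketch of the pattern, with fprintf standing in for printk and an illustrative macro name:

#include <stdio.h>

#define DBG_ERR(fmt, args...) fprintf(stderr, fmt, ##args)

int main(void)
{
	int status = -22;

	/* old style needed double parentheses: DBGPRINT_ERR(("failed, status = %d\n", status)); */
	DBG_ERR("request failed, status = %d\n", status);
	DBG_ERR("works with no arguments too\n");	/* ##args swallows the trailing comma */
	return 0;
}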
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index ad60ceaf4b88..236dd36d349a 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -31,7 +31,8 @@
Create and register network interface.
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix typos in comments
-------- ---------- ----------------------------------------------
*/
@@ -101,8 +102,8 @@ int MainVirtualIF_close(IN struct net_device *net_dev)
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))) {
struct rt_mlme_disassoc_req DisReq;
struct rt_mlme_queue_elem *MsgElem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
- MEM_ALLOC_FLAG);
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
+ MEM_ALLOC_FLAG);
if (MsgElem) {
COPY_MAC_ADDR(DisReq.Addr,
@@ -234,7 +235,7 @@ int rt28xx_close(struct net_device *dev)
RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE);
#endif /* RTMP_MAC_PCI // */
- /* If dirver doesn't wake up firmware here, */
+ /* If driver doesn't wake up firmware here, */
/* NICLoadFirmware will hang forever when interface is up again. */
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)) {
AsicForceWakeup(pAd, TRUE);
@@ -310,8 +311,8 @@ int rt28xx_close(struct net_device *dev)
RTMP_ASIC_INTERRUPT_DISABLE(pAd);
}
/* Receive packets to clear DMA index after disable interrupt. */
- /*RTMPHandleRxDoneInterrupt(pAd); */
- /* put to radio off to save power when driver unload. After radiooff, can't write /read register. So need to finish all */
+ /* RTMPHandleRxDoneInterrupt(pAd); */
+ /* put the radio off to save power when the driver unloads. After radio off, can't write/read registers, so need to finish all */
/* register access before Radio off. */
brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0);
@@ -420,7 +421,7 @@ int rt28xx_open(struct net_device *dev)
{
u32 reg = 0;
RTMP_IO_READ32(pAd, 0x1300, &reg); /* clear garbage interrupts */
- printk("0x1300 = %08x\n", reg);
+ printk(KERN_DEBUG "0x1300 = %08x\n", reg);
}
{
@@ -483,8 +484,6 @@ struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd,
net_dev->ml_priv = (void *)pAd;
pAd->net_dev = net_dev;
- netif_stop_queue(net_dev);
-
return net_dev;
}
@@ -724,7 +723,8 @@ Note:
int AdapterBlockAllocateMemory(void *handle, void ** ppAd)
{
- *ppAd = (void *)vmalloc(sizeof(struct rt_rtmp_adapter)); /*pci_alloc_consistent(pci_dev, sizeof(struct rt_rtmp_adapter), phy_addr); */
+ *ppAd = vmalloc(sizeof(struct rt_rtmp_adapter));
+ /* pci_alloc_consistent(pci_dev, sizeof(struct rt_rtmp_adapter), phy_addr); */
if (*ppAd) {
NdisZeroMemory(*ppAd, sizeof(struct rt_rtmp_adapter));
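An aside on the two allocation hunks above (the kmalloc in MainVirtualIF_close and the vmalloc here): both drop the explicit cast on the allocator's return value, since kmalloc()/vmalloc() return void *, which converts to any object pointer implicitly in C, and the cast can hide a missing prototype. A minimal sketch with a hypothetical struct name:

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_adapter {			/* hypothetical, stands in for rt_rtmp_adapter */
	int state;
};

static struct demo_adapter *demo_alloc_small(void)
{
	/* no (struct demo_adapter *) cast needed on the void * result */
	return kmalloc(sizeof(struct demo_adapter), GFP_KERNEL);
}

static struct demo_adapter *demo_alloc_large(void)
{
	/* same rule for vmalloc, used for large, non-contiguous blocks */
	return vmalloc(sizeof(struct demo_adapter));
}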
diff --git a/drivers/staging/rt2860/rt_pci_rbus.c b/drivers/staging/rt2860/rt_pci_rbus.c
index 3004be6da003..e5fb67cd9a68 100644
--- a/drivers/staging/rt2860/rt_pci_rbus.c
+++ b/drivers/staging/rt2860/rt_pci_rbus.c
@@ -31,7 +31,8 @@
Create and register network interface.
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix a typo
-------- ---------- ----------------------------------------------
*/
@@ -356,7 +357,7 @@ static void mgmt_dma_done_tasklet(unsigned long data)
RTMPHandleMgmtRingDmaDoneInterrupt(pAd);
- /* if you use RTMP_SEM_LOCK, sometimes kernel will hang up, no any */
+ /* if you use RTMP_SEM_LOCK, sometimes kernel will hang up, without any */
/* bug report output */
RTMP_INT_LOCK(&pAd->irq_lock, flags);
/*
@@ -787,7 +788,7 @@ IRQ_HANDLE_TYPE rt2860_interrupt(int irq, void *dev_instance)
}
/*
- * invaild or writeback cache
+ * invalidate or writeback cache
* and convert virtual address to physical address
*/
dma_addr_t linux_pci_map_single(struct rt_rtmp_adapter *pAd, void *ptr,
diff --git a/drivers/staging/rt2860/rt_usb.c b/drivers/staging/rt2860/rt_usb.c
index bcfc0f54d2aa..eb037d2e04a2 100644
--- a/drivers/staging/rt2860/rt_usb.c
+++ b/drivers/staging/rt2860/rt_usb.c
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- Name Date Modification logs
+ Name Date Modification logs
+ Justin P. Mattock 11/07/2010 Fix some typos.
*/
@@ -40,25 +41,25 @@
void dump_urb(struct urb *purb)
{
- printk("urb :0x%08lx\n", (unsigned long)purb);
- printk("\tdev :0x%08lx\n", (unsigned long)purb->dev);
- printk("\t\tdev->state :0x%d\n", purb->dev->state);
- printk("\tpipe :0x%08x\n", purb->pipe);
- printk("\tstatus :%d\n", purb->status);
- printk("\ttransfer_flags :0x%08x\n", purb->transfer_flags);
- printk("\ttransfer_buffer :0x%08lx\n",
+ printk(KERN_DEBUG "urb :0x%08lx\n", (unsigned long)purb);
+ printk(KERN_DEBUG "\tdev :0x%08lx\n", (unsigned long)purb->dev);
+ printk(KERN_DEBUG "\t\tdev->state :0x%d\n", purb->dev->state);
+ printk(KERN_DEBUG "\tpipe :0x%08x\n", purb->pipe);
+ printk(KERN_DEBUG "\tstatus :%d\n", purb->status);
+ printk(KERN_DEBUG "\ttransfer_flags :0x%08x\n", purb->transfer_flags);
+ printk(KERN_DEBUG "\ttransfer_buffer :0x%08lx\n",
(unsigned long)purb->transfer_buffer);
- printk("\ttransfer_buffer_length:%d\n", purb->transfer_buffer_length);
- printk("\tactual_length :%d\n", purb->actual_length);
- printk("\tsetup_packet :0x%08lx\n",
+ printk(KERN_DEBUG "\ttransfer_buffer_length:%d\n", purb->transfer_buffer_length);
+ printk(KERN_DEBUG "\tactual_length :%d\n", purb->actual_length);
+ printk(KERN_DEBUG "\tsetup_packet :0x%08lx\n",
(unsigned long)purb->setup_packet);
- printk("\tstart_frame :%d\n", purb->start_frame);
- printk("\tnumber_of_packets :%d\n", purb->number_of_packets);
- printk("\tinterval :%d\n", purb->interval);
- printk("\terror_count :%d\n", purb->error_count);
- printk("\tcontext :0x%08lx\n",
+ printk(KERN_DEBUG "\tstart_frame :%d\n", purb->start_frame);
+ printk(KERN_DEBUG "\tnumber_of_packets :%d\n", purb->number_of_packets);
+ printk(KERN_DEBUG "\tinterval :%d\n", purb->interval);
+ printk(KERN_DEBUG "\terror_count :%d\n", purb->error_count);
+ printk(KERN_DEBUG "\tcontext :0x%08lx\n",
(unsigned long)purb->context);
- printk("\tcomplete :0x%08lx\n\n",
+ printk(KERN_DEBUG "\tcomplete :0x%08lx\n\n",
(unsigned long)purb->complete);
}
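An aside on the printk conversions in this file and in rt_linux.c above: each message now carries an explicit level (KERN_DEBUG, KERN_INFO, KERN_ERR), which is simply a string prefix concatenated with the format; without it the message is logged at the default level. A throwaway module sketch of the convention, with illustrative names:

#include <linux/kernel.h>
#include <linux/module.h>

static int __init loglevel_demo_init(void)
{
	printk(KERN_DEBUG "loglevel_demo: verbose debugging detail\n");
	printk(KERN_INFO  "loglevel_demo: routine informational message\n");
	printk(KERN_ERR   "loglevel_demo: something actually failed\n");
	return 0;
}

static void __exit loglevel_demo_exit(void)
{
}

module_init(loglevel_demo_init);
module_exit(loglevel_demo_exit);
MODULE_LICENSE("GPL");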
@@ -279,7 +280,7 @@ static void rtusb_dataout_complete(unsigned long data)
&& !RTUSB_TEST_BULK_FLAG(pAd,
(fRTUSB_BULK_OUT_DATA_FRAG <<
BulkOutPipeId))) {
- /* Indicate There is data avaliable */
+ /* Indicate There is data available */
RTUSB_SET_BULK_FLAG(pAd,
(fRTUSB_BULK_OUT_DATA_NORMAL <<
BulkOutPipeId));
@@ -335,7 +336,7 @@ static void rtusb_null_frame_done_tasklet(unsigned long data)
}
/* Always call Bulk routine, even reset bulk. */
- /* The protectioon of rest bulk should be in BulkOut routine */
+ /* The protection of reset bulk should be in the BulkOut routine */
RTUSBKickBulkOut(pAd);
}
@@ -383,7 +384,7 @@ static void rtusb_rts_frame_done_tasklet(unsigned long data)
RTMP_SEM_UNLOCK(&pAd->BulkOutLock[pRTSContext->BulkOutPipeId]);
/* Always call Bulk routine, even reset bulk. */
- /* The protectioon of rest bulk should be in BulkOut routine */
+ /* The protection of reset bulk should be in the BulkOut routine */
RTUSBKickBulkOut(pAd);
}
@@ -427,7 +428,7 @@ static void rtusb_pspoll_frame_done_tasklet(unsigned long data)
RTMP_SEM_UNLOCK(&pAd->BulkOutLock[0]);
/* Always call Bulk routine, even reset bulk. */
- /* The protectioon of rest bulk should be in BulkOut routine */
+ /* The protection of reset bulk should be in the BulkOut routine */
RTUSBKickBulkOut(pAd);
}
@@ -575,7 +576,7 @@ static void rtusb_mgmt_dma_done_tasklet(unsigned long data)
} else {
/* Always call Bulk routine, even reset bulk. */
- /* The protectioon of rest bulk should be in BulkOut routine */
+ /* The protection of reset bulk should be in the BulkOut routine */
if (pAd->MgmtRing.TxSwFreeIdx <
MGMT_RING_SIZE
/* pMLMEContext->bWaitingBulkOut == TRUE */) {
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index ca54e53b6603..70daaa4f9ac2 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -31,11 +31,12 @@
Miniport generic portion header file
Revision History:
- Who When What
+ Who When What
-------- ---------- ----------------------------------------------
- Paul Lin 2002-08-01 created
- James Tan 2002-09-06 modified (Revise NTCRegTable)
- John Chang 2004-09-06 modified for RT2600
+ Paul Lin 2002-08-01 created
+ James Tan 2002-09-06 modified (Revise NTCRegTable)
+ John Chang 2004-09-06 modified for RT2600
+ Justin P. Mattock 11/07/2010 Fix some typos
*/
#ifndef __RTMP_H__
#define __RTMP_H__
@@ -337,7 +338,7 @@ struct rt_rtmp_sg_list {
#define LEAP_ON(_p) (((_p)->StaCfg.LeapAuthMode) == CISCO_AuthModeLEAP)
#define LEAP_CCKM_ON(_p) ((((_p)->StaCfg.LeapAuthMode) == CISCO_AuthModeLEAP) && ((_p)->StaCfg.LeapAuthInfo.CCKM == TRUE))
-/* if orginal Ethernet frame contains no LLC/SNAP, then an extra LLC/SNAP encap is required */
+/* if original Ethernet frame contains no LLC/SNAP, then an extra LLC/SNAP encap is required */
#define EXTRA_LLCSNAP_ENCAP_FROM_PKT_START(_pBufVA, _pExtraLlcSnapEncap) \
{ \
if (((*(_pBufVA + 12) << 8) + *(_pBufVA + 13)) > 1500) { \
@@ -466,7 +467,7 @@ struct rt_rtmp_dmabuf {
/* Control block (Descriptor) for all ring descriptor DMA operation, buffer must be */
/* contiguous physical memory. char stored the binding Rx packet descriptor */
/* which won't be released, driver has to wait until upper layer return the packet */
-/* before giveing up this rx ring descriptor to ASIC. NDIS_BUFFER is assocaited pair */
+/* before giving up this rx ring descriptor to ASIC. NDIS_BUFFER is associated pair */
/* to describe the packet buffer. For Tx, char stored the tx packet descriptor */
/* which driver should ACK upper layer when the tx is physically done or failed. */
/* */
@@ -602,7 +603,7 @@ struct rt_counter_ralink {
};
struct rt_counter_drs {
- /* to record the each TX rate's quality. 0 is best, the bigger the worse. */
+ /* record each TX rate's quality. 0 is best, the bigger the worse. */
u16 TxQuality[MAX_STEP_OF_TX_RATE_SWITCH];
u8 PER[MAX_STEP_OF_TX_RATE_SWITCH];
u8 TxRateUpPenalty; /* extra # of second penalty due to last unstable condition */
@@ -719,7 +720,7 @@ struct rt_fragment_frame {
/* Packet information for NdisQueryPacket */
/* */
struct rt_packet_info {
- u32 PhysicalBufferCount; /* Physical breaks of buffer descripor chained */
+ u32 PhysicalBufferCount; /* Physical breaks of buffer descriptor chained */
u32 BufferCount; /* Number of Buffer descriptor chained */
u32 TotalPacketLength; /* Self explained */
char *pFirstBuffer; /* Pointer to first buffer descriptor */
@@ -846,8 +847,8 @@ typedef enum _ABGBAND_STATE_ {
/* Power save method control */
typedef union _PS_CONTROL {
struct {
- unsigned long EnablePSinIdle:1; /* Enable radio off when not connect to AP. radio on only when sitesurvey, */
- unsigned long EnableNewPS:1; /* Enable new Chip power save fucntion . New method can only be applied in chip version after 2872. and PCIe. */
+ unsigned long EnablePSinIdle:1; /* Enable radio off when not connected to AP. Radio on only when doing site survey. */
+ unsigned long EnableNewPS:1; /* Enable new chip power save function. New method can only be applied in chip versions after 2872 and PCIe. */
unsigned long rt30xxPowerMode:2; /* Power Level Mode for rt30xx chip */
unsigned long rt30xxFollowHostASPM:1; /* Card Follows Host's setting for rt30xx chip. */
unsigned long rt30xxForceASPMTest:1; /* Force enable L1 for rt30xx chip. This has higher priority than rt30xxFollowHostASPM Mode. */
@@ -1117,8 +1118,8 @@ struct rt_beacon_sync {
unsigned long TimIELocationInBeacon[HW_BEACON_MAX_COUNT];
unsigned long CapabilityInfoLocationInBeacon[HW_BEACON_MAX_COUNT];
BOOLEAN EnableBeacon; /* trigger to enable beacon transmission. */
- u8 BeaconBitMap; /* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter need to change. */
- u8 DtimBitOn; /* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter need to change. */
+ u8 BeaconBitMap; /* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter needs to change. */
+ u8 DtimBitOn; /* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter needs to change. */
};
#endif /* RTMP_MAC_USB // */
@@ -1211,7 +1212,7 @@ struct rt_common_config {
/*BOOLEAN bAutoTxRateSwitch; */
u8 MinTxRate; /* RATE_1, RATE_2, RATE_5_5, RATE_11 */
u8 RtsRate; /* RATE_xxx */
- HTTRANSMIT_SETTING MlmeTransmit; /* MGMT frame PHY rate setting when operatin at Ht rate. */
+ HTTRANSMIT_SETTING MlmeTransmit; /* MGMT frame PHY rate setting when operating at HT rate. */
u8 MlmeRate; /* RATE_xxx, used to send MLME frames */
u8 BasicMlmeRate; /* Default Rate for sending MLME frames */
@@ -1264,7 +1265,7 @@ struct rt_common_config {
struct rt_ht_capability_ie HtCapability;
struct rt_add_ht_info_ie AddHTInfo; /* Useful as AP. */
/*This IE is used with channel switch announcement element when changing to a new 40MHz. */
- /*This IE is included in channel switch ammouncement frames 7.4.1.5, beacons, probe Rsp. */
+ /*This IE is included in channel switch announcement frames 7.4.1.5, beacons, probe Rsp. */
struct rt_new_ext_chan_ie NewExtChanOffset; /*7.3.2.20A, 1 if extension channel is above the control channel, 3 if below, 0 if not present */
BOOLEAN bHTProtect;
@@ -1329,7 +1330,7 @@ struct rt_sta_admin_config {
/* GROUP 1 - */
/* User configuration loaded from Registry, E2PROM or OID_xxx. These settings describe */
/* the user intended configuration, but not necessary fully equal to the final */
- /* settings in ACTIVE BSS after negotiation/compromize with the BSS holder (either */
+ /* settings in ACTIVE BSS after negotiation/compromise with the BSS holder (either */
/* AP or IBSS holder). */
/* Once initialized, user configuration can only be changed via OID_xxx */
u8 BssType; /* BSS_INFRA or BSS_ADHOC */
@@ -1386,12 +1387,12 @@ struct rt_sta_admin_config {
/* For WPA countermeasures */
unsigned long LastMicErrorTime; /* record last MIC error time */
- unsigned long MicErrCnt; /* Should be 0, 1, 2, then reset to zero (after disassoiciation). */
+ unsigned long MicErrCnt; /* Should be 0, 1, 2, then reset to zero (after disassociation). */
BOOLEAN bBlockAssoc; /* Block associate attempt for 60 seconds after counter measure occurred. */
/* For WPA-PSK supplicant state */
WPA_STATE WpaState; /* Default is SS_NOTUSE and handled by microsoft 802.1x */
u8 ReplayCounter[8];
- u8 ANonce[32]; /* ANonce for WPA-PSK from aurhenticator */
+ u8 ANonce[32]; /* ANonce for WPA-PSK from authenticator */
u8 SNonce[32]; /* SNonce for WPA-PSK */
u8 LastSNR0; /* last received BEACON's SNR */
@@ -1423,7 +1424,7 @@ struct rt_sta_admin_config {
u8 RSNIE_Len;
u8 RSN_IE[MAX_LEN_OF_RSNIE]; /* The content saved here should be little-endian format. */
- unsigned long CLBusyBytes; /* Save the total bytes received durning channel load scan time */
+ unsigned long CLBusyBytes; /* Save the total bytes received during channel load scan time */
u16 RPIDensity[8]; /* Array for RPI density collection */
u8 RMReqCnt; /* Number of measurement request saved. */
@@ -1489,9 +1490,9 @@ struct rt_sta_admin_config {
BOOLEAN bForceTxBurst; /* 1: force enble TX PACKET BURST, 0: disable */
};
-/* This data structure keep the current active BSS/IBSS's configuration that this STA */
+/* This data structure keeps the current active BSS/IBSS's configuration that this STA */
/* had agreed upon joining the network. Which means these parameters are usually decided */
-/* by the BSS/IBSS creator instead of user configuration. Data in this data structurre */
+/* by the BSS/IBSS creator instead of user configuration. Data in this data structure */
/* is valid only when either ADHOC_ON(pAd) or INFRA_ON(pAd) is TRUE. */
/* Normally, after SCAN or failed roaming attempts, we need to recover back to */
/* the current active settings. */
@@ -1519,7 +1520,7 @@ struct rt_mac_table_entry {
/*Choose 1 from ValidAsWDS and ValidAsCLI to validize. */
BOOLEAN ValidAsCLI; /* Sta mode, set this TRUE after Linkup,too. */
BOOLEAN ValidAsWDS; /* This is WDS Entry. only for AP mode. */
- BOOLEAN ValidAsApCli; /*This is a AP-Client entry, only for AP mode which enable AP-Client functions. */
+ BOOLEAN ValidAsApCli; /* This is an AP-Client entry, only for AP mode which enables AP-Client functions. */
BOOLEAN ValidAsMesh;
BOOLEAN ValidAsDls; /* This is DLS Entry. only for STA mode. */
BOOLEAN isCached;
@@ -1527,7 +1528,7 @@ struct rt_mac_table_entry {
u8 EnqueueEapolStartTimerRunning; /* Enqueue EAPoL-Start for triggering EAP SM */
/*jan for wpa */
- /* record which entry revoke MIC Failure , if it leaves the BSS itself, AP won't update aMICFailTime MIB */
+ /* record which entry revoke MIC Failure, if it leaves the BSS itself, AP won't update aMICFailTime MIB */
u8 CMTimerRunning;
u8 apidx; /* MBSS number */
u8 RSNIE_Len;
@@ -1722,7 +1723,7 @@ struct rt_rtmp_adapter {
unsigned long Rt3xxRalinkLinkCtrl; /* USed for 3090F chip */
u16 DeviceID; /* Read from PCI config */
unsigned long AccessBBPFailCount;
- BOOLEAN bPCIclkOff; /* flag that indicate if the PICE power status in Configuration SPace.. */
+ BOOLEAN bPCIclkOff; /* flag that indicates the PCIe power status in Configuration Space. */
BOOLEAN bPCIclkOffDisableTx; /* */
BOOLEAN brt30xxBanMcuCmd; /*when = 0xff means all commands are ok to set . */
@@ -1871,9 +1872,9 @@ struct rt_rtmp_adapter {
/* ---------------------------- */
u8 RfIcType; /* RFIC_xxx */
unsigned long RfFreqOffset; /* Frequency offset for channel switching */
- struct rt_rtmp_rf_regs LatchRfRegs; /* latch th latest RF programming value since RF IC doesn't support READ */
+ struct rt_rtmp_rf_regs LatchRfRegs; /* latch the latest RF programming value since RF IC doesn't support READ */
- EEPROM_ANTENNA_STRUC Antenna; /* Since ANtenna definition is different for a & g. We need to save it for future reference. */
+ EEPROM_ANTENNA_STRUC Antenna; /* Since Antenna definition is different for a & g. We need to save it for future reference. */
EEPROM_NIC_CONFIG2_STRUC NicConfig2;
/* This soft Rx Antenna Diversity mechanism is used only when user set */
@@ -1990,7 +1991,7 @@ struct rt_rtmp_adapter {
struct rt_common_config CommonCfg;
struct rt_mlme Mlme;
- /* AP needs those vaiables for site survey feature. */
+ /* AP needs those variables for site survey feature. */
struct rt_mlme_aux MlmeAux; /* temporary settings used during MLME state machine */
struct rt_bss_table ScanTab; /* store the latest SCAN result */
@@ -2012,7 +2013,7 @@ struct rt_rtmp_adapter {
/* various Counters */
struct rt_counter_802_3 Counters8023; /* 802.3 counters */
struct rt_counter_802_11 WlanCounters; /* 802.11 MIB counters */
- struct rt_counter_ralink RalinkCounters; /* Ralink propriety counters */
+ struct rt_counter_ralink RalinkCounters; /* Ralink proprietary counters */
struct rt_counter_drs DrsCounters; /* counters for Dynamic TX Rate Switching */
struct rt_private PrivateInfo; /* Private information & counters */
@@ -2024,7 +2025,7 @@ struct rt_rtmp_adapter {
u16 Sequence;
/* Control disconnect / connect event generation */
- /*+++Didn't used anymore */
+ /*+++Not used anymore */
unsigned long LinkDownTime;
/*--- */
unsigned long LastRxRate;
@@ -2036,7 +2037,7 @@ struct rt_rtmp_adapter {
unsigned long ExtraInfo; /* Extra information for displaying status */
unsigned long SystemErrorBitmap; /* b0: E2PROM version error */
- /*+++Didn't used anymore */
+ /*+++Not used anymore */
unsigned long MacIcVersion; /* MAC/BBP serial interface issue solved after ver.D */
/*--- */
@@ -2089,7 +2090,7 @@ struct rt_rtmp_adapter {
unsigned long BulkOutReq;
unsigned long BulkOutComplete;
unsigned long BulkOutCompleteOther;
- unsigned long BulkOutCompleteCancel; /* seems not use now? */
+ unsigned long BulkOutCompleteCancel; /* seems not used now? */
unsigned long BulkInReq;
unsigned long BulkInComplete;
unsigned long BulkInCompleteFail;
@@ -2196,9 +2197,9 @@ struct rt_rx_blk {
struct rt_tx_blk {
u8 QueIdx;
u8 TxFrameType; /* Indicate the Transmission type of the all frames in one batch */
- u8 TotalFrameNum; /* Total frame number want to send-out in one batch */
+ u8 TotalFrameNum; /* Total number of frames to send out in one batch */
u16 TotalFragNum; /* Total frame fragments required in one batch */
- u16 TotalFrameLen; /* Total length of all frames want to send-out in one batch */
+ u16 TotalFrameLen; /* Total length of all frames to send out in one batch */
struct rt_queue_header TxPacketList;
struct rt_mac_table_entry *pMacEntry; /* NULL: packet with 802.11 RA field is multicast/broadcast address */
@@ -2207,7 +2208,7 @@ struct rt_tx_blk {
/* Following structure used for the characteristics of a specific packet. */
void *pPacket;
u8 *pSrcBufHeader; /* Reference to the head of sk_buff->data */
- u8 *pSrcBufData; /* Reference to the sk_buff->data, will changed depends on hanlding progresss */
+ u8 *pSrcBufData; /* Reference to the sk_buff->data, will change depending on the handling progress */
u32 SrcBufLen; /* Length of packet payload which not including Layer 2 header */
u8 *pExtraLlcSnapEncap; /* NULL means no extra LLC/SNAP is required */
u8 HeaderBuf[128]; /* TempBuffer for TX_INFO + TX_WI + 802.11 Header + padding + AMSDU SubHeader + LLC/SNAP */
@@ -2219,7 +2220,7 @@ struct rt_tx_blk {
u8 apidx; /* The interface associated to this packet */
u8 Wcid; /* The MAC entry associated to this packet */
u8 UserPriority; /* priority class of packet */
- u8 FrameGap; /* what kind of IFS this packet use */
+ u8 FrameGap; /* what kind of IFS does this packet use */
u8 MpduReqNum; /* number of fragments of this frame */
u8 TxRate; /* TODO: Obsoleted? Should change to MCS? */
u8 CipherAlg; /* cipher alogrithm */
@@ -2978,7 +2979,7 @@ void LinkDown(struct rt_rtmp_adapter *pAd, IN BOOLEAN IsReqFromAP);
void IterateOnBssTab(struct rt_rtmp_adapter *pAd);
-void IterateOnBssTab2(struct rt_rtmp_adapter *pAd);;
+void IterateOnBssTab2(struct rt_rtmp_adapter *pAd);
void JoinParmFill(struct rt_rtmp_adapter *pAd,
struct rt_mlme_join_req *JoinReq, unsigned long BssIdx);
diff --git a/drivers/staging/rt2860/rtmp_def.h b/drivers/staging/rt2860/rtmp_def.h
index 9c54bacb845b..6ac617e7c9bb 100644
--- a/drivers/staging/rt2860/rtmp_def.h
+++ b/drivers/staging/rt2860/rtmp_def.h
@@ -31,10 +31,11 @@
Miniport related definition header
Revision History:
- Who When What
+ Who When What
-------- ---------- ----------------------------------------------
- Paul Lin 08-01-2002 created
- John Chang 08-05-2003 add definition for 11g & other drafts
+ Paul Lin 08-01-2002 created
+ John Chang 08-05-2003 add definition for 11g & other drafts
+ Justin P. Mattock 11/07/2010 Fix some typos
*/
#ifndef __RTMP_DEF_H__
#define __RTMP_DEF_H__
@@ -111,11 +112,11 @@
WMM Note: If memory of your system is not much, please reduce the definition;
or when you do WMM test, the queue for low priority AC will be full, i.e.
TX_RING_SIZE + MAX_PACKETS_IN_QUEUE packets for the AC will be buffered in
- WLAN, maybe no any packet buffer can be got in Ethernet driver.
+ WLAN, and maybe no packet buffer can be obtained in the Ethernet driver.
- Sometimes no packet buffer can be got in Ethernet driver, the system will
+ Sometimes, when no packet buffer can be obtained in the Ethernet driver, the system will
send flow control packet to the sender to slow down its sending rate.
- So no WMM can be saw in the air.
+ So no WMM can be seen in the air.
*/
/*
@@ -125,7 +126,7 @@
And in rt_main_end.c, clConfig.clNum = RX_RING_SIZE * 3; is changed to
clConfig.clNum = RX_RING_SIZE * 4;
*/
-/* TODO: For VxWorks the size is 256. Shall we cahnge the value as 256 for all OS????? */
+/* TODO: For VxWorks the size is 256. Shall we change the value as 256 for all OS? */
#define MAX_PACKETS_IN_QUEUE (512) /*(512) // to pass WMM A5-WPAPSK */
#define MAX_PACKETS_IN_MCAST_PS_QUEUE 32
@@ -171,7 +172,7 @@
#define fRTMP_ADAPTER_SCAN_2040 0x04000000
#define fRTMP_ADAPTER_RADIO_MEASUREMENT 0x08000000
-#define fRTMP_ADAPTER_START_UP 0x10000000 /*Devive already initialized and enabled Tx/Rx. */
+#define fRTMP_ADAPTER_START_UP 0x10000000 /*Device already initialized and enabled Tx/Rx. */
#define fRTMP_ADAPTER_MEDIA_STATE_CHANGE 0x20000000
#define fRTMP_ADAPTER_IDLE_RADIO_OFF 0x40000000
@@ -205,8 +206,8 @@
#define fRTMP_PS_SET_PCI_CLK_OFF_COMMAND 0x00000002
/* Indicate driver should disable kick off hardware to send packets from now. */
#define fRTMP_PS_DISABLE_TX 0x00000004
-/* Indicate driver should IMMEDIATELY fo to sleep after receiving AP's beacon in which doesn't indicate unicate nor multicast packets for me */
-/*. This flag is used ONLY in RTMPHandleRxDoneInterrupt routine. */
+/* Indicate driver should IMMEDIATELY go to sleep after receiving AP's beacon which doesn't indicate unicast nor multicast packets for me */
+/* This flag is used ONLY in RTMPHandleRxDoneInterrupt routine. */
#define fRTMP_PS_GO_TO_SLEEP_NOW 0x00000008
#define fRTMP_PS_TOGGLE_L1 0x00000010 /* Use Toggle L1 mechanism for rt28xx PCIe */
@@ -303,7 +304,7 @@
/* WDS definition */
#define MAX_WDS_ENTRY 4
-#define WDS_PAIRWISE_KEY_OFFSET 60 /* WDS links uses pairwise key#60 ~ 63 in ASIC pairwise key table */
+#define WDS_PAIRWISE_KEY_OFFSET 60 /* WDS links use pairwise key#60 ~ 63 in ASIC pairwise key table */
#define WDS_DISABLE_MODE 0
#define WDS_RESTRICT_MODE 1
@@ -559,7 +560,7 @@
#define IE_ADD_HT2 53 /* 802.11n d1. ADDITIONAL HT CAPABILITY. ELEMENT ID TBD */
/* For 802.11n D3.03 */
-/*#define IE_NEW_EXT_CHA_OFFSET 62 // 802.11n d1. New extension channel offset elemet */
+/*#define IE_NEW_EXT_CHA_OFFSET 62 // 802.11n d1. New extension channel offset element */
#define IE_SECONDARY_CH_OFFSET 62 /* 802.11n D3.03 Secondary Channel Offset element */
#define IE_WAPI 68 /* WAPI information element */
#define IE_2040_BSS_COEXIST 72 /* 802.11n D3.0.3 */
@@ -678,7 +679,7 @@
#define ACT_MACHINE_BASE 0
-/*Those PEER_xx_CATE number is based on real Categary value in IEEE spec. Please don'es modify it by your self. */
+/*These PEER_xx_CATE numbers are based on the real Category value in the IEEE spec. Please do not modify them yourself. */
/*Category */
#define MT2_PEER_SPECTRUM_CATE 0
#define MT2_PEER_QOS_CATE 1
@@ -748,7 +749,7 @@
#define ACT_FUNC_SIZE (MAX_ACT_STATE * MAX_ACT_MSG)
/* */
-/* STA's AUTHENTICATION state machine: states, evvents, total function # */
+/* STA's AUTHENTICATION state machine: states, events, total function # */
/* */
#define AUTH_REQ_IDLE 0
#define AUTH_WAIT_SEQ2 1
@@ -948,7 +949,7 @@
#define BLOCK_ACK 0x60 /* b6:5 = 11 */
/* */
-/* rtmp_data.c use these definition */
+/* rtmp_data.c uses these definitions */
/* */
#define LENGTH_802_11 24
#define LENGTH_802_11_AND_H 30
@@ -1288,7 +1289,7 @@
#define IW_STA_LINKDOWN_EVENT_FLAG 0x0210
#define IW_SCAN_COMPLETED_EVENT_FLAG 0x0211
#define IW_SCAN_ENQUEUE_FAIL_EVENT_FLAG 0x0212
-/* if add new system event flag, please upadte the IW_SYS_EVENT_FLAG_END */
+/* if add new system event flag, please update the IW_SYS_EVENT_FLAG_END */
#define IW_SYS_EVENT_FLAG_END 0x0212
#define IW_SYS_EVENT_TYPE_NUM (IW_SYS_EVENT_FLAG_END - IW_SYS_EVENT_FLAG_START + 1)
/* For system event - end */
@@ -1305,7 +1306,7 @@
#define IW_SPOOF_DEAUTH_EVENT_FLAG 0x0307
#define IW_SPOOF_UNKNOWN_MGMT_EVENT_FLAG 0x0308
#define IW_REPLAY_ATTACK_EVENT_FLAG 0x0309
-/* if add new spoof attack event flag, please upadte the IW_SPOOF_EVENT_FLAG_END */
+/* if add new spoof attack event flag, please update the IW_SPOOF_EVENT_FLAG_END */
#define IW_SPOOF_EVENT_FLAG_END 0x0309
#define IW_SPOOF_EVENT_TYPE_NUM (IW_SPOOF_EVENT_FLAG_END - IW_SPOOF_EVENT_FLAG_START + 1)
/* For spoof attack event - end */
@@ -1319,7 +1320,7 @@
#define IW_FLOOD_DISASSOC_EVENT_FLAG 0x0404
#define IW_FLOOD_DEAUTH_EVENT_FLAG 0x0405
#define IW_FLOOD_EAP_REQ_EVENT_FLAG 0x0406
-/* if add new flooding attack event flag, please upadte the IW_FLOOD_EVENT_FLAG_END */
+/* if add new flooding attack event flag, please update the IW_FLOOD_EVENT_FLAG_END */
#define IW_FLOOD_EVENT_FLAG_END 0x0406
#define IW_FLOOD_EVENT_TYPE_NUM (IW_FLOOD_EVENT_FLAG_END - IW_FLOOD_EVENT_FLAG_START + 1)
/* For flooding attack - end */
diff --git a/drivers/staging/rt2860/rtmp_timer.h b/drivers/staging/rt2860/rtmp_timer.h
index 28b8ac6e8352..15b628743500 100644
--- a/drivers/staging/rt2860/rtmp_timer.h
+++ b/drivers/staging/rt2860/rtmp_timer.h
@@ -28,13 +28,14 @@
rtmp_timer.h
Abstract:
- Ralink Wireless Driver timer related data structures and delcarations
+ Ralink Wireless Driver timer related data structures and declarations
Revision History:
- Who When What
+ Who When What
-------- ---------- ----------------------------------------------
- Name Date Modification logs
- Shiang Tu Aug-28-2008 init version
+ Name Date Modification logs
+ Shiang Tu Aug-28-2008 init version
+ Justin P. Mattock 11/07/2010 Fix a typo
*/
@@ -51,8 +52,8 @@
/* ----------------- Timer Related MARCO ---------------*/
/* In some os or chipset, we have a lot of timer functions and will read/write register, */
-/* it's not allowed in Linux USB sub-system to do it ( because of sleep issue when */
-/* submit to ctrl pipe). So we need a wrapper function to take care it. */
+/* it's not allowed in the Linux USB sub-system to do it (because of sleep issues when */
+/* submitting to the ctrl pipe). So we need a wrapper function to take care of it. */
#ifdef RTMP_TIMER_TASK_SUPPORT
typedef void(*RTMP_TIMER_TASK_HANDLE) (void *SystemSpecific1,
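An aside on the wrapper the comment above refers to: a timer callback runs in atomic context, while submitting to the USB control pipe may sleep, so register work triggered by a timer has to be deferred to a task running in process context. A hedged sketch of that split using the current timer/workqueue API (which differs from the 2010-era API used in this driver); names are illustrative and setup/teardown are omitted:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct timer_list tmr;
	struct work_struct wk;
};

static void demo_work_fn(struct work_struct *work)
{
	/* process context: sleeping calls such as a USB control transfer are fine here */
}

static void demo_timer_fn(struct timer_list *t)
{
	struct demo_ctx *ctx = from_timer(ctx, t, tmr);

	schedule_work(&ctx->wk);		/* defer the part that may sleep */
	mod_timer(&ctx->tmr, jiffies + HZ);	/* re-arm; one-second period is arbitrary */
}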
diff --git a/drivers/staging/rt2860/spectrum.h b/drivers/staging/rt2860/spectrum.h
index 648fd632b606..4c325ba7ba21 100644
--- a/drivers/staging/rt2860/spectrum.h
+++ b/drivers/staging/rt2860/spectrum.h
@@ -37,7 +37,7 @@ char RTMP_GetTxPwr(struct rt_rtmp_adapter *pAd, IN HTTRANSMIT_SETTING HTTxMode);
==========================================================================
Description:
Prepare Measurement request action frame and enqueue it into
- management queue waiting for transmition.
+ management queue waiting for transmission.
Parametrs:
1. the destination mac address of the frame.
@@ -60,7 +60,7 @@ void MakeMeasurementReqFrame(struct rt_rtmp_adapter *pAd,
==========================================================================
Description:
Prepare Measurement report action frame and enqueue it into
- management queue waiting for transmition.
+ management queue waiting for transmission.
Parametrs:
1. the destination mac address of the frame.
@@ -80,7 +80,7 @@ void EnqueueMeasurementRep(struct rt_rtmp_adapter *pAd,
==========================================================================
Description:
Prepare TPC Request action frame and enqueue it into
- management queue waiting for transmition.
+ management queue waiting for transmission.
Parametrs:
1. the destination mac address of the frame.
@@ -94,7 +94,7 @@ void EnqueueTPCReq(struct rt_rtmp_adapter *pAd, u8 *pDA, u8 DialogToken);
==========================================================================
Description:
Prepare TPC Report action frame and enqueue it into
- management queue waiting for transmition.
+ management queue waiting for transmission.
Parametrs:
1. the destination mac address of the frame.
@@ -110,7 +110,7 @@ void EnqueueTPCRep(struct rt_rtmp_adapter *pAd,
==========================================================================
Description:
Prepare Channel Switch Announcement action frame and enqueue it into
- management queue waiting for transmition.
+ management queue waiting for transmission.
Parametrs:
1. the destination mac address of the frame.
@@ -126,7 +126,7 @@ void EnqueueChSwAnn(struct rt_rtmp_adapter *pAd,
/*
==========================================================================
Description:
- Spectrun action frames Handler such as channel switch annoucement,
+ Spectrum action frames handler, such as channel switch announcement,
measurement report, measurement request actions frames.
Parametrs:
diff --git a/drivers/staging/rt2860/sta/assoc.c b/drivers/staging/rt2860/sta/assoc.c
index b7efb0b6b3f0..59e931c3190d 100644
--- a/drivers/staging/rt2860/sta/assoc.c
+++ b/drivers/staging/rt2860/sta/assoc.c
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- John 2004-9-3 porting from RT2500
+ John 2004-9-3 porting from RT2500
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -277,10 +278,10 @@ void MlmeAssocReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *
u16 VarIesOffset;
u16 Status;
- /* Block all authentication request durning WPA block period */
+ /* Block all authentication request during WPA block period */
if (pAd->StaCfg.bBlockAssoc == TRUE) {
DBGPRINT(RT_DEBUG_TRACE,
- ("ASSOC - Block Assoc request durning WPA block period!\n"));
+ ("ASSOC - Block Assoc request during WPA block period!\n"));
pAd->Mlme.AssocMachine.CurrState = ASSOC_IDLE;
Status = MLME_STATE_MACHINE_REJECT;
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_ASSOC_CONF, 2,
@@ -605,10 +606,10 @@ void MlmeReassocReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem
u8 *pOutBuffer = NULL;
u16 Status;
- /* Block all authentication request durning WPA block period */
+ /* Block all authentication request during WPA block period */
if (pAd->StaCfg.bBlockAssoc == TRUE) {
DBGPRINT(RT_DEBUG_TRACE,
- ("ASSOC - Block ReAssoc request durning WPA block period!\n"));
+ ("ASSOC - Block ReAssoc request during WPA block period!\n"));
pAd->Mlme.AssocMachine.CurrState = ASSOC_IDLE;
Status = MLME_STATE_MACHINE_REJECT;
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_REASSOC_CONF, 2,
@@ -1001,7 +1002,7 @@ void AssocPostProc(struct rt_rtmp_adapter *pAd, u8 *pAddr2, u16 CapabilityInfo,
pAd->MlmeAux.CapabilityInfo =
CapabilityInfo & SUPPORTED_CAPABILITY_INFO;
- /* Some HT AP might lost WMM IE. We add WMM ourselves. beacuase HT requires QoS on. */
+ /* Some HT AP might lose the WMM IE. We add WMM ourselves, because HT requires QoS on. */
if ((HtCapabilityLen > 0) && (pEdcaParm->bValid == FALSE)) {
pEdcaParm->bValid = TRUE;
pEdcaParm->Aifsn[0] = 3;
@@ -1054,7 +1055,7 @@ void AssocPostProc(struct rt_rtmp_adapter *pAd, u8 *pAddr2, u16 CapabilityInfo,
/* Set New WPA information */
Idx = BssTableSearch(&pAd->ScanTab, pAddr2, pAd->MlmeAux.Channel);
if (Idx == BSS_NOT_FOUND) {
- DBGPRINT_ERR(("ASSOC - Can't find BSS after receiving Assoc response\n"));
+ DBGPRINT_ERR("ASSOC - Can't find BSS after receiving Assoc response\n");
} else {
/* Init variable */
pAd->MacTab.Content[BSSID_WCID].RSNIE_Len = 0;
diff --git a/drivers/staging/rt2860/sta/auth.c b/drivers/staging/rt2860/sta/auth.c
index 404bd220679d..23ea00b896b0 100644
--- a/drivers/staging/rt2860/sta/auth.c
+++ b/drivers/staging/rt2860/sta/auth.c
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- John 2004-9-3 porting from RT2500
+ John 2004-9-3 porting from RT2500
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -455,10 +456,10 @@ BOOLEAN AUTH_ReqSend(struct rt_rtmp_adapter *pAd,
u8 *pOutBuffer = NULL;
unsigned long FrameLen = 0, tmp = 0;
- /* Block all authentication request durning WPA block period */
+ /* Block all authentication request during WPA block period */
if (pAd->StaCfg.bBlockAssoc == TRUE) {
DBGPRINT(RT_DEBUG_TRACE,
- ("%s - Block Auth request durning WPA block period!\n",
+ ("%s - Block Auth request during WPA block period!\n",
pSMName));
pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE;
Status = MLME_STATE_MACHINE_REJECT;
@@ -508,8 +509,7 @@ BOOLEAN AUTH_ReqSend(struct rt_rtmp_adapter *pAd,
RTMPSetTimer(pAuthTimer, Timeout);
return TRUE;
} else {
- DBGPRINT_ERR(("%s - MlmeAuthReqAction() sanity check failed\n",
- pSMName));
+ DBGPRINT_ERR("%s - MlmeAuthReqAction() sanity check failed\n", pSMName);
return FALSE;
}
diff --git a/drivers/staging/rt2860/sta/connect.c b/drivers/staging/rt2860/sta/connect.c
index c380551c0354..4996258f6ecd 100644
--- a/drivers/staging/rt2860/sta/connect.c
+++ b/drivers/staging/rt2860/sta/connect.c
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- John 2004-08-08 Major modification from RT2560
+ John 2004-08-08 Major modification from RT2560
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -64,7 +65,7 @@ u8 CipherSuiteWpaNoneAesLen =
/* The following MACRO is called after 1. starting an new IBSS, 2. successfully JOIN an IBSS, */
/* or 3. successfully ASSOCIATE to a BSS, 4. successfully RE_ASSOCIATE to a BSS */
-/* All settings successfuly negotiated furing MLME state machines become final settings */
+/* All settings successfully negotiated during MLME state machines become final settings */
/* and are copied to pAd->StaActive */
#define COPY_SETTINGS_FROM_MLME_AUX_TO_ACTIVE_CFG(_pAd) \
{ \
@@ -214,8 +215,7 @@ void MlmeCntlMachinePerformAction(struct rt_rtmp_adapter *pAd,
break;
#endif /* RTMP_MAC_USB // */
default:
- DBGPRINT_ERR(("ERROR! CNTL - Illegal message type(=%ld)",
- Elem->MsgType));
+ DBGPRINT_ERR("ERROR! CNTL - Illegal message type(=%ld)", Elem->MsgType);
break;
}
}
@@ -553,7 +553,7 @@ void CntlOidRTBssidProc(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *
NdisMoveMemory(&pAd->MlmeAux.SsidBssTab.BssEntry[0],
&pAd->ScanTab.BssEntry[BssIdx], sizeof(struct rt_bss_entry));
- /* Add SSID into MlmeAux for site surey joining hidden SSID */
+ /* Add SSID into MlmeAux for site survey joining hidden SSID */
pAd->MlmeAux.SsidLen = pAd->ScanTab.BssEntry[BssIdx].SsidLen;
NdisMoveMemory(pAd->MlmeAux.Ssid, pAd->ScanTab.BssEntry[BssIdx].Ssid,
pAd->MlmeAux.SsidLen);
@@ -666,7 +666,7 @@ void CntlOidRTBssidProc(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *
}
/* Roaming is the only external request triggering CNTL state machine */
-/* despite of other "SET OID" operation. All "SET OID" related oerations */
+/* despite of other "SET OID" operation. All "SET OID" related operations */
/* happen in sequence, because no other SET OID will be sent to this device */
/* until the the previous SET operation is complete (successful o failed). */
/* So, how do we quarantee this ROAMING request won't corrupt other "SET OID"? */
@@ -1224,7 +1224,7 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
/* Change to AP channel */
if ((pAd->CommonCfg.CentralChannel > pAd->CommonCfg.Channel)
&& (pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth == BW_40)) {
- /* Must using 40MHz. */
+ /* Must use 40MHz. */
pAd->CommonCfg.BBPCurrentBW = BW_40;
AsicSwitchChannel(pAd, pAd->CommonCfg.CentralChannel, FALSE);
AsicLockChannel(pAd, pAd->CommonCfg.CentralChannel);
@@ -1259,7 +1259,7 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
} else if ((pAd->CommonCfg.CentralChannel < pAd->CommonCfg.Channel)
&& (pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth ==
BW_40)) {
- /* Must using 40MHz. */
+ /* Must use 40MHz. */
pAd->CommonCfg.BBPCurrentBW = BW_40;
AsicSwitchChannel(pAd, pAd->CommonCfg.CentralChannel, FALSE);
AsicLockChannel(pAd, pAd->CommonCfg.CentralChannel);
@@ -1343,12 +1343,12 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
AsicSetSlotTime(pAd, TRUE);
AsicSetEdcaParm(pAd, &pAd->CommonCfg.APEdcaParm);
- /* Call this for RTS protectionfor legacy rate, we will always enable RTS threshold, but normally it will not hit */
+ /* Call this for RTS protection for legacy rate, we will always enable RTS threshold, but normally it will not hit */
AsicUpdateProtect(pAd, 0, (OFDMSETPROTECT | CCKSETPROTECT), TRUE,
FALSE);
if ((pAd->StaActive.SupportedPhyInfo.bHtEnable == TRUE)) {
- /* Update HT protectionfor based on AP's operating mode. */
+ /* Update HT protection based on AP's operating mode. */
if (pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent == 1) {
AsicUpdateProtect(pAd,
pAd->MlmeAux.AddHtInfo.AddHtInfo2.
@@ -1530,7 +1530,7 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
/* Add BSSID to WCID search table */
AsicUpdateRxWCIDTable(pAd, BSSID_WCID, pAd->CommonCfg.Bssid);
- /* If WEP is enabled, add paiewise and shared key */
+ /* If WEP is enabled, add pairwise and shared key */
if (((pAd->StaCfg.WpaSupplicantUP) &&
(pAd->StaCfg.WepStatus == Ndis802_11WEPEnabled) &&
(pAd->StaCfg.PortSecured == WPA_802_1X_PORT_SECURED)) ||
@@ -1681,9 +1681,9 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
pAd->Mlme.PeriodicRound = 0;
pAd->Mlme.OneSecPeriodicRound = 0;
pAd->bConfigChanged = FALSE; /* Reset config flag */
- pAd->ExtraInfo = GENERAL_LINK_UP; /* Update extra information to link is up */
+ pAd->ExtraInfo = GENERAL_LINK_UP; /* Update extra information after link is up */
- /* Set asic auto fall back */
+ /* Set ASIC auto fall back */
{
u8 *pTable;
u8 TableSize = 0;
@@ -1854,8 +1854,8 @@ void LinkUp(struct rt_rtmp_adapter *pAd, u8 BssType)
Note:
We need more information to know it's this requst from AP.
If yes! we need to do extra handling, for example, remove the WPA key.
- Otherwise on 4-way handshaking will faied, since the WPA key didn't be
- remove while auto reconnect.
+ Otherwise the 4-way handshaking will fail, since the WPA key didn't get
+ removed during auto reconnect.
Disconnect request from AP, it means we will start afresh 4-way handshaking
on WPA mode.
@@ -1870,9 +1870,9 @@ void LinkDown(struct rt_rtmp_adapter *pAd, IN BOOLEAN IsReqFromAP)
return;
RTMP_CLEAR_PSFLAG(pAd, fRTMP_PS_GO_TO_SLEEP_NOW);
- /*Comment the codes, beasue the line 2291 call the same function. */
- /*RTMPCancelTimer(&pAd->Mlme.PsPollTimer, &Cancelled); */
- /* Not allow go to sleep within linkdown function. */
+ /* Comment the codes, because line 2291 calls the same function. */
+ /* RTMPCancelTimer(&pAd->Mlme.PsPollTimer, &Cancelled); */
+ /* Not allowed to go to sleep within the linkdown function. */
RTMP_CLEAR_PSFLAG(pAd, fRTMP_PS_CAN_GO_SLEEP);
if (pAd->CommonCfg.bWirelessEvent) {
@@ -1970,7 +1970,7 @@ void LinkDown(struct rt_rtmp_adapter *pAd, IN BOOLEAN IsReqFromAP)
/* Set LED */
RTMPSetLED(pAd, LED_LINK_DOWN);
pAd->LedIndicatorStrength = 0xF0;
- RTMPSetSignalLED(pAd, -100); /* Force signal strength Led to be turned off, firmware is not done it. */
+ RTMPSetSignalLED(pAd, -100); /* Force signal strength Led to be turned off, firmware has not done it. */
AsicDisableSync(pAd);
diff --git a/drivers/staging/rt2860/sta/rtmp_data.c b/drivers/staging/rt2860/sta/rtmp_data.c
index 23879b7cd49a..e82c6b669eb2 100644
--- a/drivers/staging/rt2860/sta/rtmp_data.c
+++ b/drivers/staging/rt2860/sta/rtmp_data.c
@@ -31,7 +31,8 @@
Data path subroutines
Revision History:
- Who When What
+ Who When What
+ Justin P. Mattock 11/07/2010 Fix typos
-------- ---------- ----------------------------------------------
*/
#include "../rt_config.h"
@@ -257,8 +258,8 @@ void STARxDataFrameAnnounce(struct rt_rtmp_adapter *pAd,
&& (pAd->CommonCfg.bDisableReordering == 0)) {
Indicate_AMPDU_Packet(pAd, pRxBlk, FromWhichBSSID);
} else {
- /* Determin the destination of the EAP frame */
- /* to WPA state machine or upper layer */
+ /* Determine the destination of the EAP frame */
+ /* to WPA state machine or upper layer */
STARxEAPOLFrameIndicate(pAd, pEntry, pRxBlk,
FromWhichBSSID);
}
@@ -644,7 +645,7 @@ void STAHandleRxMgmtFrame(struct rt_rtmp_adapter *pAd, struct rt_rx_blk *pRxBlk)
/* First check the size, it MUST not exceed the mlme queue size */
if (pRxWI->MPDUtotalByteCount > MGMT_DMA_BUFFER_SIZE) {
- DBGPRINT_ERR(("STAHandleRxMgmtFrame: frame too large, size = %d \n", pRxWI->MPDUtotalByteCount));
+ DBGPRINT_ERR("STAHandleRxMgmtFrame: frame too large, size = %d \n", pRxWI->MPDUtotalByteCount);
break;
}
@@ -853,7 +854,7 @@ Return Value:
NONE
Note:
- This function do early checking and classification for send-out packet.
+ This function does early checking and classification for the send-out packet.
You only can put OS-depened & STA related code in here.
========================================================================
*/
@@ -943,7 +944,7 @@ int STASendPacket(struct rt_rtmp_adapter *pAd, void *pPacket)
DBGPRINT(RT_DEBUG_ERROR,
("STASendPacket --> pSrcBufVA == NULL !SrcBufLen=%x\n",
SrcBufLen));
- /* Resourece is low, system did not allocate virtual address */
+ /* Resource is low, system did not allocate virtual address */
/* return NDIS_STATUS_FAILURE directly to upper layer */
RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
return NDIS_STATUS_FAILURE;
@@ -979,7 +980,7 @@ int STASendPacket(struct rt_rtmp_adapter *pAd, void *pPacket)
DBGPRINT(RT_DEBUG_ERROR,
("STASendPacket->Cannot find pEntry(%pM) in MacTab!\n",
pSrcBufVA));
- /* Resourece is low, system did not allocate virtual address */
+ /* Resource is low, system did not allocate virtual address */
/* return NDIS_STATUS_FAILURE directly to upper layer */
RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
return NDIS_STATUS_FAILURE;
@@ -1057,9 +1058,9 @@ int STASendPacket(struct rt_rtmp_adapter *pAd, void *pPacket)
/* STEP 2. Check the requirement of RTS: */
/* If multiple fragment required, RTS is required only for the first fragment */
- /* if the fragment size large than RTS threshold */
+ /* if the fragment size is larger than RTS threshold */
/* For RT28xx, Let ASIC send RTS/CTS */
-/* RTMP_SET_PACKET_RTS(pPacket, 0); */
+ /* RTMP_SET_PACKET_RTS(pPacket, 0); */
if (NumberOfFrag > 1)
RTSRequired =
(pAd->CommonCfg.FragmentThreshold >
@@ -1171,8 +1172,8 @@ int STASendPacket(struct rt_rtmp_adapter *pAd, void *pPacket)
========================================================================
Routine Description:
- This subroutine will scan through releative ring descriptor to find
- out avaliable free ring descriptor and compare with request size.
+ This subroutine will scan through the relevant ring descriptors to find
+ an available free ring descriptor and compare it with the request size.
Arguments:
pAd Pointer to our adapter
@@ -1588,7 +1589,7 @@ static inline u8 *STA_Build_ARalink_Frame_Header(struct rt_rtmp_adapter *pAd,
pHeaderBufPtr += 2;
pTxBlk->MpduHeaderLen += 2;
}
- /* padding at front of LLC header. LLC header should at 4-bytes aligment. */
+ /* padding at front of LLC header. LLC header should be at 4-byte alignment. */
pTxBlk->HdrPadLen = (unsigned long)pHeaderBufPtr;
pHeaderBufPtr = (u8 *)ROUND_UP(pHeaderBufPtr, 4);
pTxBlk->HdrPadLen = (unsigned long)(pHeaderBufPtr - pTxBlk->HdrPadLen);
@@ -2014,7 +2015,7 @@ void STA_Legacy_Frame_Tx(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk)
pHeaderBufPtr += 2;
pTxBlk->MpduHeaderLen += 2;
}
- /* The remaining content of MPDU header should locate at 4-octets aligment */
+ /* The remaining content of the MPDU header should be located at 4-octet alignment */
pTxBlk->HdrPadLen = (unsigned long)pHeaderBufPtr;
pHeaderBufPtr = (u8 *)ROUND_UP(pHeaderBufPtr, 4);
pTxBlk->HdrPadLen = (unsigned long)(pHeaderBufPtr - pTxBlk->HdrPadLen);
@@ -2114,7 +2115,7 @@ void STA_ARalink_Frame_Tx(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk)
STA_Build_ARalink_Frame_Header(pAd, pTxBlk);
/* It's ok write the TxWI here, because the TxWI->MPDUtotalByteCount */
- /* will be updated after final frame was handled. */
+ /* will be updated after the final frame is handled. */
RTMPWriteTxWI_Data(pAd,
(struct rt_txwi *) (&pTxBlk->
HeaderBuf
@@ -2291,8 +2292,8 @@ void STA_Fragment_Frame_Tx(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk
pTxBlk->pExtraLlcSnapEncap, pTxBlk->pKey,
0);
- /* NOTE: DON'T refer the skb->len directly after following copy. Becasue the length is not adjust */
- /* to correct lenght, refer to pTxBlk->SrcBufLen for the packet length in following progress. */
+ /* NOTE: DON'T refer to skb->len directly after the following copy, because the length is not adjusted */
+ /* to the correct length; refer to pTxBlk->SrcBufLen for the packet length in the following processing. */
NdisMoveMemory(pTxBlk->pSrcBufData + pTxBlk->SrcBufLen,
&pAd->PrivateInfo.Tx.MIC[0], 8);
/*skb_put((RTPKT_TO_OSPKT(pTxBlk->pPacket))->tail, 8); */
@@ -2301,7 +2302,7 @@ void STA_Fragment_Frame_Tx(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk
pTxBlk->CipherAlg = CIPHER_TKIP_NO_MIC;
}
/* */
- /* calcuate the overhead bytes that encryption algorithm may add. This */
+ /* calculate the overhead bytes that encryption algorithm may add. This */
/* affects the calculate of "duration" field */
/* */
if ((pTxBlk->CipherAlg == CIPHER_WEP64)
diff --git a/drivers/staging/rt2860/sta/sanity.c b/drivers/staging/rt2860/sta/sanity.c
index 8f9fd19be151..0c32604f2d3f 100644
--- a/drivers/staging/rt2860/sta/sanity.c
+++ b/drivers/staging/rt2860/sta/sanity.c
@@ -32,7 +32,8 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- John Chang 2004-09-01 add WMM support
+ John Chang 2004-09-01 add WMM support
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -118,7 +119,7 @@ BOOLEAN PeerAssocRspSanity(struct rt_rtmp_adapter *pAd, void * pMsg, unsigned lo
NdisMoveMemory(pAid, &pFrame->Octet[4], 2);
Length += 2;
- /* Aid already swaped byte order in RTMPFrameEndianChange() for big endian platform */
+ /* Aid already swapped byte order in RTMPFrameEndianChange() for big endian platform */
*pAid = (*pAid) & 0x3fff; /* AID is low 14-bit */
/* -- get supported rates from payload and advance the pointer */
diff --git a/drivers/staging/rt2860/sta/sync.c b/drivers/staging/rt2860/sta/sync.c
index 747d3c6d1851..7054ba1323d0 100644
--- a/drivers/staging/rt2860/sta/sync.c
+++ b/drivers/staging/rt2860/sta/sync.c
@@ -32,8 +32,9 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- John Chang 2004-09-01 modified for rt2561/2661
- Jan Lee 2006-08-01 modified for rt2860 for 802.11n
+ John Chang 2004-09-01 modified for rt2561/2661
+ Jan Lee 2006-08-01 modified for rt2860 for 802.11n
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -233,9 +234,9 @@ void MlmeScanReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *E
RTMPSuspendMsduTransmission(pAd);
/* */
- /* To prevent data lost. */
- /* Send an NULL data with turned PSM bit on to current associated AP before SCAN progress. */
- /* And should send an NULL data with turned PSM bit off to AP, when scan progress done */
+ /* To prevent data loss. */
+ /* Send a NULL data frame with the PSM bit turned on to the currently associated AP before the SCAN starts. */
+ /* And send a NULL data frame with the PSM bit turned off to the AP when the scan is done. */
/* */
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED)
&& (INFRA_ON(pAd))) {
@@ -283,7 +284,7 @@ void MlmeScanReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *E
DBGPRINT(RT_DEBUG_TRACE, ("SYNC - BBP R4 to 20MHz.l\n"));
ScanNextChannel(pAd);
} else {
- DBGPRINT_ERR(("SYNC - MlmeScanReqAction() sanity check fail\n"));
+ DBGPRINT_ERR("SYNC - MlmeScanReqAction() sanity check fail\n");
pAd->Mlme.SyncMachine.CurrState = SYNC_IDLE;
Status = MLME_INVALID_FORMAT;
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_SCAN_CONF, 2,
@@ -535,7 +536,7 @@ void MlmeStartReqAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_START_CONF, 2,
&Status);
} else {
- DBGPRINT_ERR(("SYNC - MlmeStartReqAction() sanity check fail.\n"));
+ DBGPRINT_ERR("SYNC - MlmeStartReqAction() sanity check fail.\n");
pAd->Mlme.SyncMachine.CurrState = SYNC_IDLE;
Status = MLME_INVALID_FORMAT;
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_START_CONF, 2,
@@ -750,9 +751,9 @@ void PeerBeaconAtJoinAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_el
/* BEACON from desired BSS/IBSS found. We should be able to decide most */
/* BSS parameters here. */
- /* Q. But what happen if this JOIN doesn't conclude a successful ASSOCIATEION? */
- /* Do we need to receover back all parameters belonging to previous BSS? */
- /* A. Should be not. There's no back-door recover to previous AP. It still need */
+ /* Q. But what happens if this JOIN doesn't conclude a successful ASSOCIATION? */
+ /* Do we need to recover all parameters belonging to the previous BSS? */
+ /* A. It should not. There's no back-door recovery to the previous AP. It still needs */
/* a new JOIN-AUTH-ASSOC sequence. */
if (MAC_ADDR_EQUAL(pAd->MlmeAux.Bssid, Bssid)) {
DBGPRINT(RT_DEBUG_TRACE,
@@ -876,7 +877,7 @@ void PeerBeaconAtJoinAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_el
pAd->MlmeAux.CfpMaxDuration = Cf.CfpMaxDuration;
pAd->MlmeAux.APRalinkIe = RalinkIe;
- /* Copy AP's supported rate to MlmeAux for creating assoication request */
+ /* Copy AP's supported rate to MlmeAux for creating association request */
/* Also filter out not supported rate */
pAd->MlmeAux.SupRateLen = SupRateLen;
NdisMoveMemory(pAd->MlmeAux.SupRate, SupRate,
@@ -1207,7 +1208,7 @@ void PeerBeacon(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem)
}
if (index >= pAd->ChannelListNum) {
- DBGPRINT_ERR(("PeerBeacon(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum));
+ DBGPRINT_ERR("PeerBeacon(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum);
}
}
/* if the ssid matched & bssid unmatched, we should select the bssid with large value. */
diff --git a/drivers/staging/rt2860/sta/wpa.c b/drivers/staging/rt2860/sta/wpa.c
index 69b8a24daa21..ff348325028b 100644
--- a/drivers/staging/rt2860/sta/wpa.c
+++ b/drivers/staging/rt2860/sta/wpa.c
@@ -33,7 +33,8 @@
Who When What
-------- ---------- ----------------------------------------------
Jan Lee 03-07-22 Initial
- Paul Lin 03-11-28 Modify for supplicant
+ Paul Lin 03-11-28 Modify for supplicant
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "../rt_config.h"
@@ -86,7 +87,7 @@ void RTMPReportMicError(struct rt_rtmp_adapter *pAd, struct rt_cipher_key *pWpaK
/* Violate MIC error counts, MIC countermeasures kicks in */
pAd->StaCfg.MicErrCnt++;
/* We shall block all reception */
- /* We shall clean all Tx ring and disassoicate from AP after next EAPOL frame */
+ /* We shall clean all Tx ring and disassociate from AP after next EAPOL frame */
/* */
/* No necessary to clean all Tx ring, on RTMPHardTransmit will stop sending non-802.1X EAPOL packets */
/* if pAd->StaCfg.MicErrCnt greater than 2. */
diff --git a/drivers/staging/rt2860/sta_ioctl.c b/drivers/staging/rt2860/sta_ioctl.c
index e095a44cbc0e..5717e12a9544 100644
--- a/drivers/staging/rt2860/sta_ioctl.c
+++ b/drivers/staging/rt2860/sta_ioctl.c
@@ -31,10 +31,11 @@
IOCTL related subroutines
Revision History:
- Who When What
+ Who When What
-------- ---------- ----------------------------------------------
- Rory Chen 01-03-2003 created
- Rory Chen 02-14-2005 modify to support RT61
+ Rory Chen 01-03-2003 created
+ Rory Chen 02-14-2005 modify to support RT61
+ Justin P. Mattock 11/07/2010 Fix typos
*/
#include "rt_config.h"
@@ -851,7 +852,7 @@ int rt_ioctl_giwscan(struct net_device *dev,
/*
Protocol:
- it will show scanned AP's WirelessMode .
+ it will show scanned AP's WirelessMode.
it might be
802.11a
802.11a/n
@@ -875,13 +876,13 @@ int rt_ioctl_giwscan(struct net_device *dev,
strcpy(iwe.u.name, "802.11a");
} else {
/*
- if one of non B mode rate is set supported rate . it mean G only.
+ if one of the non-B mode rates is set as a supported rate, it means G only.
*/
for (rateCnt = 0;
rateCnt < pBssEntry->SupRateLen;
rateCnt++) {
/*
- 6Mbps(140) 9Mbps(146) and >=12Mbps(152) are supported rate , it mean G only.
+ 6Mbps(140), 9Mbps(146) and >=12Mbps(152) are supported rates; any of them means G only.
*/
if (pBssEntry->SupRate[rateCnt] == 140
|| pBssEntry->SupRate[rateCnt] ==
@@ -1417,7 +1418,7 @@ int rt_ioctl_siwencode(struct net_device *dev,
if ((index >= 0) && (index < 4)) {
pAdapter->StaCfg.DefaultKeyId = index;
} else
- /* Don't complain if only change the mode */
+ /* Don't complain if only the mode is changed */
if (!(erq->flags & IW_ENCODE_MODE))
return -EINVAL;
}
@@ -2732,8 +2733,8 @@ int Set_NetworkType_Proc(struct rt_rtmp_adapter *pAdapter, char *arg)
}
if (INFRA_ON(pAdapter)) {
/*BOOLEAN Cancelled; */
- /* Set the AutoReconnectSsid to prevent it reconnect to old SSID */
- /* Since calling this indicate user don't want to connect to that SSID anymore. */
+ /* Set the AutoReconnectSsid to prevent it from reconnecting to the old SSID */
+ /* Since calling this indicates the user doesn't want to connect to that SSID anymore. */
pAdapter->MlmeAux.AutoReconnectSsidLen = 32;
NdisZeroMemory(pAdapter->MlmeAux.
AutoReconnectSsid,
@@ -2766,8 +2767,8 @@ int Set_NetworkType_Proc(struct rt_rtmp_adapter *pAdapter, char *arg)
LinkDown(pAdapter, FALSE);
}
if (ADHOC_ON(pAdapter)) {
- /* Set the AutoReconnectSsid to prevent it reconnect to old SSID */
- /* Since calling this indicate user don't want to connect to that SSID anymore. */
+ /* Set the AutoReconnectSsid to prevent it from reconnecting to the old SSID */
+ /* Since calling this indicates the user doesn't want to connect to that SSID anymore. */
pAdapter->MlmeAux.AutoReconnectSsidLen = 32;
NdisZeroMemory(pAdapter->MlmeAux.
AutoReconnectSsid,
@@ -2884,7 +2885,7 @@ int Set_NetworkType_Proc(struct rt_rtmp_adapter *pAdapter, char *arg)
}
/* Enable Rx with promiscuous reception */
RTMP_IO_WRITE32(pAdapter, RX_FILTR_CFG, 0x3);
- /* ASIC supporsts sniffer function with replacing RSSI with timestamp. */
+ /* ASIC supports the sniffer function by replacing RSSI with a timestamp. */
/*RTMP_IO_READ32(pAdapter, MAC_SYS_CTRL, &Value); */
/*Value |= (0x80); */
/*RTMP_IO_WRITE32(pAdapter, MAC_SYS_CTRL, Value); */
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index cd15daae5412..322bf49ee906 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -27,8 +27,8 @@
#include "rt_config.h"
/* Following information will be show when you run 'modinfo' */
-/* *** If you have a solution for the bug in current version of driver, please mail to me. */
-/* Otherwise post to forum in ralinktech's web site(www.ralinktech.com) and let all users help you. *** */
+/* If you have a solution for the bug in the current version of the driver, please e-mail me. */
+/* Otherwise post to the forum at ralinktech's web site (www.ralinktech.com) and let all users help you. */
MODULE_AUTHOR("Paul Lin <paul_lin@ralinktech.com>");
MODULE_DESCRIPTION("RT2870/RT3070 Wireless Lan Linux Driver");
MODULE_LICENSE("GPL");
@@ -106,6 +106,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
{USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
+ {USB_DEVICE(0x1737, 0x0078)}, /* Linksys WUSB100v2 */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
{USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
{USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
@@ -233,7 +234,7 @@ BOOLEAN RT28XXChipsetCheck(IN void *_dev_p)
for (i = 0; i < rtusb_usb_id_len; i++) {
if (dev_p->descriptor.idVendor == rtusb_usb_id[i].idVendor &&
dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) {
- printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
+ printk(KERN_INFO "rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
dev_p->descriptor.idVendor,
dev_p->descriptor.idProduct);
break;
@@ -241,7 +242,7 @@ BOOLEAN RT28XXChipsetCheck(IN void *_dev_p)
}
if (i == rtusb_usb_id_len) {
- printk("rt2870: Error! Device Descriptor not matching!\n");
+ printk(KERN_ERR "rt2870: Error! Device Descriptor not matching!\n");
return FALSE;
}
@@ -323,7 +324,7 @@ static BOOLEAN USBDevConfigInit(IN struct usb_device *dev,
if (!(pAd->BulkInEpAddr && pAd->BulkOutEpAddr[0])) {
printk
- ("%s: Could not find both bulk-in and bulk-out endpoints\n",
+ (KERN_ERR "%s: Could not find both bulk-in and bulk-out endpoints\n",
__FUNCTION__);
return FALSE;
}
@@ -423,7 +424,7 @@ static int rt2870_resume(struct usb_interface *intf)
/* Init driver module */
int __init rtusb_init(void)
{
- printk("rtusb init --->\n");
+ printk(KERN_DEBUG "rtusb init --->\n");
return usb_register(&rtusb_driver);
}
@@ -431,7 +432,7 @@ int __init rtusb_init(void)
void __exit rtusb_exit(void)
{
usb_deregister(&rtusb_driver);
- printk("<--- rtusb exit\n");
+ printk(KERN_DEBUG "<--- rtusb exit\n");
}
module_init(rtusb_init);
@@ -814,7 +815,7 @@ static void rt2870_disconnect(struct usb_device *dev, struct rt_rtmp_adapter *pA
dev->bus->bus_name, dev->devpath));
if (!pAd) {
usb_put_dev(dev);
- printk("rtusb_disconnect: pAd == NULL!\n");
+ printk(KERN_ERR "rtusb_disconnect: pAd == NULL!\n");
return;
}
RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST);
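Editorial aside on the printk() hunks in this file: each one adds an explicit log level. The KERN_* marker is simply string-concatenated with the format, and the pr_*() helpers are the usual shorthand for the same thing. The module sketch below is illustrative only and is not part of the patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init loglevel_demo_init(void)
{
	/* Explicit level: KERN_INFO concatenates with the format string. */
	printk(KERN_INFO "demo: idVendor = 0x%x\n", 0x1737);
	/* Equivalent shorthand helpers. */
	pr_err("demo: device descriptor not matching\n");
	pr_debug("demo: init done\n");
	return 0;
}

static void __exit loglevel_demo_exit(void)
{
	pr_debug("demo: exit\n");
}

module_init(loglevel_demo_init);
module_exit(loglevel_demo_exit);
MODULE_LICENSE("GPL");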
@@ -884,8 +885,8 @@ static int __devinit rt2870_probe(IN struct usb_interface *intf,
if (net_dev == NULL)
goto err_out_free_radev;
- /* Here are the net_device structure with usb specific parameters. */
- /* for supporting Network Manager.
+ /* Here is the net_device structure with USB-specific parameters
+ * for supporting Network Manager.
* Set the sysfs physical device reference for the network logical device if set prior to registration will
* cause a symlink during initialization.
*/
diff --git a/drivers/staging/rt2860/wpa.h b/drivers/staging/rt2860/wpa.h
index 6199ae6cdcd0..116fc2caa886 100644
--- a/drivers/staging/rt2860/wpa.h
+++ b/drivers/staging/rt2860/wpa.h
@@ -32,13 +32,14 @@
Revision History:
Who When What
-------- ---------- ----------------------------------------------
- Name Date Modification logs
+ Name Date Modification logs
+ Justin P. Mattock 11/07/2010 Fix a typo
*/
#ifndef __WPA_H__
#define __WPA_H__
-/* EAPOL Key descripter frame format related length */
+/* EAPOL Key descriptor frame format related length */
#define LEN_KEY_DESC_NONCE 32
#define LEN_KEY_DESC_IV 16
#define LEN_KEY_DESC_RSC 8
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index dd8a221e21ae..b26b5a8b5f6b 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -822,7 +822,7 @@ int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
txb->queue_index = UP2AC(skb->priority);
} else {
- txb->queue_index = WME_AC_BK;;
+ txb->queue_index = WME_AC_BK;
}
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index b1786dcac245..fac4eee28e4e 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -89,7 +89,7 @@ u32 rt_global_debug_component =
// COMP_INTR |
COMP_ERR ; //always open err flags on
-static const struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8192_pci_id_tbl) = {
#ifdef RTL8190P
/* Realtek */
/* Dlink */
@@ -2283,9 +2283,7 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
IMR_TXFOVW | IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
priv->AcmControl = 0;
- priv->pFirmware = (rt_firmware*)vmalloc(sizeof(rt_firmware));
- if (priv->pFirmware)
- memset(priv->pFirmware, 0, sizeof(rt_firmware));
+ priv->pFirmware = vzalloc(sizeof(rt_firmware));
/* rx related queue */
skb_queue_head_init(&priv->rx_queue);
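Editorial aside: the vzalloc() conversion above folds the vmalloc()+memset() pair into one call. A small sketch of the before/after pattern, using a hypothetical structure name in place of rt_firmware:

#include <linux/string.h>
#include <linux/vmalloc.h>

struct demo_firmware {			/* hypothetical stand-in for rt_firmware */
	unsigned char image[64 * 1024];
};

/* Old pattern: allocate, then zero by hand (only if the allocation succeeded). */
static struct demo_firmware *demo_alloc_old(void)
{
	struct demo_firmware *fw = vmalloc(sizeof(*fw));

	if (fw)
		memset(fw, 0, sizeof(*fw));
	return fw;
}

/* New pattern: vzalloc() allocates and zeroes in a single call. */
static struct demo_firmware *demo_alloc_new(void)
{
	return vzalloc(sizeof(struct demo_firmware));
}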
diff --git a/drivers/staging/rtl8192e/r819xE_phy.c b/drivers/staging/rtl8192e/r819xE_phy.c
index d83bcbcb20bd..50cd0e52b921 100644
--- a/drivers/staging/rtl8192e/r819xE_phy.c
+++ b/drivers/staging/rtl8192e/r819xE_phy.c
@@ -2596,7 +2596,7 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
break;
}
- return ret;;
+ return ret;
}
/******************************************************************************
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 206d924a707c..eefc657ce99e 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -9,7 +9,6 @@ ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
#ccflags-y += -DUSB_TX_DRIVER_AGGREGATION_ENABLE
#ccflags-y += -DUSB_RX_AGGREGATION_SUPPORT
ccflags-y += -DUSE_ONE_PIPE
-ccflags-y += -DENABLE_DOT11D
ccflags-y += -Idrivers/staging/rtl8192u/ieee80211
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
diff --git a/drivers/staging/rtl8192u/dot11d.h b/drivers/staging/rtl8192u/dot11d.h
index d99cc030ec7a..92e7a00f3ee1 100644
--- a/drivers/staging/rtl8192u/dot11d.h
+++ b/drivers/staging/rtl8192u/dot11d.h
@@ -1,7 +1,6 @@
#ifndef __INC_DOT11D_H
#define __INC_DOT11D_H
-#ifdef ENABLE_DOT11D
#include "ieee80211/ieee80211.h"
@@ -98,5 +97,4 @@ int ToLegalChannel(
struct ieee80211_device *dev,
u8 channel
);
-#endif /* ENABLE_DOT11D */
#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index 45704f85ef0b..0775c5599d69 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -20,7 +20,6 @@ ifeq ($(NIC_SELECT),RTL8192U)
endif
#ccflags-y := -DJOHN_NOCPY
#flags to enable or disble 80211D feature
-ccflags-y += -DENABLE_DOT11D
ieee80211-rsl-objs := ieee80211_rx.o \
ieee80211_softmac.o \
ieee80211_tx.o \
@@ -75,7 +74,6 @@ CFLAGS += -I$(KSRC)/include -I.
CFLAGS += -DMODVERSIONS -DEXPORT_SYMTAB -include $(KSRC)/include/linux/modversions.h
#Kernel 2.4.20
#CFLAGS += -D__NO_VERSION__ -DEXPORT_SYMTAB
-#CFLAGS += -DENABLE_DOT11D
SMP := $(shell $(CC) $(MODCFLAGS) -E -dM $(CONFIG_FILE) | \
grep CONFIG_SMP | awk '{print $$3}')
ifneq ($(SMP),1)
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index b91cbfcfa71f..ce63fc341c6e 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -1,4 +1,3 @@
-#ifdef ENABLE_DOT11D
//-----------------------------------------------------------------------------
// File:
// Dot11d.c
@@ -220,4 +219,3 @@ EXPORT_SYMBOL(DOT11D_ScanComplete);
EXPORT_SYMBOL(IsLegalChannel);
EXPORT_SYMBOL(ToLegalChannel);
-#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 15b7a4ba37b6..54f2b4c434ff 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -1,10 +1,8 @@
#ifndef __INC_DOT11D_H
#define __INC_DOT11D_H
-#ifdef ENABLE_DOT11D
#include "ieee80211.h"
-//#define ENABLE_DOT11D
//#define DOT11D_MAX_CHNL_NUM 83
@@ -98,5 +96,4 @@ int ToLegalChannel(
struct ieee80211_device * dev,
u8 channel
);
-#endif //ENABLE_DOT11D
#endif // #ifndef __INC_DOT11D_H
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index e1216b704959..c0b844d75c0d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1572,10 +1572,8 @@ struct ieee80211_network {
#ifdef THOMAS_TURBO
u8 Turbo_Enable;//enable turbo mode, added by thomas
#endif
-#ifdef ENABLE_DOT11D
u16 CountryIeLen;
u8 CountryIeBuf[MAX_IE_LEN];
-#endif
// HT Related, by amy, 2008.04.29
BSS_HT bssht;
// Add to handle broadcom AP management frame CCK rate.
@@ -1769,7 +1767,6 @@ typedef u32 RT_RF_CHANGE_SOURCE;
#define RF_CHANGE_BY_IPS BIT28
#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
-#ifdef ENABLE_DOT11D
typedef enum
{
COUNTRY_CODE_FCC = 0,
@@ -1784,7 +1781,6 @@ typedef enum
COUNTRY_CODE_MIC,
COUNTRY_CODE_GLOBAL_DOMAIN
}country_code_type_t;
-#endif
#define RT_MAX_LD_SLOT_NUM 10
typedef struct _RT_LINK_DETECT_T{
@@ -1970,12 +1966,8 @@ struct ieee80211_device {
/* map of allowed channels. 0 is dummy */
// FIXME: remeber to default to a basic channel plan depending of the PHY type
-#ifdef ENABLE_DOT11D
void* pDot11dInfo;
bool bGlobalDomain;
-#else
- int channel_map[MAX_CHANNEL_NUMBER+1];
-#endif
int rate; /* current rate */
int basic_rate;
//FIXME: pleace callback, see if redundant with softmac_features
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c8ca9d8ed5d2..1ea8da3655ec 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -44,9 +44,7 @@
#include <linux/ctype.h>
#include "ieee80211.h"
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
@@ -1599,7 +1597,6 @@ static const char *get_info_element_string(u16 id)
}
#endif
-#ifdef ENABLE_DOT11D
static inline void ieee80211_extract_country_ie(
struct ieee80211_device *ieee,
struct ieee80211_info_element *info_element,
@@ -1632,7 +1629,6 @@ static inline void ieee80211_extract_country_ie(
}
}
-#endif
int ieee80211_parse_info_param(struct ieee80211_device *ieee,
struct ieee80211_info_element *info_element,
@@ -2086,14 +2082,12 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
"QoS Error need to parse QOS_PARAMETER IE\n");
break;
-#ifdef ENABLE_DOT11D
case MFIE_TYPE_COUNTRY:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
info_element->len);
//printk("=====>Receive <%s> Country IE\n",network->ssid);
ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP
break;
-#endif
/* TODO */
default:
IEEE80211_DEBUG_MGMT
@@ -2229,10 +2223,8 @@ static inline int ieee80211_network_init(
#ifdef THOMAS_TURBO
network->Turbo_Enable = 0;
#endif
-#ifdef ENABLE_DOT11D
network->CountryIeLen = 0;
memset(network->CountryIeBuf, 0, MAX_IE_LEN);
-#endif
//Initialize HT parameters
//ieee80211_ht_initialize(&network->bssht);
HTInitializeBssDesc(&network->bssht);
@@ -2399,10 +2391,8 @@ static inline void update_network(struct ieee80211_network *dst,
dst->Turbo_Enable = src->Turbo_Enable;
#endif
-#ifdef ENABLE_DOT11D
dst->CountryIeLen = src->CountryIeLen;
memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
-#endif
//added by amy for LEAP
dst->bWithAironetIE = src->bWithAironetIE;
@@ -2470,7 +2460,6 @@ static inline void ieee80211_process_probe_response(
return;
}
-#ifdef ENABLE_DOT11D
// For Asus EeePc request,
// (1) if wireless adapter receive get any 802.11d country code in AP beacon,
// wireless adapter should follow the country code.
@@ -2527,7 +2516,6 @@ static inline void ieee80211_process_probe_response(
}
}
}
-#endif
/* The network parsed correctly -- so now we scan our known networks
* to see if we can find it in our list.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index bc8c42533693..20f8c347cae4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -21,9 +21,7 @@
#include <linux/slab.h>
#include <linux/version.h>
#include <asm/uaccess.h>
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
u8 rsn_authen_cipher_suite[16][4] = {
{0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
@@ -430,10 +428,8 @@ void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
{
short ch = 0;
-#ifdef ENABLE_DOT11D
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
down(&ieee->scan_sem);
while(1)
@@ -443,11 +439,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
ch++;
if (ch > MAX_CHANNEL_NUMBER)
goto out; /* scan completed */
-#ifdef ENABLE_DOT11D
}while(!channel_map[ch]);
-#else
- }while(!ieee->channel_map[ch]);
-#endif
/* this function can be called in two situations
* 1- We have switched to ad-hoc mode and we are
@@ -471,9 +463,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
if (ieee->state == IEEE80211_LINKED)
goto out;
ieee->set_chan(ieee->dev, ch);
-#ifdef ENABLE_DOT11D
if(channel_map[ch] == 1)
-#endif
ieee80211_send_probe_requests(ieee);
/* this prevent excessive time wait when we
@@ -496,10 +486,8 @@ out:
}
else{
ieee->sync_scan_hurryup = 0;
-#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
-#endif
up(&ieee->scan_sem);
}
}
@@ -510,10 +498,8 @@ void ieee80211_softmac_scan_wq(struct work_struct *work)
struct delayed_work *dwork = container_of(work, struct delayed_work, work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
static short watchdog = 0;
-#ifdef ENABLE_DOT11D
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
if(!ieee->ieee_up)
return;
down(&ieee->scan_sem);
@@ -523,25 +509,16 @@ void ieee80211_softmac_scan_wq(struct work_struct *work)
if (watchdog++ > MAX_CHANNEL_NUMBER)
{
//if current channel is not in channel map, set to default channel.
- #ifdef ENABLE_DOT11D
- if (!channel_map[ieee->current_network.channel]);
- #else
- if (!ieee->channel_map[ieee->current_network.channel]);
- #endif
+ if (!channel_map[ieee->current_network.channel]) {
ieee->current_network.channel = 6;
goto out; /* no good chans */
+ }
}
-#ifdef ENABLE_DOT11D
}while(!channel_map[ieee->current_network.channel]);
-#else
- }while(!ieee->channel_map[ieee->current_network.channel]);
-#endif
if (ieee->scanning == 0 )
goto out;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
-#ifdef ENABLE_DOT11D
if(channel_map[ieee->current_network.channel] == 1)
-#endif
ieee80211_send_probe_requests(ieee);
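Editorial aside: the brace change in the hunk above removes a classic defect, where a stray ';' right after the if condition terminates the statement, so the block that follows runs unconditionally. A standalone illustration (not driver code):

#include <stdio.h>

int main(void)
{
	int channel_ok = 1;

	/* Buggy form: the ';' ends the if, so the block always runs. */
	if (!channel_ok);
	{
		printf("fallback taken (always!)\n");
	}

	/* Fixed form: the braces belong to the if. */
	if (!channel_ok) {
		printf("fallback taken (only when needed)\n");
	}
	return 0;
}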
@@ -550,10 +527,8 @@ void ieee80211_softmac_scan_wq(struct work_struct *work)
up(&ieee->scan_sem);
return;
out:
-#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
-#endif
ieee->actscanning = false;
watchdog = 0;
ieee->scanning = 0;
@@ -635,7 +610,6 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
/* called with ieee->lock held */
void ieee80211_start_scan(struct ieee80211_device *ieee)
{
-#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee) )
{
if(IS_COUNTRY_IE_VALID(ieee))
@@ -643,7 +617,6 @@ void ieee80211_start_scan(struct ieee80211_device *ieee)
RESET_CIE_WATCHDOG(ieee);
}
}
-#endif
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
if (ieee->scanning == 0){
ieee->scanning = 1;
@@ -657,7 +630,6 @@ void ieee80211_start_scan(struct ieee80211_device *ieee)
/* called with wx_sem held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
-#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee) )
{
if(IS_COUNTRY_IE_VALID(ieee))
@@ -665,7 +637,6 @@ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
RESET_CIE_WATCHDOG(ieee);
}
}
-#endif
ieee->sync_scan_hurryup = 0;
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
ieee80211_softmac_scan_syncro(ieee);
@@ -2390,11 +2361,9 @@ void ieee80211_start_ibss_wq(struct work_struct *work)
ieee80211_softmac_check_all_nets(ieee);
-#ifdef ENABLE_DOT11D //if creating an ad-hoc, set its channel to 10 temporarily--this is the requirement for ASUS, not 11D, so disable 11d.
// if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
if (ieee->state == IEEE80211_NOLINK)
ieee->current_network.channel = 6;
-#endif
/* if not then the state is not linked. Maybe the user swithced to
* ad-hoc mode just after being in monitor mode, or just after
* being very few time in managed mode (so the card have had no
@@ -2483,7 +2452,6 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
void ieee80211_start_bss(struct ieee80211_device *ieee)
{
unsigned long flags;
-#ifdef ENABLE_DOT11D
//
// Ref: 802.11d 11.1.3.3
// STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
@@ -2495,7 +2463,6 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
return;
}
}
-#endif
/* check if we have already found the net we
* are interested in (if any).
* if not (we are disassociated and we are not
@@ -2530,10 +2497,8 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
-#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
-#endif
ieee->state = IEEE80211_NOLINK;
ieee->is_set_key = false;
ieee->link_change(ieee->dev);
@@ -2669,11 +2634,7 @@ void ieee80211_start_protocol(struct ieee80211_device *ieee)
ch++;
if (ch > MAX_CHANNEL_NUMBER)
return; /* no channel found */
-#ifdef ENABLE_DOT11D
}while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
-#else
- }while(!ieee->channel_map[ch]);
-#endif
ieee->current_network.channel = ch;
}
@@ -2721,11 +2682,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
for(i = 0; i < 5; i++) {
ieee->seq_ctrl[i] = 0;
}
-#ifdef ENABLE_DOT11D
ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
-#endif
//added for AP roaming
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
@@ -2796,13 +2755,11 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
void ieee80211_softmac_free(struct ieee80211_device *ieee)
{
down(&ieee->wx_sem);
-#ifdef ENABLE_DOT11D
if(NULL != ieee->pDot11dInfo)
{
kfree(ieee->pDot11dInfo);
ieee->pDot11dInfo = NULL;
}
-#endif
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index f335c258ba84..cb5a3c32974e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -15,9 +15,7 @@
#include "ieee80211.h"
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
/* FIXME: add A freqs */
const long ieee80211_wlan_frequencies[] = {
@@ -63,12 +61,10 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
}else { /* Set the channel */
-#ifdef ENABLE_DOT11D
if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) {
ret = -EINVAL;
goto out;
}
-#endif
ieee->current_network.channel = fwrq->m;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 81aa2ed226ac..ec7845ecdb7e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -754,7 +754,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
txb->queue_index = UP2AC(skb->priority);
} else {
- txb->queue_index = WME_AC_BK;;
+ txb->queue_index = WME_AC_BK;
}
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 494f180acc26..ae4f2b9d9e8f 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -77,9 +77,7 @@ double __extendsfdf2(float a) {return a;}
#include "r8192_pm.h"
#endif
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
//set here to open your trace code. //WB
u32 rt_global_debug_component = \
// COMP_INIT |
@@ -166,7 +164,6 @@ static struct usb_driver rtl8192_usb_driver = {
#endif
};
-#ifdef ENABLE_DOT11D
typedef struct _CHANNEL_LIST
{
@@ -242,9 +239,7 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
}
return;
}
-#endif
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
#define rx_hal_is_cck_rate(_pdrvinfo)\
(_pdrvinfo->RxRate == DESC90_RATE1M ||\
@@ -1507,7 +1502,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
{
//
// Handle HW Beacon:
- // We had transfer our beacon frame to host controler at this moment.
+ // We have transferred our beacon frame to the host controller at this moment.
//
//
// Caution:
@@ -2203,6 +2198,8 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB+1),
GFP_KERNEL);
+ if (priv->rx_urb == NULL)
+ return -ENOMEM;
#ifndef JACKSON_NEW_RX
for(i=0;i<(MAX_RX_URB+1);i++){
@@ -3155,7 +3152,6 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
short rtl8192_get_channel_map(struct net_device * dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef ENABLE_DOT11D
if(priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN){
printk("rtl8180_init:Error channel plan! Set to default.\n");
priv->ChannelPlan= 0;
@@ -3163,21 +3159,6 @@ short rtl8192_get_channel_map(struct net_device * dev)
RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
rtl819x_set_channel_map(priv->ChannelPlan, priv);
-#else
- int ch,i;
- //Set Default Channel Plan
- if(!channels){
- DMESG("No channels, aborting");
- return -1;
- }
- ch=channels;
- priv->ChannelPlan= 0;//hikaru
- // set channels 1..14 allowed in given locale
- for (i=1; i<=14; i++) {
- (priv->ieee80211->channel_map)[i] = (u8)(ch & 0x01);
- ch >>= 1;
- }
-#endif
return 0;
}
@@ -5085,7 +5066,7 @@ static void rtl8192_query_rxphystatus(
//Get Rx snr value in DB
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
rx_snrX = (char)(tmp_rxsnr);
- //rx_snrX >>= 1;;
+ //rx_snrX >>= 1;
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 25d5c870b0fb..f6408f98ede6 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -21,9 +21,7 @@
#include "r8192U.h"
#include "r8192U_hw.h"
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
#define RATE_COUNT 12
u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
@@ -458,11 +456,7 @@ static int rtl8180_wx_get_range(struct net_device *dev,
for (i = 0, val = 0; i < 14; i++) {
// Include only legal frequencies for some countries
-#ifdef ENABLE_DOT11D
if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
-#else
- if ((priv->ieee80211->channel_map)[i+1]) {
-#endif
range->freq[val].i = i + 1;
range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
range->freq[val].e = 1;
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index a3adaedece9a..41684e8fcf4c 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -6,9 +6,7 @@
#include "r8192U_dm.h"
#include "r819xU_firmware_img.h"
-#ifdef ENABLE_DOT11D
#include "dot11d.h"
-#endif
static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
0,
0x085c, //2412 1
@@ -1011,7 +1009,7 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
break;
}
- return ret;;
+ return ret;
}
/******************************************************************************
@@ -1257,13 +1255,11 @@ u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8* stage, u
RT_TRACE(COMP_CH, "====>%s()====stage:%d, step:%d, channel:%d\n", __FUNCTION__, *stage, *step, channel);
// RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel));
-#ifdef ENABLE_DOT11D
if (!IsLegalChannel(priv->ieee80211, channel))
{
RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel);
return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop.
}
-#endif
//FIXME:need to check whether channel is legal or not here.WB
diff --git a/drivers/staging/rtl8712/TODO b/drivers/staging/rtl8712/TODO
index 5c888214666e..2aa5deb3af7b 100644
--- a/drivers/staging/rtl8712/TODO
+++ b/drivers/staging/rtl8712/TODO
@@ -3,7 +3,6 @@ TODO:
- switch to use LIB80211
- switch to use MAC80211
- checkpatch.pl fixes - only a few remain
-- sparse fixes
- switch from large inline firmware file to use the firmware interface
and add the file to the linux-firmware package.
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 32088a641eba..84be383abec3 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -128,12 +128,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
u8 *ptmpchar = NULL, *ppayload, *ptr;
struct tx_desc *ptx_desc;
u32 txdscp_sz = sizeof(struct tx_desc);
+ u8 ret = _FAIL;
ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
if (pmappedfw && (ulfilelength > 0)) {
update_fwhdr(&fwhdr, pmappedfw);
if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
- goto exit_fail;
+ goto firmware_rel;
fill_fwpriv(padapter, &fwhdr.fwpriv);
/* firmware check ok */
maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -141,7 +142,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
maxlen += txdscp_sz;
ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
if (ptmpchar == NULL)
- return _FAIL;
+ goto firmware_rel;
ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -273,11 +274,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
goto exit_fail;
} else
goto exit_fail;
- return _SUCCESS;
+ ret = _SUCCESS;
exit_fail:
kfree(ptmpchar);
- return _FAIL;
+firmware_rel:
+ release_firmware((struct firmware *)phfwfile_hdl);
+ return ret;
}
uint rtl8712_hal_init(struct _adapter *padapter)
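Editorial aside: the hal_init.c change above restructures the error paths so the firmware handle acquired earlier is released on every exit, not just one of them. A userspace sketch of that single-exit, ordered-label layout (all names below are stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int header_ok = 1;		/* stand-in for the header check result */
static int send_ok = 1;			/* stand-in for the download step */

static void release_fw(void)
{
	printf("firmware handle released\n");
}

static int demo_download(void)
{
	int ret = -1;
	char *buf;

	if (!header_ok)
		goto firmware_rel;	/* nothing else to free yet */

	buf = malloc(1024);
	if (!buf)
		goto firmware_rel;

	if (!send_ok)
		goto exit_fail;

	ret = 0;			/* success falls through the cleanup */
exit_fail:
	free(buf);
firmware_rel:
	release_fw();			/* released on every path */
	return ret;
}

int main(void)
{
	printf("download returned %d\n", demo_download());
	return 0;
}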
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 831d81e0e429..36eeb5a1b5a5 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -195,10 +195,7 @@ static inline void sleep_schedulable(int ms)
static inline u8 *_malloc(u32 sz)
{
- u8 *pbuf;
-
- pbuf = kmalloc(sz, GFP_ATOMIC);
- return pbuf;
+ return kmalloc(sz, GFP_ATOMIC);
}
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
@@ -220,34 +217,22 @@ static inline void flush_signals_thread(void)
static inline u32 _RND8(u32 sz)
{
- u32 val;
-
- val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
- return val;
+ return ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
}
static inline u32 _RND128(u32 sz)
{
- u32 val;
-
- val = ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
- return val;
+ return ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
}
static inline u32 _RND256(u32 sz)
{
- u32 val;
-
- val = ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
- return val;
+ return ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
}
static inline u32 _RND512(u32 sz)
{
- u32 val;
-
- val = ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
- return val;
+ return ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
}
#define STRUCT_PACKED __attribute__ ((packed))
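Editorial aside: the _RNDn() helpers simplified above round a size up to the next multiple of a power of two. The kernel usually writes this with a single mask expression (the ALIGN() macro is essentially the same form); a quick userspace comparison of the two:

#include <stdio.h>

/* Same logic as the driver's _RND8(). */
static unsigned int rnd8(unsigned int sz)
{
	return ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
}

/* Generic power-of-two round-up via masking. */
static unsigned int round_up_pow2(unsigned int sz, unsigned int align)
{
	return (sz + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned int sz;

	for (sz = 0; sz < 20; sz++)
		printf("%2u -> %2u %2u\n", sz, rnd8(sz), round_up_pow2(sz, 8));
	return 0;
}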
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 9730ae1c58d5..1dc12b7a2f52 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -428,7 +428,7 @@ u8 r8712_efuse_access(struct _adapter *padapter, u8 bRead, u16 start_addr,
u16 cnts, u8 *data)
{
int i;
- u8 res = true;;
+ u8 res = true;
if (start_addr > EFUSE_MAX_SIZE)
return false;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 8edc518536f9..88a15049bc2a 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -148,7 +148,7 @@ static u32 get_ff_hwaddr(struct xmit_frame *pxmitframe)
case 0x11:
case 0x12:
case 0x13:
- addr = RTL8712_DMA_H2CCMD;;
+ addr = RTL8712_DMA_H2CCMD;
break;
default:
addr = RTL8712_DMA_BEQ;/*RTL8712_EP_LO;*/
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index b8195e3a72d4..75f1a6bba2f6 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -372,7 +372,7 @@ static sint xmitframe_addmic(struct _adapter *padapter,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0};
datalen = pattrib->pktlen - pattrib->hdrlen;
- pframe = pxmitframe->buf_addr + TXDESC_OFFSET;;
+ pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
if (!memcmp(psecuritypriv->XGrptxmickey
[psecuritypriv->XGrpKeyid].skey,
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index f1f0c63e5bbc..21ce2af447b5 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -47,54 +47,123 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
static void r871xu_dev_remove(struct usb_interface *pusb_intf);
static struct usb_device_id rtl871x_usb_id_tbl[] = {
- /*92SU
- * Realtek */
- {USB_DEVICE(0x0bda, 0x8171)},
- {USB_DEVICE(0x0bda, 0x8172)},
+
+/* RTL8188SU */
+ /* Realtek */
+ {USB_DEVICE(0x0BDA, 0x8171)},
{USB_DEVICE(0x0bda, 0x8173)},
- {USB_DEVICE(0x0bda, 0x8174)},
{USB_DEVICE(0x0bda, 0x8712)},
{USB_DEVICE(0x0bda, 0x8713)},
{USB_DEVICE(0x0bda, 0xC512)},
- /* Abocom */
+ /* Abocom */
{USB_DEVICE(0x07B8, 0x8188)},
+ /* ASUS */
+ {USB_DEVICE(0x0B05, 0x1786)},
+ {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
+ /* Belkin */
+ {USB_DEVICE(0x050D, 0x945A)},
/* Corega */
- {USB_DEVICE(0x07aa, 0x0047)},
- /* Dlink */
- {USB_DEVICE(0x07d1, 0x3303)},
- {USB_DEVICE(0x07d1, 0x3302)},
- {USB_DEVICE(0x07d1, 0x3300)},
- /* Dlink for Skyworth */
- {USB_DEVICE(0x14b2, 0x3300)},
- {USB_DEVICE(0x14b2, 0x3301)},
- {USB_DEVICE(0x14b2, 0x3302)},
+ {USB_DEVICE(0x07AA, 0x0047)},
+ /* D-Link */
+ {USB_DEVICE(0x2001, 0x3306)},
+ {USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
+ /* Edimax */
+ {USB_DEVICE(0x7392, 0x7611)},
/* EnGenius */
{USB_DEVICE(0x1740, 0x9603)},
- {USB_DEVICE(0x1740, 0x9605)},
+ /* Hawking */
+ {USB_DEVICE(0x0E66, 0x0016)},
+ /* Hercules */
+ {USB_DEVICE(0x06F8, 0xE034)},
+ {USB_DEVICE(0x06F8, 0xE032)},
+ /* Logitec */
+ {USB_DEVICE(0x0789, 0x0167)},
+ /* PCI */
+ {USB_DEVICE(0x2019, 0xAB28)},
+ {USB_DEVICE(0x2019, 0xED16)},
+ /* Sitecom */
+ {USB_DEVICE(0x0DF6, 0x0057)},
+ {USB_DEVICE(0x0DF6, 0x0045)},
+ {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
+ {USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x0063)},
+ /* Sweex */
+ {USB_DEVICE(0x177F, 0x0154)},
+ /* Thinkware */
+ {USB_DEVICE(0x0BDA, 0x5077)},
+ /* Toshiba */
+ {USB_DEVICE(0x1690, 0x0752)},
+ /* - */
+ {USB_DEVICE(0x20F4, 0x646B)},
+ {USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8191SU */
+ /* Realtek */
+ {USB_DEVICE(0x0BDA, 0x8172)},
+ /* Amigo */
+ {USB_DEVICE(0x0EB0, 0x9061)},
+ /* ASUS/EKB */
+ {USB_DEVICE(0x0BDA, 0x8172)},
+ {USB_DEVICE(0x13D3, 0x3323)},
+ {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
+ {USB_DEVICE(0x13D3, 0x3342)},
+ /* ASUS/EKBLenovo */
+ {USB_DEVICE(0x13D3, 0x3333)},
+ {USB_DEVICE(0x13D3, 0x3334)},
+ {USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
+ {USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
+ /* ASUS/Media BOX */
+ {USB_DEVICE(0x13D3, 0x3309)},
/* Belkin */
- {USB_DEVICE(0x050d, 0x815F)},
- {USB_DEVICE(0x050d, 0x945A)},
- {USB_DEVICE(0x050d, 0x845A)},
- /* Guillemot */
- {USB_DEVICE(0x06f8, 0xe031)},
+ {USB_DEVICE(0x050D, 0x815F)},
+ /* D-Link */
+ {USB_DEVICE(0x07D1, 0x3302)},
+ {USB_DEVICE(0x07D1, 0x3300)},
+ {USB_DEVICE(0x07D1, 0x3303)},
/* Edimax */
- {USB_DEVICE(0x7392, 0x7611)},
{USB_DEVICE(0x7392, 0x7612)},
- {USB_DEVICE(0x7392, 0x7622)},
- /* Sitecom */
- {USB_DEVICE(0x0DF6, 0x0045)},
+ /* EnGenius */
+ {USB_DEVICE(0x1740, 0x9605)},
+ /* Guillemot */
+ {USB_DEVICE(0x06F8, 0xE031)},
/* Hawking */
{USB_DEVICE(0x0E66, 0x0015)},
- {USB_DEVICE(0x0E66, 0x0016)},
- {USB_DEVICE(0x0b05, 0x1786)},
- {USB_DEVICE(0x0b05, 0x1791)}, /* 11n mode disable */
-
+ /* Mediao */
{USB_DEVICE(0x13D3, 0x3306)},
- {USB_DEVICE(0x13D3, 0x3309)},
+ /* PCI */
+ {USB_DEVICE(0x2019, 0xED18)},
+ {USB_DEVICE(0x2019, 0x4901)},
+ /* Sitecom */
+ {USB_DEVICE(0x0DF6, 0x0058)},
+ {USB_DEVICE(0x0DF6, 0x0049)},
+ {USB_DEVICE(0x0DF6, 0x004C)},
+ {USB_DEVICE(0x0DF6, 0x0064)},
+ /* Skyworth */
+ {USB_DEVICE(0x14b2, 0x3300)},
+ {USB_DEVICE(0x14b2, 0x3301)},
+ {USB_DEVICE(0x14B2, 0x3302)},
+ /* - */
+ {USB_DEVICE(0x04F2, 0xAFF2)},
+ {USB_DEVICE(0x04F2, 0xAFF5)},
+ {USB_DEVICE(0x04F2, 0xAFF6)},
+ {USB_DEVICE(0x13D3, 0x3339)},
+ {USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
+ {USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
{USB_DEVICE(0x13D3, 0x3310)},
- {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
{USB_DEVICE(0x13D3, 0x3325)},
- {USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8192SU */
+ /* Realtek */
+ {USB_DEVICE(0x0BDA, 0x8174)},
+ {USB_DEVICE(0x0BDA, 0x8174)},
+ /* Belkin */
+ {USB_DEVICE(0x050D, 0x845A)},
+ /* Corega */
+ {USB_DEVICE(0x07AA, 0x0051)},
+ /* Edimax */
+ {USB_DEVICE(0x7392, 0x7622)},
+ /* NEC */
+ {USB_DEVICE(0x0409, 0x02B6)},
{}
};
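Editorial aside on the table being reshuffled above: USB_DEVICE() fills in the vendor/product IDs plus the match flags, and MODULE_DEVICE_TABLE() exports the list so the module can be autoloaded when a listed device is plugged in. A minimal sketch with an illustrative table name (the single ID shown is taken from the table above):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_usb_ids[] = {
	{USB_DEVICE(0x0bda, 0x8171)},	/* one entry per supported device */
	{}				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, demo_usb_ids);
MODULE_LICENSE("GPL");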
@@ -103,8 +172,20 @@ MODULE_DEVICE_TABLE(usb, rtl871x_usb_id_tbl);
static struct specific_device_id specific_device_id_tbl[] = {
{.idVendor = 0x0b05, .idProduct = 0x1791,
.flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x0df6, .idProduct = 0x0059,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x13d3, .idProduct = 0x3306,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
{.idVendor = 0x13D3, .idProduct = 0x3311,
.flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x13d3, .idProduct = 0x3335,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x13d3, .idProduct = 0x3336,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x13d3, .idProduct = 0x3340,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
+ {.idVendor = 0x13d3, .idProduct = 0x3341,
+ .flags = SPEC_DEV_ID_DISABLE_HT},
{}
};
@@ -527,7 +608,7 @@ error:
static void r871xu_dev_remove(struct usb_interface *pusb_intf)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
- struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
+ struct _adapter *padapter = netdev_priv(pnetdev);
struct usb_device *udev = interface_to_usbdev(pusb_intf);
if (padapter) {
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index ac2bf11e1119..701e8d52a9fa 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -269,7 +269,7 @@ static int update_status(struct backlight_device *bd)
return 0;
}
-static struct backlight_ops backlight_ops = {
+static const struct backlight_ops backlight_ops = {
.get_brightness = get_brightness,
.update_status = update_status,
};
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
new file mode 100644
index 000000000000..92bf16667d04
--- /dev/null
+++ b/drivers/staging/sep/Kconfig
@@ -0,0 +1,10 @@
+config DX_SEP
+ tristate "Discretix SEP driver"
+ depends on PCI
+ help
+ Discretix SEP driver; used for the security processor subsystem
+ on board the Intel Mobile Internet Device.
+
+ The driver's name is sep_driver.
+
+ If unsure, select N.
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
new file mode 100644
index 000000000000..628d5f919414
--- /dev/null
+++ b/drivers/staging/sep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DX_SEP) := sep_driver.o
+
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
new file mode 100644
index 000000000000..089c2406345e
--- /dev/null
+++ b/drivers/staging/sep/TODO
@@ -0,0 +1,5 @@
+Todo's so far (from Alan Cox)
+- Check whether it can be plugged into any of the kernel crypto API
+ interfaces - Crypto API 'glue' is still not ready to submit
+- Clean up unused ioctls - Needs vendor help
+- Clean up unused fields in ioctl structures - Needs vendor help
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
new file mode 100644
index 000000000000..0ffe68cb7140
--- /dev/null
+++ b/drivers/staging/sep/sep_dev.h
@@ -0,0 +1,156 @@
+#ifndef __SEP_DEV_H__
+#define __SEP_DEV_H__
+
+/*
+ *
+ * sep_dev.h - Security Processor Device Structures
+ *
+ * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES
+ * 2010.09.14 upgrade to Medfield
+ */
+
+struct sep_device {
+ /* pointer to pci dev */
+ struct pci_dev *pdev;
+
+ /* character device file */
+ struct cdev sep_cdev;
+ struct cdev sep_daemon_cdev;
+ struct cdev sep_singleton_cdev;
+
+ /* devices (using misc dev) */
+ struct miscdevice miscdev_sep;
+ struct miscdevice miscdev_singleton;
+ struct miscdevice miscdev_daemon;
+
+ /* major / minor numbers of device */
+ dev_t sep_devno;
+ dev_t sep_daemon_devno;
+ dev_t sep_singleton_devno;
+
+ struct mutex sep_mutex;
+ struct mutex ioctl_mutex;
+ spinlock_t snd_rply_lck;
+
+ /* flags to indicate use and lock status of sep */
+ u32 pid_doing_transaction;
+ unsigned long in_use_flags;
+
+ /* request daemon already open */
+ unsigned long request_daemon_open;
+
+ /* 1 = Moorestown; 0 = Medfield */
+ int mrst;
+
+ /* address of the shared memory allocated during init for SEP driver
+ (coherent alloc) */
+ dma_addr_t shared_bus;
+ size_t shared_size;
+ void *shared_addr;
+
+ /* restricted access region (coherent alloc) */
+ dma_addr_t rar_bus;
+ size_t rar_size;
+ void *rar_addr;
+
+ /* Firmware regions; cache is at rar for Moorestown and
+ resident is at rar for Medfield */
+ dma_addr_t cache_bus;
+ size_t cache_size;
+ void *cache_addr;
+
+ dma_addr_t resident_bus;
+ size_t resident_size;
+ void *resident_addr;
+
+ /* sep's scratchpad */
+ dma_addr_t dcache_bus;
+ size_t dcache_size;
+ void *dcache_addr;
+
+ /* Only used on Medfield */
+ dma_addr_t extapp_bus;
+ size_t extapp_size;
+ void *extapp_addr;
+
+ /* start address of the access to the SEP registers from driver */
+ dma_addr_t reg_physical_addr;
+ dma_addr_t reg_physical_end;
+ void __iomem *reg_addr;
+
+ /* wait queue head (event) of the driver */
+ wait_queue_head_t event;
+ wait_queue_head_t event_request_daemon;
+ wait_queue_head_t event_mmap;
+
+ struct sep_caller_id_entry
+ caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+
+ /* access flag for singleton device */
+ unsigned long singleton_access_flag;
+
+ /* transaction counter that coordinates the
+ transactions between SEP and HOST */
+ unsigned long send_ct;
+ /* counter for the messages from sep */
+ unsigned long reply_ct;
+ /* counter for the number of bytes allocated in the pool for the
+ current transaction */
+ long data_pool_bytes_allocated;
+
+ u32 num_of_data_allocations;
+
+ /* number of the lli tables created in the current transaction */
+ u32 num_lli_tables_created;
+
+ /* number of data control blocks */
+ u32 nr_dcb_creat;
+
+ struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+
+};
+
+static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
+{
+ void __iomem *addr = dev->reg_addr + reg;
+ writel(value, addr);
+}
+
+static inline u32 sep_read_reg(struct sep_device *dev, int reg)
+{
+ void __iomem *addr = dev->reg_addr + reg;
+ return readl(addr);
+}
+
+/* wait for SRAM write complete (indirect write) */
+static inline void sep_wait_sram_write(struct sep_device *dev)
+{
+ u32 reg_val;
+ do {
+ reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
+ } while (!(reg_val & 1));
+}
+
+
+#endif
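Editorial aside: the register helpers added above wrap readl()/writel() on the ioremapped register window. A hypothetical usage sketch, assuming the definitions in sep_dev.h are in scope; the register offset used here is made up for illustration and is not a real SEP register.

#define DEMO_MBOX_REG	0x2100		/* illustrative offset only */

static void demo_kick_sep(struct sep_device *dev)
{
	sep_write_reg(dev, DEMO_MBOX_REG, 0x1);	/* writel() under the hood */
	sep_wait_sram_write(dev);		/* polls bit 0 via readl() */
}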
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
new file mode 100644
index 000000000000..ac5d56943d4b
--- /dev/null
+++ b/drivers/staging/sep/sep_driver.c
@@ -0,0 +1,3577 @@
+/*
+ *
+ * sep_driver.c - Security Processor Driver main group of functions
+ *
+ * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2009.06.26 Initial publish
+ * 2010.09.14 Upgrade to Medfield
+ *
+ */
+#define DEBUG
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <asm/current.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/rar_register.h>
+
+#include "../memrar/memrar.h"
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+
+/*----------------------------------------
+ DEFINES
+-----------------------------------------*/
+
+#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
+
+/*--------------------------------------------
+ GLOBAL variables
+--------------------------------------------*/
+
+/* Keep this a single static object for now to keep the conversion easy */
+
+static struct sep_device *sep_dev;
+
+/**
+ * sep_load_firmware - copy firmware cache/resident
+ * @sep: pointer to struct sep_device we are loading
+ *
+ * This function copies the resident, cache and extapp firmware images
+ * from their source location into the destination shared memory.
+ */
+static int sep_load_firmware(struct sep_device *sep)
+{
+ const struct firmware *fw;
+ char *cache_name = "cache.image.bin";
+ char *res_name = "resident.image.bin";
+ char *extapp_name = "extapp.image.bin";
+	int error;
+ unsigned long work1, work2, work3;
+
+ /* Set addresses and load resident */
+ sep->resident_bus = sep->rar_bus;
+ sep->resident_addr = sep->rar_addr;
+
+ error = request_firmware(&fw, res_name, &sep->pdev->dev);
+ if (error) {
+ dev_warn(&sep->pdev->dev, "can't request resident fw\n");
+ return error;
+ }
+
+ memcpy(sep->resident_addr, (void *)fw->data, fw->size);
+ sep->resident_size = fw->size;
+ release_firmware(fw);
+
+ dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
+ sep->resident_addr);
+ dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
+ (unsigned long)sep->resident_bus);
+ dev_dbg(&sep->pdev->dev, "resident size is %08zx\n",
+ sep->resident_size);
+
+ /* Set addresses for dcache (no loading needed) */
+ work1 = (unsigned long)sep->resident_bus;
+ work2 = (unsigned long)sep->resident_size;
+ work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
+ sep->dcache_bus = (dma_addr_t)work3;
+
+ work1 = (unsigned long)sep->resident_addr;
+ work2 = (unsigned long)sep->resident_size;
+ work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
+ sep->dcache_addr = (void *)work3;
+
+ sep->dcache_size = 1024 * 128;
+
+ /* Set addresses and load cache */
+ sep->cache_bus = sep->dcache_bus + sep->dcache_size;
+ sep->cache_addr = sep->dcache_addr + sep->dcache_size;
+
+ error = request_firmware(&fw, cache_name, &sep->pdev->dev);
+ if (error) {
+ dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
+ return error;
+ }
+
+ memcpy(sep->cache_addr, (void *)fw->data, fw->size);
+ sep->cache_size = fw->size;
+ release_firmware(fw);
+
+ dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
+ sep->cache_addr);
+ dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
+ (unsigned long)sep->cache_bus);
+ dev_dbg(&sep->pdev->dev, "cache size is %08zx\n",
+ sep->cache_size);
+
+ /* Set addresses and load extapp */
+ sep->extapp_bus = sep->cache_bus + (1024 * 370);
+ sep->extapp_addr = sep->cache_addr + (1024 * 370);
+
+ error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
+ if (error) {
+ dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
+ return error;
+ }
+
+ memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
+ sep->extapp_size = fw->size;
+ release_firmware(fw);
+
+ dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
+ sep->extapp_addr);
+ dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
+ (unsigned long long)sep->extapp_bus);
+ dev_dbg(&sep->pdev->dev, "extapp size is %08zx\n",
+ sep->extapp_size);
+
+ return error;
+}
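+
+/*
+ * Illustrative sketch only (not part of this driver): the three
+ * request_firmware()/memcpy()/release_firmware() sequences above could be
+ * factored into one helper; the helper name below is hypothetical.
+ *
+ *	static int sep_copy_image(struct sep_device *sep, const char *name,
+ *				  void *dest, size_t *size_out)
+ *	{
+ *		const struct firmware *fw;
+ *		int error;
+ *
+ *		error = request_firmware(&fw, name, &sep->pdev->dev);
+ *		if (error) {
+ *			dev_warn(&sep->pdev->dev, "can't request %s\n", name);
+ *			return error;
+ *		}
+ *		memcpy(dest, fw->data, fw->size);
+ *		*size_out = fw->size;
+ *		release_firmware(fw);
+ *		return 0;
+ *	}
+ */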
+
+MODULE_FIRMWARE("sep/cache.image.bin");
+MODULE_FIRMWARE("sep/resident.image.bin");
+MODULE_FIRMWARE("sep/extapp.image.bin");
+
+/**
+ * sep_dump_message - dump the message that is pending
+ * @sep: SEP device
+ */
+static void sep_dump_message(struct sep_device *sep)
+{
+ int count;
+ u32 *p = sep->shared_addr;
+ for (count = 0; count < 12 * 4; count += 4)
+ dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
+ count, *p++);
+}
+
+/**
+ * sep_map_and_alloc_shared_area - allocate shared block
+ * @sep: security processor
+ *
+ * Allocates sep->shared_size bytes of coherent memory for the shared area.
+ */
+static int sep_map_and_alloc_shared_area(struct sep_device *sep)
+{
+ sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
+ sep->shared_size,
+ &sep->shared_bus, GFP_KERNEL);
+
+ if (!sep->shared_addr) {
+ dev_warn(&sep->pdev->dev,
+ "shared memory dma_alloc_coherent failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(&sep->pdev->dev,
+ "shared_addr %zx bytes @%p (bus %llx)\n",
+ sep->shared_size, sep->shared_addr,
+ (unsigned long long)sep->shared_bus);
+ return 0;
+}
+
+/**
+ * sep_unmap_and_free_shared_area - free shared block
+ * @sep: security processor
+ */
+static void sep_unmap_and_free_shared_area(struct sep_device *sep)
+{
+ dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
+ dma_free_coherent(&sep->pdev->dev, sep->shared_size,
+ sep->shared_addr, sep->shared_bus);
+}
+
+/**
+ * sep_shared_bus_to_virt - convert bus/virt addresses
+ * @sep: pointer to struct sep_device
+ * @bus_address: address to convert
+ *
+ * Returns virtual address inside the shared area according
+ * to the bus address.
+ */
+static void *sep_shared_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ return sep->shared_addr + (bus_address - sep->shared_bus);
+}
+
+/**
+ * sep_singleton_open - open the singleton device
+ * @inode_ptr: struct inode *
+ * @file_ptr: struct file *
+ *
+ * Called when the user opens the singleton device interface
+ */
+static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
+{
+ int error = 0;
+ struct sep_device *sep;
+
+ /*
+ * Get the SEP device structure and use it for the
+ * private_data field in filp for other methods
+ */
+ sep = sep_dev;
+
+ file_ptr->private_data = sep;
+
+ dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n", current->pid);
+
+ dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
+ if (test_and_set_bit(0, &sep->singleton_access_flag)) {
+ error = -EBUSY;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
+end_function:
+ return error;
+}
+
+/**
+ * sep_open - device open method
+ * @inode: inode of SEP device
+ * @filp: file handle to SEP device
+ *
+ * Open method for the SEP device. Called when userspace opens
+ * the SEP device node.
+ *
+ * Returns zero on success otherwise an error code.
+ */
+static int sep_open(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep;
+
+ /*
+ * Get the SEP device structure and use it for the
+ * private_data field in filp for other methods
+ */
+ sep = sep_dev;
+ filp->private_data = sep;
+
+ dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);
+
+ /* Anyone can open; locking takes place at transaction level */
+ return 0;
+}
+
+/**
+ * sep_singleton_release - close a SEP singleton device
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of the SEP singleton device. As the open
+ * protects against multiple simultaneous opens, this method is called
+ * when the final reference to the open handle is dropped.
+ */
+static int sep_singleton_release(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
+ current->pid);
+ clear_bit(0, &sep->singleton_access_flag);
+ return 0;
+}
+
+/**
+ * sep_request_daemon_open - request daemon open method
+ * @inode: inode of SEP device
+ * @filp: file handle to SEP device
+ *
+ * Open method for the SEP request daemon. Called when
+ * request daemon in userspace opens the SEP device node.
+ *
+ * Returns zero on success otherwise an error code.
+ */
+static int sep_request_daemon_open(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep = sep_dev;
+ int error = 0;
+
+ filp->private_data = sep;
+
+ dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
+ current->pid);
+
+ /* There is supposed to be only one request daemon */
+ dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
+ if (test_and_set_bit(0, &sep->request_daemon_open))
+ error = -EBUSY;
+ return error;
+}
+
+/**
+ * sep_request_daemon_release - close a SEP daemon
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of a SEP daemon.
+ */
+static int sep_request_daemon_release(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
+ current->pid);
+
+ /* Clear the request_daemon_open flag */
+ clear_bit(0, &sep->request_daemon_open);
+ return 0;
+}
+
+/**
+ * sep_req_daemon_send_reply_command_handler - poke the SEP
+ * @sep: struct sep_device *
+ *
+ * This function raises an interrupt to the SEP, signalling that it has a
+ * new command from the HOST.
+ */
+static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
+{
+ unsigned long lck_flags;
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_req_daemon_send_reply_command_handler start\n");
+
+ sep_dump_message(sep);
+
+ /* Counters are lockable region */
+ spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+ sep->send_ct++;
+ sep->reply_ct++;
+
+ /* Send the interrupt to SEP */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
+ sep->send_ct++;
+
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_req_daemon_send_reply_command_handler end\n");
+
+ return 0;
+}
+
+
+/**
+ * sep_free_dma_table_data_handler - free DMA table
+ * @sep: pointer to struct sep_device
+ *
+ * Handles the request to free the DMA tables for synchronous actions
+ */
+static int sep_free_dma_table_data_handler(struct sep_device *sep)
+{
+ int count;
+ int dcb_counter;
+ /* Pointer to the current dma_resource struct */
+ struct sep_dma_resource *dma;
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");
+
+ for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
+ dma = &sep->dma_res_arr[dcb_counter];
+
+ /* Unmap and free input map array */
+ if (dma->in_map_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->in_map_array[count].dma_addr,
+ dma->in_map_array[count].size,
+ DMA_TO_DEVICE);
+ }
+ kfree(dma->in_map_array);
+ }
+
+		/* Unmap and free output map array */
+ if (dma->out_map_array) {
+ for (count = 0; count < dma->out_num_pages; count++) {
+ dma_unmap_page(&sep->pdev->dev,
+ dma->out_map_array[count].dma_addr,
+ dma->out_map_array[count].size,
+ DMA_FROM_DEVICE);
+ }
+ kfree(dma->out_map_array);
+ }
+
+		/* Free page cache for input */
+ if (dma->in_page_array) {
+ for (count = 0; count < dma->in_num_pages; count++) {
+ flush_dcache_page(dma->in_page_array[count]);
+ page_cache_release(dma->in_page_array[count]);
+ }
+ kfree(dma->in_page_array);
+ }
+
+ if (dma->out_page_array) {
+ for (count = 0; count < dma->out_num_pages; count++) {
+ if (!PageReserved(dma->out_page_array[count]))
+ SetPageDirty(dma->out_page_array[count]);
+ flush_dcache_page(dma->out_page_array[count]);
+ page_cache_release(dma->out_page_array[count]);
+ }
+ kfree(dma->out_page_array);
+ }
+
+ /* Reset all the values */
+ dma->in_page_array = NULL;
+ dma->out_page_array = NULL;
+ dma->in_num_pages = 0;
+ dma->out_num_pages = 0;
+ dma->in_map_array = NULL;
+ dma->out_map_array = NULL;
+ dma->in_map_num_entries = 0;
+ dma->out_map_num_entries = 0;
+ }
+
+ sep->nr_dcb_creat = 0;
+ sep->num_lli_tables_created = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
+ return 0;
+}
+
+/**
+ * sep_request_daemon_mmap - maps the shared area to user space
+ * @filp: pointer to struct file
+ * @vma: pointer to vm_area_struct
+ *
+ * Called by the kernel when the daemon attempts an mmap() syscall
+ * using our handle.
+ */
+static int sep_request_daemon_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct sep_device *sep = filp->private_data;
+ dma_addr_t bus_address;
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "daemon mmap start\n");
+
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Get physical address */
+ bus_address = sep->shared_bus;
+
+ dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
+ (unsigned long)bus_address);
+
+ if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+
+		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
+ error = -EAGAIN;
+ goto end_function;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
+ return error;
+}
+
+/**
+ * sep_request_daemon_poll - poll implementation
+ * @sep: struct sep_device * for current SEP device
+ * @filp: struct file * for open file
+ * @wait: poll_table * for poll
+ *
+ * Called when our device is part of a poll() or select() syscall
+ */
+static unsigned int sep_request_daemon_poll(struct file *filp,
+ poll_table *wait)
+{
+ u32 mask = 0;
+ /* GPR2 register */
+ u32 retval2;
+ unsigned long lck_flags;
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "daemon poll: start\n");
+
+ poll_wait(filp, &sep->event_request_daemon, wait);
+
+ dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+ /* Check if the data is ready */
+ if (sep->send_ct == sep->reply_ct) {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "daemon poll: data check (GPR2) is %x\n", retval2);
+
+ /* Check if PRINT request */
+ if ((retval2 >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
+ mask |= POLLIN;
+ goto end_function;
+ }
+ /* Check if NVS request */
+ if (retval2 >> 31) {
+ dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
+ mask |= POLLPRI | POLLWRNORM;
+ }
+ } else {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+ dev_dbg(&sep->pdev->dev,
+ "daemon poll: no reply received; returning 0\n");
+ mask = 0;
+ }
+end_function:
+ dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
+ return mask;
+}
+
+/**
+ * sep_release - close a SEP device
+ * @inode: inode of SEP device
+ * @filp: file handle being closed
+ *
+ * Called on the final close of a SEP device.
+ */
+static int sep_release(struct inode *inode, struct file *filp)
+{
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
+
+ mutex_lock(&sep->sep_mutex);
+ /* Is this the process that has a transaction open?
+	 * If so, let's reset pid_doing_transaction to 0 and
+ * clear the in use flags, and then wake up sep_event
+ * so that other processes can do transactions
+ */
+ dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
+ if (sep->pid_doing_transaction == current->pid) {
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
+ sep_free_dma_table_data_handler(sep);
+ wake_up(&sep->event);
+ sep->pid_doing_transaction = 0;
+ }
+
+ mutex_unlock(&sep->sep_mutex);
+ return 0;
+}
+
+/**
+ * sep_mmap - maps the shared area to user space
+ * @filp: pointer to struct file
+ * @vma: pointer to vm_area_struct
+ *
+ * Called on an mmap of our space via the normal SEP device
+ */
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ dma_addr_t bus_addr;
+ struct sep_device *sep = filp->private_data;
+ unsigned long error = 0;
+
+ dev_dbg(&sep->pdev->dev, "mmap start\n");
+
+ /* Set the transaction busy (own the device) */
+ wait_event_interruptible(sep->event,
+ test_and_set_bit(SEP_MMAP_LOCK_BIT,
+ &sep->in_use_flags) == 0);
+
+ if (signal_pending(current)) {
+ error = -EINTR;
+ goto end_function_with_error;
+ }
+ /*
+ * The pid_doing_transaction indicates that this process
+	 * now owns the facilities to perform a transaction with
+ * the SEP. While this process is performing a transaction,
+ * no other process who has the SEP device open can perform
+ * any transactions. This method allows more than one process
+ * to have the device open at any given time, which provides
+ * finer granularity for device utilization by multiple
+ * processes.
+ */
+ mutex_lock(&sep->sep_mutex);
+ sep->pid_doing_transaction = current->pid;
+ mutex_unlock(&sep->sep_mutex);
+
+	/* Zero the pools and the number of data pool allocation pointers */
+ sep->data_pool_bytes_allocated = 0;
+ sep->num_of_data_allocations = 0;
+
+	/*
+	 * Check that the size of the mapped range does not exceed the size
+	 * of the message shared area
+	 */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ error = -EINVAL;
+ goto end_function_with_error;
+ }
+
+ dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
+
+ /* Get bus address */
+ bus_addr = sep->shared_bus;
+
+ dev_dbg(&sep->pdev->dev,
+ "bus_address is %lx\n", (unsigned long)bus_addr);
+
+ if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
+ error = -EAGAIN;
+ goto end_function_with_error;
+ }
+ dev_dbg(&sep->pdev->dev, "mmap end\n");
+ goto end_function;
+
+end_function_with_error:
+ /* Clear the bit */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+ mutex_lock(&sep->sep_mutex);
+ sep->pid_doing_transaction = 0;
+ mutex_unlock(&sep->sep_mutex);
+
+	/* Raise event for stuck contexts */
+
+ dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
+ wake_up(&sep->event);
+
+end_function:
+ return error;
+}
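+
+/*
+ * Illustrative userspace sketch (the device node path and mmap_len are
+ * assumptions, not defined here): the SEP message area is reached by
+ * mmap()ing the device node; lengths larger than SEP_DRIVER_MMMAP_AREA_SIZE
+ * are rejected with -EINVAL by sep_mmap() above.
+ *
+ *	int fd = open("/dev/sep", O_RDWR);
+ *	void *msg_area = mmap(NULL, mmap_len, PROT_READ | PROT_WRITE,
+ *			      MAP_SHARED, fd, 0);
+ */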
+
+/**
+ * sep_poll - poll handler
+ * @filp: pointer to struct file
+ * @wait: pointer to poll_table
+ *
+ * Called by the OS when the kernel is asked to do a poll on
+ * a SEP file handle.
+ */
+static unsigned int sep_poll(struct file *filp, poll_table *wait)
+{
+ u32 mask = 0;
+ u32 retval = 0;
+ u32 retval2 = 0;
+ unsigned long lck_flags;
+
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "poll: start\n");
+
+ /* Am I the process that owns the transaction? */
+ mutex_lock(&sep->sep_mutex);
+ if (current->pid != sep->pid_doing_transaction) {
+ dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
+ mask = POLLERR;
+ mutex_unlock(&sep->sep_mutex);
+ goto end_function;
+ }
+ mutex_unlock(&sep->sep_mutex);
+
+ /* Check if send command or send_reply were activated previously */
+ if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
+		dev_warn(&sep->pdev->dev, "poll; send msg lock bit not set\n");
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* Add the event to the polling wait table */
+ dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
+
+ poll_wait(filp, &sep->event, wait);
+
+ dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+	/* Check if error occurred during poll */
+ retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if (retval2 != 0x0) {
+ dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
+ mask |= POLLERR;
+ goto end_function;
+ }
+
+ spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+
+ if (sep->send_ct == sep->reply_ct) {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+ retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
+ retval);
+
+ /* Check if printf request */
+ if ((retval >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
+ wake_up(&sep->event_request_daemon);
+ goto end_function;
+ }
+
+		/* Check if this is a SEP reply or a request */
+ if (retval >> 31) {
+ dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
+ wake_up(&sep->event_request_daemon);
+ } else {
+ dev_dbg(&sep->pdev->dev, "poll: normal return\n");
+			/* In case it is called again by send_reply_command */
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
+ sep_dump_message(sep);
+ dev_dbg(&sep->pdev->dev,
+ "poll; SEP reply POLLIN | POLLRDNORM\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+ } else {
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+ dev_dbg(&sep->pdev->dev,
+ "poll; no reply received; returning mask of 0\n");
+ mask = 0;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "poll: end\n");
+ return mask;
+}
+
+/**
+ * sep_time_address - address in SEP memory of time
+ * @sep: SEP device we want the address from
+ *
+ * Return the address of the two dwords in memory used for time
+ * setting.
+ */
+static u32 *sep_time_address(struct sep_device *sep)
+{
+ return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+}
+
+/**
+ * sep_set_time - set the SEP time
+ * @sep: the SEP we are setting the time for
+ *
+ * Calculates time and sets it at the predefined address.
+ * Called with the SEP mutex held.
+ */
+static unsigned long sep_set_time(struct sep_device *sep)
+{
+ struct timeval time;
+ u32 *time_addr; /* Address of time as seen by the kernel */
+
+
+ dev_dbg(&sep->pdev->dev, "sep_set_time start\n");
+
+ do_gettimeofday(&time);
+
+ /* Set value in the SYSTEM MEMORY offset */
+ time_addr = sep_time_address(sep);
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
+
+ dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
+ dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
+ dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
+
+ return time.tv_sec;
+}
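+
+/*
+ * Illustrative sketch only: reading back the two dwords written by
+ * sep_set_time(), using the layout above (token first, seconds second).
+ *
+ *	u32 *t = sep_time_address(sep);
+ *
+ *	if (t[0] == SEP_TIME_VAL_TOKEN)
+ *		dev_dbg(&sep->pdev->dev, "time is %u seconds\n", t[1]);
+ */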
+
+/**
+ * sep_set_caller_id_handler - insert caller id entry
+ * @sep: SEP device
+ * @arg: pointer to struct caller_id_struct
+ *
+ * Inserts the data into the caller id table. Note that this function
+ * falls under the ioctl lock
+ */
+static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
+{
+ void __user *hash;
+ int error = 0;
+ int i;
+ struct caller_id_struct command_args;
+
+ dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep->caller_id_table[i].pid == 0)
+ break;
+ }
+
+ if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
+ dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
+ dev_warn(&sep->pdev->dev, "maximum number is %d\n",
+ SEP_CALLER_ID_TABLE_NUM_ENTRIES);
+ error = -EUSERS;
+ goto end_function;
+ }
+
+ /* Copy the data */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(command_args))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ hash = (void __user *)(unsigned long)command_args.callerIdAddress;
+
+ if (!command_args.pid || !command_args.callerIdSizeInBytes) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
+ dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
+ command_args.callerIdSizeInBytes);
+
+ if (command_args.callerIdSizeInBytes >
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
+ error = -EMSGSIZE;
+ goto end_function;
+ }
+
+ sep->caller_id_table[i].pid = command_args.pid;
+
+ if (copy_from_user(sep->caller_id_table[i].callerIdHash,
+ hash, command_args.callerIdSizeInBytes))
+ error = -EFAULT;
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
+ return error;
+}
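+
+/*
+ * Illustrative userspace sketch: filling struct caller_id_struct before
+ * issuing the set-caller-id ioctl (the ioctl number is defined in the API
+ * header and not shown here; hash_buf and hash_len are hypothetical).
+ *
+ *	struct caller_id_struct args;
+ *
+ *	args.pid = getpid();
+ *	args.callerIdAddress = (unsigned long)hash_buf;
+ *	args.callerIdSizeInBytes = hash_len;
+ *
+ * with hash_len no larger than SEP_CALLER_ID_HASH_SIZE_IN_BYTES.
+ */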
+
+/**
+ * sep_set_current_caller_id - set the caller id
+ * @sep: pointer to struct_sep_device
+ *
+ * Set the caller ID (if it exists) to the SEP. Note that this
+ * function falls under the ioctl lock
+ */
+static int sep_set_current_caller_id(struct sep_device *sep)
+{
+ int i;
+ u32 *hash_buf_ptr;
+
+ dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
+ dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);
+
+ /* Zero the previous value */
+ memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
+ 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep->caller_id_table[i].pid == current->pid) {
+ dev_dbg(&sep->pdev->dev, "Caller Id found\n");
+
+ memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
+ (void *)(sep->caller_id_table[i].callerIdHash),
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+ break;
+ }
+ }
+ /* Ensure data is in little endian */
+	hash_buf_ptr = (u32 *)(sep->shared_addr +
+		SEP_CALLER_ID_OFFSET_BYTES);
+
+ for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
+ hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
+
+ dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
+ return 0;
+}
+
+/**
+ * sep_send_command_handler - kick off a command
+ * @sep: SEP being signalled
+ *
+ * This function raises interrupt to SEP that signals that is has a new
+ * command from the host
+ *
+ * Note that this function does fall under the ioctl lock
+ */
+static int sep_send_command_handler(struct sep_device *sep)
+{
+ unsigned long lck_flags;
+ int error = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");
+
+ if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
+ error = -EPROTO;
+ goto end_function;
+ }
+ sep_set_time(sep);
+
+ sep_set_current_caller_id(sep);
+
+ sep_dump_message(sep);
+
+ /* Update counter */
+ spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+ sep->send_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_send_command_handler send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ /* Send interrupt to SEP */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
+ return error;
+}
+
+/**
+ * sep_allocate_data_pool_memory_handler -allocate pool memory
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to struct alloc_struct
+ *
+ * This function handles the allocate data pool memory request.
+ * It calculates the bus address of the allocated memory and the
+ * offset of this area from the mapped address, so that the FVOs in
+ * user space can calculate the exact virtual address of the
+ * allocated memory.
+ */
+static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
+ unsigned long arg)
+{
+ int error = 0;
+ struct alloc_struct command_args;
+
+ /* Holds the allocated buffer address in the system memory pool */
+ u32 *token_addr;
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_allocate_data_pool_memory_handler start\n");
+
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct alloc_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* Allocate memory */
+ if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
+ dev_dbg(&sep->pdev->dev,
+ "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
+ /* Set the virtual and bus address */
+ command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep->data_pool_bytes_allocated;
+
+ dev_dbg(&sep->pdev->dev,
+ "command_args.offset: %x\n", command_args.offset);
+
+ /* Place in the shared area that is known by the SEP */
+ token_addr = (u32 *)(sep->shared_addr +
+ SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
+ (sep->num_of_data_allocations)*2*sizeof(u32));
+
+ dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
+ SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
+ dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);
+
+ token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
+ token_addr[1] = (u32)sep->shared_bus +
+ SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep->data_pool_bytes_allocated;
+
+ dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
+ dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);
+
+ /* Write the memory back to the user space */
+	error = copy_to_user((void __user *)arg, &command_args,
+		sizeof(struct alloc_struct));
+ if (error) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* Update the allocation */
+ sep->data_pool_bytes_allocated += command_args.num_bytes;
+ sep->num_of_data_allocations += 1;
+
+ dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
+ sep->num_of_data_allocations);
+ dev_dbg(&sep->pdev->dev, "bytes allocated %d\n",
+ (int)sep->data_pool_bytes_allocated);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
+ return error;
+}
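+
+/*
+ * Illustrative userspace sketch: after the allocate-data-pool ioctl fills
+ * in the offset field, the caller locates the allocation inside its own
+ * mmap() of the shared area ("msg_area" is hypothetical, see sep_mmap()):
+ *
+ *	struct alloc_struct args = { .num_bytes = 256 };
+ *
+ *	// issue the allocate ioctl on the SEP fd with &args, then:
+ *	void *buf = (unsigned char *)msg_area + args.offset;
+ */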
+
+/**
+ * sep_lock_kernel_pages - map kernel pages for DMA
+ * @sep: pointer to struct sep_device
+ * @kernel_virt_addr: address of data buffer in kernel
+ * @data_size: size of data
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input into device or output from device
+ *
+ * This function locks all the physical pages of the kernel virtual buffer
+ * and constructs a basic lli array, where each entry holds the physical
+ * page address and the size that the application data holds in this page.
+ * This function is used only during kernel crypto module calls from within
+ * the kernel (when ioctl is not used)
+ */
+static int sep_lock_kernel_pages(struct sep_device *sep,
+ unsigned long kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag)
+
+{
+ int error = 0;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+
+ dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages start\n");
+ dev_dbg(&sep->pdev->dev, "kernel_virt_addr is %08lx\n",
+ (unsigned long)kernel_virt_addr);
+ dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
+ if (!lli_array) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
+ if (!map_array) {
+ error = -ENOMEM;
+ goto end_function_with_error;
+ }
+
+ map_array[0].dma_addr =
+ dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
+ data_size, DMA_BIDIRECTIONAL);
+ map_array[0].size = data_size;
+
+
+	/*
+	 * Set the start address of the first page - app data may not start
+	 * at the beginning of the page
+	 */
+ lli_array[0].bus_address = (u32)map_array[0].dma_addr;
+ lli_array[0].block_size = map_array[0].size;
+
+ dev_dbg(&sep->pdev->dev,
+ "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
+ (unsigned long)lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* Set the output parameters */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
+ }
+ goto end_function;
+
+end_function_with_error:
+ kfree(lli_array);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
+ return error;
+}
+
+/**
+ * sep_lock_user_pages - lock and map user pages for DMA
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: user memory data buffer
+ * @data_size: size of data buffer
+ * @lli_array_ptr: lli array
+ * @in_out_flag: input or output to device
+ *
+ * This function locks all the physical pages of the application
+ * virtual buffer and constructs a basic lli array, where each entry
+ * holds the physical page address and the size that the application
+ * data holds in these physical pages
+ */
+static int sep_lock_user_pages(struct sep_device *sep,
+ u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry **lli_array_ptr,
+ int in_out_flag)
+
+{
+ int error = 0;
+ u32 count;
+ int result;
+	/* The page of the end address of the user space buffer */
+ u32 end_page;
+ /* The page of the start address of the user space buffer */
+ u32 start_page;
+ /* The range in pages */
+ u32 num_pages;
+ /* Array of pointers to page */
+ struct page **page_array;
+ /* Array of lli */
+ struct sep_lli_entry *lli_array;
+ /* Map array */
+ struct sep_dma_map *map_array;
+ /* Direction of the DMA mapping for locked pages */
+ enum dma_data_direction dir;
+
+ dev_dbg(&sep->pdev->dev, "sep_lock_user_pages start\n");
+
+ /* Set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
+ dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+ dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
+ dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
+ dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
+
+ dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");
+
+ /* Allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+ GFP_ATOMIC);
+
+ if (!lli_array) {
+ dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+ dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");
+
+	/* Convert the application virtual address into a set of physical pages */
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, app_virt_addr,
+ num_pages,
+ ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
+ 0, page_array, NULL);
+
+ up_read(&current->mm->mmap_sem);
+
+ /* Check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ dev_warn(&sep->pdev->dev,
+ "not all pages locked by get_user_pages\n");
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
+
+ /* Set direction */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG)
+ dir = DMA_TO_DEVICE;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ /*
+ * Fill the array using page array data and
+ * map the pages - this action will also flush the cache as needed
+ */
+ for (count = 0; count < num_pages; count++) {
+ /* Fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(&sep->pdev->dev, page_array[count],
+ 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
+
+ map_array[count].size = PAGE_SIZE;
+
+ /* Fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
+ count, (unsigned long)lli_array[count].bus_address,
+ count, lli_array[count].block_size);
+ }
+
+ /* Check the offset for the first page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+ /* Check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ dev_dbg(&sep->pdev->dev,
+ "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
+		(unsigned long)lli_array[0].bus_address,
+		lli_array[0].block_size);
+
+ /* Check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+
+ dev_warn(&sep->pdev->dev,
+ "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
+ num_pages - 1,
+			(unsigned long)lli_array[num_pages - 1].bus_address,
+			num_pages - 1,
+			lli_array[num_pages - 1].block_size);
+ }
+
+	/* Set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
+ num_pages;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
+ page_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
+ num_pages;
+ }
+ goto end_function;
+
+end_function_with_error3:
+ /* Free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+ kfree(map_array);
+
+end_function_with_error1:
+ /* Free page array */
+ kfree(page_array);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
+ return error;
+}
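+
+/*
+ * Worked example (assuming 4KB pages): for app_virt_addr = 0x1234 and
+ * data_size = 0x3000, start_page = 1, end_page = 4 and num_pages = 4. The
+ * first entry is advanced by the in-page offset 0x234 and sized
+ * 0x1000 - 0x234 = 0xdcc; the last entry is sized
+ * (0x1234 + 0x3000) & ~PAGE_MASK = 0x234; the two middle entries keep a
+ * full page each, so the block sizes sum back to 0x3000.
+ */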
+
+/**
+ * sep_calculate_lli_table_max_size - size the LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_in_array_ptr: pointer to the lli array
+ * @num_array_entries: number of entries left in the lli array
+ * @last_table_flag: set to 1 if this is the last table that can be built
+ *
+ * This function calculates the size of data that can be inserted into
+ * the lli table from this array, such that either the table is full
+ * (all entries are entered), or there are no more entries in the
+ * lli array
+ */
+static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array_ptr,
+ u32 num_array_entries,
+ u32 *last_table_flag)
+{
+ u32 counter;
+ /* Table data size */
+ u32 table_data_size = 0;
+ /* Data size for the next table */
+ u32 next_table_data_size;
+
+ *last_table_flag = 0;
+
+	/*
+	 * Calculate the data in the lli table until we fill the whole
+	 * table or until the data has ended
+	 */
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+	/*
+	 * Check if we reached the last entry,
+	 * meaning this is the last table to build,
+	 * and there is no need to check the block alignment
+	 */
+ if (counter == num_array_entries) {
+ /* Set the last table flag */
+ *last_table_flag = 1;
+ goto end_function;
+ }
+
+ /*
+ * Calculate the data size of the next table.
+	 * Stop if no entries are left or if the data size is more than
+	 * the DMA restriction
+ */
+ next_table_data_size = 0;
+ for (; counter < num_array_entries; counter++) {
+ next_table_data_size += lli_in_array_ptr[counter].block_size;
+ if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+ break;
+ }
+
+	/*
+	 * Check if the next table data size is less than the DMA restriction.
+	 * If it is - recalculate the current table size, so that the next
+	 * table data size will be adequate for DMA
+	 */
+ if (next_table_data_size &&
+ next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+
+ table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
+ next_table_data_size);
+
+ dev_dbg(&sep->pdev->dev, "table data size is %x\n",
+ table_data_size);
+end_function:
+ return table_data_size;
+}
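+
+/*
+ * Worked example (the two limits below are illustrative values, not the
+ * real SEP_DRIVER_* constants): with 7 data entries per table and a 4KB
+ * DMA minimum, an lli array of seven 4KB blocks followed by one 1KB block
+ * first sums to 28KB. The remaining 1KB is below the 4KB minimum, so the
+ * table size is reduced by 4KB - 1KB = 3KB down to 25KB, leaving
+ * 3KB + 1KB = 4KB of data for the next table.
+ */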
+
+/**
+ * sep_build_lli_table - build an lli array for the given table
+ * @sep: pointer to struct sep_device
+ * @lli_array_ptr: pointer to lli array
+ * @lli_table_ptr: pointer to lli table
+ * @num_processed_entries_ptr: pointer to the number of lli entries processed
+ * @num_table_entries_ptr: pointer to the number of entries in the table
+ * @table_data_size: total data size
+ *
+ * Builds an lli table from the lli_array according to
+ * the given size of data
+ */
+static void sep_build_lli_table(struct sep_device *sep,
+ struct sep_lli_entry *lli_array_ptr,
+ struct sep_lli_entry *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* Current table data size */
+ u32 curr_table_data_size;
+ /* Counter of lli array entry */
+ u32 array_counter;
+
+ dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");
+
+	/* Init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
+ *num_table_entries_ptr = 1;
+
+ dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);
+
+ /* Fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* Update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ cpu_to_le32(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ cpu_to_le32(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
+ lli_table_ptr);
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* Check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ dev_dbg(&sep->pdev->dev,
+ "curr_table_data_size too large\n");
+
+ /* Update the size of block in the table */
+ lli_table_ptr->block_size -=
+ cpu_to_le32((curr_table_data_size - table_data_size));
+
+ /* Update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ cpu_to_le32(lli_table_ptr->block_size);
+
+ /* Update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* Advance to the next entry in the lli_array */
+ array_counter++;
+
+ dev_dbg(&sep->pdev->dev,
+ "lli_table_ptr->bus_address is %08lx\n",
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev,
+ "lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* Move to the next entry in table */
+ lli_table_ptr++;
+ }
+
+ /* Set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
+ (unsigned long)lli_table_ptr->bus_address);
+ dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* Set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+ dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
+ *num_processed_entries_ptr);
+
+ dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
+}
+
+/**
+ * sep_shared_area_virt_to_bus - map shared area to bus address
+ * @sep: pointer to struct sep_device
+ * @virt_address: virtual address to convert
+ *
+ * This function returns the physical address inside the shared area
+ * according to the virtual address. It can be either on the external RAM
+ * device (ioremapped), or on the system RAM.
+ * This implementation is for the external RAM.
+ */
+static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
+ void *virt_address)
+{
+ dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
+ dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
+ (unsigned long)
+ sep->shared_bus + (virt_address - sep->shared_addr));
+
+ return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
+}
+
+/**
+ * sep_shared_area_bus_to_virt - map shared area bus address to kernel
+ * @sep: pointer to struct sep_device
+ * @bus_address: bus address to convert
+ *
+ * This function returns the virtual address inside the shared area
+ * according to the physical address. It can be either on the
+ * external RAM device (ioremapped), or on the system RAM.
+ * This implementation is for the external RAM.
+ */
+static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
+ dma_addr_t bus_address)
+{
+ dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
+ (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
+ (size_t)(bus_address - sep->shared_bus)));
+
+ return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
+}
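+
+/*
+ * Illustrative note: the two helpers above are inverses over the shared
+ * area, so for any offset off within sep->shared_size:
+ *
+ *	sep_shared_area_virt_to_bus(sep, sep->shared_addr + off)
+ *		== sep->shared_bus + off
+ *	sep_shared_area_bus_to_virt(sep, sep->shared_bus + off)
+ *		== sep->shared_addr + off
+ */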
+
+/**
+ * sep_debug_print_lli_tables - dump LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_ptr: pointer to sep_lli_entry
+ * @num_table_entries: number of entries
+ * @table_data_size: total data size
+ *
+ * Walk the list of the created tables and print all the data
+ */
+static void sep_debug_print_lli_tables(struct sep_device *sep,
+ struct sep_lli_entry *lli_table_ptr,
+ unsigned long num_table_entries,
+ unsigned long table_data_size)
+{
+ unsigned long table_count = 1;
+ unsigned long entries_count = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
+
+ while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
+ dev_dbg(&sep->pdev->dev,
+ "lli table %08lx, table_data_size is %lu\n",
+ table_count, table_data_size);
+ dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
+ num_table_entries);
+
+ /* Print entries of the table (without info entry) */
+ for (entries_count = 0; entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ dev_dbg(&sep->pdev->dev,
+ "lli_table_ptr address is %08lx\n",
+ (unsigned long) lli_table_ptr);
+
+ dev_dbg(&sep->pdev->dev,
+ "phys address is %08lx block size is %x\n",
+ (unsigned long)lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+ /* Point to the info entry */
+ lli_table_ptr--;
+
+ dev_dbg(&sep->pdev->dev,
+ "phys lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+		dev_dbg(&sep->pdev->dev,
+			"phys lli_table_ptr->bus_address is %08lx\n",
+			(unsigned long)lli_table_ptr->bus_address);
+
+
+ table_data_size = lli_table_ptr->block_size & 0xffffff;
+ num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
+
+ dev_dbg(&sep->pdev->dev,
+			"phys table_data_size is %lu num_table_entries is"
+			" %lu bus_address is %lu\n", table_data_size,
+ num_table_entries, (unsigned long)lli_table_ptr->bus_address);
+
+ if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry *)
+ sep_shared_bus_to_virt(sep,
+ (unsigned long)lli_table_ptr->bus_address);
+
+ table_count++;
+ }
+ dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
+}
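+
+/*
+ * Illustrative note: an info entry packs the next table's entry count and
+ * data size into block_size as (num_entries << 24) | data_size, which the
+ * "& 0xffffff" and ">> 24" above undo. For example:
+ *
+ *	u32 packed = (5 << 24) | 0x4000;	// 5 entries, 16KB of data
+ *	u32 size = packed & 0xffffff;		// 0x4000
+ *	u32 entries = (packed >> 24) & 0xff;	// 5
+ */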
+
+
+/**
+ * sep_prepare_empty_lli_table - create a blank LLI table
+ * @sep: pointer to struct sep_device
+ * @lli_table_addr_ptr: pointer to lli table
+ * @num_entries_ptr: pointer to number of entries
+ * @table_data_size_ptr: pointer to table data size
+ *
+ * This function creates empty lli tables when there is no data
+ */
+static void sep_prepare_empty_lli_table(struct sep_device *sep,
+ dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ struct sep_lli_entry *lli_table_ptr;
+
+ dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");
+
+ /* Find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* Set the output parameter value */
+ *lli_table_addr_ptr = sep->shared_bus +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ sep->num_lli_tables_created *
+ sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* Update the number of created tables */
+ sep->num_lli_tables_created++;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table end\n");
+
+}
+
+/**
+ * sep_prepare_input_dma_table - prepare input DMA mappings
+ * @sep: pointer to struct sep_device
+ * @app_virt_addr: virtual address of the data buffer
+ * @data_size: size of the data buffer
+ * @block_size: block size of the symmetric operation
+ * @lli_table_ptr: returned bus address of the first input lli table
+ * @num_entries_ptr: returned number of entries in the first lli table
+ * @table_data_size_ptr: returned data size of the first lli table
+ * @is_kva: set for kernel data (kernel crypto call)
+ *
+ * This function prepares only the input DMA table for synchronous
+ * symmetric operations (HASH)
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_dma_table(struct sep_device *sep,
+ unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva)
+{
+ int error = 0;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_entry_ptr;
+ /* Array of pointers to page */
+ struct sep_lli_entry *lli_array_ptr;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_entry = 0;
+ /* Num entries in the virtual buffer */
+ u32 sep_lli_entries = 0;
+ /* Lli table pointer */
+ struct sep_lli_entry *in_lli_table_ptr;
+ /* The total data in one table */
+ u32 table_data_size = 0;
+ /* Flag for last table */
+ u32 last_table_flag = 0;
+ /* Number of entries in lli table */
+ u32 num_entries_in_table = 0;
+ /* Next table address */
+ void *lli_table_alloc_addr = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
+ dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+ dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
+
+ /* Initialize the pages pointers */
+ sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
+
+ /* Set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (data_size == 0) {
+		/* Special case - create empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(sep, lli_table_ptr,
+ num_entries_ptr, table_data_size_ptr);
+ goto update_dcb_counter;
+ }
+
+ /* Check if the pages are in Kernel Virtual Address layout */
+ if (is_kva == true)
+ /* Lock the pages in the kernel */
+ error = sep_lock_kernel_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
+ else
+ /*
+ * Lock the pages of the user buffer
+ * and translate them to pages
+ */
+ error = sep_lock_user_pages(sep, app_virt_addr,
+ data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
+
+ if (error)
+ goto end_function;
+
+ dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = NULL;
+
+ sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_entry < sep_lli_entries) {
+
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ if (lli_table_alloc_addr >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ error = -ENOMEM;
+ goto end_function_error;
+
+ }
+
+ /* Update the number of created tables */
+ sep->num_lli_tables_created++;
+
+ /* Calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(sep,
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry),
+ &last_table_flag);
+
+		/*
+		 * If this is not the last table -
+		 * then align it to the block size
+		 */
+ if (!last_table_flag)
+ table_data_size =
+ (table_data_size / block_size) * block_size;
+
+ dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
+ table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry, &num_entries_in_table, table_data_size);
+
+ if (info_entry_ptr == NULL) {
+
+ /* Set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
+ in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "output lli_table_in_ptr is %08lx\n",
+ (unsigned long)*lli_table_ptr);
+
+ } else {
+ /* Update the info entry of the previous in table */
+ info_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ in_lli_table_ptr);
+ info_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+ /* Save the pointer to the info entry of the current tables */
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+ /* Print input tables */
+ sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
+ *num_entries_ptr, *table_data_size_ptr);
+ /* The array of the pages */
+ kfree(lli_array_ptr);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ sep->nr_dcb_creat++;
+ goto end_function;
+
+end_function_error:
+ /* Free all the allocated resources */
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
+ kfree(lli_array_ptr);
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
+ return error;
+}
+
+/**
+ * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
+ * @sep: pointer to struct sep_device
+ * @lli_in_array: array of input lli entries
+ * @sep_in_lli_entries: number of entries in lli_in_array
+ * @lli_out_array: array of output lli entries
+ * @sep_out_lli_entries: number of entries in lli_out_array
+ * @block_size: block size of the symmetric operation
+ * @lli_table_in_ptr: returned bus address of the first input lli table
+ * @lli_table_out_ptr: returned bus address of the first output lli table
+ * @in_num_entries_ptr: returned number of entries in the first input table
+ * @out_num_entries_ptr: returned number of entries in the first output table
+ * @table_data_size_ptr: returned data size of the first table
+ *
+ * This function creates the input and output DMA tables for
+ * symmetric operations (AES/DES) according to the block
+ * size from the LLI arrays
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_construct_dma_tables_from_lli(
+ struct sep_device *sep,
+ struct sep_lli_entry *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ /* Points to the area where next lli table can be allocated */
+ void *lli_table_alloc_addr = 0;
+ /* Input lli table */
+ struct sep_lli_entry *in_lli_table_ptr = NULL;
+ /* Output lli table */
+ struct sep_lli_entry *out_lli_table_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_in_entry_ptr = NULL;
+ /* Pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry *info_out_entry_ptr = NULL;
+ /* Points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry = 0;
+ /* Points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry = 0;
+ /* Max size of the input table */
+ u32 in_table_data_size = 0;
+ /* Max size of the output table */
+ u32 out_table_data_size = 0;
+	/* Flag that signifies if this is the last table to build */
+ u32 last_table_flag = 0;
+ /* The data size that should be in table */
+ u32 table_data_size = 0;
+	/* Number of entries in the input table */
+	u32 num_entries_in_table = 0;
+	/* Number of entries in the output table */
+ u32 num_entries_out_table = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
+
+ /* Initiate to point after the message area */
+ lli_table_alloc_addr = (void *)(sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ (sep->num_lli_tables_created *
+ (sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+
+	/* Loop until all the entries in the input array are processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* Set the new input and output tables */
+ in_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Set the first output tables */
+ out_lli_table_ptr =
+ (struct sep_lli_entry *)lli_table_alloc_addr;
+
+ /* Check if the DMA table area limit was overrun */
+ if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
+ ((void *)sep->shared_addr +
+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+ dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
+ return -ENOMEM;
+ }
+
+ /* Update the number of the lli tables created */
+ sep->num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* Calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry),
+ &last_table_flag);
+
+ /* Calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(sep,
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry),
+ &last_table_flag);
+
+ dev_dbg(&sep->pdev->dev,
+ "in_table_data_size is %x\n",
+ in_table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "out_table_data_size is %x\n",
+ out_table_data_size);
+
+ table_data_size = in_table_data_size;
+
+ if (!last_table_flag) {
+ /*
+ * If this is not the last table, take the smaller of the
+ * two data sizes and then align it to the block size
+ */
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ /*
+ * Now calculate the table size so that it will be a
+ * multiple of the block size
+ */
+ table_data_size = (table_data_size / block_size) *
+ block_size;
+ }
+
+ dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
+ table_data_size);
+
+ /* Construct input lli table */
+ sep_build_lli_table(sep, &lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* Construct output lli table */
+ sep_build_lli_table(sep, &lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* If info entry is null - this is the first table built */
+ if (info_in_entry_ptr == NULL) {
+ /* Set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(sep,
+ out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ dev_dbg(&sep->pdev->dev,
+ "output lli_table_in_ptr is %08lx\n",
+ (unsigned long)*lli_table_in_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "output lli_table_out_ptr is %08lx\n",
+ (unsigned long)*lli_table_out_ptr);
+ } else {
+ /* Update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ in_lli_table_ptr);
+
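+ /*
+ * The info entry's block_size field packs the next table's
+ * entry count in its upper 8 bits and that table's data
+ * size in its lower 24 bits.
+ */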
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) |
+ (table_data_size);
+
+ /* Update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(sep,
+ out_lli_table_ptr);
+
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) |
+ (table_data_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "output lli_table_in_ptr:%08lx %08x\n",
+ (unsigned long)info_in_entry_ptr->bus_address,
+ info_in_entry_ptr->block_size);
+
+ dev_dbg(&sep->pdev->dev,
+ "output lli_table_out_ptr:%08lx %08x\n",
+ (unsigned long)info_out_entry_ptr->bus_address,
+ info_out_entry_ptr->block_size);
+ }
+
+ /* Save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr +
+ num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr +
+ num_entries_out_table - 1;
+
+ dev_dbg(&sep->pdev->dev,
+ "output num_entries_out_table is %x\n",
+ (u32)num_entries_out_table);
+ dev_dbg(&sep->pdev->dev,
+ "output info_in_entry_ptr is %lx\n",
+ (unsigned long)info_in_entry_ptr);
+ dev_dbg(&sep->pdev->dev,
+ "output info_out_entry_ptr is %lx\n",
+ (unsigned long)info_out_entry_ptr);
+ }
+
+ /* Print input tables */
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+
+ /* Print output tables */
+ sep_debug_print_lli_tables(sep,
+ (struct sep_lli_entry *)
+ sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+
+ dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
+ return 0;
+}
+
+/**
+ * sep_prepare_input_output_dma_table - prepare DMA I/O table
+ * @sep: pointer to struct sep_device
+ * @app_virt_in_addr: virtual address of the input buffer
+ * @app_virt_out_addr: virtual address of the output buffer
+ * @data_size: size of the data to process
+ * @block_size: block size of the operation; tables are built modulo this size
+ * @lli_table_in_ptr: returns the bus address of the first input LLI table
+ * @lli_table_out_ptr: returns the bus address of the first output LLI table
+ * @in_num_entries_ptr: returns the number of entries in the first input table
+ * @out_num_entries_ptr: returns the number of entries in the first output table
+ * @table_data_size_ptr: returns the data size of the first input/output table
+ * @is_kva: set for kernel data; used only for kernel crypto module
+ *
+ * This function builds input and output DMA tables for synchronic
+ * symmetric operations (AES, DES, HASH). It also checks that each table's
+ * data size is a multiple of the block size.
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table(struct sep_device *sep,
+ unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool is_kva)
+
+{
+ int error = 0;
+ /* Array of lli entries for the input pages */
+ struct sep_lli_entry *lli_in_array;
+ /* Array of lli entries for the output pages */
+ struct sep_lli_entry *lli_out_array;
+
+ dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
+
+ if (data_size == 0) {
+ /* Prepare empty table for input and output */
+ sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
+ in_num_entries_ptr, table_data_size_ptr);
+
+ sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
+ out_num_entries_ptr, table_data_size_ptr);
+
+ goto update_dcb_counter;
+ }
+
+ /* Initialize the pages pointers */
+ sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+ sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
+
+ /* Lock the pages of the buffer and translate them to pages */
+ if (is_kva == true) {
+ error = sep_lock_kernel_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "lock kernel for in failed\n");
+ goto end_function;
+ }
+
+ error = sep_lock_kernel_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "lock kernel for out failed\n");
+ goto end_function;
+ }
+ } else {
+ error = sep_lock_user_pages(sep, app_virt_in_addr,
+ data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_lock_user_pages for input virtual buffer failed\n");
+ goto end_function;
+ }
+
+ error = sep_lock_user_pages(sep, app_virt_out_addr,
+ data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_lock_user_pages for output virtual buffer failed\n");
+ goto end_function_free_lli_in;
+ }
+ }
+
+ dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
+ dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
+ sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
+ dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ /* Call the function that creates the tables from the lli arrays */
+ error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
+ sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
+ lli_out_array,
+ sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
+ block_size, lli_table_in_ptr, lli_table_out_ptr,
+ in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "sep_construct_dma_tables_from_lli failed\n");
+ goto end_function_with_error;
+ }
+
+ kfree(lli_out_array);
+ kfree(lli_in_array);
+
+update_dcb_counter:
+ /* Update DCB counter */
+ sep->nr_dcb_creat++;
+ /* Fall through - free the lli entry arrays */
+ dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
+ *in_num_entries_ptr);
+ dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
+ *out_num_entries_ptr);
+ dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
+ *table_data_size_ptr);
+
+ goto end_function;
+
+end_function_with_error:
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
+ kfree(lli_out_array);
+
+
+end_function_free_lli_in:
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
+ kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
+ kfree(lli_in_array);
+
+end_function:
+ dev_dbg(&sep->pdev->dev,
+ "sep_prepare_input_output_dma_table end result = %d\n", error);
+
+ return error;
+
+}
+
+/**
+ * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ * @app_in_address: unsigned long; for data buffer in (user space)
+ * @app_out_address: unsigned long; for data buffer out (user space)
+ * @data_in_size: u32; for size of data
+ * @block_size: u32; for block size
+ * @tail_block_size: u32; for size of tail block
+ * @isapplet: bool; to indicate external app
+ * @is_kva: bool; kernel buffer; only used for kernel crypto module
+ *
+ * This function prepares the linked DMA tables and puts the
+ * address of the linked list of tables into a DCB (data control
+ * block), the address of which is known to the SEP hardware
+ * Note that all bus addresses that are passed to the SEP
+ * are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+ unsigned long app_in_address,
+ unsigned long app_out_address,
+ u32 data_in_size,
+ u32 block_size,
+ u32 tail_block_size,
+ bool isapplet,
+ bool is_kva)
+{
+ int error = 0;
+ /* Size of tail */
+ u32 tail_size = 0;
+ /* Address of the created DCB table */
+ struct sep_dcblock *dcb_table_ptr = NULL;
+ /* The physical address of the first input DMA table */
+ dma_addr_t in_first_mlli_address = 0;
+ /* Number of entries in the first input DMA table */
+ u32 in_first_num_entries = 0;
+ /* The physical address of the first output DMA table */
+ dma_addr_t out_first_mlli_address = 0;
+ /* Number of entries in the first output DMA table */
+ u32 out_first_num_entries = 0;
+ /* Data in the first input/output table */
+ u32 first_data_size = 0;
+
+ dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
+
+ if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
+ /* No more DCBs to allocate */
+ dev_warn(&sep->pdev->dev, "no more DCBs available\n");
+ error = -ENOSPC;
+ goto end_function;
+ }
+
+ /* Allocate new DCB */
+ dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
+ (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
+
+ /* Set the default values in the DCB */
+ dcb_table_ptr->input_mlli_address = 0;
+ dcb_table_ptr->input_mlli_num_entries = 0;
+ dcb_table_ptr->input_mlli_data_size = 0;
+ dcb_table_ptr->output_mlli_address = 0;
+ dcb_table_ptr->output_mlli_num_entries = 0;
+ dcb_table_ptr->output_mlli_data_size = 0;
+ dcb_table_ptr->tail_data_size = 0;
+ dcb_table_ptr->out_vr_tail_pt = 0;
+
+ if (isapplet == true) {
+ tail_size = data_in_size % block_size;
+ if (tail_size) {
+ if (data_in_size < tail_block_size) {
+ dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
+ error = -ENOSPC;
+ goto end_function;
+ }
+ if (tail_block_size)
+ /*
+ * In this case the tail size should be
+ * bigger than the real block size
+ */
+ tail_size = tail_block_size +
+ ((data_in_size -
+ tail_block_size) % block_size);
+ }
+
+ /* Check if there is enough data for DMA operation */
+ if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
+ if (is_kva == true) {
+ memcpy(dcb_table_ptr->tail_data,
+ (void *)app_in_address, data_in_size);
+ } else {
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void __user *)app_in_address,
+ data_in_size)) {
+ error = -EFAULT;
+ goto end_function;
+ }
+ }
+
+ dcb_table_ptr->tail_data_size = data_in_size;
+
+ /* Set the output user-space address for mem2mem op */
+ if (app_out_address)
+ dcb_table_ptr->out_vr_tail_pt =
+ (u32)app_out_address;
+
+ /*
+ * Update both data length parameters in order to avoid
+ * second data copy and allow building of empty mlli
+ * tables
+ */
+ tail_size = 0x0;
+ data_in_size = 0x0;
+ }
+ if (tail_size) {
+ if (is_kva == true) {
+ memcpy(dcb_table_ptr->tail_data,
+ (void *)(app_in_address + data_in_size -
+ tail_size), tail_size);
+ } else {
+ /* We have tail data - copy it to DCB */
+ if (copy_from_user(dcb_table_ptr->tail_data,
+ (void *)(app_in_address +
+ data_in_size - tail_size), tail_size)) {
+ error = -EFAULT;
+ goto end_function;
+ }
+ }
+ if (app_out_address)
+ /*
+ * Calculate the output address
+ * according to tail data size
+ */
+ dcb_table_ptr->out_vr_tail_pt =
+ (u32)app_out_address + data_in_size
+ - tail_size;
+
+ /* Save the real tail data size */
+ dcb_table_ptr->tail_data_size = tail_size;
+ /*
+ * Update the data size to exclude the tail data,
+ * i.e. only the data that goes through DMA
+ */
+ data_in_size = (data_in_size - tail_size);
+ }
+ }
+ /* Check if we need to build only input table or input/output */
+ if (app_out_address) {
+ /* Prepare input/output tables */
+ error = sep_prepare_input_output_dma_table(sep,
+ app_in_address,
+ app_out_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &out_first_mlli_address,
+ &in_first_num_entries,
+ &out_first_num_entries,
+ &first_data_size,
+ is_kva);
+ } else {
+ /* Prepare input tables */
+ error = sep_prepare_input_dma_table(sep,
+ app_in_address,
+ data_in_size,
+ block_size,
+ &in_first_mlli_address,
+ &in_first_num_entries,
+ &first_data_size,
+ is_kva);
+ }
+
+ if (error) {
+ dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
+ goto end_function;
+ }
+
+ /* Set the DCB values */
+ dcb_table_ptr->input_mlli_address = in_first_mlli_address;
+ dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
+ dcb_table_ptr->input_mlli_data_size = first_data_size;
+ dcb_table_ptr->output_mlli_address = out_first_mlli_address;
+ dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
+ dcb_table_ptr->output_mlli_data_size = first_data_size;
+
+end_function:
+ dev_dbg(&sep->pdev->dev,
+ "sep_prepare_input_output_dma_table_in_dcb end\n");
+ return error;
+
+}
+
+
+/**
+ * sep_create_sync_dma_tables_handler - create sync DMA tables
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to struct bld_syn_tab_struct
+ *
+ * Handle the request for creation of the DMA tables for the synchronic
+ * symmetric operations (AES,DES). Note that all bus addresses that are
+ * passed to the SEP are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
+ unsigned long arg)
+{
+ int error = 0;
+
+ /* Command arguments */
+ struct bld_syn_tab_struct command_args;
+
+ dev_dbg(&sep->pdev->dev,
+ "sep_create_sync_dma_tables_handler start\n");
+
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct bld_syn_tab_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
+ command_args.app_in_address);
+ dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
+ command_args.app_out_address);
+ dev_dbg(&sep->pdev->dev, "data_size is %u\n",
+ command_args.data_in_size);
+ dev_dbg(&sep->pdev->dev, "block_size is %u\n",
+ command_args.block_size);
+
+ /* Validate user parameters */
+ if (!command_args.app_in_address) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)command_args.app_in_address,
+ (unsigned long)command_args.app_out_address,
+ command_args.data_in_size,
+ command_args.block_size,
+ 0x0,
+ false,
+ false);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
+ return error;
+}
+
+/**
+ * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
+ * @sep: pointer to struct sep_device
+ * @isapplet: indicates external application (used for kernel access)
+ * @is_kva: indicates kernel addresses (only used for kernel crypto)
+ *
+ * This function frees the DMA tables and DCB
+ */
+static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
+ bool is_kva)
+{
+ int i = 0;
+ int error = 0;
+ int error_temp = 0;
+ struct sep_dcblock *dcb_table_ptr;
+ unsigned long pt_hold;
+ void *tail_pt;
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
+
+ if (isapplet == true) {
+ /* Set pointer to first DCB table */
+ dcb_table_ptr = (struct sep_dcblock *)
+ (sep->shared_addr +
+ SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
+
+ /* Go over each DCB and see if tail pointer must be updated */
+ for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
+ if (dcb_table_ptr->out_vr_tail_pt) {
+ pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
+ tail_pt = (void *)pt_hold;
+ if (is_kva == true) {
+ memcpy(tail_pt,
+ dcb_table_ptr->tail_data,
+ dcb_table_ptr->tail_data_size);
+ } else {
+ error_temp = copy_to_user(
+ tail_pt,
+ dcb_table_ptr->tail_data,
+ dcb_table_ptr->tail_data_size);
+ }
+ if (error_temp) {
+ /* Release the DMA resource */
+ error = -EFAULT;
+ break;
+ }
+ }
+ }
+ }
+ /* Free the output pages, if any */
+ sep_free_dma_table_data_handler(sep);
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
+ return error;
+}
+
+/**
+ * sep_get_static_pool_addr_handler - get static pool address
+ * @sep: pointer to struct sep_device
+ *
+ * This function sets the bus and virtual addresses of the static pool
+ */
+static int sep_get_static_pool_addr_handler(struct sep_device *sep)
+{
+ u32 *static_pool_addr = NULL;
+
+ dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
+
+ static_pool_addr = (u32 *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
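+ /*
+ * Publish a token/value pair for the SEP: word 0 holds the
+ * SEP_STATIC_POOL_VAL_TOKEN marker and word 1 holds the 32 bit
+ * bus address of the static pool area.
+ */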
+ static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
+ static_pool_addr[1] = (u32)sep->shared_bus +
+ SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+
+ dev_dbg(&sep->pdev->dev, "static pool: physical %x\n",
+ (u32)static_pool_addr[1]);
+
+ dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
+
+ return 0;
+}
+
+/**
+ * sep_start_handler - start device
+ * @sep: pointer to struct sep_device
+ */
+static int sep_start_handler(struct sep_device *sep)
+{
+ unsigned long reg_val;
+ unsigned long error = 0;
+
+ dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
+
+ /* Wait in polling for message from SEP */
+ do {
+ reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!reg_val);
+
+ /* Check the value */
+ if (reg_val == 0x1)
+ /* Fatal error - read error status from GPR0 */
+ error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
+ return error;
+}
+
+/**
+ * sep_check_sum_calc - checksum messages
+ * @data: buffer to checksum
+ * @length: buffer size
+ *
+ * This function performs a checksum for messages that are sent
+ * to the SEP.
+ */
+static u32 sep_check_sum_calc(u8 *data, u32 length)
+{
+ u32 sum = 0;
+ u16 *Tdata = (u16 *)data;
+
+ while (length > 1) {
+ /* This is the inner loop */
+ sum += *Tdata++;
+ length -= 2;
+ }
+
+ /* Add left-over byte, if any */
+ if (length > 0)
+ sum += *(u8 *)Tdata;
+
+ /* Fold 32-bit sum to 16 bits */
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+
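+ /*
+ * Worked example: for the input words 0x1234 and 0x0002 the sum
+ * is 0x1236, there is no carry to fold, and the function returns
+ * ~0x1236 & 0xFFFF = 0xEDC9.
+ */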
+ return ~sum & 0xFFFF;
+}
+
+/**
+ * sep_init_handler - handle the SEP initialization request
+ * @sep: pointer to struct sep_device
+ * @arg: parameters from user space application
+ *
+ * Handles the request for SEP initialization
+ * Note that this will go away for Medfield once the SCU
+ * SEP initialization is complete
+ * Also note that the message to the SEP has components
+ * from user space as well as components written by the driver
+ * This is because the portions of the message that pertain to
+ * physical addresses must be set by the driver after the message
+ * leaves custody of the user space application for security
+ * reasons.
+ */
+static int sep_init_handler(struct sep_device *sep, unsigned long arg)
+{
+ u32 message_buff[14];
+ u32 counter;
+ int error = 0;
+ u32 reg_val;
+ dma_addr_t new_base_addr;
+ unsigned long addr_hold;
+ struct init_struct command_args;
+
+ dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
+
+ /* Make sure that we have not initialized already */
+ reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+ if (reg_val != 0x2) {
+ error = SEP_ALREADY_INITIALIZED_ERR;
+ dev_warn(&sep->pdev->dev, "init; device already initialized\n");
+ goto end_function;
+ }
+
+ /* Only root can initialize */
+ if (!capable(CAP_SYS_ADMIN)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* Copy in the parameters */
+ error = copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct init_struct));
+
+ if (error) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* Validate parameters */
+ if (!command_args.message_addr || !command_args.sep_sram_addr ||
+ command_args.message_size_in_words > 14) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* Copy in the SEP init message */
+ addr_hold = (unsigned long)command_args.message_addr;
+ error = copy_from_user(message_buff,
+ (void __user *)addr_hold,
+ command_args.message_size_in_words*sizeof(u32));
+
+ if (error) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* Load resident, cache, and extapp firmware */
+ error = sep_load_firmware(sep);
+
+ if (error) {
+ dev_warn(&sep->pdev->dev,
+ "init; copy SEP init message failed %x\n", error);
+ goto end_function;
+ }
+
+ /* Compute the base address */
+ new_base_addr = sep->shared_bus;
+
+ if (sep->resident_bus < new_base_addr)
+ new_base_addr = sep->resident_bus;
+
+ if (sep->cache_bus < new_base_addr)
+ new_base_addr = sep->cache_bus;
+
+ if (sep->dcache_bus < new_base_addr)
+ new_base_addr = sep->dcache_bus;
+
+ /* Put physical addresses in SEP message */
+ message_buff[3] = (u32)new_base_addr;
+ message_buff[4] = (u32)sep->shared_bus;
+ message_buff[6] = (u32)sep->resident_bus;
+ message_buff[7] = (u32)sep->cache_bus;
+ message_buff[8] = (u32)sep->dcache_bus;
+
+ message_buff[command_args.message_size_in_words - 1] = 0x0;
+ message_buff[command_args.message_size_in_words - 1] =
+ sep_check_sum_calc((u8 *)message_buff,
+ command_args.message_size_in_words*sizeof(u32));
+
+ /* Debug print of message */
+ for (counter = 0; counter < command_args.message_size_in_words;
+ counter++)
+ dev_dbg(&sep->pdev->dev, "init; SEP message word %d is %x\n",
+ counter, message_buff[counter]);
+
+ /* Tell the SEP the sram address */
+ sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
+
+ /* Push the message to the SEP */
+ for (counter = 0; counter < command_args.message_size_in_words;
+ counter++) {
+ sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
+ message_buff[counter]);
+ sep_wait_sram_write(sep);
+ }
+
+ /* Signal SEP that message is ready and to init */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
+
+ /* Wait for acknowledge */
+ dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
+
+ do {
+ reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!(reg_val & 0xFFFFFFFD));
+
+ if (reg_val == 0x1) {
+ dev_warn(&sep->pdev->dev, "init; device int failed\n");
+ error = sep_read_reg(sep, 0x8060);
+ dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
+ error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
+ goto end_function;
+ }
+ dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
+
+ /* Signal SEP to zero the GPR3 */
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
+
+ /* Wait for response */
+ dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
+
+ do {
+ reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (reg_val != 0);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "init is done\n");
+ return error;
+}
+
+/**
+ * sep_end_transaction_handler - end transaction
+ * @sep: pointer to struct sep_device
+ *
+ * This API handles the end transaction request
+ */
+static int sep_end_transaction_handler(struct sep_device *sep)
+{
+ dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");
+
+ /* Clear the data pool pointers Token */
+ memset((void *)(sep->shared_addr +
+ SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
+ 0, sep->num_of_data_allocations*2*sizeof(u32));
+
+ /* Check that all the DMA resources were freed */
+ sep_free_dma_table_data_handler(sep);
+
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+
+ /*
+ * We are now through with the transaction. Let's
+ * allow other processes who have the device open
+ * to perform transactions
+ */
+ mutex_lock(&sep->sep_mutex);
+ sep->pid_doing_transaction = 0;
+ mutex_unlock(&sep->sep_mutex);
+ /* Raise event for stuck contexts */
+ wake_up(&sep->event);
+
+ dev_dbg(&sep->pdev->dev, "waking up event\n");
+ dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");
+
+ return 0;
+}
+
+/**
+ * sep_prepare_dcb_handler - prepare a control block
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ *
+ * This function builds the DCB (and the DMA tables behind it) from the
+ * user supplied build_dcb_struct arguments.
+ */
+static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
+{
+ int error;
+ /* Command arguments */
+ struct build_dcb_struct command_args;
+
+ dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
+
+ /* Get the command arguments */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(struct build_dcb_struct))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
+ command_args.app_in_address);
+ dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
+ command_args.app_out_address);
+ dev_dbg(&sep->pdev->dev, "data_size is %x\n",
+ command_args.data_in_size);
+ dev_dbg(&sep->pdev->dev, "block_size is %x\n",
+ command_args.block_size);
+ dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
+ command_args.tail_block_size);
+
+ error = sep_prepare_input_output_dma_table_in_dcb(sep,
+ (unsigned long)command_args.app_in_address,
+ (unsigned long)command_args.app_out_address,
+ command_args.data_in_size, command_args.block_size,
+ command_args.tail_block_size, true, false);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
+ return error;
+
+}
+
+/**
+ * sep_free_dcb_handler - free control block resources
+ * @sep: pointer to struct sep_device
+ *
+ * This function frees the DCB resources and updates the needed
+ * user-space buffers.
+ */
+static int sep_free_dcb_handler(struct sep_device *sep)
+{
+ int error;
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler start\n");
+ dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
+
+ error = sep_free_dma_tables_and_dcb(sep, false, false);
+
+ dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
+ return error;
+}
+
+/**
+ * sep_rar_prepare_output_msg_handler - prepare an output message
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ *
+ * This function will retrieve the RAR buffer physical addresses, type
+ * & size corresponding to the RAR handles provided in the buffers vector.
+ */
+static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
+ unsigned long arg)
+{
+ int error = 0;
+ /* Command args */
+ struct rar_hndl_to_bus_struct command_args;
+ struct RAR_buffer rar_buf;
+ /* Bus address */
+ dma_addr_t rar_bus = 0;
+ /* Holds the RAR address in the system memory offset */
+ u32 *rar_addr;
+
+ dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
+
+ /* Copy the data */
+ if (copy_from_user(&command_args, (void __user *)arg,
+ sizeof(command_args))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* Call to translation function only if user handle is not NULL */
+ if (command_args.rar_handle) {
+ memset(&rar_buf, 0, sizeof(rar_buf));
+ rar_buf.info.handle = (u32)command_args.rar_handle;
+
+ if (rar_handle_to_bus(&rar_buf, 1) != 1) {
+ dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
+ error = -EFAULT;
+ goto end_function;
+ }
+ rar_bus = rar_buf.bus_address;
+ }
+ dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
+
+ /* Set value in the SYSTEM MEMORY offset */
+ rar_addr = (u32 *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
+ /* Copy the physical address to the System Area for the SEP */
+ rar_addr[0] = SEP_RAR_VAL_TOKEN;
+ rar_addr[1] = rar_bus;
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
+ return error;
+}
+
+/**
+ * sep_realloc_ext_cache_handler - report location of extcache
+ * @sep: pointer to struct sep_device
+ * @arg: pointer to user parameters
+ *
+ * This function tells the SEP where the extapp is located
+ */
+static int sep_realloc_ext_cache_handler(struct sep_device *sep,
+ unsigned long arg)
+{
+ /* Holds the new ext cache address in the system memory offset */
+ u32 *system_addr;
+
+ /* Set value in the SYSTEM MEMORY offset */
+ system_addr = (u32 *)(sep->shared_addr +
+ SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
+
+ /* Copy the physical address to the System Area for the SEP */
+ system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
+ dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
+ system_addr[0]);
+ system_addr[1] = sep->extapp_bus;
+ dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
+ system_addr[1]);
+
+ return 0;
+}
+
+/**
+ * sep_ioctl - ioctl api
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Implement the ioctl methods available on the SEP device.
+ */
+static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int error = 0;
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "ioctl start\n");
+
+ dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
+
+ /* Make sure we own this device */
+ mutex_lock(&sep->sep_mutex);
+ if ((current->pid != sep->pid_doing_transaction) &&
+ (sep->pid_doing_transaction != 0)) {
+ dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
+ mutex_unlock(&sep->sep_mutex);
+ error = -EACCES;
+ goto end_function;
+ }
+
+ mutex_unlock(&sep->sep_mutex);
+
+ /* Check that the command is for SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ /* Lock to prevent the daemon to interfere with operation */
+ mutex_lock(&sep->ioctl_mutex);
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+ /* Send command to SEP */
+ error = sep_send_command_handler(sep);
+ break;
+ case SEP_IOCALLOCDATAPOLL:
+ /* Allocate data pool */
+ error = sep_allocate_data_pool_memory_handler(sep, arg);
+ break;
+ case SEP_IOCCREATESYMDMATABLE:
+ /* Create DMA table for synchronic operation */
+ error = sep_create_sync_dma_tables_handler(sep, arg);
+ break;
+ case SEP_IOCFREEDMATABLEDATA:
+ /* Free the pages */
+ error = sep_free_dma_table_data_handler(sep);
+ break;
+ case SEP_IOCSEPSTART:
+ /* Start command to SEP */
+ if (sep->pdev->revision == 0) /* Only for old chip */
+ error = sep_start_handler(sep);
+ else
+ error = -EPERM; /* Not permitted on new chip */
+ break;
+ case SEP_IOCSEPINIT:
+ /* Init command to SEP */
+ if (sep->pdev->revision == 0) /* Only for old chip */
+ error = sep_init_handler(sep, arg);
+ else
+ error = -EPERM; /* Not permitted on new chip */
+ break;
+ case SEP_IOCGETSTATICPOOLADDR:
+ /* Inform the SEP the bus address of the static pool */
+ error = sep_get_static_pool_addr_handler(sep);
+ break;
+ case SEP_IOCENDTRANSACTION:
+ error = sep_end_transaction_handler(sep);
+ break;
+ case SEP_IOCREALLOCEXTCACHE:
+ if (sep->pdev->revision == 0) /* Only for old chip */
+ error = sep_realloc_ext_cache_handler(sep, arg);
+ else
+ error = -EPERM; /* Not permitted on new chip */
+ break;
+ case SEP_IOCRARPREPAREMESSAGE:
+ error = sep_rar_prepare_output_msg_handler(sep, arg);
+ break;
+ case SEP_IOCPREPAREDCB:
+ error = sep_prepare_dcb_handler(sep, arg);
+ break;
+ case SEP_IOCFREEDCB:
+ error = sep_free_dcb_handler(sep);
+ break;
+ default:
+ dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
+ error = -ENOTTY;
+ break;
+ }
+ mutex_unlock(&sep->ioctl_mutex);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "ioctl end\n");
+ return error;
+}
+
+/**
+ * sep_singleton_ioctl - ioctl api for singleton interface
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Implement the additional ioctls for the singleton device
+ */
+static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+{
+ long error = 0;
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
+ dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
+
+ /* Check that the command is for the SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ /* Make sure we own this device */
+ mutex_lock(&sep->sep_mutex);
+ if ((current->pid != sep->pid_doing_transaction) &&
+ (sep->pid_doing_transaction != 0)) {
+ dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
+ mutex_unlock(&sep->sep_mutex);
+ error = -EACCES;
+ goto end_function;
+ }
+
+ mutex_unlock(&sep->sep_mutex);
+
+ switch (cmd) {
+ case SEP_IOCTLSETCALLERID:
+ mutex_lock(&sep->ioctl_mutex);
+ error = sep_set_caller_id_handler(sep, arg);
+ mutex_unlock(&sep->ioctl_mutex);
+ break;
+ default:
+ error = sep_ioctl(filp, cmd, arg);
+ break;
+ }
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
+ return error;
+}
+
+/**
+ * sep_request_daemon_ioctl - ioctl for daemon
+ * @filp: pointer to struct file
+ * @cmd: command
+ * @arg: pointer to argument structure
+ *
+ * Called by the request daemon to perform ioctls on the daemon device
+ */
+static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
+ unsigned long arg)
+{
+
+ long error;
+ struct sep_device *sep = filp->private_data;
+
+ dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
+ dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
+
+ /* Check that the command is for SEP device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ /* Only one process can access ioctl at any given time */
+ mutex_lock(&sep->ioctl_mutex);
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPRPLYCOMMAND:
+ /* Send reply command to SEP */
+ error = sep_req_daemon_send_reply_command_handler(sep);
+ break;
+ case SEP_IOCENDTRANSACTION:
+ /*
+ * End request daemon transaction; currently a no-op.
+ * It will be removed once the middleware API library
+ * is updated
+ */
+ error = 0;
+ break;
+ default:
+ dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
+ error = -ENOTTY;
+ }
+ mutex_unlock(&sep->ioctl_mutex);
+
+end_function:
+ dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
+ return error;
+
+}
+
+/**
+ * sep_inthandler - interrupt handler
+ * @irq: interrupt
+ * @dev_id: device id
+ */
+static irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ irqreturn_t int_error = IRQ_HANDLED;
+ unsigned long lck_flags;
+ u32 reg_val, reg_val2 = 0;
+ struct sep_device *sep = dev_id;
+
+ /* Read the IRR register to check if this is SEP interrupt */
+ reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
+ dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);
+
+ if (reg_val & (0x1 << 13)) {
+ /* Lock and update the counter of reply messages */
+ spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+ sep->reply_ct++;
+ spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+ dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
+ sep->send_ct, sep->reply_ct);
+
+ /* Is this printf or daemon request? */
+ reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ dev_dbg(&sep->pdev->dev,
+ "SEP Interrupt - reg2 is %08x\n", reg_val2);
+
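+ /*
+ * GPR2 layout as used here: bit 31 flags a daemon request,
+ * bit 30 flags a printf request, and the low 30 bits carry
+ * the SEP reply counter.
+ */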
+ if ((reg_val2 >> 30) & 0x1) {
+ dev_dbg(&sep->pdev->dev, "int: printf request\n");
+ wake_up(&sep->event_request_daemon);
+ } else if (reg_val2 >> 31) {
+ dev_dbg(&sep->pdev->dev, "int: daemon request\n");
+ wake_up(&sep->event_request_daemon);
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
+ wake_up(&sep->event);
+ }
+ } else {
+ dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
+ int_error = IRQ_NONE;
+ }
+ if (int_error == IRQ_HANDLED)
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
+
+ return int_error;
+}
+
+/**
+ * sep_reconfig_shared_area - reconfigure shared area
+ * @sep: pointer to struct sep_device
+ *
+ * Reconfig the shared area between HOST and SEP - needed in case
+ * the DX_CC_Init function was called before OS loading.
+ */
+static int sep_reconfig_shared_area(struct sep_device *sep)
+{
+ int ret_val;
+
+ /* used to limit the wait for the SEP */
+ unsigned long end_time;
+
+ dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");
+
+ /* Send the new SHARED MESSAGE AREA to the SEP */
+ dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
+ (unsigned long long)sep->shared_bus);
+
+ sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
+
+ /* Poll for SEP response */
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ end_time = jiffies + (WAIT_TIME * HZ);
+
+ while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
+ (ret_val != sep->shared_bus))
+ ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+ /* Check the return value (register) */
+ if (ret_val != sep->shared_bus) {
+ dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
+ dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
+ ret_val = -ENOMEM;
+ } else
+ ret_val = 0;
+
+ dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
+ return ret_val;
+}
+
+/* File operation for singleton SEP operations */
+static const struct file_operations singleton_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_singleton_ioctl,
+ .poll = sep_poll,
+ .open = sep_singleton_open,
+ .release = sep_singleton_release,
+ .mmap = sep_mmap,
+};
+
+/* File operation for daemon operations */
+static const struct file_operations daemon_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_request_daemon_ioctl,
+ .poll = sep_request_daemon_poll,
+ .open = sep_request_daemon_open,
+ .release = sep_request_daemon_release,
+ .mmap = sep_request_daemon_mmap,
+};
+
+/* The files operations structure of the driver */
+static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+};
+
+/**
+ * sep_register_driver_with_fs - register misc devices
+ * @sep: pointer to struct sep_device
+ *
+ * This function registers the driver with the file system
+ */
+static int sep_register_driver_with_fs(struct sep_device *sep)
+{
+ int ret_val;
+
+ sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_sep.name = SEP_DEV_NAME;
+ sep->miscdev_sep.fops = &sep_file_operations;
+
+ sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
+ sep->miscdev_singleton.fops = &singleton_file_operations;
+
+ sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
+ sep->miscdev_daemon.name = SEP_DEV_DAEMON;
+ sep->miscdev_daemon.fops = &daemon_file_operations;
+
+ ret_val = misc_register(&sep->miscdev_sep);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
+ ret_val);
+ return ret_val;
+ }
+
+ ret_val = misc_register(&sep->miscdev_singleton);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
+ ret_val);
+ misc_deregister(&sep->miscdev_sep);
+ return ret_val;
+ }
+
+ ret_val = misc_register(&sep->miscdev_daemon);
+ if (ret_val) {
+ dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
+ ret_val);
+ misc_deregister(&sep->miscdev_sep);
+ misc_deregister(&sep->miscdev_singleton);
+
+ return ret_val;
+ }
+ return ret_val;
+}
+
+
+/**
+ * sep_probe - probe a matching PCI device
+ * @pdev: pci_device
+ * @ent: pci_device_id
+ *
+ * Attempt to set up and configure a SEP device that has been
+ * discovered by the PCI layer.
+ */
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int error = 0;
+ struct sep_device *sep;
+
+ pr_debug("SEP pci probe starting\n");
+ if (sep_dev != NULL) {
+ dev_warn(&pdev->dev, "only one SEP supported.\n");
+ return -EBUSY;
+ }
+
+ /* Enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ dev_warn(&pdev->dev, "error enabling pci device\n");
+ goto end_function;
+ }
+
+ /* Allocate the sep_device structure for this device */
+ sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
+ if (sep_dev == NULL) {
+ dev_warn(&pdev->dev,
+ "can't kmalloc the sep_device structure\n");
+ error = -ENOMEM;
+ goto end_function_disable_device;
+ }
+
+ /*
+ * We're going to use another variable for actually
+ * working with the device; this way, if we have
+ * multiple devices in the future, it would be easier
+ * to make appropriate changes
+ */
+ sep = sep_dev;
+
+ sep->pdev = pci_dev_get(pdev);
+
+ init_waitqueue_head(&sep->event);
+ init_waitqueue_head(&sep->event_request_daemon);
+ spin_lock_init(&sep->snd_rply_lck);
+ mutex_init(&sep->sep_mutex);
+ mutex_init(&sep->ioctl_mutex);
+
+ dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
+ dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
+
+ /* Set up our register area */
+ sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
+ if (!sep->reg_physical_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register start\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
+ if (!sep->reg_physical_end) {
+ dev_warn(&sep->pdev->dev, "Error getting register end\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
+ (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
+ if (!sep->reg_addr) {
+ dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
+ error = -ENODEV;
+ goto end_function_free_sep_dev;
+ }
+
+ dev_dbg(&sep->pdev->dev,
+ "Register area start %llx end %llx virtual %p\n",
+ (unsigned long long)sep->reg_physical_addr,
+ (unsigned long long)sep->reg_physical_end,
+ sep->reg_addr);
+
+ /* Allocate the shared area */
+ sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+
+ if (sep_map_and_alloc_shared_area(sep)) {
+ error = -ENOMEM;
+ /* Allocation failed */
+ goto end_function_error;
+ }
+
+ sep->rar_size = FAKE_RAR_SIZE;
+ sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
+ sep->rar_size, &sep->rar_bus, GFP_KERNEL);
+ if (sep->rar_addr == NULL) {
+ dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
+ error = -ENOMEM;
+ goto end_function_deallocate_sep_shared_area;
+ }
+
+ dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
+ " size is %zx\n", sep->rar_addr,
+ (unsigned long long)sep->rar_bus,
+ sep->rar_size);
+
+ dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
+
+ /* Clear ICR register */
+ sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+ /* Set the IMR register - open only GPR 2 */
+ sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+ /* Read send/receive counters from SEP */
+ sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+ sep->reply_ct &= 0x3FFFFFFF;
+ sep->send_ct = sep->reply_ct;
+
+ dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
+ /* Get the interrupt line */
+ error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", sep);
+
+ if (error)
+ goto end_function_dealloc_rar;
+
+ /* The new chip requires a shared area reconfigure */
+ if (sep->pdev->revision == 4) { /* Only for new chip */
+ error = sep_reconfig_shared_area(sep);
+ if (error)
+ goto end_function_free_irq;
+ }
+ /* Finally magic up the device nodes */
+ /* Register driver with the fs */
+ error = sep_register_driver_with_fs(sep);
+ if (error == 0)
+ /* Success */
+ return 0;
+
+end_function_free_irq:
+ free_irq(pdev->irq, sep);
+
+end_function_dealloc_rar:
+ if (sep->rar_addr)
+ dma_free_coherent(&sep->pdev->dev, sep->rar_size,
+ sep->rar_addr, sep->rar_bus);
+ goto end_function;
+
+end_function_deallocate_sep_shared_area:
+ /* De-allocate shared area */
+ sep_unmap_and_free_shared_area(sep);
+
+end_function_error:
+ iounmap(sep->reg_addr);
+
+end_function_free_sep_dev:
+ pci_dev_put(sep_dev->pdev);
+ kfree(sep_dev);
+ sep_dev = NULL;
+
+end_function_disable_device:
+ pci_disable_device(pdev);
+
+end_function:
+ return error;
+}
+
+static void sep_remove(struct pci_dev *pdev)
+{
+ struct sep_device *sep = sep_dev;
+
+ /* Unregister from fs */
+ misc_deregister(&sep->miscdev_sep);
+ misc_deregister(&sep->miscdev_singleton);
+ misc_deregister(&sep->miscdev_daemon);
+
+ /* Free the irq */
+ free_irq(sep->pdev->irq, sep);
+
+ /* Free the shared area */
+ sep_unmap_and_free_shared_area(sep_dev);
+ iounmap((void *) sep_dev->reg_addr);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/* Field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe,
+ .remove = sep_remove
+};
+
+
+/**
+ * sep_init - init function
+ *
+ * Module load time. Register the PCI device driver.
+ */
+static int __init sep_init(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
+
+/**
+ * sep_exit - called to unload driver
+ *
+ * Drop the misc devices then remove and unmap the various resources
+ * that are not released by the driver remove method.
+ */
+static void __exit sep_exit(void)
+{
+ pci_unregister_driver(&sep_pci_driver);
+}
+
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
new file mode 100644
index 000000000000..fbbfa2396555
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -0,0 +1,297 @@
+/*
+ *
+ * sep_driver_api.h - Security Processor Driver api definitions
+ *
+ * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.09.14 Upgrade to Medfield
+ *
+ */
+
+#ifndef __SEP_DRIVER_API_H__
+#define __SEP_DRIVER_API_H__
+
+/* Type of request from device */
+#define SEP_DRIVER_SRC_REPLY 1
+#define SEP_DRIVER_SRC_REQ 2
+#define SEP_DRIVER_SRC_PRINTF 3
+
+
+/*-------------------------------------------
+ TYPEDEFS
+----------------------------------------------*/
+
+/*
+ * Note that several members of these structures are only here
+ * for compatibility with the middleware; they are not used
+ * by this driver.
+ * All user space buffer addresses are set to aligned u64
+ * in order to ensure compatibility with 64 bit systems
+ */
+
+/*
+ init command struct; this will go away when SCU does init
+*/
+struct init_struct {
+ /* address that SEP can access for message */
+ aligned_u64 message_addr;
+
+ /* message size */
+ u32 message_size_in_words;
+
+ /* offset of the init message in the sep sram */
+ u32 sep_sram_addr;
+
+ /* -not used- resident size in bytes*/
+ u32 unused_resident_size_in_bytes;
+
+ /* -not used- cache size in bytes*/
+ u32 unused_cache_size_in_bytes;
+
+ /* -not used- ext cache current address */
+ aligned_u64 unused_extcache_addr;
+
+ /* -not used- ext cache size in bytes*/
+ u32 unused_extcache_size_in_bytes;
+};
+
+struct realloc_ext_struct {
+ /* -not used- current external cache address */
+ aligned_u64 unused_ext_cache_addr;
+
+ /* -not used- external cache size in bytes*/
+ u32 unused_ext_cache_size_in_bytes;
+};
+
+struct alloc_struct {
+ /* offset from start of shared pool area */
+ u32 offset;
+ /* number of bytes to allocate */
+ u32 num_bytes;
+};
+
+/*
+ Note that all app addresses are cast as u32; the sep
+ middleware sends them as fixed 32 bit words
+*/
+struct bld_syn_tab_struct {
+ /* address value of the data in (user space addr) */
+ aligned_u64 app_in_address;
+
+ /* size of data in */
+ u32 data_in_size;
+
+ /* address of the data out (user space addr) */
+ aligned_u64 app_out_address;
+
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 block_size;
+
+ /* -not used- distinct user/kernel layout */
+ bool isKernelVirtualAddress;
+
+};
+
+/* command struct for getting caller id value and address */
+struct caller_id_struct {
+ /* pid of the process */
+ u32 pid;
+ /* virtual address of the caller id hash */
+ aligned_u64 callerIdAddress;
+ /* caller id hash size in bytes */
+ u32 callerIdSizeInBytes;
+};
+
+/*
+ structure that represents DCB
+*/
+struct sep_dcblock {
+ /* physical address of the first input mlli */
+ u32 input_mlli_address;
+ /* num of entries in the first input mlli */
+ u32 input_mlli_num_entries;
+ /* size of data in the first input mlli */
+ u32 input_mlli_data_size;
+ /* physical address of the first output mlli */
+ u32 output_mlli_address;
+ /* num of entries in the first output mlli */
+ u32 output_mlli_num_entries;
+ /* size of data in the first output mlli */
+ u32 output_mlli_data_size;
+ /* pointer to the output virtual tail */
+ u32 out_vr_tail_pt;
+ /* size of tail data */
+ u32 tail_data_size;
+ /* input tail data array */
+ u8 tail_data[64];
+};
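+
+/*
+ * The DCBs themselves live in the shared area starting at
+ * SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES; at most
+ * SEP_MAX_NUM_SYNC_DMA_OPS of them can be outstanding at once.
+ */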
+
+struct sep_caller_id_entry {
+ int pid;
+ unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
+};
+
+/*
+ command structure for building dcb block (currently for ext app only
+*/
+struct build_dcb_struct {
+ /* address value of the data in */
+ aligned_u64 app_in_address;
+ /* size of data in */
+ u32 data_in_size;
+ /* address of the data out */
+ aligned_u64 app_out_address;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 block_size;
+ /* the size of the block of the operation - if needed,
+ every table will be modulo this parameter */
+ u32 tail_block_size;
+};
+
+/**
+ * struct sep_dma_map - DMA mapping information
+ *
+ * Contains all the information needed for mapping the user pages
+ * or kernel buffers for dma operations
+ */
+struct sep_dma_map {
+ /* mapped dma address */
+ dma_addr_t dma_addr;
+ /* size of the mapped data */
+ size_t size;
+};
+
+struct sep_dma_resource {
+ /* array of pointers to the pages that represent
+ input data for the synchronic DMA action */
+ struct page **in_page_array;
+
+ /* array of pointers to the pages that represent out
+ data for the synchronic DMA action */
+ struct page **out_page_array;
+
+ /* number of pages in the sep_in_page_array */
+ u32 in_num_pages;
+
+ /* number of pages in the sep_out_page_array */
+ u32 out_num_pages;
+
+ /* map array of the input data */
+ struct sep_dma_map *in_map_array;
+
+ /* map array of the output data */
+ struct sep_dma_map *out_map_array;
+
+ /* number of entries of the input map array */
+ u32 in_map_num_entries;
+
+ /* number of entries of the output map array */
+ u32 out_map_num_entries;
+};
+
+
+/* command struct for translating rar handle to bus address
+ and setting it at predefined location */
+struct rar_hndl_to_bus_struct {
+
+ /* rar handle */
+ aligned_u64 rar_handle;
+};
+
+/*
+ structure that represent one entry in the DMA LLI table
+*/
+struct sep_lli_entry {
+ /* physical address */
+ u32 bus_address;
+
+ /* block size */
+ u32 block_size;
+};
+
+/*----------------------------------------------------------------
+ IOCTL command defines
+ -----------------------------------------------------------------*/
+
+/* magic number 1 of the sep IOCTL command */
+#define SEP_IOC_MAGIC_NUMBER 's'
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPCOMMAND \
+ _IO(SEP_IOC_MAGIC_NUMBER, 0)
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPRPLYCOMMAND \
+ _IO(SEP_IOC_MAGIC_NUMBER, 1)
+
+/* allocate memory in data pool */
+#define SEP_IOCALLOCDATAPOLL \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
+
+/* create sym dma lli tables */
+#define SEP_IOCCREATESYMDMATABLE \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 5, struct bld_syn_tab_struct)
+
+/* free dynamic data allocated during table creation */
+#define SEP_IOCFREEDMATABLEDATA \
+ _IO(SEP_IOC_MAGIC_NUMBER, 7)
+
+/* get the static pool area addresses (physical and virtual) */
+#define SEP_IOCGETSTATICPOOLADDR \
+ _IO(SEP_IOC_MAGIC_NUMBER, 8)
+
+/* start sep command */
+#define SEP_IOCSEPSTART \
+ _IO(SEP_IOC_MAGIC_NUMBER, 12)
+
+/* init sep command */
+#define SEP_IOCSEPINIT \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 13, struct init_struct)
+
+/* end transaction command */
+#define SEP_IOCENDTRANSACTION \
+ _IO(SEP_IOC_MAGIC_NUMBER, 15)
+
+/* reallocate external app; unused structure still needed for
+ * compatibility with middleware */
+#define SEP_IOCREALLOCEXTCACHE \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 18, struct realloc_ext_struct)
+
+#define SEP_IOCRARPREPAREMESSAGE \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
+
+#define SEP_IOCTLSETCALLERID \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
+
+#define SEP_IOCPREPAREDCB \
+ _IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
+
+#define SEP_IOCFREEDCB \
+ _IO(SEP_IOC_MAGIC_NUMBER, 36)
+
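+/*
+ * Illustrative user space flow (a sketch only; the real sequence is
+ * driven by the middleware, and the device node name assumes the misc
+ * device registered as SEP_DEV_NAME):
+ *
+ *	fd = open("/dev/sep_sec_driver", O_RDWR);
+ *	mmap(NULL, mapped_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	ioctl(fd, SEP_IOCPREPAREDCB, &build_dcb_args);
+ *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
+ *	poll() for the SEP reply, then:
+ *	ioctl(fd, SEP_IOCFREEDCB, 0);
+ *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
+ */
+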
+#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
new file mode 100644
index 000000000000..b18625d2f7f4
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -0,0 +1,239 @@
+/*
+ *
+ * sep_driver_config.h - Security Processor Driver configuration
+ *
+ * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.06.26 Upgrade to Medfield
+ *
+ */
+
+#ifndef __SEP_DRIVER_CONFIG_H__
+#define __SEP_DRIVER_CONFIG_H__
+
+
+/*--------------------------------------
+ DRIVER CONFIGURATION FLAGS
+ -------------------------------------*/
+
+/* if flag is on , then the driver is running in polling and
+ not interrupt mode */
+#define SEP_DRIVER_POLLING_MODE 0
+
+/* flag which defines if the shared area address should be
+ reconfiged (send to SEP anew) during init of the driver */
+#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
+
+/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
+#define SEP_DRIVER_ARM_DEBUG_MODE 0
+
+/*-------------------------------------------
+ INTERNAL DATA CONFIGURATION
+ -------------------------------------------*/
+
+/* flag for the input array */
+#define SEP_DRIVER_IN_FLAG 0
+
+/* flag for output array */
+#define SEP_DRIVER_OUT_FLAG 1
+
+/* maximum number of entries in one LLI table */
+#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 31
+
+/* minimum data size of the MLLI table */
+#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE 16
+
+/* flag that signifies that the lock is
+currently held by the process (struct file) */
+#define SEP_DRIVER_OWN_LOCK_FLAG 1
+
+/* flag that signifies that the lock is currently NOT
+held by the process (struct file) */
+#define SEP_DRIVER_DISOWN_LOCK_FLAG 0
+
+/* indicates whether driver has mapped/unmapped shared area */
+#define SEP_REQUEST_DAEMON_MAPPED 1
+#define SEP_REQUEST_DAEMON_UNMAPPED 0
+
+/*--------------------------------------------------------
+ SHARED AREA memory total size is 36K
+ it is divided as follows:
+
+ SHARED_MESSAGE_AREA 8K }
+ }
+ STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
+ }
+ DATA_POOL_AREA 12K }
+
+ SYNCHRONIC_DMA_TABLES_AREA 5K
+
+ placeholder until driver changes
+ FLOW_DMA_TABLES_AREA 4K
+
+ SYSTEM_MEMORY_AREA 3K
+
+ SYSTEM_MEMORY total size is 3K
+ it is divided as follows:
+
+ TIME_MEMORY_AREA 8B
+-----------------------------------------------------------*/
+
+#define SEP_DEV_NAME "sep_sec_driver"
+#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
+#define SEP_DEV_DAEMON "sep_req_daemon_driver"
+
+
+/*
+ the maximum length of the message - the rest of the message shared
+ area will be dedicated to the dma lli tables
+*/
+#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
+
+/* the size of the message shared area in bytes */
+#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
+
+/* the size of the data pool static area in bytes */
+#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
+
+/* the size of the data pool shared area in bytes */
+#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (16 * 1024)
+
+/* the size of the synchronic DMA tables area in bytes */
+#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES (1024 * 5)
+
+/* Placeholder until driver changes */
+#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
+
+/* system data (time, caller id, etc.) pool */
+#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES (1024 * 3)
+
+/* the size in bytes of the time memory */
+#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES 8
+
+/* the size in bytes of the RAR parameters memory */
+#define SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES 8
+
+/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
+ DATA POOL areas. area must be a multiple of 4K */
+#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 28)
+
+/*-----------------------------------------------
+ offsets of the areas starting from the shared area start address
+*/
+
+/* message area offset */
+#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
+
+/* static pool area offset */
+#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
+ (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
+
+/* data pool area offset */
+#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
+ (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
+
+/* synchronic dma tables area offset */
+#define SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES \
+ (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
+
+/* system memory offset in bytes */
+#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
+ (SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + \
+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)
+
+/* offset of the time area */
+#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
+ (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
+
+/* offset of the RAR area */
+#define SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES \
+ (SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES + \
+ SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES)
+
+/* offset of the caller id area */
+#define SEP_CALLER_ID_OFFSET_BYTES \
+ (SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES + \
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES)
+
+/* offset of the DCB area */
+#define SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES \
+ (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES + \
+ 0x400)
+
+/* offset of the ext cache area */
+#define SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES \
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES
+
+/* offset of the allocation data pointer area */
+#define SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES \
+ (SEP_CALLER_ID_OFFSET_BYTES + \
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES)
+
+/* the token that defines the start of time address */
+#define SEP_TIME_VAL_TOKEN 0x12345678
+
+#define FAKE_RAR_SIZE (1024*1024) /* used only for mfld */
+/* DEBUG LEVEL MASKS */
+
+/* size of the caller id hash (sha2) */
+#define SEP_CALLER_ID_HASH_SIZE_IN_BYTES 32
+
+/* size of the caller id hash (sha2) in 32 bit words */
+#define SEP_CALLER_ID_HASH_SIZE_IN_WORDS 8
+
+/* maximum number of entries in the caller id table */
+#define SEP_CALLER_ID_TABLE_NUM_ENTRIES 20
+
+/* maximum number of symmetric operations (that require DMA resources)
+ per one message */
+#define SEP_MAX_NUM_SYNC_DMA_OPS 16
+
+/* the token that defines the start of the RAR address */
+#define SEP_RAR_VAL_TOKEN 0xABABABAB
+
+/* ioctl error that should be returned when trying
+ to realloc the cache/resident a second time */
+#define SEP_ALREADY_INITIALIZED_ERR 12
+
+/* bit that locks access to the shared area */
+#define SEP_MMAP_LOCK_BIT 0
+
+/* bit that locks access to the poll - after send_command */
+#define SEP_SEND_MSG_LOCK_BIT 1
+
+/* the token that defines the static pool address */
+#define SEP_STATIC_POOL_VAL_TOKEN 0xABBAABBA
+
+/* the token that defines the data pool pointers address */
+#define SEP_DATA_POOL_POINTERS_VAL_TOKEN 0xEDDEEDDE
+
+/* the token that defines the ext cache address */
+#define SEP_EXT_CACHE_ADDR_VAL_TOKEN 0xBABABABA
+
+/* Time limit for SEP to finish */
+#define WAIT_TIME 10
+
+#endif /* __SEP_DRIVER_CONFIG_H__ */
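Since the offsets above are pure macro arithmetic, they can be checked with a trivial user-space program; this sketch assumes the header can be included on its own (it only references its own macros).

#include <stdio.h>
#include "sep_driver_config.h"

int main(void)
{
	/* 8K message + 4K static pool + 16K data pool = 28K mapped area */
	printf("static pool offset: %d\n", SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES);		/* 8192 */
	printf("data pool offset:   %d\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);		/* 12288 */
	printf("dma tables offset:  %d\n", SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES);		/* 28672 */
	printf("system data offset: %d\n", SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES);	/* 33792 */
	printf("mmap area size:     %d\n", SEP_DRIVER_MMMAP_AREA_SIZE);				/* 28672 */
	return 0;
}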
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
new file mode 100644
index 000000000000..300f90963de3
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -0,0 +1,233 @@
+/*
+ *
+ * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
+ *
+ * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ * Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ * Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.09.20 Upgrade to Medfield
+ *
+ */
+
+#ifndef SEP_DRIVER_HW_DEFS__H
+#define SEP_DRIVER_HW_DEFS__H
+
+/* PCI ID's */
+#define MFLD_PCI_DEVICE_ID 0x0826
+
+/*----------------------- */
+/* HW Registers Defines.  */
+/*----------------------- */
+
+
+/* cf registers */
+#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
+#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
+#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
+#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
+#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
+#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
+#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
+#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
+#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
+#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
+#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
+#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
+#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
+#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
+#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
+#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
+#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
+#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
+#define HW_R3B_REG_ADDR 0x00C0UL
+#define HW_R4B_REG_ADDR 0x0100UL
+#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
+#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
+#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
+#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
+#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
+#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
+#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
+#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
+#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
+#define HW_CSA_REG_ADDR 0x0140UL
+#define HW_SINB_REG_ADDR 0x0180UL
+#define HW_SOUTB_REG_ADDR 0x0184UL
+#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
+#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
+#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
+#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
+#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
+#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
+#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
+#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
+#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
+#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
+#define HW_PKI_CLR_REG_ADDR 0x01E8UL
+#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
+#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
+#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
+#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
+#define HW_DES_KEY_0_REG_ADDR 0x0208UL
+#define HW_DES_KEY_1_REG_ADDR 0x020CUL
+#define HW_DES_KEY_2_REG_ADDR 0x0210UL
+#define HW_DES_KEY_3_REG_ADDR 0x0214UL
+#define HW_DES_KEY_4_REG_ADDR 0x0218UL
+#define HW_DES_KEY_5_REG_ADDR 0x021CUL
+#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
+#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
+#define HW_DES_IV_0_REG_ADDR 0x0228UL
+#define HW_DES_IV_1_REG_ADDR 0x022CUL
+#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
+#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
+#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
+#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
+#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
+#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
+#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
+#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
+#define HW_AES_KEY_0_REG_ADDR 0x0400UL
+#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
+#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
+#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
+#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
+#define HW_AES_IV_0_REG_ADDR 0x0440UL
+#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
+#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
+#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
+#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
+#define HW_AES_CTR1_REG_ADDR 0x0460UL
+#define HW_AES_SK_REG_ADDR 0x0478UL
+#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
+#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
+#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
+#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
+#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
+#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
+#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
+#define HW_HASH_H0_REG_ADDR 0x0640UL
+#define HW_HASH_H1_REG_ADDR 0x0644UL
+#define HW_HASH_H2_REG_ADDR 0x0648UL
+#define HW_HASH_H3_REG_ADDR 0x064CUL
+#define HW_HASH_H4_REG_ADDR 0x0650UL
+#define HW_HASH_H5_REG_ADDR 0x0654UL
+#define HW_HASH_H6_REG_ADDR 0x0658UL
+#define HW_HASH_H7_REG_ADDR 0x065CUL
+#define HW_HASH_H8_REG_ADDR 0x0660UL
+#define HW_HASH_H9_REG_ADDR 0x0664UL
+#define HW_HASH_H10_REG_ADDR 0x0668UL
+#define HW_HASH_H11_REG_ADDR 0x066CUL
+#define HW_HASH_H12_REG_ADDR 0x0670UL
+#define HW_HASH_H13_REG_ADDR 0x0674UL
+#define HW_HASH_H14_REG_ADDR 0x0678UL
+#define HW_HASH_H15_REG_ADDR 0x067CUL
+#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
+#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
+#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
+#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
+#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
+#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
+#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
+#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
+#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
+#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
+#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
+#define HW_HASH_DATA_REG_ADDR 0x07ECUL
+#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
+#define HW_DRNG_VALID_REG_ADDR 0x0804UL
+#define HW_DRNG_DATA_REG_ADDR 0x0808UL
+#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
+#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
+#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
+#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
+#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
+#define HW_CLK_STATUS_REG_ADDR 0x0824UL
+#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
+#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
+#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
+#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
+#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
+#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
+#define HW_AES_BUSY_REG_ADDR 0x0914UL
+#define HW_DES_BUSY_REG_ADDR 0x0918UL
+#define HW_HASH_BUSY_REG_ADDR 0x091CUL
+#define HW_CONTENT_REG_ADDR 0x0924UL
+#define HW_VERSION_REG_ADDR 0x0928UL
+#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
+#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
+#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
+#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
+#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
+#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
+#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
+#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
+#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
+#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
+#define HW_OLD_DATA_REG_ADDR 0x0C48UL
+#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
+#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
+#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
+#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
+#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
+#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
+#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
+#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
+#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
+#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
+#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
+#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
+#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
+#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
+#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
+#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
+#define HW_HOST_IRR_REG_ADDR 0x0A00UL
+#define HW_HOST_IMR_REG_ADDR 0x0A04UL
+#define HW_HOST_ICR_REG_ADDR 0x0A08UL
+#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
+#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
+#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
+#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
+#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
+#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
+#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
+#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
+#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
+#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
+#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
+#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
+#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
+#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
+#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
+#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
+#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
+#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
+#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
+#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
+#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
+#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
+#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
+#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
+#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
+
+#endif /* SEP_DRIVER_HW_DEFS__H */
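A minimal sketch of how these register offsets are typically consumed once the PCI BAR has been ioremap()ed; the sep_dev structure and the helper names are illustrative, not taken from the driver.

#include <linux/io.h>
#include <linux/types.h>

struct sep_dev {
	void __iomem *reg_base;		/* ioremap()ed register BAR */
};

static inline u32 sep_read_reg(struct sep_dev *dev, unsigned long offset)
{
	/* e.g. sep_read_reg(dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR) */
	return readl(dev->reg_base + offset);
}

static inline void sep_write_reg(struct sep_dev *dev, unsigned long offset, u32 val)
{
	writel(val, dev->reg_base + offset);
}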
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 24f47d6388f4..d007e4a12c14 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -996,7 +996,7 @@ failed_free:
/* Jason (08/11/2009) PCI_DRV wrapper essential structs */
-static const struct pci_device_id smtcfb_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
{0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -1044,9 +1044,9 @@ static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg)
/* when doing suspend, call fb apis and pci apis */
if (msg.event == PM_EVENT_SUSPEND) {
- acquire_console_sem();
+ console_lock();
fb_set_suspend(&sfb->fb, 1);
- release_console_sem();
+ console_unlock();
retv = pci_save_state(pdev);
pci_disable_device(pdev);
retv = pci_choose_state(pdev, msg);
@@ -1071,7 +1071,7 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
/* when resuming, restore pci data and fb cursor */
if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
retv = pci_set_power_state(pdev, PCI_D0);
- retv = pci_restore_state(pdev);
+ pci_restore_state(pdev);
if (pci_enable_device(pdev))
return -1;
pci_set_master(pdev);
@@ -1105,9 +1105,9 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
smtcfb_setmode(sfb);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(&sfb->fb, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
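The conversion above follows the standard framebuffer suspend pattern: take the console lock around fb_set_suspend() so fbcon cannot touch the device while its state changes. A minimal sketch, with an illustrative function name:

#include <linux/console.h>
#include <linux/fb.h>

static void example_fb_mark_suspended(struct fb_info *info, int state)
{
	console_lock();
	fb_set_suspend(info, state);	/* state: 1 = suspended, 0 = resumed */
	console_unlock();
}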
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index dd612f50749f..f204d33910ec 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@ static int smb_compare_dentry(const struct dentry *,
unsigned int, const char *, const struct qstr *);
static int smb_delete_dentry(const struct dentry *);
-static const struct dentry_operations smbfs_dentry_operations =
+const struct dentry_operations smbfs_dentry_operations =
{
.d_revalidate = smb_lookup_validate,
.d_hash = smb_hash_dentry,
@@ -291,7 +291,7 @@ static const struct dentry_operations smbfs_dentry_operations =
.d_delete = smb_delete_dentry,
};
-static const struct dentry_operations smbfs_dentry_operations_case =
+const struct dentry_operations smbfs_dentry_operations_case =
{
.d_revalidate = smb_lookup_validate,
.d_delete = smb_delete_dentry,
@@ -403,12 +403,6 @@ smb_delete_dentry(const struct dentry *dentry)
void
smb_new_dentry(struct dentry *dentry)
{
- struct smb_sb_info *server = server_from_dentry(dentry);
-
- if (server->mnt->flags & SMB_MOUNT_CASE)
- d_set_d_op(dentry, &smbfs_dentry_operations_case);
- else
- d_set_d_op(dentry, &smbfs_dentry_operations);
dentry->d_time = jiffies;
}
@@ -440,7 +434,6 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
struct smb_fattr finfo;
struct inode *inode;
int error;
- struct smb_sb_info *server;
error = -ENAMETOOLONG;
if (dentry->d_name.len > SMB_MAXNAMELEN)
@@ -468,12 +461,6 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
inode = smb_iget(dir->i_sb, &finfo);
if (inode) {
add_entry:
- server = server_from_dentry(dentry);
- if (server->mnt->flags & SMB_MOUNT_CASE)
- d_set_d_op(dentry, &smbfs_dentry_operations_case);
- else
- d_set_d_op(dentry, &smbfs_dentry_operations);
-
d_add(dentry, inode);
smb_renew_times(dentry);
error = 0;
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 244319dc9702..0778589d9e9e 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -614,6 +614,10 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
printk(KERN_ERR "smbfs: failed to start smbiod\n");
goto out_no_smbiod;
}
+ if (server->mnt->flags & SMB_MOUNT_CASE)
+ sb->s_d_op = &smbfs_dentry_operations_case;
+ else
+ sb->s_d_op = &smbfs_dentry_operations;
/*
* Keep the super block locked while we get the root inode.
diff --git a/drivers/staging/smbfs/proto.h b/drivers/staging/smbfs/proto.h
index 05939a6f43e6..3883cb16a3f6 100644
--- a/drivers/staging/smbfs/proto.h
+++ b/drivers/staging/smbfs/proto.h
@@ -38,6 +38,8 @@ extern void smb_install_null_ops(struct smb_ops *ops);
extern const struct file_operations smb_dir_operations;
extern const struct inode_operations smb_dir_inode_operations;
extern const struct inode_operations smb_dir_inode_operations_unix;
+extern const struct dentry_operations smbfs_dentry_operations_case;
+extern const struct dentry_operations smbfs_dentry_operations;
extern void smb_new_dentry(struct dentry *dentry);
extern void smb_renew_times(struct dentry *dentry);
/* cache.c */
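The dir.c/inode.c hunks above switch smbfs from calling d_set_d_op() on every new dentry to installing the operations once on the super block, so d_alloc() picks them up automatically. A minimal sketch of that pattern, with hypothetical examplefs_* names:

#include <linux/fs.h>
#include <linux/dcache.h>

static int examplefs_delete_dentry(const struct dentry *dentry)
{
	return 1;	/* drop unused dentries instead of caching them */
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_delete = examplefs_delete_dentry,
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_d_op = &examplefs_dentry_ops;	/* inherited by every dentry via d_alloc() */
	/* root inode and s_op setup would follow here */
	return 0;
}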
diff --git a/drivers/staging/solo6x10/Kconfig b/drivers/staging/solo6x10/Kconfig
index d96398c701f8..2cf77c940860 100644
--- a/drivers/staging/solo6x10/Kconfig
+++ b/drivers/staging/solo6x10/Kconfig
@@ -1,7 +1,7 @@
config SOLO6X10
tristate "Softlogic 6x10 MPEG codec cards"
- depends on PCI && VIDEO_DEV && SND
- select VIDEOBUF_DMA_CONTIG
+ depends on PCI && VIDEO_DEV && SND && I2C
+ select VIDEOBUF_DMA_SG
---help---
This driver supports the Softlogic based MPEG-4 and h.264 codec
codec cards.
diff --git a/drivers/staging/solo6x10/TODO b/drivers/staging/solo6x10/TODO
index e6a2ee226743..7e6c4fa130df 100644
--- a/drivers/staging/solo6x10/TODO
+++ b/drivers/staging/solo6x10/TODO
@@ -1,7 +1,5 @@
TODO (staging => main):
- * checkpatch.pl (haven't run it yet)
- * Lindent (should be clean, but check)
* Motion detection flags need to be moved to v4l2
* Some private CIDs need to be moved to v4l2
@@ -21,8 +19,6 @@ TODO (general):
- implement playback via external sound jack
- implement loopback of external sound jack with incoming audio?
- implement pause/resume
- - check into jacking sound from tx28xx chips directly (to avoid
- g.723/8khz limitations)
Plase send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc Ben Collins
<bcollins@bluecherry.net>
diff --git a/drivers/staging/solo6x10/solo6010-core.c b/drivers/staging/solo6x10/solo6010-core.c
index 4a051cde55da..c433136f972c 100644
--- a/drivers/staging/solo6x10/solo6010-core.c
+++ b/drivers/staging/solo6x10/solo6010-core.c
@@ -136,6 +136,7 @@ static int __devinit solo6010_pci_probe(struct pci_dev *pdev,
int ret;
int sdram;
u8 chip_id;
+
solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL);
if (solo_dev == NULL)
return -ENOMEM;
@@ -163,21 +164,21 @@ static int __devinit solo6010_pci_probe(struct pci_dev *pdev,
chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
SOLO_CHIP_ID_MASK;
switch (chip_id) {
- case 7:
- solo_dev->nr_chans = 16;
- solo_dev->nr_ext = 5;
- break;
- case 6:
- solo_dev->nr_chans = 8;
- solo_dev->nr_ext = 2;
- break;
- default:
- dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
- "defaulting to 4 channels\n",
- chip_id);
- case 5:
- solo_dev->nr_chans = 4;
- solo_dev->nr_ext = 1;
+ case 7:
+ solo_dev->nr_chans = 16;
+ solo_dev->nr_ext = 5;
+ break;
+ case 6:
+ solo_dev->nr_chans = 8;
+ solo_dev->nr_ext = 2;
+ break;
+ default:
+ dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
+ "defaulting to 4 channels\n",
+ chip_id);
+ case 5:
+ solo_dev->nr_chans = 4;
+ solo_dev->nr_ext = 1;
}
/* Disable all interrupts to start */
@@ -261,13 +262,18 @@ static void __devexit solo6010_pci_remove(struct pci_dev *pdev)
}
static struct pci_device_id solo6010_id_table[] = {
+ /* 6010 based cards */
{PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010)},
{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_4)},
{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_9)},
{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_16)},
- {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_16)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_16)},
+ /* 6110 based cards */
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_16)},
{0,}
};
diff --git a/drivers/staging/solo6x10/solo6010-disp.c b/drivers/staging/solo6x10/solo6010-disp.c
index 555f024f72e7..f866f8438175 100644
--- a/drivers/staging/solo6x10/solo6010-disp.c
+++ b/drivers/staging/solo6x10/solo6010-disp.c
@@ -198,12 +198,12 @@ static void solo_motion_config(struct solo6010_dev *solo_dev)
}
/* Default motion settings */
- solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
+ solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
(SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
solo_reg_write(solo_dev, SOLO_VI_MOT_CTRL,
SOLO_VI_MOTION_FRAME_COUNT(3) |
SOLO_VI_MOTION_SAMPLE_LENGTH(solo_dev->video_hsize / 16)
- | //SOLO_VI_MOTION_INTR_START_STOP |
+ | /* SOLO_VI_MOTION_INTR_START_STOP | */
SOLO_VI_MOTION_SAMPLE_COUNT(10));
solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
@@ -264,7 +264,7 @@ void solo_disp_exit(struct solo6010_dev *solo_dev)
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(0), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(0), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(0), 0);
-
+
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(1), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(1), 0);
solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(1), 0);
diff --git a/drivers/staging/solo6x10/solo6010-enc.c b/drivers/staging/solo6x10/solo6010-enc.c
index a6cf0a8a3f20..481a49277f77 100644
--- a/drivers/staging/solo6x10/solo6010-enc.c
+++ b/drivers/staging/solo6x10/solo6010-enc.c
@@ -22,7 +22,7 @@
#include "solo6010.h"
#include "solo6010-osd-font.h"
-#define CAPTURE_MAX_BANDWIDTH 32 // D1 4channel (D1 == 4)
+#define CAPTURE_MAX_BANDWIDTH 32 /* D1 4channel (D1 == 4) */
#define OSG_BUFFER_SIZE 1024
#define VI_PROG_HSIZE (1280 - 16)
@@ -145,8 +145,8 @@ int solo_osd_print(struct solo_enc_dev *solo_enc)
solo_p2m_dma(solo_dev, 0, 1, buf, SOLO_EOSD_EXT_ADDR(solo_dev) +
(solo_enc->ch * SOLO_EOSD_EXT_SIZE), SOLO_EOSD_EXT_SIZE);
- reg |= (1 << solo_enc->ch);
- solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
+ reg |= (1 << solo_enc->ch);
+ solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
kfree(buf);
diff --git a/drivers/staging/solo6x10/solo6010-g723.c b/drivers/staging/solo6x10/solo6010-g723.c
index 82fbcb845878..254b46ab20c5 100644
--- a/drivers/staging/solo6x10/solo6010-g723.c
+++ b/drivers/staging/solo6x10/solo6010-g723.c
@@ -47,7 +47,7 @@
* is broken down to 20 * 48 byte regions (one for each channel possible)
* with the rest of the page being dummy data. */
#define MAX_BUFFER (G723_PERIOD_BYTES * PERIODS_MAX)
-#define IRQ_PAGES 4 // 0 - 4
+#define IRQ_PAGES 4 /* 0 - 4 */
#define PERIODS_MIN (1 << IRQ_PAGES)
#define PERIODS_MAX G723_FDMA_PAGES
@@ -158,7 +158,7 @@ static int snd_solo_pcm_close(struct snd_pcm_substream *ss)
snd_pcm_substream_chip(ss) = solo_pcm->solo_dev;
kfree(solo_pcm);
- return 0;
+ return 0;
}
static int snd_solo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
@@ -197,7 +197,7 @@ static int snd_solo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
static int snd_solo_pcm_prepare(struct snd_pcm_substream *ss)
{
- return 0;
+ return 0;
}
static snd_pcm_uframes_t snd_solo_pcm_pointer(struct snd_pcm_substream *ss)
@@ -271,7 +271,7 @@ static int snd_solo_capture_volume_get(struct snd_kcontrol *kcontrol,
value->value.integer.value[0] = tw28_get_audio_gain(solo_dev, ch);
- return 0;
+ return 0;
}
static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol,
@@ -279,15 +279,15 @@ static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol,
{
struct solo6010_dev *solo_dev = snd_kcontrol_chip(kcontrol);
u8 ch = value->id.numid - 1;
- u8 old_val;
+ u8 old_val;
- old_val = tw28_get_audio_gain(solo_dev, ch);
+ old_val = tw28_get_audio_gain(solo_dev, ch);
if (old_val == value->value.integer.value[0])
return 0;
tw28_set_audio_gain(solo_dev, ch, value->value.integer.value[0]);
- return 1;
+ return 1;
}
static struct snd_kcontrol_new snd_solo_capture_volume = {
@@ -368,14 +368,16 @@ int solo_g723_init(struct solo6010_dev *solo_dev)
strcpy(card->mixername, "SOLO-6010");
kctl = snd_solo_capture_volume;
kctl.count = solo_dev->nr_chans;
- ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
+ ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
if (ret < 0)
return ret;
- if ((ret = solo_snd_pcm_init(solo_dev)) < 0)
+ ret = solo_snd_pcm_init(solo_dev);
+ if (ret < 0)
goto snd_error;
- if ((ret = snd_card_register(card)) < 0)
+ ret = snd_card_register(card);
+ if (ret < 0)
goto snd_error;
solo_g723_config(solo_dev);
diff --git a/drivers/staging/solo6x10/solo6010-gpio.c b/drivers/staging/solo6x10/solo6010-gpio.c
index 46f7a71edabc..8869b88dc307 100644
--- a/drivers/staging/solo6x10/solo6010-gpio.c
+++ b/drivers/staging/solo6x10/solo6010-gpio.c
@@ -92,8 +92,8 @@ static void solo_gpio_config(struct solo6010_dev *solo_dev)
int solo_gpio_init(struct solo6010_dev *solo_dev)
{
- solo_gpio_config(solo_dev);
- return 0;
+ solo_gpio_config(solo_dev);
+ return 0;
}
void solo_gpio_exit(struct solo6010_dev *solo_dev)
diff --git a/drivers/staging/solo6x10/solo6010-i2c.c b/drivers/staging/solo6x10/solo6010-i2c.c
index cadd5120d575..60b69cd0d09d 100644
--- a/drivers/staging/solo6x10/solo6010-i2c.c
+++ b/drivers/staging/solo6x10/solo6010-i2c.c
@@ -46,7 +46,7 @@ u8 solo_i2c_readbyte(struct solo6010_dev *solo_dev, int id, u8 addr, u8 off)
i2c_transfer(&solo_dev->i2c_adap[id], msgs, 2);
- return data;
+ return data;
}
void solo_i2c_writebyte(struct solo6010_dev *solo_dev, int id, u8 addr,
@@ -225,9 +225,9 @@ static int solo_i2c_master_xfer(struct i2c_adapter *adap,
}
if (i == SOLO_I2C_ADAPTERS)
- return num; // XXX Right return value for failure?
+ return num; /* XXX Right return value for failure? */
- down(&solo_dev->i2c_sem);
+ mutex_lock(&solo_dev->i2c_mutex);
solo_dev->i2c_id = i;
solo_dev->i2c_msg = msgs;
solo_dev->i2c_msg_num = num;
@@ -258,7 +258,7 @@ static int solo_i2c_master_xfer(struct i2c_adapter *adap,
solo_dev->i2c_state = IIC_STATE_IDLE;
solo_dev->i2c_id = -1;
- up(&solo_dev->i2c_sem);
+ mutex_unlock(&solo_dev->i2c_mutex);
return ret;
}
@@ -284,7 +284,7 @@ int solo_i2c_init(struct solo6010_dev *solo_dev)
solo_dev->i2c_id = -1;
solo_dev->i2c_state = IIC_STATE_IDLE;
init_waitqueue_head(&solo_dev->i2c_wait);
- sema_init(&solo_dev->i2c_sem, 1);
+ mutex_init(&solo_dev->i2c_mutex);
for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
struct i2c_adapter *adap = &solo_dev->i2c_adap[i];
@@ -296,7 +296,8 @@ int solo_i2c_init(struct solo6010_dev *solo_dev)
adap->retries = 1;
adap->dev.parent = &solo_dev->pdev->dev;
- if ((ret = i2c_add_adapter(adap))) {
+ ret = i2c_add_adapter(adap);
+ if (ret) {
adap->algo_data = NULL;
break;
}
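The hunks above replace a semaphore used as a binary lock with a struct mutex, the preferred primitive when the lock is always released by the task that took it. A minimal sketch of the resulting pattern, with an illustrative device structure:

#include <linux/mutex.h>

struct example_i2c_dev {
	struct mutex xfer_mutex;	/* serializes master transfers */
};

static void example_i2c_dev_init(struct example_i2c_dev *dev)
{
	mutex_init(&dev->xfer_mutex);
}

static int example_i2c_do_xfer(struct example_i2c_dev *dev)
{
	mutex_lock(&dev->xfer_mutex);
	/* program the transfer and sleep on a waitqueue/completion here */
	mutex_unlock(&dev->xfer_mutex);
	return 0;
}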
diff --git a/drivers/staging/solo6x10/solo6010-osd-font.h b/drivers/staging/solo6x10/solo6010-osd-font.h
index d6f565bd76cc..d72efbb3bb3d 100644
--- a/drivers/staging/solo6x10/solo6010-osd-font.h
+++ b/drivers/staging/solo6x10/solo6010-osd-font.h
@@ -22,7 +22,7 @@
static const unsigned int solo_osd_font[] = {
0x00000000, 0x0000c0c8, 0xccfefe0c, 0x08000000,
- 0x00000000, 0x10103838, 0x7c7cfefe, 0x00000000, // 0
+ 0x00000000, 0x10103838, 0x7c7cfefe, 0x00000000, /* 0 */
0x00000000, 0xfefe7c7c, 0x38381010, 0x10000000,
0x00000000, 0x7c82fefe, 0xfefefe7c, 0x00000000,
0x00000000, 0x00001038, 0x10000000, 0x00000000,
@@ -54,67 +54,67 @@ static const unsigned int solo_osd_font[] = {
0x0000003f, 0x7f404c52, 0x524c407f, 0x00000000,
0x0000007c, 0x82ba82ba, 0x82ba82fe, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x183c3c3c, 0x18180018, 0x18000000, // 32 !
+ 0x00000000, 0x183c3c3c, 0x18180018, 0x18000000, /* 32 ! */
0x00000066, 0x66240000, 0x00000000, 0x00000000,
- 0x00000000, 0x6c6cfe6c, 0x6c6cfe6c, 0x6c000000, // 34 " #
+ 0x00000000, 0x6c6cfe6c, 0x6c6cfe6c, 0x6c000000, /* 34 " # */
0x00001010, 0x7cd6d616, 0x7cd0d6d6, 0x7c101000,
- 0x00000000, 0x0086c660, 0x30180cc6, 0xc2000000, // 36 $ %
+ 0x00000000, 0x0086c660, 0x30180cc6, 0xc2000000, /* 36 $ % */
0x00000000, 0x386c6c38, 0xdc766666, 0xdc000000,
- 0x0000000c, 0x0c0c0600, 0x00000000, 0x00000000, // 38 & '
+ 0x0000000c, 0x0c0c0600, 0x00000000, 0x00000000, /* 38 & ' */
0x00000000, 0x30180c0c, 0x0c0c0c18, 0x30000000,
- 0x00000000, 0x0c183030, 0x30303018, 0x0c000000, // 40 ( )
+ 0x00000000, 0x0c183030, 0x30303018, 0x0c000000, /* 40 ( ) */
0x00000000, 0x0000663c, 0xff3c6600, 0x00000000,
- 0x00000000, 0x00001818, 0x7e181800, 0x00000000, // 42 * +
+ 0x00000000, 0x00001818, 0x7e181800, 0x00000000, /* 42 * + */
0x00000000, 0x00000000, 0x00000e0e, 0x0c060000,
- 0x00000000, 0x00000000, 0x7e000000, 0x00000000, // 44 , -
+ 0x00000000, 0x00000000, 0x7e000000, 0x00000000, /* 44 , - */
0x00000000, 0x00000000, 0x00000006, 0x06000000,
- 0x00000000, 0x80c06030, 0x180c0602, 0x00000000, // 46 . /
+ 0x00000000, 0x80c06030, 0x180c0602, 0x00000000, /* 46 . / */
0x0000007c, 0xc6e6f6de, 0xcec6c67c, 0x00000000,
- 0x00000030, 0x383c3030, 0x303030fc, 0x00000000, // 48 0 1
+ 0x00000030, 0x383c3030, 0x303030fc, 0x00000000, /* 48 0 1 */
0x0000007c, 0xc6c06030, 0x180cc6fe, 0x00000000,
- 0x0000007c, 0xc6c0c07c, 0xc0c0c67c, 0x00000000, // 50 2 3
+ 0x0000007c, 0xc6c0c07c, 0xc0c0c67c, 0x00000000, /* 50 2 3 */
0x00000060, 0x70786c66, 0xfe6060f0, 0x00000000,
- 0x000000fe, 0x0606067e, 0xc0c0c67c, 0x00000000, // 52 4 5
+ 0x000000fe, 0x0606067e, 0xc0c0c67c, 0x00000000, /* 52 4 5 */
0x00000038, 0x0c06067e, 0xc6c6c67c, 0x00000000,
- 0x000000fe, 0xc6c06030, 0x18181818, 0x00000000, // 54 6 7
+ 0x000000fe, 0xc6c06030, 0x18181818, 0x00000000, /* 54 6 7 */
0x0000007c, 0xc6c6c67c, 0xc6c6c67c, 0x00000000,
- 0x0000007c, 0xc6c6c6fc, 0xc0c06038, 0x00000000, // 56 8 9
+ 0x0000007c, 0xc6c6c6fc, 0xc0c06038, 0x00000000, /* 56 8 9 */
0x00000000, 0x18180000, 0x00181800, 0x00000000,
- 0x00000000, 0x18180000, 0x0018180c, 0x00000000, // 58 : ;
+ 0x00000000, 0x18180000, 0x0018180c, 0x00000000, /* 58 : ; */
0x00000060, 0x30180c06, 0x0c183060, 0x00000000,
0x00000000, 0x007e0000, 0x007e0000, 0x00000000,
0x00000006, 0x0c183060, 0x30180c06, 0x00000000,
0x0000007c, 0xc6c66030, 0x30003030, 0x00000000,
0x0000007c, 0xc6f6d6d6, 0x7606067c, 0x00000000,
- 0x00000010, 0x386cc6c6, 0xfec6c6c6, 0x00000000, // 64 @ A
+ 0x00000010, 0x386cc6c6, 0xfec6c6c6, 0x00000000, /* 64 @ A */
0x0000007e, 0xc6c6c67e, 0xc6c6c67e, 0x00000000,
- 0x00000078, 0xcc060606, 0x0606cc78, 0x00000000, // 66
+ 0x00000078, 0xcc060606, 0x0606cc78, 0x00000000, /* 66 */
0x0000003e, 0x66c6c6c6, 0xc6c6663e, 0x00000000,
- 0x000000fe, 0x0606063e, 0x060606fe, 0x00000000, // 68
+ 0x000000fe, 0x0606063e, 0x060606fe, 0x00000000, /* 68 */
0x000000fe, 0x0606063e, 0x06060606, 0x00000000,
- 0x00000078, 0xcc060606, 0xf6c6ccb8, 0x00000000, // 70
+ 0x00000078, 0xcc060606, 0xf6c6ccb8, 0x00000000, /* 70 */
0x000000c6, 0xc6c6c6fe, 0xc6c6c6c6, 0x00000000,
- 0x0000003c, 0x18181818, 0x1818183c, 0x00000000, // 72
+ 0x0000003c, 0x18181818, 0x1818183c, 0x00000000, /* 72 */
0x00000060, 0x60606060, 0x6066663c, 0x00000000,
- 0x000000c6, 0xc666361e, 0x3666c6c6, 0x00000000, // 74
+ 0x000000c6, 0xc666361e, 0x3666c6c6, 0x00000000, /* 74 */
0x00000006, 0x06060606, 0x060606fe, 0x00000000,
- 0x000000c6, 0xeefed6c6, 0xc6c6c6c6, 0x00000000, // 76
+ 0x000000c6, 0xeefed6c6, 0xc6c6c6c6, 0x00000000, /* 76 */
0x000000c6, 0xcedefef6, 0xe6c6c6c6, 0x00000000,
- 0x00000038, 0x6cc6c6c6, 0xc6c66c38, 0x00000000, // 78
+ 0x00000038, 0x6cc6c6c6, 0xc6c66c38, 0x00000000, /* 78 */
0x0000007e, 0xc6c6c67e, 0x06060606, 0x00000000,
- 0x00000038, 0x6cc6c6c6, 0xc6d67c38, 0x60000000, // 80
+ 0x00000038, 0x6cc6c6c6, 0xc6d67c38, 0x60000000, /* 80 */
0x0000007e, 0xc6c6c67e, 0x66c6c6c6, 0x00000000,
- 0x0000007c, 0xc6c60c38, 0x60c6c67c, 0x00000000, // 82
+ 0x0000007c, 0xc6c60c38, 0x60c6c67c, 0x00000000, /* 82 */
0x0000007e, 0x18181818, 0x18181818, 0x00000000,
- 0x000000c6, 0xc6c6c6c6, 0xc6c6c67c, 0x00000000, // 84
+ 0x000000c6, 0xc6c6c6c6, 0xc6c6c67c, 0x00000000, /* 84 */
0x000000c6, 0xc6c6c6c6, 0xc66c3810, 0x00000000,
- 0x000000c6, 0xc6c6c6c6, 0xd6d6fe6c, 0x00000000, // 86
+ 0x000000c6, 0xc6c6c6c6, 0xd6d6fe6c, 0x00000000, /* 86 */
0x000000c6, 0xc6c66c38, 0x6cc6c6c6, 0x00000000,
- 0x00000066, 0x66666666, 0x3c181818, 0x00000000, // 88
+ 0x00000066, 0x66666666, 0x3c181818, 0x00000000, /* 88 */
0x000000fe, 0xc0603018, 0x0c0606fe, 0x00000000,
- 0x0000003c, 0x0c0c0c0c, 0x0c0c0c3c, 0x00000000, // 90
+ 0x0000003c, 0x0c0c0c0c, 0x0c0c0c3c, 0x00000000, /* 90 */
0x00000002, 0x060c1830, 0x60c08000, 0x00000000,
- 0x0000003c, 0x30303030, 0x3030303c, 0x00000000, // 92
+ 0x0000003c, 0x30303030, 0x3030303c, 0x00000000, /* 92 */
0x00001038, 0x6cc60000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00fe0000,
0x00001818, 0x30000000, 0x00000000, 0x00000000,
diff --git a/drivers/staging/solo6x10/solo6010-p2m.c b/drivers/staging/solo6x10/solo6010-p2m.c
index 7ed3ed4b8f7e..956dea09348a 100644
--- a/drivers/staging/solo6x10/solo6010-p2m.c
+++ b/drivers/staging/solo6x10/solo6010-p2m.c
@@ -18,10 +18,11 @@
*/
#include <linux/kernel.h>
+#include <linux/scatterlist.h>
#include "solo6010.h"
-// #define SOLO_TEST_P2M
+/* #define SOLO_TEST_P2M */
int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
void *sys_addr, u32 ext_addr, u32 size)
@@ -30,8 +31,9 @@ int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
int ret;
WARN_ON(!size);
- WARN_ON(id >= SOLO_NR_P2M);
- if (!size || id >= SOLO_NR_P2M)
+ BUG_ON(id >= SOLO_NR_P2M);
+
+ if (!size)
return -EINVAL;
dma_addr = pci_map_single(solo_dev->pdev, sys_addr, size,
@@ -48,41 +50,136 @@ int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
int solo_p2m_dma_t(struct solo6010_dev *solo_dev, u8 id, int wr,
dma_addr_t dma_addr, u32 ext_addr, u32 size)
{
+ struct p2m_desc *desc = kzalloc(sizeof(*desc) * 2, GFP_DMA);
+ int ret;
+
+ if (desc == NULL)
+ return -ENOMEM;
+
+ solo_p2m_push_desc(&desc[1], wr, dma_addr, ext_addr, size, 0, 0);
+ ret = solo_p2m_dma_desc(solo_dev, id, desc, 2);
+ kfree(desc);
+
+ return ret;
+}
+
+void solo_p2m_push_desc(struct p2m_desc *desc, int wr, dma_addr_t dma_addr,
+ u32 ext_addr, u32 size, int repeat, u32 ext_size)
+{
+ desc->ta = dma_addr;
+ desc->fa = ext_addr;
+
+ desc->ext = SOLO_P2M_COPY_SIZE(size >> 2);
+ desc->ctrl = SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
+ (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON;
+
+ /* Ext size only matters when we're repeating */
+ if (repeat) {
+ desc->ext |= SOLO_P2M_EXT_INC(ext_size >> 2);
+ desc->ctrl |= SOLO_P2M_PCI_INC(size >> 2) |
+ SOLO_P2M_REPEAT(repeat);
+ }
+}
+
+int solo_p2m_dma_desc(struct solo6010_dev *solo_dev, u8 id,
+ struct p2m_desc *desc, int desc_count)
+{
struct solo_p2m_dev *p2m_dev;
- unsigned int timeout = 0;
+ unsigned int timeout;
+ int ret = 0;
+ u32 config = 0;
+ dma_addr_t desc_dma = 0;
- WARN_ON(!size);
- WARN_ON(id >= SOLO_NR_P2M);
- if (!size || id >= SOLO_NR_P2M)
- return -EINVAL;
+ BUG_ON(id >= SOLO_NR_P2M);
+ BUG_ON(!desc_count || desc_count > SOLO_NR_P2M_DESC);
p2m_dev = &solo_dev->p2m_dev[id];
- down(&p2m_dev->sem);
+ mutex_lock(&p2m_dev->mutex);
+
+ solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
-start_dma:
INIT_COMPLETION(p2m_dev->completion);
p2m_dev->error = 0;
- solo_reg_write(solo_dev, SOLO_P2M_TAR_ADR(id), dma_addr);
- solo_reg_write(solo_dev, SOLO_P2M_EXT_ADR(id), ext_addr);
- solo_reg_write(solo_dev, SOLO_P2M_EXT_CFG(id),
- SOLO_P2M_COPY_SIZE(size >> 2));
- solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id),
- SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
- (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON);
+ /* Enable the descriptors */
+ config = solo_reg_read(solo_dev, SOLO_P2M_CONFIG(id));
+ desc_dma = pci_map_single(solo_dev->pdev, desc,
+ desc_count * sizeof(*desc),
+ PCI_DMA_TODEVICE);
+ solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), desc_dma);
+ solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), desc_count - 1);
+ solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config |
+ SOLO_P2M_DESC_MODE);
+
+ /* Should have all descriptors completed from one interrupt */
timeout = wait_for_completion_timeout(&p2m_dev->completion, HZ);
solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
- /* XXX Really looks to me like we will get stuck here if a
- * real PCI P2M error occurs */
+ /* Reset back to non-descriptor mode */
+ solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config);
+ solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), 0);
+ solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), 0);
+ pci_unmap_single(solo_dev->pdev, desc_dma,
+ desc_count * sizeof(*desc),
+ PCI_DMA_TODEVICE);
+
if (p2m_dev->error)
- goto start_dma;
+ ret = -EIO;
+ else if (timeout == 0)
+ ret = -EAGAIN;
+
+ mutex_unlock(&p2m_dev->mutex);
+
+ WARN_ON_ONCE(ret);
- up(&p2m_dev->sem);
+ return ret;
+}
+
+int solo_p2m_dma_sg(struct solo6010_dev *solo_dev, u8 id,
+ struct p2m_desc *pdesc, int wr,
+ struct scatterlist *sg, u32 sg_off,
+ u32 ext_addr, u32 size)
+{
+ int i;
+ int idx;
+
+ BUG_ON(id >= SOLO_NR_P2M);
+
+ if (WARN_ON_ONCE(!size))
+ return -EINVAL;
+
+ memset(pdesc, 0, sizeof(*pdesc));
+
+ /* Should rewrite this to handle > SOLO_NR_P2M_DESC transactions */
+ for (i = 0, idx = 1; idx < SOLO_NR_P2M_DESC && sg && size > 0;
+ i++, sg = sg_next(sg)) {
+ struct p2m_desc *desc = &pdesc[idx];
+ u32 sg_len = sg_dma_len(sg);
+ u32 len;
- return (timeout == 0) ? -EAGAIN : 0;
+ if (sg_off >= sg_len) {
+ sg_off -= sg_len;
+ continue;
+ }
+
+ sg_len -= sg_off;
+ len = min(sg_len, size);
+
+ solo_p2m_push_desc(desc, wr, sg_dma_address(sg) + sg_off,
+ ext_addr, len, 0, 0);
+
+ size -= len;
+ ext_addr += len;
+ idx++;
+
+ sg_off = 0;
+ }
+
+ WARN_ON_ONCE(size || i >= SOLO_NR_P2M_DESC);
+
+ return solo_p2m_dma_desc(solo_dev, id, pdesc, idx);
}
#ifdef SOLO_TEST_P2M
@@ -147,13 +244,16 @@ static void run_p2m_test(struct solo6010_dev *solo_dev)
return;
}
#else
-#define run_p2m_test(__solo) do{}while(0)
+#define run_p2m_test(__solo) do {} while (0)
#endif
void solo_p2m_isr(struct solo6010_dev *solo_dev, int id)
{
+ struct solo_p2m_dev *p2m_dev = &solo_dev->p2m_dev[id];
+
solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_P2M(id));
- complete(&solo_dev->p2m_dev[id].completion);
+
+ complete(&p2m_dev->completion);
}
void solo_p2m_error_isr(struct solo6010_dev *solo_dev, u32 status)
@@ -188,16 +288,14 @@ int solo_p2m_init(struct solo6010_dev *solo_dev)
for (i = 0; i < SOLO_NR_P2M; i++) {
p2m_dev = &solo_dev->p2m_dev[i];
- sema_init(&p2m_dev->sem, 1);
+ mutex_init(&p2m_dev->mutex);
init_completion(&p2m_dev->completion);
- solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(i),
- __pa(p2m_dev->desc));
-
solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
solo_reg_write(solo_dev, SOLO_P2M_CONFIG(i),
SOLO_P2M_CSC_16BIT_565 |
- SOLO_P2M_DMA_INTERVAL(0) |
+ SOLO_P2M_DMA_INTERVAL(3) |
+ SOLO_P2M_DESC_INTR_OPT |
SOLO_P2M_PCI_MASTER_MODE);
solo6010_irq_on(solo_dev, SOLO_IRQ_P2M(i));
}
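solo_p2m_dma_sg() above builds one P2M descriptor per scatterlist segment, skipping sg_off bytes before the first chunk. The walk itself reduces to the following sketch, where emit_chunk() stands in for solo_p2m_push_desc():

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void example_walk_sg(struct scatterlist *sg, u32 sg_off, u32 size,
			    void (*emit_chunk)(dma_addr_t addr, u32 len))
{
	for (; sg && size > 0; sg = sg_next(sg)) {
		u32 sg_len = sg_dma_len(sg);
		u32 len;

		if (sg_off >= sg_len) {		/* still inside the skip region */
			sg_off -= sg_len;
			continue;
		}

		len = min(sg_len - sg_off, size);
		emit_chunk(sg_dma_address(sg) + sg_off, len);

		size -= len;
		sg_off = 0;			/* only the first chunk carries an offset */
	}
}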
diff --git a/drivers/staging/solo6x10/solo6010-tw28.c b/drivers/staging/solo6x10/solo6010-tw28.c
index 0159c8392436..905a6ad23a37 100644
--- a/drivers/staging/solo6x10/solo6010-tw28.c
+++ b/drivers/staging/solo6x10/solo6010-tw28.c
@@ -35,107 +35,107 @@
#define DEFAULT_VACTIVE_PAL (312-DEFAULT_VDELAY_PAL)
static u8 tbl_tw2864_template[] = {
- 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x00
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x00 */
0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x10
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x10 */
0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x20
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x20 */
0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x30
+ 0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x30 */
0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x40
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x70
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00,
- 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00,
- 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, // 0x90
+ 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */
0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01,
- 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, // 0xa0
+ 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */
0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44,
- 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, // 0xb0
+ 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00,
- 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, // 0xd0
+ 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */
0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
- 0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
- 0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+ 0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */
0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00,
};
static u8 tbl_tw2865_ntsc_template[] = {
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x00
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */
0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x10
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */
0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x20
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */
0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, // 0x30
+ 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, /* 0x30 */
0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
- 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, // 0x40
+ 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, /* 0x40 */
0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
- 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+ 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */
0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
- 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
- 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+ 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */
0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
- 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, // 0xa0
+ 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, /* 0xa0 */
0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
- 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+ 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */
0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8,
- 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
- 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+ 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */
0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
- 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
- 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+ 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */
0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
};
static u8 tbl_tw2865_pal_template[] = {
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x00
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */
0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x10
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */
0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x20
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */
0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
- 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x30
+ 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */
0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
- 0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, // 0x40
+ 0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, /* 0x40 */
0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+ 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
- 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+ 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */
0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
- 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+ 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
- 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+ 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */
0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
- 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, // 0xa0
+ 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, /* 0xa0 */
0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
- 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+ 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */
0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8,
- 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+ 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
- 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+ 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */
0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
- 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+ 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
- 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, // 0xf0
+ 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, /* 0xf0 */
0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
};
@@ -181,8 +181,8 @@ static void tw_write_and_verify(struct solo6010_dev *solo_dev, u8 addr, u8 off,
msleep_interruptible(1);
}
-// printk("solo6010/tw28: Error writing register: %02x->%02x [%02x]\n",
-// addr, off, val);
+/* printk("solo6010/tw28: Error writing register: %02x->%02x [%02x]\n",
+ addr, off, val); */
}
static int tw2865_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
@@ -217,7 +217,7 @@ static int tw2865_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
for (i = 0; i < 0xff; i++) {
/* Skip read only registers */
- if (i >= 0xb8 && i <= 0xc1 )
+ if (i >= 0xb8 && i <= 0xc1)
continue;
if ((i & ~0x30) == 0x00 ||
(i & ~0x30) == 0x0c ||
@@ -302,7 +302,7 @@ static int tw2864_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
for (i = 0; i < 0xff; i++) {
/* Skip read only registers */
- if (i >= 0xb8 && i <= 0xc1 )
+ if (i >= 0xb8 && i <= 0xc1)
continue;
if ((i & ~0x30) == 0x00 ||
(i & ~0x30) == 0x0c ||
@@ -334,13 +334,13 @@ static int tw2815_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
};
u8 tbl_tw2815_sfr[] = {
- 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, // 0x00
+ 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, /* 0x00 */
0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00,
- 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, // 0x10
+ 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, /* 0x10 */
0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00,
- 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, // 0x20
+ 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, /* 0x20 */
0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88,
- 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, // 0x30
+ 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, /* 0x30 */
};
u8 *tbl_tw2815_common;
int i;
@@ -459,7 +459,7 @@ static int tw2815_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
for (i = 0; i < 0x0f; i++) {
if (i == 0x00)
- continue; // read-only
+ continue; /* read-only */
solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
dev_addr, (ch * 0x10) + i,
tbl_tw2815_common[i]);
@@ -597,7 +597,7 @@ int solo_tw28_init(struct solo6010_dev *solo_dev)
return 0;
}
-/*
+/*
* We accessed the video status signal in the Techwell chip through
* iic/i2c because the video status reported by register REG_VI_STATUS1
* (address 0x012C) of the SOLO6010 chip doesn't give the correct video
@@ -751,7 +751,7 @@ int tw28_get_ctrl_val(struct solo6010_dev *solo_dev, u32 ctrl, u8 ch,
rval = tw_readbyte(solo_dev, chip_num,
TW286x_BRIGHTNESS_ADDR(ch),
TW_BRIGHTNESS_ADDR(ch));
- if (is_tw286x(solo_dev, chip_num))
+ if (is_tw286x(solo_dev, chip_num))
*val = (s32)((char)rval) + 128;
else
*val = rval;
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
index 097e82bc7a63..7bbb94097d29 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -24,7 +24,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
#include "solo6010.h"
#include "solo6010-tw28.h"
@@ -47,13 +47,14 @@ struct solo_enc_fh {
struct videobuf_queue vidq;
struct list_head vidq_active;
struct task_struct *kthread;
+ struct p2m_desc desc[SOLO_NR_P2M_DESC];
};
static unsigned char vid_vop_header[] = {
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
0x02, 0x48, 0x05, 0xc0, 0x00, 0x40, 0x00, 0x40,
0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
- 0x1f, 0x4c, 0x58, 0x10, 0x78, 0x51, 0x18, 0x3e,
+ 0x1f, 0x4c, 0x58, 0x10, 0x78, 0x51, 0x18, 0x3f,
};
/*
@@ -151,6 +152,11 @@ static void solo_motion_toggle(struct solo_enc_dev *solo_enc, int on)
else
solo_dev->motion_mask &= ~(1 << ch);
+ /* Do this regardless of if we are turning on or off */
+ solo_reg_write(solo_enc->solo_dev, SOLO_VI_MOT_CLEAR,
+ 1 << solo_enc->ch);
+ solo_enc->motion_detected = 0;
+
solo_reg_write(solo_dev, SOLO_VI_MOT_ADR,
SOLO_VI_MOTION_EN(solo_dev->motion_mask) |
(SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
@@ -184,7 +190,7 @@ static void solo_update_mode(struct solo_enc_dev *solo_enc)
solo_enc->bw_weight <<= 2;
break;
default:
- WARN(1, "mode is unknown");
+ WARN(1, "mode is unknown\n");
}
}
@@ -211,11 +217,6 @@ static int solo_enc_on(struct solo_enc_fh *fh)
solo_dev->enc_bw_remain -= solo_enc->bw_weight;
}
- fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6010_NAME "_enc");
-
- if (IS_ERR(fh->kthread))
- return PTR_ERR(fh->kthread);
-
fh->enc_on = 1;
fh->rd_idx = solo_enc->solo_dev->enc_wr_idx;
@@ -279,6 +280,24 @@ static void solo_enc_off(struct solo_enc_fh *fh)
solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0);
}
+static int solo_start_fh_thread(struct solo_enc_fh *fh)
+{
+ struct solo_enc_dev *solo_enc = fh->enc;
+
+ fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6010_NAME "_enc");
+
+ /* Oops, we had a problem */
+ if (IS_ERR(fh->kthread)) {
+ spin_lock(&solo_enc->lock);
+ solo_enc_off(fh);
+ spin_unlock(&solo_enc->lock);
+
+ return PTR_ERR(fh->kthread);
+ }
+
+ return 0;
+}
+
static void enc_reset_gop(struct solo6010_dev *solo_dev, u8 ch)
{
BUG_ON(ch >= solo_dev->nr_chans);
@@ -299,22 +318,68 @@ static int enc_gop_reset(struct solo6010_dev *solo_dev, u8 ch, u8 vop)
return 0;
}
-static int enc_get_mpeg_dma_t(struct solo6010_dev *solo_dev, dma_addr_t buf,
- unsigned int off, unsigned int size)
+static void enc_write_sg(struct scatterlist *sglist, void *buf, int size)
+{
+ struct scatterlist *sg;
+ u8 *src = buf;
+
+ for (sg = sglist; sg && size > 0; sg = sg_next(sg)) {
+ u8 *p = sg_virt(sg);
+ size_t len = sg_dma_len(sg);
+ int i;
+
+ for (i = 0; i < len && size; i++)
+ p[i] = *(src++);
+ }
+}
+
+static int enc_get_mpeg_dma_sg(struct solo6010_dev *solo_dev,
+ struct p2m_desc *desc,
+ struct scatterlist *sglist, int skip,
+ unsigned int off, unsigned int size)
{
int ret;
if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
return -EINVAL;
- if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev))
+ if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) {
+ return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E,
+ desc, 0, sglist, skip,
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off, size);
+ }
+
+ /* Buffer wrap */
+ ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0,
+ sglist, skip, SOLO_MP4E_EXT_ADDR(solo_dev) + off,
+ SOLO_MP4E_EXT_SIZE(solo_dev) - off);
+
+ ret |= solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0,
+ sglist, skip + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
+ SOLO_MP4E_EXT_ADDR(solo_dev),
+ size + off - SOLO_MP4E_EXT_SIZE(solo_dev));
+
+ return ret;
+}
+
+static int enc_get_mpeg_dma_t(struct solo6010_dev *solo_dev,
+ dma_addr_t buf, unsigned int off,
+ unsigned int size)
+{
+ int ret;
+
+ if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
+ return -EINVAL;
+
+ if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) {
return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
SOLO_MP4E_EXT_ADDR(solo_dev) + off, size);
+ }
/* Buffer wrap */
ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
- SOLO_MP4E_EXT_ADDR(solo_dev) + off,
- SOLO_MP4E_EXT_SIZE(solo_dev) - off);
+ SOLO_MP4E_EXT_ADDR(solo_dev) + off,
+ SOLO_MP4E_EXT_SIZE(solo_dev) - off);
ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0,
buf + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
@@ -337,70 +402,108 @@ static int enc_get_mpeg_dma(struct solo6010_dev *solo_dev, void *buf,
return ret;
}
-static int enc_get_jpeg_dma(struct solo6010_dev *solo_dev, dma_addr_t buf,
- unsigned int off, unsigned int size)
+static int enc_get_jpeg_dma_sg(struct solo6010_dev *solo_dev,
+ struct p2m_desc *desc,
+ struct scatterlist *sglist, int skip,
+ unsigned int off, unsigned int size)
{
int ret;
if (off > SOLO_JPEG_EXT_SIZE(solo_dev))
return -EINVAL;
- if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev))
- return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
- SOLO_JPEG_EXT_ADDR(solo_dev) + off, size);
+ if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev)) {
+ return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG,
+ desc, 0, sglist, skip,
+ SOLO_JPEG_EXT_ADDR(solo_dev) + off, size);
+ }
/* Buffer wrap */
- ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
- SOLO_JPEG_EXT_ADDR(solo_dev) + off,
- SOLO_JPEG_EXT_SIZE(solo_dev) - off);
+ ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0,
+ sglist, skip, SOLO_JPEG_EXT_ADDR(solo_dev) + off,
+ SOLO_JPEG_EXT_SIZE(solo_dev) - off);
- ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0,
- buf + SOLO_JPEG_EXT_SIZE(solo_dev) - off,
- SOLO_JPEG_EXT_ADDR(solo_dev),
- size + off - SOLO_JPEG_EXT_SIZE(solo_dev));
+ ret |= solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0,
+ sglist, skip + SOLO_JPEG_EXT_SIZE(solo_dev) - off,
+ SOLO_JPEG_EXT_ADDR(solo_dev),
+ size + off - SOLO_JPEG_EXT_SIZE(solo_dev));
return ret;
}
+/* Returns true if __chk is within the first __range bytes of __off */
+#define OFF_IN_RANGE(__off, __range, __chk) \
+ ((__off <= __chk) && ((__off + __range) >= __chk))
+
+static void solo_jpeg_header(struct solo_enc_dev *solo_enc,
+ struct videobuf_dmabuf *vbuf)
+{
+ struct scatterlist *sg;
+ void *src = jpeg_header;
+ size_t copied = 0;
+ size_t to_copy = sizeof(jpeg_header);
+
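+	/* Copy the JPEG header into the sg list, patching the frame height and width into the SOF0 marker */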
+ for (sg = vbuf->sglist; sg && copied < to_copy; sg = sg_next(sg)) {
+ size_t this_copy = min(sg_dma_len(sg),
+ (unsigned int)(to_copy - copied));
+ u8 *p = sg_virt(sg);
+
+ memcpy(p, src + copied, this_copy);
+
+ if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 5))
+ p[(SOF0_START + 5) - copied] =
+ 0xff & (solo_enc->height >> 8);
+ if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 6))
+ p[(SOF0_START + 6) - copied] = 0xff & solo_enc->height;
+ if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 7))
+ p[(SOF0_START + 7) - copied] =
+ 0xff & (solo_enc->width >> 8);
+ if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 8))
+ p[(SOF0_START + 8) - copied] = 0xff & solo_enc->width;
+
+ copied += this_copy;
+ }
+}
+
static int solo_fill_jpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
- struct videobuf_buffer *vb, dma_addr_t vbuf)
+ struct videobuf_buffer *vb,
+ struct videobuf_dmabuf *vbuf)
{
- struct solo_enc_dev *solo_enc = fh->enc;
- struct solo6010_dev *solo_dev = solo_enc->solo_dev;
- u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+ struct solo6010_dev *solo_dev = fh->enc->solo_dev;
+ int size = enc_buf->jpeg_size;
- memcpy(p, jpeg_header, sizeof(jpeg_header));
- p[SOF0_START + 5] = 0xff & (solo_enc->height >> 8);
- p[SOF0_START + 6] = 0xff & solo_enc->height;
- p[SOF0_START + 7] = 0xff & (solo_enc->width >> 8);
- p[SOF0_START + 8] = 0xff & solo_enc->width;
+ /* Copy the header first (direct write) */
+ solo_jpeg_header(fh->enc, vbuf);
- vbuf += sizeof(jpeg_header);
- vb->size = enc_buf->jpeg_size + sizeof(jpeg_header);
+ vb->size = size + sizeof(jpeg_header);
- return enc_get_jpeg_dma(solo_dev, vbuf, enc_buf->jpeg_off,
- enc_buf->jpeg_size);
+ /* Grab the jpeg frame */
+ return enc_get_jpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist,
+ sizeof(jpeg_header),
+ enc_buf->jpeg_off, size);
}
static int solo_fill_mpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
- struct videobuf_buffer *vb, dma_addr_t vbuf)
+ struct videobuf_buffer *vb,
+ struct videobuf_dmabuf *vbuf)
{
struct solo_enc_dev *solo_enc = fh->enc;
struct solo6010_dev *solo_dev = solo_enc->solo_dev;
struct vop_header vh;
int ret;
int frame_size, frame_off;
+ int skip = 0;
if (WARN_ON_ONCE(enc_buf->size <= sizeof(vh)))
- return -1;
+ return -EINVAL;
/* First get the hardware vop header (not real mpeg) */
ret = enc_get_mpeg_dma(solo_dev, &vh, enc_buf->off, sizeof(vh));
- if (ret)
- return -1;
+ if (WARN_ON_ONCE(ret))
+ return ret;
if (WARN_ON_ONCE(vh.size > enc_buf->size))
- return -1;
+ return -EINVAL;
vb->width = vh.hsize << 4;
vb->height = vh.vsize << 4;
@@ -410,9 +513,9 @@ static int solo_fill_mpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
if (!enc_buf->vop) {
u16 fps = solo_dev->fps * 1000;
u16 interval = solo_enc->interval * 1000;
- u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+ u8 p[sizeof(vid_vop_header)];
- memcpy(p, vid_vop_header, sizeof(vid_vop_header));
+ memcpy(p, vid_vop_header, sizeof(p));
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
p[10] |= ((XVID_PAR_43_NTSC << 3) & 0x78);
@@ -434,43 +537,49 @@ static int solo_fill_mpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
if (vh.interlace)
p[29] |= 0x20;
+ enc_write_sg(vbuf->sglist, p, sizeof(p));
+
/* Adjust the dma buffer past this header */
vb->size += sizeof(vid_vop_header);
- vbuf += sizeof(vid_vop_header);
+ skip = sizeof(vid_vop_header);
}
/* Now get the actual mpeg payload */
frame_off = (enc_buf->off + sizeof(vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
frame_size = enc_buf->size - sizeof(vh);
- ret = enc_get_mpeg_dma_t(solo_dev, vbuf, frame_off, frame_size);
- if (WARN_ON_ONCE(ret))
- return -1;
- return 0;
+ ret = enc_get_mpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist,
+ skip, frame_off, frame_size);
+ WARN_ON_ONCE(ret);
+
+ return ret;
}
-/* On successful return (0), leaves solo_enc->lock unlocked */
-static int solo_enc_fillbuf(struct solo_enc_fh *fh,
+static void solo_enc_fillbuf(struct solo_enc_fh *fh,
struct videobuf_buffer *vb)
{
struct solo_enc_dev *solo_enc = fh->enc;
struct solo6010_dev *solo_dev = solo_enc->solo_dev;
struct solo_enc_buf *enc_buf = NULL;
- dma_addr_t vbuf;
+ struct videobuf_dmabuf *vbuf;
int ret;
+ int error = 1;
u16 idx = fh->rd_idx;
while (idx != solo_dev->enc_wr_idx) {
struct solo_enc_buf *ebuf = &solo_dev->enc_buf[idx];
+
idx = (idx + 1) % SOLO_NR_RING_BUFS;
+
+ if (ebuf->ch != solo_enc->ch)
+ continue;
+
if (fh->fmt == V4L2_PIX_FMT_MPEG) {
- if (fh->type != ebuf->type)
- continue;
- if (ebuf->ch == solo_enc->ch) {
+ if (fh->type == ebuf->type) {
enc_buf = ebuf;
break;
}
- } else if (ebuf->ch == solo_enc->ch) {
+ } else {
/* For mjpeg, keep reading to the newest frame */
enc_buf = ebuf;
}
@@ -478,48 +587,55 @@ static int solo_enc_fillbuf(struct solo_enc_fh *fh,
fh->rd_idx = idx;
- if (!enc_buf)
- return -1;
+ if (WARN_ON_ONCE(!enc_buf))
+ goto buf_err;
if ((fh->fmt == V4L2_PIX_FMT_MPEG &&
vb->bsize < enc_buf->size) ||
(fh->fmt == V4L2_PIX_FMT_MJPEG &&
vb->bsize < (enc_buf->jpeg_size + sizeof(jpeg_header)))) {
- return -1;
+ WARN_ON_ONCE(1);
+ goto buf_err;
}
- if (!(vbuf = videobuf_to_dma_contig(vb)))
- return -1;
-
- /* Is it ok that we mess with this buffer out of lock? */
- spin_unlock(&solo_enc->lock);
+ vbuf = videobuf_to_dma(vb);
+ if (WARN_ON_ONCE(!vbuf))
+ goto buf_err;
if (fh->fmt == V4L2_PIX_FMT_MPEG)
ret = solo_fill_mpeg(fh, enc_buf, vb, vbuf);
else
ret = solo_fill_jpeg(fh, enc_buf, vb, vbuf);
- if (ret) // Ignore failures
- return 0;
+ if (!ret)
+ error = 0;
- list_del(&vb->queue);
- vb->field_count++;
- vb->ts = enc_buf->ts;
- vb->state = VIDEOBUF_DONE;
+buf_err:
+ if (error) {
+ vb->state = VIDEOBUF_ERROR;
+ } else {
+ vb->field_count++;
+ vb->ts = enc_buf->ts;
+ vb->state = VIDEOBUF_DONE;
+ }
wake_up(&vb->done);
- return 0;
+ return;
}
static void solo_enc_thread_try(struct solo_enc_fh *fh)
{
struct solo_enc_dev *solo_enc = fh->enc;
+ struct solo6010_dev *solo_dev = solo_enc->solo_dev;
struct videobuf_buffer *vb;
for (;;) {
spin_lock(&solo_enc->lock);
+ if (fh->rd_idx == solo_dev->enc_wr_idx)
+ break;
+
if (list_empty(&fh->vidq_active))
break;
@@ -529,9 +645,11 @@ static void solo_enc_thread_try(struct solo_enc_fh *fh)
if (!waitqueue_active(&vb->done))
break;
- /* On success, returns with solo_enc->lock unlocked */
- if (solo_enc_fillbuf(fh, vb))
- break;
+ list_del(&vb->queue);
+
+ spin_unlock(&solo_enc->lock);
+
+ solo_enc_fillbuf(fh, vb);
}
assert_spin_locked(&solo_enc->lock);
@@ -557,7 +675,7 @@ static int solo_enc_thread(void *data)
remove_wait_queue(&solo_enc->thread_wait, &wait);
- return 0;
+ return 0;
}
void solo_motion_isr(struct solo6010_dev *solo_dev)
@@ -614,7 +732,8 @@ void solo_enc_v4l2_isr(struct solo6010_dev *solo_dev)
jpeg_next = solo_reg_read(solo_dev,
SOLO_VE_JPEG_QUE(solo_dev->enc_idx));
- if ((ch = (mpeg_current >> 24) & 0x1f) >= SOLO_MAX_CHANNELS) {
+ ch = (mpeg_current >> 24) & 0x1f;
+ if (ch >= SOLO_MAX_CHANNELS) {
ch -= SOLO_MAX_CHANNELS;
enc_type = SOLO_ENC_TYPE_EXT;
} else
@@ -669,12 +788,12 @@ void solo_enc_v4l2_isr(struct solo6010_dev *solo_dev)
static int solo_enc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
- *size = FRAME_BUF_SIZE;
+ *size = FRAME_BUF_SIZE;
- if (*count < MIN_VID_BUFFERS)
+ if (*count < MIN_VID_BUFFERS)
*count = MIN_VID_BUFFERS;
- return 0;
+ return 0;
}
static int solo_enc_buf_prepare(struct videobuf_queue *vq,
@@ -696,7 +815,9 @@ static int solo_enc_buf_prepare(struct videobuf_queue *vq,
if (vb->state == VIDEOBUF_NEEDS_INIT) {
int rc = videobuf_iolock(vq, vb, NULL);
if (rc < 0) {
- videobuf_dma_contig_free(vq, vb);
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
vb->state = VIDEOBUF_NEEDS_INIT;
return rc;
}
@@ -719,7 +840,10 @@ static void solo_enc_buf_queue(struct videobuf_queue *vq,
static void solo_enc_buf_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
- videobuf_dma_contig_free(vq, vb);
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -750,25 +874,22 @@ static int solo_enc_open(struct file *file)
struct solo_enc_dev *solo_enc = video_drvdata(file);
struct solo_enc_fh *fh;
- if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (fh == NULL)
return -ENOMEM;
- spin_lock(&solo_enc->lock);
-
fh->enc = solo_enc;
file->private_data = fh;
INIT_LIST_HEAD(&fh->vidq_active);
fh->fmt = V4L2_PIX_FMT_MPEG;
fh->type = SOLO_ENC_TYPE_STD;
- videobuf_queue_dma_contig_init(&fh->vidq, &solo_enc_video_qops,
- &solo_enc->solo_dev->pdev->dev,
- &solo_enc->lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct videobuf_buffer), fh, NULL);
-
- spin_unlock(&solo_enc->lock);
+ videobuf_queue_sg_init(&fh->vidq, &solo_enc_video_qops,
+ &solo_enc->solo_dev->pdev->dev,
+ &solo_enc->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct videobuf_buffer), fh, NULL);
return 0;
}
@@ -785,7 +906,11 @@ static ssize_t solo_enc_read(struct file *file, char __user *data,
spin_lock(&solo_enc->lock);
ret = solo_enc_on(fh);
- spin_unlock(&solo_enc->lock);
+ spin_unlock(&solo_enc->lock);
+ if (ret)
+ return ret;
+
+ ret = solo_start_fh_thread(fh);
if (ret)
return ret;
}
@@ -797,10 +922,15 @@ static ssize_t solo_enc_read(struct file *file, char __user *data,
static int solo_enc_release(struct file *file)
{
struct solo_enc_fh *fh = file->private_data;
+ struct solo_enc_dev *solo_enc = fh->enc;
videobuf_stop(&fh->vidq);
videobuf_mmap_free(&fh->vidq);
+
+ spin_lock(&solo_enc->lock);
solo_enc_off(fh);
+ spin_unlock(&solo_enc->lock);
+
kfree(fh);
return 0;
@@ -842,7 +972,7 @@ static int solo_enc_enum_input(struct file *file, void *priv,
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
input->std = V4L2_STD_NTSC_M;
else
- input->std = V4L2_STD_PAL_M;
+ input->std = V4L2_STD_PAL_B;
if (!tw28_get_video_status(solo_dev, solo_enc->ch))
input->status = V4L2_IN_ST_NO_SIGNAL;
@@ -915,9 +1045,8 @@ static int solo_enc_try_fmt_cap(struct file *file, void *priv,
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_INTERLACED;
- else if (pix->field != V4L2_FIELD_INTERLACED) {
+ else if (pix->field != V4L2_FIELD_INTERLACED)
pix->field = V4L2_FIELD_INTERLACED;
- }
/* Just set these */
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -937,7 +1066,8 @@ static int solo_enc_set_fmt_cap(struct file *file, void *priv,
spin_lock(&solo_enc->lock);
- if ((ret = solo_enc_try_fmt_cap(file, priv, f))) {
+ ret = solo_enc_try_fmt_cap(file, priv, f);
+ if (ret) {
spin_unlock(&solo_enc->lock);
return ret;
}
@@ -956,7 +1086,10 @@ static int solo_enc_set_fmt_cap(struct file *file, void *priv,
spin_unlock(&solo_enc->lock);
- return ret;
+ if (ret)
+ return ret;
+
+ return solo_start_fh_thread(fh);
}
static int solo_enc_get_fmt_cap(struct file *file, void *priv,
@@ -977,7 +1110,7 @@ static int solo_enc_get_fmt_cap(struct file *file, void *priv,
return 0;
}
-static int solo_enc_reqbufs(struct file *file, void *priv,
+static int solo_enc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *req)
{
struct solo_enc_fh *fh = priv;
@@ -1014,6 +1147,10 @@ static int solo_enc_dqbuf(struct file *file, void *priv,
spin_unlock(&solo_enc->lock);
if (ret)
return ret;
+
+ ret = solo_start_fh_thread(fh);
+ if (ret)
+ return ret;
}
ret = videobuf_dqbuf(&fh->vidq, buf, file->f_flags & O_NONBLOCK);
@@ -1033,12 +1170,16 @@ static int solo_enc_dqbuf(struct file *file, void *priv,
/* Check for key frame on mpeg data */
if (fh->fmt == V4L2_PIX_FMT_MPEG) {
- struct videobuf_buffer *vb = fh->vidq.bufs[buf->index];
- u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
- if (p[3] == 0x00)
- buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
- else
- buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ struct videobuf_dmabuf *vbuf =
+ videobuf_to_dma(fh->vidq.bufs[buf->index]);
+
+ if (vbuf) {
+ u8 *p = sg_virt(vbuf->sglist);
+ if (p[3] == 0x00)
+ buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ }
}
return 0;
@@ -1136,7 +1277,7 @@ static int solo_g_parm(struct file *file, void *priv,
/* XXX: Shouldn't we be able to get/set this from videobuf? */
cp->readbuffers = 2;
- return 0;
+ return 0;
}
static int solo_s_parm(struct file *file, void *priv,
@@ -1176,7 +1317,7 @@ static int solo_s_parm(struct file *file, void *priv,
spin_unlock(&solo_enc->lock);
- return 0;
+ return 0;
}
static int solo_queryctrl(struct file *file, void *priv,
@@ -1240,7 +1381,7 @@ static int solo_queryctrl(struct file *file, void *priv,
return 0;
}
- return -EINVAL;
+ return -EINVAL;
}
static int solo_querymenu(struct file *file, void *priv,
@@ -1250,7 +1391,8 @@ static int solo_querymenu(struct file *file, void *priv,
int err;
qctrl.id = qmenu->id;
- if ((err = solo_queryctrl(file, priv, &qctrl)))
+ err = solo_queryctrl(file, priv, &qctrl);
+ if (err)
return err;
return v4l2_ctrl_query_menu(qmenu, &qctrl, NULL);
@@ -1350,9 +1492,9 @@ static int solo_s_ext_ctrls(struct file *file, void *priv,
switch (ctrl->id) {
case V4L2_CID_RDS_TX_RADIO_TEXT:
if (ctrl->size - 1 > OSD_TEXT_MAX)
- err = -ERANGE;
+ err = -ERANGE;
else {
- err = copy_from_user(solo_enc->osd_text,
+ err = copy_from_user(solo_enc->osd_text,
ctrl->string,
OSD_TEXT_MAX);
solo_enc->osd_text[OSD_TEXT_MAX] = '\0';
@@ -1459,7 +1601,7 @@ static struct video_device solo_enc_template = {
.minor = -1,
.release = video_device_release,
- .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_B,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1505,7 +1647,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo6010_dev *solo_dev, u8 ch)
atomic_set(&solo_enc->readers, 0);
solo_enc->qp = SOLO_DEFAULT_QP;
- solo_enc->gop = solo_dev->fps;
+ solo_enc->gop = solo_dev->fps;
solo_enc->interval = 1;
solo_enc->mode = SOLO_ENC_MODE_CIF;
solo_enc->motion_thresh = SOLO_DEF_MOT_THRESH;
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
index 6ffd21de837d..a8491dc0e914 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -24,14 +24,13 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
#include "solo6010.h"
#include "solo6010-tw28.h"
#define SOLO_HW_BPL 2048
#define SOLO_DISP_PIX_FIELD V4L2_FIELD_INTERLACED
-#define SOLO_DISP_BUF_SIZE (64 * 1024) // 64k
/* Image size is two fields, SOLO_HW_BPL is one horizontal line */
#define solo_vlines(__solo) (__solo->video_vsize * 2)
@@ -49,6 +48,8 @@ struct solo_filehandle {
spinlock_t slock;
int old_write;
struct list_head vidq_active;
+ struct p2m_desc desc[SOLO_NR_P2M_DESC];
+ int desc_idx;
};
unsigned video_nr = -1;
@@ -96,7 +97,7 @@ static void solo_win_setup(struct solo6010_dev *solo_dev, u8 ch,
SOLO_VI_WIN_EX(ex) |
SOLO_VI_WIN_SCALE(scale));
- solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
+ solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
SOLO_VI_WIN_SY(sy) |
SOLO_VI_WIN_EY(ey));
}
@@ -203,50 +204,149 @@ static int solo_v4l2_set_ch(struct solo6010_dev *solo_dev, u8 ch)
return 0;
}
+static void disp_reset_desc(struct solo_filehandle *fh)
+{
+ /* We use desc mode, which ignores desc 0 */
+ memset(fh->desc, 0, sizeof(*fh->desc));
+ fh->desc_idx = 1;
+}
+
+static int disp_flush_descs(struct solo_filehandle *fh)
+{
+ int ret;
+
+ if (!fh->desc_idx)
+ return 0;
+
+ ret = solo_p2m_dma_desc(fh->solo_dev, SOLO_P2M_DMA_ID_DISP,
+ fh->desc, fh->desc_idx);
+ disp_reset_desc(fh);
+
+ return ret;
+}
+
+static int disp_push_desc(struct solo_filehandle *fh, dma_addr_t dma_addr,
+ u32 ext_addr, int size, int repeat, int ext_size)
+{
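+	/* Flush the descriptor table to the P2M engine when it is full before adding another entry */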
+ if (fh->desc_idx >= SOLO_NR_P2M_DESC) {
+ int ret = disp_flush_descs(fh);
+ if (ret)
+ return ret;
+ }
+
+ solo_p2m_push_desc(&fh->desc[fh->desc_idx], 0, dma_addr, ext_addr,
+ size, repeat, ext_size);
+ fh->desc_idx++;
+
+ return 0;
+}
+
static void solo_fillbuf(struct solo_filehandle *fh,
struct videobuf_buffer *vb)
{
struct solo6010_dev *solo_dev = fh->solo_dev;
- dma_addr_t vbuf;
+ struct videobuf_dmabuf *vbuf;
unsigned int fdma_addr;
- int frame_size;
int error = 1;
int i;
+ struct scatterlist *sg;
+ dma_addr_t sg_dma;
+ int sg_size_left;
- if (!(vbuf = videobuf_to_dma_contig(vb)))
+ vbuf = videobuf_to_dma(vb);
+ if (!vbuf)
goto finish_buf;
if (erase_off(solo_dev)) {
- void *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
- int image_size = solo_image_size(solo_dev);
- for (i = 0; i < image_size; i += 2) {
- ((u8 *)p)[i] = 0x80;
- ((u8 *)p)[i + 1] = 0x00;
+		int j;
+
+ /* Just blit to the entire sg list, ignoring size */
+ for_each_sg(vbuf->sglist, sg, vbuf->sglen, i) {
+ void *p = sg_virt(sg);
+ size_t len = sg_dma_len(sg);
+
+			for (j = 0; j < len; j += 2) {
+				((u8 *)p)[j] = 0x80;
+				((u8 *)p)[j + 1] = 0x00;
+			}
}
+
error = 0;
goto finish_buf;
}
- frame_size = SOLO_HW_BPL * solo_vlines(solo_dev);
- fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write * frame_size);
+ disp_reset_desc(fh);
+ sg = vbuf->sglist;
+ sg_dma = sg_dma_address(sg);
+ sg_size_left = sg_dma_len(sg);
+
+ fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write *
+ (SOLO_HW_BPL * solo_vlines(solo_dev)));
- for (i = 0; i < frame_size / SOLO_DISP_BUF_SIZE; i++) {
- int j;
- for (j = 0; j < (SOLO_DISP_BUF_SIZE / SOLO_HW_BPL); j++) {
- if (solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_DISP, 0,
- vbuf, fdma_addr + (j * SOLO_HW_BPL),
- solo_bytesperline(solo_dev)))
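+	/* Map each display line onto the sg list, splitting a line across segment boundaries when needed */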
+ for (i = 0; i < solo_vlines(solo_dev); i++) {
+ int line_len = solo_bytesperline(solo_dev);
+ int lines;
+
+ if (!sg_size_left) {
+ sg = sg_next(sg);
+ if (sg == NULL)
goto finish_buf;
- vbuf += solo_bytesperline(solo_dev);
+ sg_dma = sg_dma_address(sg);
+ sg_size_left = sg_dma_len(sg);
}
- fdma_addr += SOLO_DISP_BUF_SIZE;
+
+ /* No room for an entire line, so chunk it up */
+ if (sg_size_left < line_len) {
+ int this_addr = fdma_addr;
+
+ while (line_len > 0) {
+ int this_write;
+
+ if (!sg_size_left) {
+ sg = sg_next(sg);
+ if (sg == NULL)
+ goto finish_buf;
+ sg_dma = sg_dma_address(sg);
+ sg_size_left = sg_dma_len(sg);
+ }
+
+ this_write = min(sg_size_left, line_len);
+
+ if (disp_push_desc(fh, sg_dma, this_addr,
+ this_write, 0, 0))
+ goto finish_buf;
+
+ line_len -= this_write;
+ sg_size_left -= this_write;
+ sg_dma += this_write;
+ this_addr += this_write;
+ }
+
+ fdma_addr += SOLO_HW_BPL;
+ continue;
+ }
+
+ /* Shove as many lines into a repeating descriptor as possible */
+ lines = min(sg_size_left / line_len,
+ solo_vlines(solo_dev) - i);
+
+ if (disp_push_desc(fh, sg_dma, fdma_addr, line_len,
+ lines - 1, SOLO_HW_BPL))
+ goto finish_buf;
+
+ i += lines - 1;
+ fdma_addr += SOLO_HW_BPL * lines;
+ sg_dma += lines * line_len;
+ sg_size_left -= lines * line_len;
}
- error = 0;
+
+ error = disp_flush_descs(fh);
finish_buf:
if (error) {
vb->state = VIDEOBUF_ERROR;
} else {
+ vb->size = solo_vlines(solo_dev) * solo_bytesperline(solo_dev);
vb->state = VIDEOBUF_DONE;
vb->field_count++;
do_gettimeofday(&vb->ts);
@@ -275,7 +375,7 @@ static void solo_thread_try(struct solo_filehandle *fh)
break;
cur_write = SOLO_VI_STATUS0_PAGE(solo_reg_read(fh->solo_dev,
- SOLO_VI_STATUS0));
+ SOLO_VI_STATUS0));
if (cur_write == fh->old_write)
break;
@@ -310,7 +410,7 @@ static int solo_thread(void *data)
remove_wait_queue(&solo_dev->disp_thread_wait, &wait);
- return 0;
+ return 0;
}
static int solo_start_thread(struct solo_filehandle *fh)
@@ -337,12 +437,12 @@ static int solo_buf_setup(struct videobuf_queue *vq, unsigned int *count,
struct solo_filehandle *fh = vq->priv_data;
struct solo6010_dev *solo_dev = fh->solo_dev;
- *size = solo_image_size(solo_dev);
+ *size = solo_image_size(solo_dev);
- if (*count < MIN_VID_BUFFERS)
+ if (*count < MIN_VID_BUFFERS)
*count = MIN_VID_BUFFERS;
- return 0;
+ return 0;
}
static int solo_buf_prepare(struct videobuf_queue *vq,
@@ -364,7 +464,9 @@ static int solo_buf_prepare(struct videobuf_queue *vq,
if (vb->state == VIDEOBUF_NEEDS_INIT) {
int rc = videobuf_iolock(vq, vb, NULL);
if (rc < 0) {
- videobuf_dma_contig_free(vq, vb);
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
vb->state = VIDEOBUF_NEEDS_INIT;
return rc;
}
@@ -388,7 +490,10 @@ static void solo_buf_queue(struct videobuf_queue *vq,
static void solo_buf_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
- videobuf_dma_contig_free(vq, vb);
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -404,7 +509,7 @@ static unsigned int solo_v4l2_poll(struct file *file,
{
struct solo_filehandle *fh = file->private_data;
- return videobuf_poll_stream(file, &fh->vidq, wait);
+ return videobuf_poll_stream(file, &fh->vidq, wait);
}
static int solo_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -420,7 +525,8 @@ static int solo_v4l2_open(struct file *file)
struct solo_filehandle *fh;
int ret;
- if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (fh == NULL)
return -ENOMEM;
spin_lock_init(&fh->slock);
@@ -428,16 +534,17 @@ static int solo_v4l2_open(struct file *file)
fh->solo_dev = solo_dev;
file->private_data = fh;
- if ((ret = solo_start_thread(fh))) {
+ ret = solo_start_thread(fh);
+ if (ret) {
kfree(fh);
return ret;
}
- videobuf_queue_dma_contig_init(&fh->vidq, &solo_video_qops,
- &solo_dev->pdev->dev, &fh->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- SOLO_DISP_PIX_FIELD,
- sizeof(struct videobuf_buffer), fh, NULL);
+ videobuf_queue_sg_init(&fh->vidq, &solo_video_qops,
+ &solo_dev->pdev->dev, &fh->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ SOLO_DISP_PIX_FIELD,
+ sizeof(struct videobuf_buffer), fh, NULL);
return 0;
}
@@ -530,7 +637,7 @@ static int solo_enum_input(struct file *file, void *priv,
if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
input->std = V4L2_STD_NTSC_M;
else
- input->std = V4L2_STD_PAL_M;
+ input->std = V4L2_STD_PAL_B;
return 0;
}
@@ -622,7 +729,7 @@ static int solo_get_fmt_cap(struct file *file, void *priv,
return 0;
}
-static int solo_reqbufs(struct file *file, void *priv,
+static int solo_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *req)
{
struct solo_filehandle *fh = priv;
@@ -781,11 +888,11 @@ static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = {
.vidioc_qbuf = solo_qbuf,
.vidioc_dqbuf = solo_dqbuf,
.vidioc_streamon = solo_streamon,
- .vidioc_streamoff = solo_streamoff,
+ .vidioc_streamoff = solo_streamoff,
/* Controls */
.vidioc_queryctrl = solo_disp_queryctrl,
- .vidioc_g_ctrl = solo_disp_g_ctrl,
- .vidioc_s_ctrl = solo_disp_s_ctrl,
+ .vidioc_g_ctrl = solo_disp_g_ctrl,
+ .vidioc_s_ctrl = solo_disp_s_ctrl,
};
static struct video_device solo_v4l2_template = {
@@ -795,7 +902,7 @@ static struct video_device solo_v4l2_template = {
.minor = -1,
.release = video_device_release,
- .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+ .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_B,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -836,13 +943,13 @@ int solo_v4l2_init(struct solo6010_dev *solo_dev)
for (i = 0; i < solo_dev->nr_chans; i++) {
solo_v4l2_set_ch(solo_dev, i);
while (erase_off(solo_dev))
- ;// Do nothing
+ ;/* Do nothing */
}
/* Set the default display channel */
solo_v4l2_set_ch(solo_dev, 0);
while (erase_off(solo_dev))
- ;// Do nothing
+ ;/* Do nothing */
solo6010_irq_on(solo_dev, SOLO_IRQ_VIDEO_IN);
diff --git a/drivers/staging/solo6x10/solo6010.h b/drivers/staging/solo6x10/solo6010.h
index dca8e3e15450..9c930f3a017b 100644
--- a/drivers/staging/solo6x10/solo6010.h
+++ b/drivers/staging/solo6x10/solo6010.h
@@ -26,8 +26,8 @@
#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/delay.h>
#include <linux/wait.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/atomic.h>
@@ -48,10 +48,14 @@
#define PCI_DEVICE_ID_NEUSOLO_4 0x4304
#define PCI_DEVICE_ID_NEUSOLO_9 0x4309
#define PCI_DEVICE_ID_NEUSOLO_16 0x4310
-/* Commell Softlogic 6010 based cards */
-#define PCI_DEVICE_ID_COMMSOLO_4 0x4E04
-#define PCI_DEVICE_ID_COMMSOLO_9 0x4E09
-#define PCI_DEVICE_ID_COMMSOLO_16 0x4E10
+/* Bluecherry Softlogic 6010 based cards */
+#define PCI_DEVICE_ID_BC_SOLO_4 0x4E04
+#define PCI_DEVICE_ID_BC_SOLO_9 0x4E09
+#define PCI_DEVICE_ID_BC_SOLO_16 0x4E10
+/* Bluecherry Softlogic 6110 based cards */
+#define PCI_DEVICE_ID_BC_6110_4 0x5304
+#define PCI_DEVICE_ID_BC_6110_8 0x5308
+#define PCI_DEVICE_ID_BC_6110_16 0x5310
#endif /* Bluecherry */
#define SOLO6010_NAME "solo6010"
@@ -64,7 +68,7 @@
#define SOLO6010_VER_MINOR 0
#define SOLO6010_VER_SUB 0
#define SOLO6010_VER_NUM \
- KERNEL_VERSION(SOLO6010_VER_MAJOR, SOLO6010_VER_MINOR, SOLO6010_VER_SUB)
+ KERNEL_VERSION(SOLO6010_VER_MAJOR, SOLO6010_VER_MINOR, SOLO6010_VER_SUB)
/*
* The SOLO6010 actually has 8 i2c channels, but we only use 2.
@@ -78,7 +82,6 @@
/* DMA Engine setup */
#define SOLO_NR_P2M 4
#define SOLO_NR_P2M_DESC 256
-#define SOLO_P2M_DESC_SIZE (SOLO_NR_P2M_DESC * 16)
/* MPEG and JPEG share the same interrupt and locks so they must be together
* in the same dma channel. */
#define SOLO_P2M_DMA_ID_MP4E 0
@@ -123,11 +126,17 @@ enum SOLO_I2C_STATE {
IIC_STATE_STOP
};
+struct p2m_desc {
+ u32 ctrl;
+ u32 ext;
+ u32 ta;
+ u32 fa;
+};
+
struct solo_p2m_dev {
- struct semaphore sem;
+ struct mutex mutex;
struct completion completion;
int error;
- u8 desc[SOLO_P2M_DESC_SIZE];
};
#define OSD_TEXT_MAX 30
@@ -185,7 +194,7 @@ struct solo6010_dev {
/* i2c related items */
struct i2c_adapter i2c_adap[SOLO_I2C_ADAPTERS];
enum SOLO_I2C_STATE i2c_state;
- struct semaphore i2c_sem;
+ struct mutex i2c_mutex;
int i2c_id;
wait_queue_head_t i2c_wait;
struct i2c_msg *i2c_msg;
@@ -212,7 +221,7 @@ struct solo6010_dev {
struct solo_enc_buf enc_buf[SOLO_NR_RING_BUFS];
/* Current video settings */
- u32 video_type;
+ u32 video_type;
u16 video_hsize, video_vsize;
u16 vout_hstart, vout_vstart;
u16 vin_hstart, vin_vstart;
@@ -306,6 +315,14 @@ int solo_p2m_dma_t(struct solo6010_dev *solo_dev, u8 id, int wr,
dma_addr_t dma_addr, u32 ext_addr, u32 size);
int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
void *sys_addr, u32 ext_addr, u32 size);
+int solo_p2m_dma_sg(struct solo6010_dev *solo_dev, u8 id,
+ struct p2m_desc *pdesc, int wr,
+ struct scatterlist *sglist, u32 sg_off,
+ u32 ext_addr, u32 size);
+void solo_p2m_push_desc(struct p2m_desc *desc, int wr, dma_addr_t dma_addr,
+ u32 ext_addr, u32 size, int repeat, u32 ext_size);
+int solo_p2m_dma_desc(struct solo6010_dev *solo_dev, u8 id,
+ struct p2m_desc *desc, int desc_count);
/* Set the threshold for motion detection */
void solo_set_motion_threshold(struct solo6010_dev *solo_dev, u8 ch, u16 val);
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index cc79f9edfe9e..07a7f5432597 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -332,7 +332,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
unsigned long flags;
len = strlen(buf);
- if (len > 0 || len < 3) {
+ if (len > 0 && len < 3) {
ch = buf[0];
if (ch == '\n')
ch = '0';
@@ -984,8 +984,10 @@ int speakup_kobj_init(void)
* not known ahead of time.
*/
accessibility_kobj = kobject_create_and_add("accessibility", NULL);
- if (!accessibility_kobj)
- return -ENOMEM;
+ if (!accessibility_kobj) {
+ retval = -ENOMEM;
+ goto out;
+ }
speakup_kobj = kobject_create_and_add("speakup", accessibility_kobj);
if (!speakup_kobj) {
@@ -1002,7 +1004,7 @@ int speakup_kobj_init(void)
if (retval)
goto err_group;
- return 0;
+ goto out;
err_group:
sysfs_remove_group(speakup_kobj, &main_attr_group);
@@ -1010,6 +1012,7 @@ err_speakup:
kobject_put(speakup_kobj);
err_acc:
kobject_put(accessibility_kobj);
+out:
return retval;
}
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 4b7a9c2b965f..3cd00396a462 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2253,17 +2253,17 @@ static int __init speakup_init(void)
err = speakup_add_virtual_keyboard();
if (err)
- return err;
+ goto out;
initialize_msgs(); /* Initialize arrays for i18n. */
first_console = kzalloc(sizeof(*first_console), GFP_KERNEL);
- if (!first_console)
- return -ENOMEM;
- err = speakup_kobj_init();
- if (err) {
- kfree(first_console);
- return err;
+ if (!first_console) {
+ err = -ENOMEM;
+ goto err_cons;
}
+ err = speakup_kobj_init();
+ if (err)
+ goto err_kobject;
reset_default_chars();
reset_default_chartab();
@@ -2299,11 +2299,20 @@ static int __init speakup_init(void)
speakup_task = kthread_create(speakup_thread, NULL, "speakup");
set_user_nice(speakup_task, 10);
- if (!IS_ERR(speakup_task))
- wake_up_process(speakup_task);
- else
- return -ENOMEM;
- return 0;
+ if (IS_ERR(speakup_task)) {
+ err = -ENOMEM;
+ goto err_kobject;
+ }
+ wake_up_process(speakup_task);
+ goto out;
+
+err_kobject:
+	speakup_kobj_exit();
+ kfree(first_console);
+err_cons:
+ speakup_remove_virtual_keyboard();
+out:
+ return err;
}
module_init(speakup_init);
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index 840bddb64101..d36c90e30d54 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -52,7 +52,7 @@ typedef int (*special_func)(struct vc_data *vc, u_char type, u_char ch,
#define COLOR_BUFFER_SIZE 160
-struct spk_highlight_color_track{
+struct spk_highlight_color_track {
/* Count of each background color */
unsigned int bgcount[8];
/* Buffer for characters drawn with each background color */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index 63a9d0adf32d..007b24b54e25 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -28,6 +28,7 @@
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/async.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
@@ -729,34 +730,16 @@ static void create_sysfs_entry(struct device *dev)
}
*/
-static int GLOB_SBD_init(void)
+static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
{
int i;
- /* Set debug output level (0~3) here. 3 is most verbose */
- printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-
- mutex_init(&spectra_lock);
-
- GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
- if (GLOB_SBD_majornum <= 0) {
- printk(KERN_ERR "Unable to get the major %d for Spectra",
- GLOB_SBD_majornum);
- return -EBUSY;
- }
-
- if (PASS != GLOB_FTL_Flash_Init()) {
- printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
- "Aborting\n");
- goto out_flash_register;
- }
-
/* create_sysfs_entry(&dev->dev); */
if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
"Aborting\n");
- goto out_flash_register;
+ return;
} else {
nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
"Num blocks=%d, pagesperblock=%d, "
@@ -775,24 +758,50 @@ static int GLOB_SBD_init(void)
}
printk(KERN_ALERT "Spectra: block table has been found.\n");
+ GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
+ if (GLOB_SBD_majornum <= 0) {
+ printk(KERN_ERR "Unable to get the major %d for Spectra",
+ GLOB_SBD_majornum);
+ goto out_ftl_flash_register;
+ }
+
for (i = 0; i < NUM_DEVICES; i++)
if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
- goto out_ftl_flash_register;
+ goto out_blk_register;
nand_dbg_print(NAND_DBG_DEBUG,
"Spectra: module loaded with major number %d\n",
GLOB_SBD_majornum);
- return 0;
+ return;
+out_blk_register:
+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
out_ftl_flash_register:
GLOB_FTL_Cache_Release();
-out_flash_register:
- GLOB_FTL_Flash_Release();
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
printk(KERN_ERR "Spectra: Module load failed.\n");
+}
- return -ENOMEM;
+int register_spectra_ftl(void)
+{
+ async_schedule(register_spectra_ftl_async, NULL);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_spectra_ftl);
+
+static int GLOB_SBD_init(void)
+{
+ /* Set debug output level (0~3) here. 3 is most verbose */
+ printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
+
+ mutex_init(&spectra_lock);
+
+ if (PASS != GLOB_FTL_Flash_Init()) {
+ printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
+ "Aborting\n");
+ return -ENODEV;
+ }
+ return 0;
}
static void __exit GLOB_SBD_exit(void)
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
index 6c5d90c53430..85c0750612f6 100644
--- a/drivers/staging/spectra/ffsport.h
+++ b/drivers/staging/spectra/ffsport.h
@@ -80,5 +80,6 @@ extern int nand_debug_level;
extern int GLOB_Calc_Used_Bits(u32 n);
extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
+extern int register_spectra_ftl(void);
#endif /* _FFSPORT_ */
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 4e6e451cd5c8..fb39c8ecf596 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -1258,9 +1258,7 @@ int GLOB_FTL_Flash_Init(void)
g_SBDCmdIndex = 0;
- GLOB_LLD_Flash_Init();
-
- status = GLOB_LLD_Read_Device_ID();
+ status = GLOB_LLD_Flash_Init();
return status;
}
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
index 6733bbf8016d..095f2f0c2e5b 100644
--- a/drivers/staging/spectra/lld_emu.c
+++ b/drivers/staging/spectra/lld_emu.c
@@ -180,10 +180,8 @@ u16 emu_Flash_Init(void)
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
- flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
- GLOB_LLD_BLOCKS *
- GLOB_LLD_PAGES *
- sizeof(u8));
+ flash_memory[0] = vmalloc(GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS *
+ GLOB_LLD_PAGES * sizeof(u8));
if (!flash_memory[0]) {
printk(KERN_ERR "Fail to allocate memory "
"for nand emulator!\n");
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
index 0d647a8fd2b6..2263d3ea5456 100644
--- a/drivers/staging/spectra/lld_nand.c
+++ b/drivers/staging/spectra/lld_nand.c
@@ -2395,14 +2395,94 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
unsigned long csr_base;
unsigned long csr_len;
struct mrst_nand_info *pndev = &info;
+ u32 int_mask;
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
+ FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
+ GLOB_HWCTL_REG_SIZE);
+ if (!FlashReg) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ return -ENOMEM;
+ }
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped reg base address: "
+ "0x%p, len: %d\n",
+ FlashReg, GLOB_HWCTL_REG_SIZE);
+
+ FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
+ GLOB_HWCTL_MEM_SIZE);
+ if (!FlashMem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ iounmap(FlashReg);
+ return -ENOMEM;
+ }
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped flash base address: "
+ "0x%p, len: %d\n",
+ (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(FlashReg + ACC_CLKS),
+ ioread32(FlashReg + RE_2_WE),
+ ioread32(FlashReg + WE_2_RE),
+ ioread32(FlashReg + ADDR_2_DATA),
+ ioread32(FlashReg + RDWR_EN_LO_CNT),
+ ioread32(FlashReg + RDWR_EN_HI_CNT),
+ ioread32(FlashReg + CS_SETUP_CNT));
+
+ NAND_Flash_Reset();
+
+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
+
+#if CMD_DMA
+ info.pcmds_num = 0;
+ info.flash_bank = 0;
+ info.cdma_num = 0;
+ int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
+ DMA_INTR__DESC_COMP_CHANNEL1 |
+ DMA_INTR__DESC_COMP_CHANNEL2 |
+ DMA_INTR__DESC_COMP_CHANNEL3 |
+ DMA_INTR__MEMCOPY_DESC_COMP);
+ iowrite32(int_mask, FlashReg + DMA_INTR_EN);
+ iowrite32(0xFFFF, FlashReg + DMA_INTR);
+
+ int_mask = (INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL);
+#else
+ int_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR |
+ INTR_STATUS0__PROGRAM_FAIL |
+ INTR_STATUS0__ERASE_FAIL;
+#endif
+ iowrite32(int_mask, FlashReg + INTR_EN0);
+ iowrite32(int_mask, FlashReg + INTR_EN1);
+ iowrite32(int_mask, FlashReg + INTR_EN2);
+ iowrite32(int_mask, FlashReg + INTR_EN3);
+
+ /* Clear all status bits */
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
+ iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
+
+ iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
+
+ /* Should set value for these registers when init */
+ iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(1, FlashReg + ECC_ENABLE);
+ enable_ecc = 1;
ret = pci_enable_device(dev);
if (ret) {
printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- return ret;
+ goto failed_req_csr;
}
pci_set_master(dev);
@@ -2461,12 +2541,26 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_drvdata(dev, pndev);
+ ret = GLOB_LLD_Read_Device_ID();
+ if (ret) {
+ iounmap(pndev->ioaddr);
+ goto failed_remap_csr;
+ }
+
+ ret = register_spectra_ftl();
+ if (ret) {
+ iounmap(pndev->ioaddr);
+ goto failed_remap_csr;
+ }
+
return 0;
failed_remap_csr:
pci_release_regions(dev);
failed_req_csr:
pci_disable_device(dev);
+ iounmap(FlashMem);
+ iounmap(FlashReg);
return ret;
}
@@ -2498,91 +2592,10 @@ static struct pci_driver nand_pci_driver = {
int NAND_Flash_Init(void)
{
int retval;
- u32 int_mask;
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
- FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
- GLOB_HWCTL_REG_SIZE);
- if (!FlashReg) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- return -ENOMEM;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped reg base address: "
- "0x%p, len: %d\n",
- FlashReg, GLOB_HWCTL_REG_SIZE);
-
- FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
- GLOB_HWCTL_MEM_SIZE);
- if (!FlashMem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- iounmap(FlashReg);
- return -ENOMEM;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped flash base address: "
- "0x%p, len: %d\n",
- (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- NAND_Flash_Reset();
-
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-
-#if CMD_DMA
- info.pcmds_num = 0;
- info.flash_bank = 0;
- info.cdma_num = 0;
- int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
- iowrite32(int_mask, FlashReg + DMA_INTR_EN);
- iowrite32(0xFFFF, FlashReg + DMA_INTR);
-
- int_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-#else
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-#endif
- iowrite32(int_mask, FlashReg + INTR_EN0);
- iowrite32(int_mask, FlashReg + INTR_EN1);
- iowrite32(int_mask, FlashReg + INTR_EN2);
- iowrite32(int_mask, FlashReg + INTR_EN3);
-
- /* Clear all status bits */
- iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
-
- iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
-
- /* Should set value for these registers when init */
- iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
-
retval = pci_register_driver(&nand_pci_driver);
if (retval)
return -ENOMEM;
diff --git a/drivers/staging/ste_rmi4/Kconfig b/drivers/staging/ste_rmi4/Kconfig
new file mode 100644
index 000000000000..e8679509e525
--- /dev/null
+++ b/drivers/staging/ste_rmi4/Kconfig
@@ -0,0 +1,9 @@
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+ tristate "Synaptics i2c rmi4 touchscreen"
+ depends on I2C && INPUT
+ help
+	  Say Y here if you have a Synaptics RMI4 touchscreen and
+ want to enable support for the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_rmi4_ts.
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
new file mode 100644
index 000000000000..6cce2ed187ef
--- /dev/null
+++ b/drivers/staging/ste_rmi4/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the RMI4 touchscreen driver.
+#
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
diff --git a/drivers/staging/ste_rmi4/TODO b/drivers/staging/ste_rmi4/TODO
new file mode 100644
index 000000000000..9be2437da85f
--- /dev/null
+++ b/drivers/staging/ste_rmi4/TODO
@@ -0,0 +1,7 @@
+TODO
+----
+
+Wait for the official upstream synaptics rmi4 clearpad drivers as promised over the past few months
+Merge any device support needed from this driver into it
+Delete this driver
+
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
new file mode 100644
index 000000000000..80183a7e6624
--- /dev/null
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -0,0 +1,1178 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include "synaptics_i2c_rmi4.h"
+
+/* TODO: for multiple device support will need a per-device mutex */
+#define DRIVER_NAME "synaptics_rmi4_i2c"
+
+#define MAX_ERROR_REPORT 6
+#define MAX_TOUCH_MAJOR 15
+#define MAX_RETRY_COUNT 5
+#define STD_QUERY_LEN 21
+#define PAGE_LEN 2
+#define DATA_BUF_LEN 32
+#define BUF_LEN 37
+#define QUERY_LEN 9
+#define DATA_LEN 12
+#define HAS_TAP 0x01
+#define HAS_PALMDETECT 0x01
+#define HAS_ROTATE 0x02
+#define HAS_TAPANDHOLD 0x02
+#define HAS_DOUBLETAP 0x04
+#define HAS_EARLYTAP 0x08
+#define HAS_RELEASE 0x08
+#define HAS_FLICK 0x10
+#define HAS_PRESS 0x20
+#define HAS_PINCH 0x40
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define TOUCHPAD_CTRL_INTR 0x8
+#define PDT_START_SCAN_LOCATION (0x00E9)
+#define PDT_END_SCAN_LOCATION (0x000A)
+#define PDT_ENTRY_SIZE (0x0006)
+#define RMI4_NUMBER_OF_MAX_FINGERS (8)
+#define SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM (0x11)
+#define SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM (0x01)
+
+/**
+ * struct synaptics_rmi4_fn_desc - contains the function descriptor information
+ * @query_base_addr: base address for query
+ * @cmd_base_addr: base address for command
+ * @ctrl_base_addr: base address for control
+ * @data_base_addr: base address for data
+ * @intr_src_count: count for the interrupt source
+ * @fn_number: function number
+ *
+ * This structure gives the function descriptor information for a
+ * particular function.
+ */
+struct synaptics_rmi4_fn_desc {
+ unsigned char query_base_addr;
+ unsigned char cmd_base_addr;
+ unsigned char ctrl_base_addr;
+ unsigned char data_base_addr;
+ unsigned char intr_src_count;
+ unsigned char fn_number;
+};
+
+/**
+ * struct synaptics_rmi4_fn - contains the function information
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: number of fingers touched
+ * @size_of_data_register_block: data register block size
+ * @index_to_intr_reg: index for interrupt register
+ * @intr_mask: interrupt mask value
+ * @fn_desc: variable for function descriptor structure
+ * @link: linked list for function descriptors
+ *
+ * This structure gives information about the number of data sources and
+ * the number of data registers associated with the function.
+ */
+struct synaptics_rmi4_fn {
+ unsigned char fn_number;
+ unsigned char num_of_data_sources;
+ unsigned char num_of_data_points;
+ unsigned char size_of_data_register_block;
+ unsigned char index_to_intr_reg;
+ unsigned char intr_mask;
+ struct synaptics_rmi4_fn_desc fn_desc;
+ struct list_head link;
+};
+
+/**
+ * struct synaptics_rmi4_device_info - contains the rmi4 device information
+ * @version_major: protocol major version number
+ * @version_minor: protocol minor version number
+ * @manufacturer_id: manufacturer identification byte
+ * @product_props: product properties information
+ * @product_info: product info array
+ * @date_code: device manufacture date
+ * @tester_id: tester id array
+ * @serial_number: serial number for that device
+ * @product_id_string: product id for the device
+ * @support_fn_list: linked list for device information
+ *
+ * This structure holds the identification data of the rmi4 device and
+ * the list of functions it supports.
+ */
+struct synaptics_rmi4_device_info {
+ unsigned int version_major;
+ unsigned int version_minor;
+ unsigned char manufacturer_id;
+ unsigned char product_props;
+ unsigned char product_info[2];
+ unsigned char date_code[3];
+ unsigned short tester_id;
+ unsigned short serial_number;
+ unsigned char product_id_string[11];
+ struct list_head support_fn_list;
+};
+
+/**
+ * struct synaptics_rmi4_data - contains the rmi4 device data
+ * @rmi4_mod_info: structure variable for rmi4 device info
+ * @input_dev: pointer for input device
+ * @i2c_client: pointer for i2c client
+ * @board: constant pointer for touch platform data
+ * @fn_list_mutex: mutex for function list
+ * @rmi4_page_mutex: mutex for rmi4 page
+ * @current_page: currently selected register page
+ * @number_of_interrupt_register: interrupt registers count
+ * @fn01_ctrl_base_addr: control base address for fn01
+ * @fn01_query_base_addr: query base address for fn01
+ * @fn01_data_base_addr: data base address for fn01
+ * @sensor_max_x: sensor maximum x value
+ * @sensor_max_y: sensor maximum y value
+ * @regulator: pointer to the regulator structure
+ * @wait: wait queue structure variable
+ * @touch_stopped: flag to stop the thread function
+ *
+ * This structure gives the device data information.
+ */
+struct synaptics_rmi4_data {
+ struct synaptics_rmi4_device_info rmi4_mod_info;
+ struct input_dev *input_dev;
+ struct i2c_client *i2c_client;
+ const struct synaptics_rmi4_platform_data *board;
+ struct mutex fn_list_mutex;
+ struct mutex rmi4_page_mutex;
+ int current_page;
+ unsigned int number_of_interrupt_register;
+ unsigned short fn01_ctrl_base_addr;
+ unsigned short fn01_query_base_addr;
+ unsigned short fn01_data_base_addr;
+ int sensor_max_x;
+ int sensor_max_y;
+ struct regulator *regulator;
+ wait_queue_head_t wait;
+ bool touch_stopped;
+};
+
+/**
+ * synaptics_rmi4_set_page() - sets the page
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: register address used to select the page
+ *
+ * Selects the register page containing @address; returns PAGE_LEN on success.
+ */
+static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *pdata,
+ unsigned int address)
+{
+ unsigned char txbuf[PAGE_LEN];
+ int retval;
+ unsigned int page;
+ struct i2c_client *i2c = pdata->i2c_client;
+
+ page = ((address >> 8) & MASK_8BIT);
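+	/* Only write the page select register when the page actually changes */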
+ if (page != pdata->current_page) {
+ txbuf[0] = MASK_8BIT;
+ txbuf[1] = page;
+ retval = i2c_master_send(i2c, txbuf, PAGE_LEN);
+ if (retval != PAGE_LEN)
+ dev_err(&i2c->dev, "%s:failed:%d\n", __func__, retval);
+ else
+ pdata->current_page = page;
+ } else
+ retval = PAGE_LEN;
+ return retval;
+}
+/**
+ * synaptics_rmi4_i2c_block_read() - read the block of data
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: offset to read the block of data from
+ * @valp: pointer to the buffer that receives the data read
+ * @size: number of bytes to read
+ *
+ * Reads a block of data from the device; returns the number of bytes read.
+ */
+static int synaptics_rmi4_i2c_block_read(struct synaptics_rmi4_data *pdata,
+ unsigned short address,
+ unsigned char *valp, int size)
+{
+ int retval = 0;
+ int retry_count = 0;
+ int index;
+ struct i2c_client *i2c = pdata->i2c_client;
+
+ mutex_lock(&(pdata->rmi4_page_mutex));
+ retval = synaptics_rmi4_set_page(pdata, address);
+ if (retval != PAGE_LEN)
+ goto exit;
+ index = address & MASK_8BIT;
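+	/* Retry the read, re-selecting the page first, up to MAX_RETRY_COUNT times */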
+retry:
+ retval = i2c_smbus_read_i2c_block_data(i2c, index, size, valp);
+ if (retval != size) {
+ if (++retry_count == MAX_RETRY_COUNT)
+ dev_err(&i2c->dev,
+ "%s:address 0x%04x size %d failed:%d\n",
+ __func__, address, size, retval);
+ else {
+ synaptics_rmi4_set_page(pdata, address);
+ goto retry;
+ }
+ }
+exit:
+ mutex_unlock(&(pdata->rmi4_page_mutex));
+ return retval;
+}
+
+/**
+ * synaptics_rmi4_i2c_byte_write() - write a single byte of data
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: offset to write the byte to
+ * @data: byte to be written
+ *
+ * Writes a single byte to the device; returns 1 on success.
+ */
+static int synaptics_rmi4_i2c_byte_write(struct synaptics_rmi4_data *pdata,
+ unsigned short address,
+ unsigned char data)
+{
+ unsigned char txbuf[2];
+ int retval = 0;
+ struct i2c_client *i2c = pdata->i2c_client;
+
+ /* Can't have anyone else changing the page behind our backs */
+ mutex_lock(&(pdata->rmi4_page_mutex));
+
+ retval = synaptics_rmi4_set_page(pdata, address);
+ if (retval != PAGE_LEN)
+ goto exit;
+ txbuf[0] = address & MASK_8BIT;
+ txbuf[1] = data;
+ retval = i2c_master_send(pdata->i2c_client, txbuf, 2);
+	/* Add in retry on writes only for certain error return values */
+ if (retval != 2) {
+ dev_err(&i2c->dev, "%s:failed:%d\n", __func__, retval);
+ retval = -EIO;
+ } else
+ retval = 1;
+exit:
+ mutex_unlock(&(pdata->rmi4_page_mutex));
+ return retval;
+}
+
+/**
+ * synpatics_rmi4_touchpad_report() - report touch data for the rmi4 touchpad
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ *
+ * Reads the touchpad finger data and reports it to the input subsystem.
+ */
+static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
+ struct synaptics_rmi4_fn *rfi)
+{
+ /* number of touch points - fingers down in this case */
+ int touch_count = 0;
+ int finger;
+ int fingers_supported;
+ int finger_registers;
+ int reg;
+ int finger_shift;
+ int finger_status;
+ int retval;
+ unsigned short data_base_addr;
+ unsigned short data_offset;
+ unsigned char data_reg_blk_size;
+ unsigned char values[2];
+ unsigned char data[DATA_LEN];
+ int x[RMI4_NUMBER_OF_MAX_FINGERS];
+ int y[RMI4_NUMBER_OF_MAX_FINGERS];
+ int wx[RMI4_NUMBER_OF_MAX_FINGERS];
+ int wy[RMI4_NUMBER_OF_MAX_FINGERS];
+ struct i2c_client *client = pdata->i2c_client;
+
+ /* get 2D sensor finger data */
+ /*
+ * First get the finger status field - the size of the finger status
+	 * field is determined by the number of fingers supported - 2 bits per
+ * finger, so the number of registers to read is:
+ * registerCount = ceil(numberOfFingers/4).
+ * Read the required number of registers and check each 2 bit field to
+ * determine if a finger is down:
+ * 00 = finger not present,
+ * 01 = finger present and data accurate,
+ * 10 = finger present but data may not be accurate,
+ * 11 = reserved for product use.
+ */
+ fingers_supported = rfi->num_of_data_points;
+ finger_registers = (fingers_supported + 3)/4;
+ data_base_addr = rfi->fn_desc.data_base_addr;
+ retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values,
+ finger_registers);
+ if (retval != finger_registers) {
+ dev_err(&client->dev, "%s:read status registers failed\n",
+ __func__);
+ return 0;
+ }
+ /*
+ * For each finger present, read the proper number of registers
+ * to get absolute data.
+ */
+ data_reg_blk_size = rfi->size_of_data_register_block;
+ for (finger = 0; finger < fingers_supported; finger++) {
+ /* determine which data byte the finger status is in */
+ reg = finger/4;
+ /* bit shift to get finger's status */
+ finger_shift = (finger % 4) * 2;
+ finger_status = (values[reg] >> finger_shift) & 3;
+ /*
+ * if finger status indicates a finger is present then
+ * read the finger data and report it
+ */
+ if (finger_status == 1 || finger_status == 2) {
+ /* Read the finger data */
+ data_offset = data_base_addr +
+ ((finger * data_reg_blk_size) +
+ finger_registers);
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ data_offset, data,
+ data_reg_blk_size);
+ if (retval != data_reg_blk_size) {
+ printk(KERN_ERR "%s:read data failed\n",
+ __func__);
+ return 0;
+ } else {
+ x[touch_count] =
+ (data[0] << 4) | (data[2] & MASK_4BIT);
+ y[touch_count] =
+ (data[1] << 4) |
+ ((data[2] >> 4) & MASK_4BIT);
+ wy[touch_count] =
+ (data[3] >> 4) & MASK_4BIT;
+ wx[touch_count] =
+ (data[3] & MASK_4BIT);
+
+ if (pdata->board->x_flip)
+ x[touch_count] =
+ pdata->sensor_max_x -
+ x[touch_count];
+ if (pdata->board->y_flip)
+ y[touch_count] =
+ pdata->sensor_max_y -
+ y[touch_count];
+ }
+ /* number of active touch points */
+ touch_count++;
+ }
+ }
+
+ /* report to input subsystem */
+ if (touch_count) {
+ for (finger = 0; finger < touch_count; finger++) {
+ input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
+					max(wx[finger], wy[finger]));
+ input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
+ x[finger]);
+ input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
+ y[finger]);
+ input_mt_sync(pdata->input_dev);
+ }
+ } else
+ input_mt_sync(pdata->input_dev);
+
+ /* sync after groups of events */
+ input_sync(pdata->input_dev);
+ /* return the number of touch points */
+ return touch_count;
+}
+
+/**
+ * synaptics_rmi4_report_device() - reports the rmi4 device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn
+ *
+ * This function is used to call the report function of the rmi4 device.
+ */
+static int synaptics_rmi4_report_device(struct synaptics_rmi4_data *pdata,
+ struct synaptics_rmi4_fn *rfi)
+{
+ int touch = 0;
+ struct i2c_client *client = pdata->i2c_client;
+ static int num_error_reports;
+ if (rfi->fn_number != SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
+ num_error_reports++;
+ if (num_error_reports < MAX_ERROR_REPORT)
+ dev_err(&client->dev, "%s:report not supported\n",
+ __func__);
+ } else
+ touch = synpatics_rmi4_touchpad_report(pdata, rfi);
+ return touch;
+}
+/**
+ * synaptics_rmi4_sensor_report() - reports to input subsystem
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function is used to read in all data sources and report
+ * them to the input subsystem.
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *pdata)
+{
+ unsigned char intr_status[4];
+ /* number of touch points - fingers or buttons */
+ int touch = 0;
+ unsigned int retval;
+ struct synaptics_rmi4_fn *rfi;
+ struct synaptics_rmi4_device_info *rmi;
+ struct i2c_client *client = pdata->i2c_client;
+
+ /*
+ * Get the interrupt status from the function $01
+ * control register+1 to find which source(s) were interrupting
+ * so we can read the data from the source(s) (2D sensor, buttons..)
+ */
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval != pdata->number_of_interrupt_register) {
+ dev_err(&client->dev,
+ "could not read interrupt status registers\n");
+ return 0;
+ }
+ /*
+ * check each function that has data sources, and if the interrupt for
+ * that source triggered, call that RMI4 function's report() function to
+ * gather data and report it to the input subsystem
+ */
+ rmi = &(pdata->rmi4_mod_info);
+ list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+ if (rfi->num_of_data_sources) {
+ if (intr_status[rfi->index_to_intr_reg] &
+ rfi->intr_mask)
+ touch = synaptics_rmi4_report_device(pdata,
+ rfi);
+ }
+ }
+ /* return the number of touch points */
+ return touch;
+}
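
The dispatch decision above boils down to a single mask test per function: look at the interrupt status byte that function was assigned and AND it with that function's mask. A small illustrative helper makes the shape explicit; the struct and function names below are not part of the driver.

#include <stdbool.h>

/* Illustrative only: which status byte to inspect and which bits belong
 * to a given RMI4 function, as computed by the detect routine below.
 */
struct fn_irq_info {
    unsigned char index_to_intr_reg;
    unsigned char intr_mask;
};

static bool fn_has_pending_data(const unsigned char *intr_status,
                                const struct fn_irq_info *fn)
{
    return (intr_status[fn->index_to_intr_reg] & fn->intr_mask) != 0;
}
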
+
+/**
+ * synaptics_rmi4_irq() - thread function for rmi4 attention line
+ * @irq: irq value
+ * @data: void pointer
+ *
+ * This function is the interrupt thread function. It just notifies the
+ * application layer that attention is required.
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+ struct synaptics_rmi4_data *pdata = data;
+ int touch_count;
+ do {
+ touch_count = synaptics_rmi4_sensor_report(pdata);
+ if (touch_count)
+ wait_event_timeout(pdata->wait, pdata->touch_stopped,
+ msecs_to_jiffies(1));
+ else
+ break;
+ } while (!pdata->touch_stopped);
+ return IRQ_HANDLED;
+}
+
+/**
+ * synpatics_rmi4_touchpad_detect() - detects the rmi4 touchpad device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ * @fd: pointer to synaptics_rmi4_fn_desc structure
+ * @interruptcount: running count of interrupt sources seen so far
+ *
+ * This function detects the rmi4 touchpad device.
+ */
+static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata,
+ struct synaptics_rmi4_fn *rfi,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int interruptcount)
+{
+ unsigned char queries[QUERY_LEN];
+ unsigned short intr_offset;
+ unsigned char abs_data_size;
+ unsigned char abs_data_blk_size;
+ unsigned char egr_0, egr_1;
+ unsigned int all_data_blk_size;
+ int has_pinch, has_flick, has_tap;
+ int has_tapandhold, has_doubletap;
+ int has_earlytap, has_press;
+ int has_palmdetect, has_rotate;
+ int has_rel;
+ int i;
+ int retval;
+ struct i2c_client *client = pdata->i2c_client;
+
+ rfi->fn_desc.query_base_addr = fd->query_base_addr;
+ rfi->fn_desc.data_base_addr = fd->data_base_addr;
+ rfi->fn_desc.intr_src_count = fd->intr_src_count;
+ rfi->fn_desc.fn_number = fd->fn_number;
+ rfi->fn_number = fd->fn_number;
+ rfi->num_of_data_sources = fd->intr_src_count;
+ rfi->fn_desc.ctrl_base_addr = fd->ctrl_base_addr;
+ rfi->fn_desc.cmd_base_addr = fd->cmd_base_addr;
+
+ /*
+ * need to get number of fingers supported, data size, etc.
+ * to be used when getting data since the number of registers to
+ * read depends on the number of fingers supported and data size.
+ */
+ retval = synaptics_rmi4_i2c_block_read(pdata, fd->query_base_addr,
+ queries,
+ sizeof(queries));
+ if (retval != sizeof(queries)) {
+ dev_err(&client->dev, "%s:read function query registers\n",
+ __func__);
+ return retval;
+ }
+ /*
+ * 2D data sources have only 3 bits for the number of fingers
+ * supported - so the encoding is a bit weird.
+ */
+ if ((queries[1] & MASK_3BIT) <= 4)
+ /* add 1 since zero based */
+ rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
+ else {
+ /*
+ * a value of 5 is up to 10 fingers - 6 and 7 are reserved
+ * (shouldn't get these in a normal 2D source).
+ */
+ if ((queries[1] & MASK_3BIT) == 5)
+ rfi->num_of_data_points = 10;
+ }
+ /* Need to get interrupt info for handling interrupts */
+ rfi->index_to_intr_reg = (interruptcount + 7)/8;
+ if (rfi->index_to_intr_reg != 0)
+ rfi->index_to_intr_reg -= 1;
+ /*
+ * loop through interrupts for each source in fn $11
+ * and OR in a bit to the interrupt mask for each.
+ */
+ intr_offset = interruptcount % 8;
+ rfi->intr_mask = 0;
+ for (i = intr_offset;
+ i < ((fd->intr_src_count & MASK_3BIT) + intr_offset); i++)
+ rfi->intr_mask |= 1 << i;
+
+ /* Size of just the absolute data for one finger */
+ abs_data_size = queries[5] & MASK_2BIT;
+ /* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
+ abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
+ rfi->size_of_data_register_block = abs_data_blk_size;
+
+ /*
+ * need to determine the size of data to read - this depends on
+ * conditions such as whether Relative data is reported and if Gesture
+ * data is reported.
+ */
+ egr_0 = queries[7];
+ egr_1 = queries[8];
+
+ /*
+ * Get info about what EGR data is supported, whether it has
+ * Relative data supported, etc.
+ */
+ has_pinch = egr_0 & HAS_PINCH;
+ has_flick = egr_0 & HAS_FLICK;
+ has_tap = egr_0 & HAS_TAP;
+ has_earlytap = egr_0 & HAS_EARLYTAP;
+ has_press = egr_0 & HAS_PRESS;
+ has_rotate = egr_1 & HAS_ROTATE;
+ has_rel = queries[1] & HAS_RELEASE;
+ has_tapandhold = egr_0 & HAS_TAPANDHOLD;
+ has_doubletap = egr_0 & HAS_DOUBLETAP;
+ has_palmdetect = egr_1 & HAS_PALMDETECT;
+
+ /*
+ * Size of all data including finger status, absolute data for each
+ * finger, relative data and EGR data
+ */
+ all_data_blk_size =
+ /* finger status, four fingers per register */
+ ((rfi->num_of_data_points + 3) / 4) +
+ /* absolute data, per finger times number of fingers */
+ (abs_data_blk_size * rfi->num_of_data_points) +
+ /*
+ * two relative registers (if relative is being reported)
+ */
+ 2 * has_rel +
+ /*
+ * F11_2D_data8 is only present if the egr_0
+ * register is non-zero.
+ */
+ !!(egr_0) +
+ /*
+ * F11_2D_data9 is only present if either egr_0 or
+ * egr_1 registers are non-zero.
+ */
+ (egr_0 || egr_1) +
+ /*
+ * F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
+ * egr_0 reports as 1.
+ */
+ !!(has_pinch | has_flick) +
+ /*
+ * F11_2D_data11 and F11_2D_data12 are only present if
+ * EGR_FLICK of egr_0 reports as 1.
+ */
+ 2 * !!(has_flick);
+ return retval;
+}
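
The interrupt bookkeeping above can be worked through by hand. The standalone sketch below keeps the driver's exact arithmetic; for a function registered after three earlier interrupt sources that owns two sources of its own, it prints index_to_intr_reg=0 and intr_mask=0x18 (bits 3 and 4). The variable names and the sample counts are illustrative only.

#include <stdio.h>

int main(void)
{
    unsigned int interruptcount = 3;  /* sources counted before this fn */
    unsigned int src_count = 2;       /* this function's own sources    */
    unsigned int index, offset, mask = 0, i;

    index = (interruptcount + 7) / 8;
    if (index != 0)
        index -= 1;

    offset = interruptcount % 8;
    for (i = offset; i < src_count + offset; i++)
        mask |= 1u << i;

    printf("index_to_intr_reg=%u intr_mask=0x%02x\n", index, mask);
    return 0;
}
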
+
+/**
+ * synpatics_rmi4_touchpad_config() - configures the rmi4 touchpad device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ *
+ * This function configures the rmi4 touchpad device.
+ */
+int synpatics_rmi4_touchpad_config(struct synaptics_rmi4_data *pdata,
+ struct synaptics_rmi4_fn *rfi)
+{
+ /*
+ * For the data source - print info and do any
+ * source specific configuration.
+ */
+ unsigned char data[BUF_LEN];
+ int retval = 0;
+ struct i2c_client *client = pdata->i2c_client;
+
+ /* Get and print some info about the data source... */
+ /* To Query 2D devices we need to read from the address obtained
+ * from the function descriptor stored in the RMI function info.
+ */
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ rfi->fn_desc.query_base_addr,
+ data, QUERY_LEN);
+ if (retval != QUERY_LEN)
+ dev_err(&client->dev, "%s:read query registers failed\n",
+ __func__);
+ else {
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ rfi->fn_desc.ctrl_base_addr,
+ data, DATA_BUF_LEN);
+ if (retval != DATA_BUF_LEN) {
+ dev_err(&client->dev,
+ "%s:read control registers failed\n",
+ __func__);
+ return retval;
+ }
+ /* Store these for use later */
+ pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
+ ((data[7] & MASK_4BIT) << 8);
+ pdata->sensor_max_y = ((data[8] & MASK_5BIT) << 0) |
+ ((data[9] & MASK_4BIT) << 8);
+ }
+ return retval;
+}
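
The maxima stored above are 12-bit values split across two control bytes. A tiny sketch of the X path, assuming MASK_8BIT is 0xff and MASK_4BIT is 0x0f as in this driver; the sample byte values are invented.

#include <stdio.h>

int main(void)
{
    unsigned char ctrl6 = 0x56;   /* X maximum, low 8 bits  */
    unsigned char ctrl7 = 0x04;   /* X maximum, high 4 bits */
    unsigned int sensor_max_x = (ctrl6 & 0xff) | ((ctrl7 & 0x0f) << 8);

    printf("sensor_max_x = %u\n", sensor_max_x);   /* 1110 (0x456) */
    return 0;
}
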
+
+/**
+ * synaptics_rmi4_i2c_query_device() - query the rmi4 device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function is used to query the rmi4 device.
+ */
+static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
+{
+ int i;
+ int retval;
+ unsigned char std_queries[STD_QUERY_LEN];
+ unsigned char intr_count = 0;
+ int data_sources = 0;
+ unsigned int ctrl_offset;
+ struct synaptics_rmi4_fn *rfi;
+ struct synaptics_rmi4_fn_desc rmi_fd;
+ struct synaptics_rmi4_device_info *rmi;
+ struct i2c_client *client = pdata->i2c_client;
+
+ /*
+ * init the physical driver's RMI module
+ * info list of functions
+ */
+ INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);
+
+ /*
+ * Read the Page Descriptor Table to determine what functions
+ * are present
+ */
+ for (i = PDT_START_SCAN_LOCATION; i > PDT_END_SCAN_LOCATION;
+ i -= PDT_ENTRY_SIZE) {
+ retval = synaptics_rmi4_i2c_block_read(pdata, i,
+ (unsigned char *)&rmi_fd,
+ sizeof(rmi_fd));
+ if (retval != sizeof(rmi_fd)) {
+ /* failed to read next PDT entry */
+ dev_err(&client->dev, "%s: read error\n", __func__);
+ return -EIO;
+ }
+ rfi = NULL;
+ if (rmi_fd.fn_number) {
+ switch (rmi_fd.fn_number & MASK_8BIT) {
+ case SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM:
+ pdata->fn01_query_base_addr =
+ rmi_fd.query_base_addr;
+ pdata->fn01_ctrl_base_addr =
+ rmi_fd.ctrl_base_addr;
+ pdata->fn01_data_base_addr =
+ rmi_fd.data_base_addr;
+ break;
+ case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
+ if (rmi_fd.intr_src_count) {
+ rfi = kmalloc(sizeof(*rfi),
+ GFP_KERNEL);
+ if (!rfi) {
+ dev_err(&client->dev,
+ "%s:kmalloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ retval = synpatics_rmi4_touchpad_detect
+ (pdata, rfi,
+ &rmi_fd,
+ intr_count);
+ if (retval < 0)
+ return retval;
+ }
+ break;
+ }
+ /* interrupt count for next iteration */
+ intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
+ /*
+ * We only want to add functions to the list
+ * that have data associated with them.
+ */
+ if (rfi && rmi_fd.intr_src_count) {
+ /* link this function info to the RMI module */
+ mutex_lock(&(pdata->fn_list_mutex));
+ list_add_tail(&rfi->link,
+ &pdata->rmi4_mod_info.support_fn_list);
+ mutex_unlock(&(pdata->fn_list_mutex));
+ }
+ } else {
+ /*
+ * A zero in the function number
+ * signals the end of the PDT
+ */
+ dev_dbg(&client->dev,
+ "%s:end of PDT\n", __func__);
+ break;
+ }
+ }
+ /*
+ * calculate the interrupt register count - used in the
+ * ISR to read the correct number of interrupt registers
+ */
+ pdata->number_of_interrupt_register = (intr_count + 7) / 8;
+ /*
+ * Function $01 will be used to query the product properties
+ * and product ID, so the PDT above had to be read first to get
+ * the Fn $01 query address before filling in the product
+ * info. NOTE: Even an unflashed device will still have Fn $01.
+ */
+
+ /* Load up the standard queries and get the RMI4 module info */
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_query_base_addr,
+ std_queries,
+ sizeof(std_queries));
+ if (retval != sizeof(std_queries)) {
+ dev_err(&client->dev, "%s:Failed reading queries\n",
+ __func__);
+ return -EIO;
+ }
+
+ /* Currently supported RMI version is 4.0 */
+ pdata->rmi4_mod_info.version_major = 4;
+ pdata->rmi4_mod_info.version_minor = 0;
+ /*
+ * get manufacturer id, product_props, product info,
+ * date code, tester id, serial num and product id (name)
+ */
+ pdata->rmi4_mod_info.manufacturer_id = std_queries[0];
+ pdata->rmi4_mod_info.product_props = std_queries[1];
+ pdata->rmi4_mod_info.product_info[0] = std_queries[2];
+ pdata->rmi4_mod_info.product_info[1] = std_queries[3];
+ /* year - 2001-2032 */
+ pdata->rmi4_mod_info.date_code[0] = std_queries[4] & MASK_5BIT;
+ /* month - 1-12 */
+ pdata->rmi4_mod_info.date_code[1] = std_queries[5] & MASK_4BIT;
+ /* day - 1-31 */
+ pdata->rmi4_mod_info.date_code[2] = std_queries[6] & MASK_5BIT;
+ pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
+ (std_queries[8] & MASK_7BIT);
+ pdata->rmi4_mod_info.serial_number =
+ ((std_queries[9] & MASK_7BIT) << 8) |
+ (std_queries[10] & MASK_7BIT);
+ memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);
+
+ /* Check if this is a Synaptics device - report if not. */
+ if (pdata->rmi4_mod_info.manufacturer_id != 1)
+ dev_err(&client->dev, "%s: non-Synaptics mfg id:%d\n",
+ __func__, pdata->rmi4_mod_info.manufacturer_id);
+
+ list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
+ data_sources += rfi->num_of_data_sources;
+ if (data_sources) {
+ rmi = &(pdata->rmi4_mod_info);
+ list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+ if (rfi->num_of_data_sources) {
+ if (rfi->fn_number ==
+ SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
+ retval = synpatics_rmi4_touchpad_config
+ (pdata, rfi);
+ if (retval < 0)
+ return retval;
+ } else
+ dev_err(&client->dev,
+ "%s:fn_number not supported\n",
+ __func__);
+ /*
+ * Turn on interrupts for this
+ * function's data sources.
+ */
+ ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
+ rfi->index_to_intr_reg;
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ ctrl_offset,
+ rfi->intr_mask);
+ if (retval < 0)
+ return retval;
+ }
+ }
+ }
+ return 0;
+}
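
The PDT scan at the top of this function walks downwards in fixed-size steps and stops at a zero function number. The userspace sketch below shows just that loop shape over a fake register map; the 6-byte entry layout and the scan boundaries are assumptions that mirror how this driver uses its PDT_* constants, and the addresses and sample entries are made up.

#include <stdio.h>
#include <string.h>

struct fn_desc {                 /* mirrors this driver's 6-byte entry */
    unsigned char query_base_addr;
    unsigned char cmd_base_addr;
    unsigned char ctrl_base_addr;
    unsigned char data_base_addr;
    unsigned char intr_src_count;
    unsigned char fn_number;
};

/* stand-in for the i2c block read: copy from a fake register map */
static void read_block(const unsigned char *regs, int addr, void *buf, int len)
{
    memcpy(buf, regs + addr, len);
}

int main(void)
{
    unsigned char regs[256] = { 0 };
    struct fn_desc fd;
    int addr;

    /* fake map: an F11 entry at 0xe9 and an F01 entry at 0xe3 */
    regs[0xe9 + 4] = 0x01;
    regs[0xe9 + 5] = 0x11;
    regs[0xe3 + 4] = 0x01;
    regs[0xe3 + 5] = 0x01;

    for (addr = 0xe9; addr > 0x0a; addr -= sizeof(fd)) {
        read_block(regs, addr, &fd, sizeof(fd));
        if (!fd.fn_number)
            break;                          /* end of PDT */
        printf("found F%02x with %d interrupt source(s)\n",
               fd.fn_number, fd.intr_src_count & 0x7);
    }
    return 0;
}
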
+
+/**
+ * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
+ * @i2c: i2c client structure pointer
+ * @id:i2c device id pointer
+ *
+ * This function will allocate and initialize the instance
+ * data, request the irq, set the instance data as the client's
+ * driver data and then register the physical driver, which will scan
+ * the rmi4 Page Descriptor Table and enumerate any rmi4 functions that
+ * have data sources associated with them.
+ */
+static int __devinit synaptics_rmi4_probe
+ (struct i2c_client *client, const struct i2c_device_id *dev_id)
+{
+ int retval;
+ unsigned char intr_status[4];
+ struct synaptics_rmi4_data *rmi4_data;
+ const struct synaptics_rmi4_platform_data *platformdata =
+ client->dev.platform_data;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "i2c smbus byte data not supported\n");
+ return -EIO;
+ }
+
+ if (!platformdata) {
+ dev_err(&client->dev, "%s: no platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize the instance data for this client */
+ rmi4_data = kzalloc(sizeof(struct synaptics_rmi4_data) * 2,
+ GFP_KERNEL);
+ if (!rmi4_data) {
+ dev_err(&client->dev, "%s: no memory allocated\n", __func__);
+ return -ENOMEM;
+ }
+
+ rmi4_data->input_dev = input_allocate_device();
+ if (rmi4_data->input_dev == NULL) {
+ dev_err(&client->dev, "%s:input device alloc failed\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_input;
+ }
+
+ dev_set_name(&client->dev, platformdata->name);
+
+ if (platformdata->regulator_en) {
+ rmi4_data->regulator = regulator_get(&client->dev, "v-touch");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev, "%s:get regulator failed\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_regulator;
+ }
+ regulator_enable(rmi4_data->regulator);
+ }
+
+ init_waitqueue_head(&rmi4_data->wait);
+ /*
+ * Copy the i2c_client pointer into the rmi4 instance data for
+ * later use in rmi4_read, rmi4_write, etc.
+ */
+ rmi4_data->i2c_client = client;
+ /* So we set the page correctly the first time */
+ rmi4_data->current_page = MASK_16BIT;
+ rmi4_data->board = platformdata;
+ rmi4_data->touch_stopped = false;
+
+ /* init the mutexes for maintaining the lists */
+ mutex_init(&(rmi4_data->fn_list_mutex));
+ mutex_init(&(rmi4_data->rmi4_page_mutex));
+
+ /*
+ * Register physical driver - this will call the detect function that
+ * will then scan the device and determine the supported
+ * rmi4 functions.
+ */
+ retval = synaptics_rmi4_i2c_query_device(rmi4_data);
+ if (retval) {
+ dev_err(&client->dev, "%s: rmi4 query device failed\n",
+ __func__);
+ goto err_query_dev;
+ }
+
+ /* Store the instance data in the i2c_client */
+ i2c_set_clientdata(client, rmi4_data);
+
+ /* initialize the input device parameters */
+ rmi4_data->input_dev->name = DRIVER_NAME;
+ rmi4_data->input_dev->phys = "Synaptics_Clearpad";
+ rmi4_data->input_dev->id.bustype = BUS_I2C;
+ rmi4_data->input_dev->dev.parent = &client->dev;
+ input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+ /* Set the event bits supported by the input device */
+ set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+ set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+ set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0,
+ rmi4_data->sensor_max_x, 0, 0);
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
+ rmi4_data->sensor_max_y, 0, 0);
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ MAX_TOUCH_MAJOR, 0, 0);
+
+ /* Clear interrupts */
+ synaptics_rmi4_i2c_block_read(rmi4_data,
+ rmi4_data->fn01_data_base_addr + 1, intr_status,
+ rmi4_data->number_of_interrupt_register);
+ retval = request_threaded_irq(platformdata->irq_number, NULL,
+ synaptics_rmi4_irq,
+ platformdata->irq_type,
+ platformdata->name, rmi4_data);
+ if (retval) {
+ dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
+ __func__, platformdata->irq_number);
+ goto err_unset_clientdata;
+ }
+
+ retval = input_register_device(rmi4_data->input_dev);
+ if (retval) {
+ dev_err(&client->dev, "%s:input register failed\n", __func__);
+ goto err_free_irq;
+ }
+
+ return retval;
+
+err_free_irq:
+ free_irq(platformdata->irq_number, rmi4_data);
+err_unset_clientdata:
+ i2c_set_clientdata(client, NULL);
+err_query_dev:
+ if (platformdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+err_regulator:
+ input_free_device(rmi4_data->input_dev);
+ rmi4_data->input_dev = NULL;
+err_input:
+ kfree(rmi4_data);
+
+ return retval;
+}
+/**
+ * synaptics_rmi4_remove() - Removes the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ *
+ * This function is used to remove the i2c-client
+ * touchscreen driver and returns an integer.
+ */
+static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
+{
+ struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
+ const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+ rmi4_data->touch_stopped = true;
+ wake_up(&rmi4_data->wait);
+ free_irq(pdata->irq_number, rmi4_data);
+ input_unregister_device(rmi4_data->input_dev);
+ if (pdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+ kfree(rmi4_data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * synaptics_rmi4_suspend() - suspend the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * This function is used to suspend the
+ * touch panel controller and returns an integer.
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+ /* Touch sleep mode */
+ int retval;
+ unsigned char intr_status;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+ rmi4_data->touch_stopped = true;
+ disable_irq(pdata->irq_number);
+
+ retval = synaptics_rmi4_i2c_block_read(rmi4_data,
+ rmi4_data->fn01_data_base_addr + 1,
+ &intr_status,
+ rmi4_data->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
+ rmi4_data->fn01_ctrl_base_addr + 1,
+ (intr_status & ~TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+
+ if (pdata->regulator_en)
+ regulator_disable(rmi4_data->regulator);
+
+ return 0;
+}
+/**
+ * synaptics_rmi4_resume() - resume the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * This function is used to resume the touch panel
+ * controller and returns an integer.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+ int retval;
+ unsigned char intr_status;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+ if (pdata->regulator_en)
+ regulator_enable(rmi4_data->regulator);
+
+ enable_irq(pdata->irq_number);
+ rmi4_data->touch_stopped = false;
+
+ retval = synaptics_rmi4_i2c_block_read(rmi4_data,
+ rmi4_data->fn01_data_base_addr + 1,
+ &intr_status,
+ rmi4_data->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
+ rmi4_data->fn01_ctrl_base_addr + 1,
+ (intr_status | TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+ .suspend = synaptics_rmi4_suspend,
+ .resume = synaptics_rmi4_resume,
+};
+#endif
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+ { DRIVER_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+static struct i2c_driver synaptics_rmi4_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+ },
+ .probe = synaptics_rmi4_probe,
+ .remove = __devexit_p(synaptics_rmi4_remove),
+ .id_table = synaptics_rmi4_id_table,
+};
+/**
+ * synaptics_rmi4_init() - Initialize the touchscreen driver
+ *
+ * This function is used to initialize the synaptics
+ * touchscreen driver and returns an integer.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+ return i2c_add_driver(&synaptics_rmi4_driver);
+}
+/**
+ * synaptics_rmi4_exit() - De-initialize the touchscreen driver
+ *
+ * This function is used to de-initialize the synaptics
+ * touchscreen driver and returns nothing.
+ */
+static void __exit synaptics_rmi4_exit(void)
+{
+ i2c_del_driver(&synaptics_rmi4_driver);
+}
+
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
+MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
+MODULE_ALIAS("i2c:synaptics_rmi4_ts");
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
new file mode 100644
index 000000000000..820ae275fa2b
--- /dev/null
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -0,0 +1,50 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#ifndef _SYNAPTICS_RMI4_H_INCLUDED_
+#define _SYNAPTICS_RMI4_H_INCLUDED_
+
+/**
+ * struct synaptics_rmi4_platform_data - contains the rmi4 platform data
+ * @name: device name
+ * @irq_number: irq number
+ * @irq_type: irq type
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @regulator_en: regulator enable flag
+ *
+ * This structure gives platform data for rmi4.
+ */
+struct synaptics_rmi4_platform_data {
+ const char *name;
+ int irq_number;
+ int irq_type;
+ bool x_flip;
+ bool y_flip;
+ bool regulator_en;
+};
+
+#endif
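
For context, a hypothetical board-file snippet showing how this platform data might be wired up through i2c_register_board_info(); the bus number, I2C address, IRQ number and flags below are invented for illustration and are not part of this patch.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include "synaptics_i2c_rmi4.h"

static struct synaptics_rmi4_platform_data example_rmi4_pdata = {
    .name         = "synaptics_rmi4_ts",
    .irq_number   = 42,                     /* board specific, invented */
    .irq_type     = IRQF_TRIGGER_FALLING,
    .x_flip       = false,
    .y_flip       = true,
    .regulator_en = true,
};

static struct i2c_board_info example_i2c_devices[] __initdata = {
    {
        I2C_BOARD_INFO("synaptics_rmi4_ts", 0x4b),
        .platform_data = &example_rmi4_pdata,
    },
};

/*
 * Called from the board init code, for example:
 *    i2c_register_board_info(3, example_i2c_devices,
 *                            ARRAY_SIZE(example_i2c_devices));
 */
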
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 571864555ddd..27e0aa81a584 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -949,7 +949,7 @@ func_end:
* Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
* schedules a DPC to dispatch I/O.
*/
-void io_mbox_msg(u32 msg)
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
{
struct io_mgr *pio_mgr;
struct dev_object *dev_obj;
@@ -959,9 +959,9 @@ void io_mbox_msg(u32 msg)
dev_get_io_mgr(dev_obj, &pio_mgr);
if (!pio_mgr)
- return;
+ return NOTIFY_BAD;
- pio_mgr->intr_val = (u16)msg;
+ pio_mgr->intr_val = (u16)((u32)msg);
if (pio_mgr->intr_val & MBX_PM_CLASS)
io_dispatch_pm(pio_mgr);
@@ -973,7 +973,7 @@ void io_mbox_msg(u32 msg)
spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
tasklet_schedule(&pio_mgr->dpc_tasklet);
}
- return;
+ return NOTIFY_OK;
}
/*
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 1be081f917a7..a3f69f6f505f 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -223,6 +223,10 @@ static struct bridge_drv_interface drv_interface_fxns = {
bridge_msg_set_queue_id,
};
+static struct notifier_block dsp_mbox_notifier = {
+ .notifier_call = io_mbox_msg,
+};
+
static inline void flush_all(struct bridge_dev_context *dev_context)
{
if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
@@ -553,7 +557,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
* Enable Mailbox events and also drain any pending
* stale messages.
*/
- dev_context->mbox = omap_mbox_get("dsp");
+ dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
if (IS_ERR(dev_context->mbox)) {
dev_context->mbox = NULL;
pr_err("%s: Failed to get dsp mailbox handle\n",
@@ -563,8 +567,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
}
if (!status) {
- dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
-
/*PM_IVA2GRPSEL_PER = 0xC0;*/
temp = readl(resources->dw_per_pm_base + 0xA8);
temp = (temp & 0xFFFFFF30) | 0xC0;
@@ -596,7 +598,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
if (dsp_debug)
while (__raw_readw(dw_sync_addr))
- ;;
+ ;
/* Wait for DSP to clear word in shared memory */
/* Read the Location */
@@ -685,7 +687,7 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
/* Disable the mailbox interrupts */
if (dev_context->mbox) {
omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
- omap_mbox_put(dev_context->mbox);
+ omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
dev_context->mbox = NULL;
}
/* Reset IVA2 clocks*/
@@ -786,10 +788,7 @@ static int bridge_dev_create(struct bridge_dev_context
pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
if (pt_attrs != NULL) {
- /* Assuming that we use only DSP's memory map
- * until 0x4000:0000 , we would need only 1024
- * L1 enties i.e L1 size = 4K */
- pt_attrs->l1_size = 0x1000;
+ pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
align_size = pt_attrs->l1_size;
/* Align sizes are expected to be power of 2 */
/* we like to get aligned on L1 table size */
@@ -1671,7 +1670,7 @@ static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
/* Find a free L2 PT. */
for (i = 0; (i < pt->l2_num_pages) &&
(pt->pg_info[i].num_entries != 0); i++)
- ;;
+ ;
if (i < pt->l2_num_pages) {
l2_page_num = i;
l2_base_pa = pt->l2_base_pa + (l2_page_num *
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index 18aec55d8647..8242c70e09dd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -72,22 +72,17 @@ extern void io_dpc(unsigned long ref_data);
/*
* ======== io_mbox_msg ========
* Purpose:
- * Main interrupt handler for the shared memory Bridge channel manager.
- * Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
- * then schedules a DPC to dispatch I/O.
+ * Main message handler for the shared memory Bridge channel manager.
+ * Determines if this message is ours, then schedules a DPC to
+ * dispatch I/O.
* Parameters:
- * ref_data: Pointer to the channel manager object for this board.
- * Set in an initial call to ISR_Install().
+ * self: Pointer to its own notifier_block struct.
+ * len: Length of message.
+ * msg: Message code received.
* Returns:
- * TRUE if interrupt handled; FALSE otherwise.
- * Requires:
- * Must be in locked memory if executing in kernel mode.
- * Must only call functions which are in locked memory if Kernel mode.
- * Must only call asynchronous services.
- * Interrupts are disabled and EOI for this interrupt has been sent.
- * Ensures:
+ * NOTIFY_OK if handled; NOTIFY_BAD otherwise.
*/
-void io_mbox_msg(u32 msg);
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg);
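
The new prototype follows the standard notifier convention: the mailbox framework hands the message over as the notifier payload and the handler answers with NOTIFY_OK or NOTIFY_BAD. A minimal illustrative handler of that shape (not part of this patch; the names are invented):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/types.h>

static int example_mbox_notify(struct notifier_block *self,
                               unsigned long len, void *msg)
{
    /* the message code arrives as the (pointer-sized) payload */
    u16 val = (u16)(unsigned long)msg;

    pr_debug("mbox message 0x%x, len %lu\n", val, len);
    return NOTIFY_OK;          /* NOTIFY_BAD if it cannot be handled */
}

static struct notifier_block example_nb = {
    .notifier_call = example_mbox_notify,
};
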
/*
* ======== io_request_chnl ========
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index a6ae007015d0..28354bbf1aeb 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -943,7 +943,7 @@ static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
/* Determine which phase this section belongs to */
for (pch = sect_name + 1; *pch && *pch != seps; pch++)
- ;;
+ ;
if (*pch) {
pch++; /* Skip over the ':' */
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 8fe017c3721f..eb9b9f1bc138 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1450,29 +1450,55 @@ static struct video_device tm6000_template = {
* ------------------------------------------------------------------
*/
-int tm6000_v4l2_register(struct tm6000_core *dev)
+static struct video_device *vdev_init(struct tm6000_core *dev,
+ const struct video_device
+ *template, const char *type_name)
{
- int ret = -1;
struct video_device *vfd;
vfd = video_device_alloc();
- if(!vfd) {
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->release = video_device_release;
+ vfd->debug = tm6000_debug;
+ vfd->lock = &dev->lock;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
+
+ video_set_drvdata(vfd, dev);
+ return vfd;
+}
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+
+ dev->vfd = vdev_init(dev, &tm6000_template, "video");
+
+ if (!dev->vfd) {
+ printk(KERN_INFO "%s: can't register video device\n",
+ dev->name);
return -ENOMEM;
}
- dev->vfd = vfd;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
- memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
- dev->vfd->debug = tm6000_debug;
- dev->vfd->lock = &dev->lock;
+ ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
- vfd->v4l2_dev = &dev->v4l2_dev;
- video_set_drvdata(vfd, dev);
+ if (ret < 0) {
+ printk(KERN_INFO "%s: can't register video device\n",
+ dev->name);
+ return ret;
+ }
+
+ printk(KERN_INFO "%s: registered device %s\n",
+ dev->name, video_device_node_name(dev->vfd));
- ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
return ret;
}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 30dbfb6d16f2..d73267961ef4 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -32,6 +32,7 @@
struct stub_device {
struct usb_interface *interface;
+ struct usb_device *udev;
struct list_head list;
struct usbip_device ud;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index b186b5fed2b9..a7ce51cc8909 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -258,10 +258,11 @@ static void stub_shutdown_connection(struct usbip_device *ud)
static void stub_device_reset(struct usbip_device *ud)
{
struct stub_device *sdev = container_of(ud, struct stub_device, ud);
- struct usb_device *udev = interface_to_usbdev(sdev->interface);
+ struct usb_device *udev = sdev->udev;
int ret;
usbip_udbg("device reset");
+
ret = usb_lock_device_for_reset(udev, sdev->interface);
if (ret < 0) {
dev_err(&udev->dev, "lock for reset\n");
@@ -309,7 +310,8 @@ static void stub_device_unusable(struct usbip_device *ud)
*
* Allocates and initializes a new stub_device struct.
*/
-static struct stub_device *stub_device_alloc(struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev,
+ struct usb_interface *interface)
{
struct stub_device *sdev;
int busnum = interface_to_busnum(interface);
@@ -324,7 +326,8 @@ static struct stub_device *stub_device_alloc(struct usb_interface *interface)
return NULL;
}
- sdev->interface = interface;
+ sdev->interface = usb_get_intf(interface);
+ sdev->udev = usb_get_dev(udev);
/*
* devid is defined with devnum when this driver is first allocated.
@@ -450,11 +453,12 @@ static int stub_probe(struct usb_interface *interface,
return err;
}
+ usb_get_intf(interface);
return 0;
}
/* ok. this is my device. */
- sdev = stub_device_alloc(interface);
+ sdev = stub_device_alloc(udev, interface);
if (!sdev)
return -ENOMEM;
@@ -476,6 +480,8 @@ static int stub_probe(struct usb_interface *interface,
dev_err(&interface->dev, "create sysfs files for %s\n",
udev_busid);
usb_set_intfdata(interface, NULL);
+ usb_put_intf(interface);
+
busid_priv->interf_count = 0;
busid_priv->sdev = NULL;
@@ -545,6 +551,7 @@ static void stub_disconnect(struct usb_interface *interface)
if (busid_priv->interf_count > 1) {
busid_priv->interf_count--;
shutdown_busid(busid_priv);
+ usb_put_intf(interface);
return;
}
@@ -554,6 +561,9 @@ static void stub_disconnect(struct usb_interface *interface)
/* 1. shutdown the current connection */
shutdown_busid(busid_priv);
+ usb_put_dev(sdev->udev);
+ usb_put_intf(interface);
+
/* 3. free sdev */
busid_priv->sdev = NULL;
stub_device_free(sdev);
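
The usb_get_dev()/usb_get_intf() calls added above pair with the new puts in stub_disconnect(), so the stub keeps its usb_device and interface pointers valid for the whole bind lifetime. A minimal sketch of that pattern follows; the struct and helper names are illustrative, not usbip's.

#include <linux/usb.h>

struct example_stub {
    struct usb_device *udev;
    struct usb_interface *interface;
};

static void example_bind(struct example_stub *s, struct usb_interface *intf)
{
    s->interface = usb_get_intf(intf);                  /* +1 on interface */
    s->udev = usb_get_dev(interface_to_usbdev(intf));   /* +1 on device    */
}

static void example_unbind(struct example_stub *s)
{
    usb_put_dev(s->udev);          /* drop the references taken at bind */
    usb_put_intf(s->interface);
    s->udev = NULL;
    s->interface = NULL;
}
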
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 3de6fd2539dc..ae6ac82754a4 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -364,7 +364,7 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
- struct usb_device *udev = interface_to_usbdev(sdev->interface);
+ struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
@@ -484,7 +484,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
int ret;
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
- struct usb_device *udev = interface_to_usbdev(sdev->interface);
+ struct usb_device *udev = sdev->udev;
int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 41a1fe5138f4..afc3b1a71881 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -100,9 +100,6 @@ struct vhci_hcd {
* But, the index of this array begins from 0.
*/
struct vhci_device vdev[VHCI_NPORTS];
-
- /* vhci_device which has not been assiged its address yet */
- int pending_port;
};
@@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport);
void vhci_rx_loop(struct usbip_task *ut);
void vhci_tx_loop(struct usbip_task *ut);
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+ __u32 seqnum);
+
#define hardware (&the_controller->pdev.dev)
static inline struct vhci_device *port_to_vdev(__u32 port)
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 832608d3e579..a35fe61268de 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
* the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
* spin_unlock(&the_controller->vdev[rhport].ud.lock); */
- the_controller->pending_port = rhport;
-
spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -559,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct device *dev = &urb->dev->dev;
int ret = 0;
unsigned long flags;
+ struct vhci_device *vdev;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
@@ -574,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return urb->status;
}
+ vdev = port_to_vdev(urb->dev->portnum-1);
+
+ /* refuse enqueue for dead connection */
+ spin_lock(&vdev->ud.lock);
+ if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
+ usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
+ spin_unlock(&vdev->ud.lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
+ return -ENODEV;
+ }
+ spin_unlock(&vdev->ud.lock);
+
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto no_need_unlink;
@@ -592,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
__u8 type = usb_pipetype(urb->pipe);
struct usb_ctrlrequest *ctrlreq =
(struct usb_ctrlrequest *) urb->setup_packet;
- struct vhci_device *vdev =
- port_to_vdev(the_controller->pending_port);
if (type != PIPE_CONTROL || !ctrlreq) {
dev_err(dev, "invalid request to devnum 0\n");
@@ -607,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
dev_info(dev, "SetAddress Request (%d) to port %d\n",
ctrlreq->wValue, vdev->rhport);
- vdev->udev = urb->dev;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
spin_lock(&vdev->ud.lock);
vdev->ud.status = VDEV_ST_USED;
@@ -627,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
"Get_Descriptor to device 0 "
"(get max pipe size)\n");
- /* FIXME: reference count? (usb_get_dev()) */
- vdev->udev = urb->dev;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
goto out;
default:
@@ -799,27 +811,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
spin_unlock_irqrestore(&vdev->priv_lock, flags2);
}
-
- if (!vdev->ud.tcp_socket) {
- /* tcp connection is closed */
- usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
- urb);
-
- usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- spin_unlock_irqrestore(&the_controller->lock, flags);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
- urb->status);
- spin_lock_irqsave(&the_controller->lock, flags);
- }
-
spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
}
-
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
@@ -827,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+ usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
list_del(&unlink->list);
kfree(unlink);
}
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+ struct urb *urb;
+
+ /* give back URB of unanswered unlink request */
+ usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
+
+ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+ if (!urb) {
+ usbip_uinfo("the urb (seqnum %lu) was already given back\n",
+ unlink->unlink_seqnum);
+ list_del(&unlink->list);
+ kfree(unlink);
+ continue;
+ }
+
+ urb->status = -ENODEV;
+
+ spin_lock(&the_controller->lock);
+ usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+ spin_unlock(&the_controller->lock);
+
+ usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+
list_del(&unlink->list);
kfree(unlink);
}
@@ -901,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud)
vdev->speed = 0;
vdev->devid = 0;
+ if (vdev->udev)
+ usb_put_dev(vdev->udev);
+ vdev->udev = NULL;
+
ud->tcp_socket = NULL;
ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 8147d7202b2d..bf6991470941 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -23,16 +23,14 @@
#include "vhci.h"
-/* get URB from transmitted urb queue */
-static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
__u32 seqnum)
{
struct vhci_priv *priv, *tmp;
struct urb *urb = NULL;
int status;
- spin_lock(&vdev->priv_lock);
-
list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
if (priv->seqnum == seqnum) {
urb = priv->urb;
@@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
}
}
- spin_unlock(&vdev->priv_lock);
-
return urb;
}
@@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
+ spin_unlock(&vdev->priv_lock);
if (!urb) {
usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -161,7 +159,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
return;
}
+ spin_lock(&vdev->priv_lock);
+
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+
+ spin_unlock(&vdev->priv_lock);
+
if (!urb) {
/*
* I get the result of a unlink request. But, it seems that I
@@ -190,6 +193,19 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
return;
}
+static int vhci_priv_tx_empty(struct vhci_device *vdev)
+{
+ int empty = 0;
+
+ spin_lock(&vdev->priv_lock);
+
+ empty = list_empty(&vdev->priv_rx);
+
+ spin_unlock(&vdev->priv_lock);
+
+ return empty;
+}
+
/* recv a pdu */
static void vhci_rx_pdu(struct usbip_device *ud)
{
@@ -202,11 +218,29 @@ static void vhci_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
-
/* 1. receive a pdu header */
ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ if (ret < 0) {
+ if (ret == -ECONNRESET)
+ usbip_uinfo("connection reset by peer\n");
+ else if (ret == -EAGAIN) {
+ /* ignore if connection was idle */
+ if (vhci_priv_tx_empty(vdev))
+ return;
+ usbip_uinfo("connection timed out with pending urbs\n");
+ } else if (ret != -ERESTARTSYS)
+ usbip_uinfo("xmit failed %d\n", ret);
+
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return;
+ }
+ if (ret == 0) {
+ usbip_uinfo("connection closed");
+ usbip_event_add(ud, VDEV_EVENT_DOWN);
+ return;
+ }
if (ret != sizeof(pdu)) {
- usbip_uerr("receiving pdu failed! size is %d, should be %d\n",
+ usbip_uerr("received pdu size is %d, should be %d\n",
ret, (unsigned int)sizeof(pdu));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
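
With this change pickup_urb_and_free_priv() no longer takes vdev->priv_lock itself; every caller is expected to hold it, as the updated call sites above show. A hypothetical wrapper illustrating the convention (assumes the declarations from vhci.h):

#include "vhci.h"

/*
 * Illustrative wrapper only: take priv_lock around the lookup, exactly as
 * vhci_recv_ret_submit() and vhci_recv_ret_unlink() now do.
 */
static struct urb *example_pickup_locked(struct vhci_device *vdev, __u32 seqnum)
{
    struct urb *urb;

    spin_lock(&vdev->priv_lock);
    urb = pickup_urb_and_free_priv(vdev, seqnum);
    spin_unlock(&vdev->priv_lock);

    return urb;
}
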
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 4d745623211b..42de83e6f1d9 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -44,7 +44,7 @@ static int geoid;
static char driver_name[] = "vme_ca91cx42";
-static const struct pci_device_id ca91cx42_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
{ },
};
@@ -58,7 +58,7 @@ static struct pci_driver ca91cx42_driver = {
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
- wake_up(&(bridge->dma_queue));
+ wake_up(&bridge->dma_queue);
return CA91CX42_LINT_DMA;
}
@@ -82,14 +82,14 @@ static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
- wake_up(&(bridge->mbox_queue));
+ wake_up(&bridge->mbox_queue);
return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
- wake_up(&(bridge->iack_queue));
+ wake_up(&bridge->iack_queue);
return CA91CX42_LINT_SW_IACK;
}
@@ -207,9 +207,9 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
/* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));
+ INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);
- mutex_init(&(ca91cx42_bridge->irq_mtx));
+ mutex_init(&ca91cx42_bridge->irq_mtx);
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
@@ -259,8 +259,8 @@ static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
/*
* Set up an VME interrupt
*/
-void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
- int sync)
+static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
+ int state, int sync)
{
struct pci_dev *pdev;
@@ -287,7 +287,7 @@ void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
}
}
-int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
+static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
int statid)
{
u32 tmp;
@@ -299,7 +299,7 @@ int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
if (statid & 1)
return -EINVAL;
- mutex_lock(&(bridge->vme_int));
+ mutex_lock(&bridge->vme_int);
tmp = ioread32(bridge->base + VINT_EN);
@@ -318,12 +318,12 @@ int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
tmp = tmp & ~(1 << (level + 24));
iowrite32(tmp, bridge->base + VINT_EN);
- mutex_unlock(&(bridge->vme_int));
+ mutex_unlock(&bridge->vme_int);
return 0;
}
-int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
+static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
@@ -429,7 +429,7 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
return 0;
}
-int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
+static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
@@ -518,8 +518,8 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
if (image->bus_resource.name != NULL)
kfree(image->bus_resource.name);
- release_resource(&(image->bus_resource));
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ release_resource(&image->bus_resource);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
}
if (image->bus_resource.name == NULL) {
@@ -540,7 +540,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->bus_resource.flags = IORESOURCE_MEM;
retval = pci_bus_alloc_resource(pdev->bus,
- &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
+ &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
@@ -563,10 +563,10 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
iounmap(image->kern_base);
image->kern_base = NULL;
err_remap:
- release_resource(&(image->bus_resource));
+ release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
return retval;
}
@@ -578,13 +578,13 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
{
iounmap(image->kern_base);
image->kern_base = NULL;
- release_resource(&(image->bus_resource));
+ release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(struct resource));
}
-int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
+static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
@@ -620,7 +620,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
goto err_window;
}
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
/*
* Let's allocate the resource here rather than further up the stack as
@@ -628,7 +628,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
*/
retval = ca91cx42_alloc_resource(image, size);
if (retval) {
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
"for resource name\n");
retval = -ENOMEM;
@@ -672,7 +672,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
break;
default:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
@@ -704,7 +704,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
case VME_USER3:
case VME_USER4:
default:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
@@ -730,7 +730,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return 0;
err_aspace:
@@ -741,8 +741,8 @@ err_window:
return retval;
}
-int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
+static int __ca91cx42_master_get(struct vme_master_resource *image,
+ int *enabled, unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
unsigned int i, ctl;
@@ -828,24 +828,24 @@ int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
return 0;
}
-int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
+static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
int retval;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
cycle, dwidth);
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return retval;
}
-ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+ void *buf, size_t count, loff_t offset)
{
ssize_t retval;
void *addr = image->kern_base + offset;
@@ -855,7 +855,7 @@ ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
if (count == 0)
return 0;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
/* The following code handles VME address alignment problem
* in order to assure the maximal data width cycle.
@@ -899,13 +899,13 @@ ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
}
out:
retval = count;
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return retval;
}
-ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+ void *buf, size_t count, loff_t offset)
{
ssize_t retval;
void *addr = image->kern_base + offset;
@@ -915,7 +915,7 @@ ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
if (count == 0)
return 0;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
/* Here we apply for the same strategy we do in master_read
* function in order to assure D16 cycle when required.
@@ -954,11 +954,12 @@ ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
out:
retval = count;
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
+
return retval;
}
-unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
+static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
@@ -974,10 +975,10 @@ unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
i = image->number;
/* Locking as we can only do one of these at a time */
- mutex_lock(&(bridge->vme_rmw));
+ mutex_lock(&bridge->vme_rmw);
/* Lock image */
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
pci_addr = (u32)image->kern_base + offset;
@@ -1007,15 +1008,15 @@ unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
iowrite32(0, bridge->base + SCYC_CTL);
out:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
- mutex_unlock(&(bridge->vme_rmw));
+ mutex_unlock(&bridge->vme_rmw);
return result;
}
-int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
- struct vme_dma_attr *dest, size_t count)
+static int ca91cx42_dma_list_add(struct vme_dma_list *list,
+ struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
struct ca91cx42_dma_entry *entry, *prev;
struct vme_dma_pci *pci_attr;
@@ -1036,14 +1037,14 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
}
/* Test descriptor alignment */
- if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
+ if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
- "required: %p\n", &(entry->descriptor));
+ "required: %p\n", &entry->descriptor);
retval = -EINVAL;
goto err_align;
}
- memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
@@ -1138,14 +1139,14 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
/* Add to list */
- list_add_tail(&(entry->list), &(list->entries));
+ list_add_tail(&entry->list, &list->entries);
/* Fill out previous descriptors "Next Address" */
- if (entry->list.prev != &(list->entries)) {
+ if (entry->list.prev != &list->entries) {
prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
list);
/* We need the bus address for the pointer */
- desc_ptr = virt_to_bus(&(entry->descriptor));
+ desc_ptr = virt_to_bus(&entry->descriptor);
prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
}
@@ -1175,7 +1176,7 @@ static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
return 1;
}
-int ca91cx42_dma_list_exec(struct vme_dma_list *list)
+static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
struct vme_dma_resource *ctrlr;
struct ca91cx42_dma_entry *entry;
@@ -1190,28 +1191,28 @@ int ca91cx42_dma_list_exec(struct vme_dma_list *list)
bridge = ctrlr->parent->driver_priv;
dev = ctrlr->parent->parent;
- mutex_lock(&(ctrlr->mtx));
+ mutex_lock(&ctrlr->mtx);
- if (!(list_empty(&(ctrlr->running)))) {
+ if (!(list_empty(&ctrlr->running))) {
/*
* XXX We have an active DMA transfer and currently haven't
* sorted out the mechanism for "pending" DMA transfers.
* Return busy.
*/
/* Need to add to pending here */
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
return -EBUSY;
} else {
- list_add(&(list->list), &(ctrlr->running));
+ list_add(&list->list, &ctrlr->running);
}
/* Get first bus address and write into registers */
- entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
+ entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
list);
- bus_addr = virt_to_bus(&(entry->descriptor));
+ bus_addr = virt_to_bus(&entry->descriptor);
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
iowrite32(0, bridge->base + DTBC);
iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
@@ -1249,21 +1250,21 @@ int ca91cx42_dma_list_exec(struct vme_dma_list *list)
}
/* Remove list from running list */
- mutex_lock(&(ctrlr->mtx));
- list_del(&(list->list));
- mutex_unlock(&(ctrlr->mtx));
+ mutex_lock(&ctrlr->mtx);
+ list_del(&list->list);
+ mutex_unlock(&ctrlr->mtx);
return retval;
}
-int ca91cx42_dma_list_empty(struct vme_dma_list *list)
+static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
struct list_head *pos, *temp;
struct ca91cx42_dma_entry *entry;
/* detach and free each entry */
- list_for_each_safe(pos, temp, &(list->entries)) {
+ list_for_each_safe(pos, temp, &list->entries) {
list_del(pos);
entry = list_entry(pos, struct ca91cx42_dma_entry, list);
kfree(entry);
@@ -1279,8 +1280,8 @@ int ca91cx42_dma_list_empty(struct vme_dma_list *list)
* This does not enable the LM monitor - that should be done when the first
* callback is attached and disabled when the last callback is removed.
*/
-int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+static int ca91cx42_lm_set(struct vme_lm_resource *lm,
+ unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
{
u32 temp_base, lm_ctl = 0;
int i;
@@ -1298,12 +1299,12 @@ int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
return -EINVAL;
}
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
return -EBUSY;
@@ -1321,7 +1322,7 @@ int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
lm_ctl |= CA91CX42_LM_CTL_AS_A32;
break;
default:
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(dev, "Invalid address space\n");
return -EINVAL;
break;
@@ -1339,7 +1340,7 @@ int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
iowrite32(lm_base, bridge->base + LM_BS);
iowrite32(lm_ctl, bridge->base + LM_CTL);
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
@@ -1347,15 +1348,15 @@ int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* Get configuration of the callback monitor and return whether it is enabled
* or disabled.
*/
-int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
- vme_address_t *aspace, vme_cycle_t *cycle)
+static int ca91cx42_lm_get(struct vme_lm_resource *lm,
+ unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
u32 lm_ctl, enabled = 0;
struct ca91cx42_driver *bridge;
bridge = lm->parent->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
lm_ctl = ioread32(bridge->base + LM_CTL);
@@ -1380,7 +1381,7 @@ int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
if (lm_ctl & CA91CX42_LM_CTL_DATA)
*cycle |= VME_DATA;
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return enabled;
}
@@ -1390,7 +1391,7 @@ int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
*
* Callback will be passed the monitor triggered.
*/
-int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
+static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
void (*callback)(int))
{
u32 lm_ctl, tmp;
@@ -1400,19 +1401,19 @@ int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
bridge = lm->parent->driver_priv;
dev = lm->parent->parent;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* Ensure that the location monitor is configured - need PGM or DATA */
lm_ctl = ioread32(bridge->base + LM_CTL);
if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor not properly configured\n");
return -EINVAL;
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
}
@@ -1431,7 +1432,7 @@ int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
iowrite32(lm_ctl, bridge->base + LM_CTL);
}
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
@@ -1439,14 +1440,14 @@ int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
/*
 * Detach a callback function from a specific location monitor.
*/
-int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
+static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
u32 tmp;
struct ca91cx42_driver *bridge;
bridge = lm->parent->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* Disable Location Monitor and ensure previous interrupts are clear */
tmp = ioread32(bridge->base + LINT_EN);
@@ -1467,12 +1468,12 @@ int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
iowrite32(tmp, bridge->base + LM_CTL);
}
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
-int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
+static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
u32 slot = 0;
struct ca91cx42_driver *bridge;
@@ -1526,7 +1527,7 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &(bridge->crcsr_bus));
+ &bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
@@ -1632,12 +1633,12 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Initialize wait queues & mutual exclusion flags */
- init_waitqueue_head(&(ca91cx42_device->dma_queue));
- init_waitqueue_head(&(ca91cx42_device->iack_queue));
- mutex_init(&(ca91cx42_device->vme_int));
- mutex_init(&(ca91cx42_device->vme_rmw));
+ init_waitqueue_head(&ca91cx42_device->dma_queue);
+ init_waitqueue_head(&ca91cx42_device->iack_queue);
+ mutex_init(&ca91cx42_device->vme_int);
+ mutex_init(&ca91cx42_device->vme_rmw);
- ca91cx42_bridge->parent = &(pdev->dev);
+ ca91cx42_bridge->parent = &pdev->dev;
strcpy(ca91cx42_bridge->name, driver_name);
/* Setup IRQ */
@@ -1648,7 +1649,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add master windows to list */
- INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
+ INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
@@ -1659,7 +1660,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_master;
}
master_image->parent = ca91cx42_bridge;
- spin_lock_init(&(master_image->lock));
+ spin_lock_init(&master_image->lock);
master_image->locked = 0;
master_image->number = i;
master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -1667,15 +1668,15 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
- memset(&(master_image->bus_resource), 0,
+ memset(&master_image->bus_resource, 0,
sizeof(struct resource));
master_image->kern_base = NULL;
- list_add_tail(&(master_image->list),
- &(ca91cx42_bridge->master_resources));
+ list_add_tail(&master_image->list,
+ &ca91cx42_bridge->master_resources);
}
/* Add slave windows to list */
- INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
+ INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
@@ -1686,7 +1687,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_slave;
}
slave_image->parent = ca91cx42_bridge;
- mutex_init(&(slave_image->mtx));
+ mutex_init(&slave_image->mtx);
slave_image->locked = 0;
slave_image->number = i;
slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
@@ -1698,12 +1699,12 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
- list_add_tail(&(slave_image->list),
- &(ca91cx42_bridge->slave_resources));
+ list_add_tail(&slave_image->list,
+ &ca91cx42_bridge->slave_resources);
}
/* Add dma engines to list */
- INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
+ INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
for (i = 0; i < CA91C142_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
@@ -1714,19 +1715,19 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dma;
}
dma_ctrlr->parent = ca91cx42_bridge;
- mutex_init(&(dma_ctrlr->mtx));
+ mutex_init(&dma_ctrlr->mtx);
dma_ctrlr->locked = 0;
dma_ctrlr->number = i;
dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
VME_DMA_MEM_TO_VME;
- INIT_LIST_HEAD(&(dma_ctrlr->pending));
- INIT_LIST_HEAD(&(dma_ctrlr->running));
- list_add_tail(&(dma_ctrlr->list),
- &(ca91cx42_bridge->dma_resources));
+ INIT_LIST_HEAD(&dma_ctrlr->pending);
+ INIT_LIST_HEAD(&dma_ctrlr->running);
+ list_add_tail(&dma_ctrlr->list,
+ &ca91cx42_bridge->dma_resources);
}
/* Add location monitor to list */
- INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
+ INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
@@ -1735,11 +1736,11 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_lm;
}
lm->parent = ca91cx42_bridge;
- mutex_init(&(lm->mtx));
+ mutex_init(&lm->mtx);
lm->locked = 0;
lm->number = 1;
lm->monitors = 4;
- list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
+ list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
ca91cx42_bridge->slave_get = ca91cx42_slave_get;
ca91cx42_bridge->slave_set = ca91cx42_slave_set;
@@ -1786,28 +1787,28 @@ err_reg:
ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->lm_resources) {
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
err_dma:
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
err_slave:
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
err_master:
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
@@ -1831,7 +1832,7 @@ err_struct:
}
-void ca91cx42_remove(struct pci_dev *pdev)
+static void ca91cx42_remove(struct pci_dev *pdev)
{
struct list_head *pos = NULL;
struct vme_master_resource *master_image;
@@ -1870,28 +1871,28 @@ void ca91cx42_remove(struct pci_dev *pdev)
ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->lm_resources) {
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
/* resources are stored in link list */
- list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
+ list_for_each(pos, &ca91cx42_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
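The ca91cx42_dma_list_add() hunks above rely on a chaining pattern: each hardware descriptor must sit on a 16-byte boundary (the CA91CX42_DCPP_M check), and the previous descriptor's "next" field is patched with the bus address of the newly added one. The sketch below illustrates that step with made-up names (struct demo_desc, demo_list_add); the real driver uses struct ca91cx42_dma_descriptor and the register layout defined in vme_ca91cx42.h.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>		/* virt_to_bus(), still used by this driver */

struct demo_desc {			/* illustrative stand-in for the HW descriptor */
	u32 next;			/* bus address of the next descriptor */
	/* ... remaining hardware fields ... */
} __aligned(16);

struct demo_entry {
	struct demo_desc descriptor;
	struct list_head list;
};

static int demo_list_add(struct list_head *entries, struct demo_entry *entry)
{
	struct demo_entry *prev;

	/* The DMA engine walks descriptors by bus address, so alignment
	 * has to be checked before the entry is queued. */
	if ((unsigned long)&entry->descriptor & 0xf)
		return -EINVAL;

	list_add_tail(&entry->list, entries);

	/* Patch the previous descriptor's "next" pointer, if one exists */
	if (entry->list.prev != entries) {
		prev = list_entry(entry->list.prev, struct demo_entry, list);
		prev->descriptor.next = virt_to_bus(&entry->descriptor) & ~0xfUL;
	}
	return 0;
}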
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.h b/drivers/staging/vme/bridges/vme_ca91cx42.h
index e72c65b193ec..02a7c794db05 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.h
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.h
@@ -39,7 +39,7 @@
/* Structure used to hold driver specific information */
struct ca91cx42_driver {
- void *base; /* Base Address of device registers */
+ void __iomem *base; /* Base Address of device registers */
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t mbox_queue;
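The only change here is annotating the register base as void __iomem *. The annotation has no runtime effect; it lets sparse (make C=1) warn when the pointer is dereferenced directly instead of going through the MMIO accessors. A minimal sketch, with DEMO_REG as an assumed offset rather than a real ca91cx42 register:

#include <linux/types.h>
#include <linux/io.h>

#define DEMO_REG 0x04	/* illustrative offset only */

static u32 demo_read_status(void __iomem *base)
{
	/* *(u32 *)(base + DEMO_REG) would be flagged by sparse */
	return ioread32(base + DEMO_REG);
}

static void demo_ack_status(void __iomem *base, u32 bits)
{
	iowrite32(bits, base + DEMO_REG);
}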
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 492ddb2d5108..26ea42fa784d 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -46,7 +46,7 @@ static int geoid;
static char driver_name[] = "vme_tsi148";
-static const struct pci_device_id tsi148_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
{ },
};
@@ -81,11 +81,11 @@ static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
u32 serviced = 0;
if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
- wake_up(&(bridge->dma_queue[0]));
+ wake_up(&bridge->dma_queue[0]);
serviced |= TSI148_LCSR_INTC_DMA0C;
}
if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
- wake_up(&(bridge->dma_queue[1]));
+ wake_up(&bridge->dma_queue[1]);
serviced |= TSI148_LCSR_INTC_DMA1C;
}
@@ -191,7 +191,7 @@ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
if (error) {
error->address = error_addr;
error->attributes = error_attrib;
- list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
+ list_add_tail(&error->list, &tsi148_bridge->vme_errors);
} else {
dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
"VMEbus Error reporting\n");
@@ -210,7 +210,7 @@ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
*/
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
- wake_up(&(bridge->iack_queue));
+ wake_up(&bridge->iack_queue);
return TSI148_LCSR_INTC_IACKC;
}
@@ -320,9 +320,9 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
bridge = tsi148_bridge->driver_priv;
/* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
+ INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
- mutex_init(&(tsi148_bridge->irq_mtx));
+ mutex_init(&tsi148_bridge->irq_mtx);
result = request_irq(pdev->irq,
tsi148_irqhandler,
@@ -374,8 +374,11 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
return 0;
}
-static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
+static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
{
+ struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
+
/* Turn off interrupts */
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
@@ -384,13 +387,13 @@ static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
/* Detach interrupt handler */
- free_irq(pdev->irq, pdev);
+ free_irq(pdev->irq, tsi148_bridge);
}
/*
 * Check to see if an IACK has been received, return true (1) or false (0).
*/
-int tsi148_iack_received(struct tsi148_driver *bridge)
+static int tsi148_iack_received(struct tsi148_driver *bridge)
{
u32 tmp;
@@ -405,7 +408,7 @@ int tsi148_iack_received(struct tsi148_driver *bridge)
/*
* Configure VME interrupt
*/
-void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
+static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
int state, int sync)
{
struct pci_dev *pdev;
@@ -445,14 +448,15 @@ void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
* Generate a VME bus interrupt at the requested level & vector. Wait for
* interrupt to be acked.
*/
-int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
+static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
+ int statid)
{
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
- mutex_lock(&(bridge->vme_int));
+ mutex_lock(&bridge->vme_int);
/* Read VICR register */
tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
@@ -470,7 +474,7 @@ int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
wait_event_interruptible(bridge->iack_queue,
tsi148_iack_received(bridge));
- mutex_unlock(&(bridge->vme_int));
+ mutex_unlock(&bridge->vme_int);
return 0;
}
@@ -496,7 +500,7 @@ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
*/
err_pos = NULL;
/* Iterate through errors */
- list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
+ list_for_each(err_pos, &tsi148_bridge->vme_errors) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
if ((vme_err->address >= address) &&
(vme_err->address < bound)) {
@@ -530,7 +534,7 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
*/
err_pos = NULL;
/* Iterate through errors */
- list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
+ list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
if ((vme_err->address >= address) &&
@@ -545,7 +549,7 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
/*
* Initialize a slave window with the requested attributes.
*/
-int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
+static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
@@ -695,7 +699,7 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
/*
* Get slave window configuration.
*/
-int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
+static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
@@ -819,8 +823,8 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
if (image->bus_resource.name != NULL)
kfree(image->bus_resource.name);
- release_resource(&(image->bus_resource));
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ release_resource(&image->bus_resource);
+ memset(&image->bus_resource, 0, sizeof(struct resource));
}
/* Exit here if size is zero */
@@ -845,7 +849,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->bus_resource.flags = IORESOURCE_MEM;
retval = pci_bus_alloc_resource(pdev->bus,
- &(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
+ &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
dev_err(tsi148_bridge->parent, "Failed to allocate mem "
@@ -868,10 +872,10 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
iounmap(image->kern_base);
image->kern_base = NULL;
err_remap:
- release_resource(&(image->bus_resource));
+ release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
return retval;
}
@@ -883,15 +887,15 @@ static void tsi148_free_resource(struct vme_master_resource *image)
{
iounmap(image->kern_base);
image->kern_base = NULL;
- release_resource(&(image->bus_resource));
+ release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&(image->bus_resource), 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(struct resource));
}
/*
* Set the attributes of an outbound window.
*/
-int tsi148_master_set(struct vme_master_resource *image, int enabled,
+static int tsi148_master_set(struct vme_master_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
@@ -924,7 +928,7 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
goto err_window;
}
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
/* Let's allocate the resource here rather than further up the stack as
 * it avoids pushing loads of bus dependent stuff up the stack. If size
@@ -932,7 +936,7 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
*/
retval = tsi148_alloc_resource(image, size);
if (retval) {
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
"resource\n");
goto err_res;
@@ -959,19 +963,19 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
if (pci_base_low & 0xFFFF) {
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (pci_bound_low & 0xFFFF) {
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (vme_offset_low & 0xFFFF) {
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Invalid VME Offset "
"alignment\n");
retval = -EINVAL;
@@ -1035,7 +1039,7 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
break;
default:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
@@ -1072,7 +1076,7 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
break;
default:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
dev_err(tsi148_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
@@ -1109,7 +1113,7 @@ int tsi148_master_set(struct vme_master_resource *image, int enabled,
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return 0;
err_aspace:
@@ -1127,7 +1131,7 @@ err_window:
*
* XXX Not parsing prefetch information.
*/
-int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
+static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
@@ -1237,23 +1241,23 @@ int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
}
-int tsi148_master_get(struct vme_master_resource *image, int *enabled,
+static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
int retval;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
cycle, dwidth);
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return retval;
}
-ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
size_t count, loff_t offset)
{
int retval, enabled;
@@ -1266,7 +1270,7 @@ ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
tsi148_bridge = image->parent;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
retval = count;
@@ -1289,13 +1293,13 @@ ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
}
skip_chk:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return retval;
}
-ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
size_t count, loff_t offset)
{
int retval = 0, enabled;
@@ -1312,7 +1316,7 @@ ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
bridge = tsi148_bridge->driver_priv;
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
retval = count;
@@ -1352,7 +1356,7 @@ ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
}
skip_chk:
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
return retval;
}
@@ -1362,7 +1366,7 @@ skip_chk:
*
* Requires a previously configured master window, returns final value.
*/
-unsigned int tsi148_master_rmw(struct vme_master_resource *image,
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
@@ -1378,10 +1382,10 @@ unsigned int tsi148_master_rmw(struct vme_master_resource *image,
i = image->number;
/* Locking as we can only do one of these at a time */
- mutex_lock(&(bridge->vme_rmw));
+ mutex_lock(&bridge->vme_rmw);
/* Lock image */
- spin_lock(&(image->lock));
+ spin_lock(&image->lock);
pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
@@ -1411,9 +1415,9 @@ unsigned int tsi148_master_rmw(struct vme_master_resource *image,
tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
- spin_unlock(&(image->lock));
+ spin_unlock(&image->lock);
- mutex_unlock(&(bridge->vme_rmw));
+ mutex_unlock(&bridge->vme_rmw);
return result;
}
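tsi148_master_rmw() above performs an indivisible compare-and-swap cycle on the VME bus through a previously configured master window, serialised by the vme_rmw mutex. The sketch below shows how a consumer might use the corresponding vme_master_rmw() call from the staging API to claim a shared lock byte; the lock layout and the 0x00/0x01 free/taken convention are assumptions made for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include "../vme.h"	/* staging VME API, as included by the in-tree consumers */

/* Hedged sketch: claim a one-byte lock located in VME memory that is
 * already reachable through the master window behind `res`. */
static int demo_claim_vme_lock(struct vme_resource *res, loff_t lock_offset)
{
	unsigned int old;

	/* mask 0xff: act on the low byte; compare 0x00: only swap when the
	 * byte currently reads back as free; swap 0x01: mark it taken.
	 * The return value is what the RMW cycle read from the bus. */
	old = vme_master_rmw(res, 0xff, 0x00, 0x01, lock_offset);

	return (old & 0xff) == 0x00 ? 0 : -EBUSY;
}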
@@ -1609,8 +1613,8 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
/*
* Add a link list descriptor to the list
*/
-int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
- struct vme_dma_attr *dest, size_t count)
+static int tsi148_dma_list_add(struct vme_dma_list *list,
+ struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
struct tsi148_dma_entry *entry, *prev;
u32 address_high, address_low;
@@ -1633,10 +1637,10 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
}
/* Test descriptor alignment */
- if ((unsigned long)&(entry->descriptor) & 0x7) {
+ if ((unsigned long)&entry->descriptor & 0x7) {
dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
"byte boundary as required: %p\n",
- &(entry->descriptor));
+ &entry->descriptor);
retval = -EINVAL;
goto err_align;
}
@@ -1644,7 +1648,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
- memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
/* Fill out source part */
switch (src->type) {
@@ -1681,7 +1685,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
retval = tsi148_dma_set_vme_src_attributes(
- tsi148_bridge->parent, &(entry->descriptor.dsat),
+ tsi148_bridge->parent, &entry->descriptor.dsat,
vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
if (retval < 0)
goto err_source;
@@ -1719,7 +1723,7 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
retval = tsi148_dma_set_vme_dest_attributes(
- tsi148_bridge->parent, &(entry->descriptor.ddat),
+ tsi148_bridge->parent, &entry->descriptor.ddat,
vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
if (retval < 0)
goto err_dest;
@@ -1735,16 +1739,16 @@ int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dcnt = (u32)count;
/* Add to list */
- list_add_tail(&(entry->list), &(list->entries));
+ list_add_tail(&entry->list, &list->entries);
/* Fill out previous descriptors "Next Address" */
- if (entry->list.prev != &(list->entries)) {
+ if (entry->list.prev != &list->entries) {
prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
list);
/* We need the bus address for the pointer */
- desc_ptr = virt_to_bus(&(entry->descriptor));
- reg_split(desc_ptr, &(prev->descriptor.dnlau),
- &(prev->descriptor.dnlal));
+ desc_ptr = virt_to_bus(&entry->descriptor);
+ reg_split(desc_ptr, &prev->descriptor.dnlau,
+ &prev->descriptor.dnlal);
}
return 0;
@@ -1782,7 +1786,7 @@ static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
*
* XXX Need to provide control register configuration.
*/
-int tsi148_dma_list_exec(struct vme_dma_list *list)
+static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
struct vme_dma_resource *ctrlr;
int channel, retval = 0;
@@ -1799,30 +1803,30 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
bridge = tsi148_bridge->driver_priv;
- mutex_lock(&(ctrlr->mtx));
+ mutex_lock(&ctrlr->mtx);
channel = ctrlr->number;
- if (!list_empty(&(ctrlr->running))) {
+ if (!list_empty(&ctrlr->running)) {
/*
* XXX We have an active DMA transfer and currently haven't
* sorted out the mechanism for "pending" DMA transfers.
* Return busy.
*/
/* Need to add to pending here */
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
return -EBUSY;
} else {
- list_add(&(list->list), &(ctrlr->running));
+ list_add(&list->list, &ctrlr->running);
}
/* Get first bus address and write into registers */
- entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
+ entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
list);
- bus_addr = virt_to_bus(&(entry->descriptor));
+ bus_addr = virt_to_bus(&entry->descriptor);
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
@@ -1850,9 +1854,9 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
}
/* Remove list from running list */
- mutex_lock(&(ctrlr->mtx));
- list_del(&(list->list));
- mutex_unlock(&(ctrlr->mtx));
+ mutex_lock(&ctrlr->mtx);
+ list_del(&list->list);
+ mutex_unlock(&ctrlr->mtx);
return retval;
}
@@ -1862,13 +1866,13 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
*
* We have a separate function, don't assume that the chain can't be reused.
*/
-int tsi148_dma_list_empty(struct vme_dma_list *list)
+static int tsi148_dma_list_empty(struct vme_dma_list *list)
{
struct list_head *pos, *temp;
struct tsi148_dma_entry *entry;
/* detach and free each entry */
- list_for_each_safe(pos, temp, &(list->entries)) {
+ list_for_each_safe(pos, temp, &list->entries) {
list_del(pos);
entry = list_entry(pos, struct tsi148_dma_entry, list);
kfree(entry);
@@ -1884,7 +1888,7 @@ int tsi148_dma_list_empty(struct vme_dma_list *list)
* This does not enable the LM monitor - that should be done when the first
* callback is attached and disabled when the last callback is removed.
*/
-int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
vme_address_t aspace, vme_cycle_t cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
@@ -1896,12 +1900,12 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
bridge = tsi148_bridge->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
return -EBUSY;
@@ -1922,7 +1926,7 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
break;
default:
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
break;
@@ -1943,7 +1947,7 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
@@ -1951,15 +1955,15 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* Get configuration of the callback monitor and return whether it is enabled
* or disabled.
*/
-int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
- vme_address_t *aspace, vme_cycle_t *cycle)
+static int tsi148_lm_get(struct vme_lm_resource *lm,
+ unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
bridge = lm->parent->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
@@ -1992,7 +1996,7 @@ int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
if (lm_ctl & TSI148_LCSR_LMAT_DATA)
*cycle |= VME_DATA;
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return enabled;
}
@@ -2002,7 +2006,7 @@ int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
*
* Callback will be passed the monitor triggered.
*/
-int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
+static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
void (*callback)(int))
{
u32 lm_ctl, tmp;
@@ -2013,12 +2017,12 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
bridge = tsi148_bridge->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* Ensure that the location monitor is configured - need PGM or DATA */
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor not properly "
"configured\n");
return -EINVAL;
@@ -2026,7 +2030,7 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
}
@@ -2049,7 +2053,7 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
}
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
@@ -2057,14 +2061,14 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
/*
 * Detach a callback function from a specific location monitor.
*/
-int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
+static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
u32 lm_en, tmp;
struct tsi148_driver *bridge;
bridge = lm->parent->driver_priv;
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* Disable Location Monitor and ensure previous interrupts are clear */
lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
@@ -2089,7 +2093,7 @@ int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
}
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
return 0;
}
@@ -2097,7 +2101,7 @@ int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
/*
* Determine Geographical Addressing
*/
-int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
+static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
u32 slot = 0;
struct tsi148_driver *bridge;
@@ -2142,7 +2146,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &(bridge->crcsr_bus));
+ &bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
@@ -2280,13 +2284,13 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Initialize wait queues & mutual exclusion flags */
- init_waitqueue_head(&(tsi148_device->dma_queue[0]));
- init_waitqueue_head(&(tsi148_device->dma_queue[1]));
- init_waitqueue_head(&(tsi148_device->iack_queue));
- mutex_init(&(tsi148_device->vme_int));
- mutex_init(&(tsi148_device->vme_rmw));
+ init_waitqueue_head(&tsi148_device->dma_queue[0]);
+ init_waitqueue_head(&tsi148_device->dma_queue[1]);
+ init_waitqueue_head(&tsi148_device->iack_queue);
+ mutex_init(&tsi148_device->vme_int);
+ mutex_init(&tsi148_device->vme_rmw);
- tsi148_bridge->parent = &(pdev->dev);
+ tsi148_bridge->parent = &pdev->dev;
strcpy(tsi148_bridge->name, driver_name);
/* Setup IRQ */
@@ -2314,7 +2318,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_master;
}
tsi148_device->flush_image->parent = tsi148_bridge;
- spin_lock_init(&(tsi148_device->flush_image->lock));
+ spin_lock_init(&tsi148_device->flush_image->lock);
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
@@ -2324,13 +2328,13 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
VME_USER | VME_PROG | VME_DATA;
tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
- memset(&(tsi148_device->flush_image->bus_resource), 0,
+ memset(&tsi148_device->flush_image->bus_resource, 0,
sizeof(struct resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
- INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
+ INIT_LIST_HEAD(&tsi148_bridge->master_resources);
for (i = 0; i < master_num; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
@@ -2341,7 +2345,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_master;
}
master_image->parent = tsi148_bridge;
- spin_lock_init(&(master_image->lock));
+ spin_lock_init(&master_image->lock);
master_image->locked = 0;
master_image->number = i;
master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -2351,15 +2355,15 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
- memset(&(master_image->bus_resource), 0,
+ memset(&master_image->bus_resource, 0,
sizeof(struct resource));
master_image->kern_base = NULL;
- list_add_tail(&(master_image->list),
- &(tsi148_bridge->master_resources));
+ list_add_tail(&master_image->list,
+ &tsi148_bridge->master_resources);
}
/* Add slave windows to list */
- INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
+ INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
@@ -2370,7 +2374,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_slave;
}
slave_image->parent = tsi148_bridge;
- mutex_init(&(slave_image->mtx));
+ mutex_init(&slave_image->mtx);
slave_image->locked = 0;
slave_image->number = i;
slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -2380,12 +2384,12 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
- list_add_tail(&(slave_image->list),
- &(tsi148_bridge->slave_resources));
+ list_add_tail(&slave_image->list,
+ &tsi148_bridge->slave_resources);
}
/* Add dma engines to list */
- INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
+ INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
for (i = 0; i < TSI148_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
@@ -2396,21 +2400,21 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dma;
}
dma_ctrlr->parent = tsi148_bridge;
- mutex_init(&(dma_ctrlr->mtx));
+ mutex_init(&dma_ctrlr->mtx);
dma_ctrlr->locked = 0;
dma_ctrlr->number = i;
dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
VME_DMA_PATTERN_TO_MEM;
- INIT_LIST_HEAD(&(dma_ctrlr->pending));
- INIT_LIST_HEAD(&(dma_ctrlr->running));
- list_add_tail(&(dma_ctrlr->list),
- &(tsi148_bridge->dma_resources));
+ INIT_LIST_HEAD(&dma_ctrlr->pending);
+ INIT_LIST_HEAD(&dma_ctrlr->running);
+ list_add_tail(&dma_ctrlr->list,
+ &tsi148_bridge->dma_resources);
}
/* Add location monitor to list */
- INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
+ INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
@@ -2419,11 +2423,11 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_lm;
}
lm->parent = tsi148_bridge;
- mutex_init(&(lm->mtx));
+ mutex_init(&lm->mtx);
lm->locked = 0;
lm->number = 1;
lm->monitors = 4;
- list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
+ list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
tsi148_bridge->slave_get = tsi148_slave_get;
tsi148_bridge->slave_set = tsi148_slave_set;
@@ -2477,41 +2481,40 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
- vme_unregister_bridge(tsi148_bridge);
err_reg:
tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->lm_resources)) {
+ list_for_each(pos, &tsi148_bridge->lm_resources) {
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
err_dma:
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->dma_resources)) {
+ list_for_each(pos, &tsi148_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
err_slave:
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->slave_resources)) {
+ list_for_each(pos, &tsi148_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
err_master:
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->master_resources)) {
+ list_for_each(pos, &tsi148_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
- tsi148_irq_exit(tsi148_device, pdev);
+ tsi148_irq_exit(tsi148_bridge, pdev);
err_irq:
err_test:
iounmap(tsi148_device->base);
@@ -2531,6 +2534,7 @@ err_struct:
static void tsi148_remove(struct pci_dev *pdev)
{
struct list_head *pos = NULL;
+ struct list_head *tmplist;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
@@ -2582,36 +2586,34 @@ static void tsi148_remove(struct pci_dev *pdev)
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
- tsi148_irq_exit(bridge, pdev);
+ tsi148_irq_exit(tsi148_bridge, pdev);
vme_unregister_bridge(tsi148_bridge);
tsi148_crcsr_exit(tsi148_bridge, pdev);
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->dma_resources)) {
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->slave_resources)) {
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
/* resources are stored in link list */
- list_for_each(pos, &(tsi148_bridge->master_resources)) {
+ list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
- tsi148_irq_exit(bridge, pdev);
-
iounmap(bridge->base);
pci_release_regions(pdev);
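Two behavioural fixes sit among the tsi148 cleanups above: free_irq() is now passed the same dev_id that was handed to request_irq() (the bridge structure, not the pci_dev), and the remove path walks the resource lists with list_for_each_safe() before deleting and freeing each node. The _safe variant keeps the next pointer in a scratch cursor so kfree() of the current node cannot break the walk; a minimal sketch, with struct demo_node as an illustrative type:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
	int payload;
};

static void demo_free_all(struct list_head *head)
{
	struct list_head *pos, *tmp;
	struct demo_node *node;

	/* list_for_each(pos, head) would read pos->next after the node had
	 * been freed; list_for_each_safe() caches it in tmp first. */
	list_for_each_safe(pos, tmp, head) {
		node = list_entry(pos, struct demo_node, list);
		list_del(pos);
		kfree(node);
	}
}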
diff --git a/drivers/staging/vme/bridges/vme_tsi148.h b/drivers/staging/vme/bridges/vme_tsi148.h
index bda64ef85754..9f97fa8084e8 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.h
+++ b/drivers/staging/vme/bridges/vme_tsi148.h
@@ -35,7 +35,7 @@
/* Structure used to hold driver specific information */
struct tsi148_driver {
- void *base; /* Base Address of device registers */
+ void __iomem *base; /* Base Address of device registers */
wait_queue_head_t dma_queue[2];
wait_queue_head_t iack_queue;
void (*lm_callback[4])(int); /* Called in interrupt handler */
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 71bbc526626c..a571173249cf 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(vme_user_mutex);
static char driver_name[] = "vme_user";
static int bus[USER_BUS_MAX];
-static int bus_num;
+static unsigned int bus_num;
/* Currently Documentation/devices.txt defines the following for VME:
*
@@ -92,7 +92,7 @@ static int bus_num;
* Structure to handle image related parameters.
*/
typedef struct {
- void __iomem *kern_buf; /* Buffer address in kernel space */
+ void *kern_buf; /* Buffer address in kernel space */
dma_addr_t pci_buf; /* Buffer address in PCI address space */
unsigned long long size_buf; /* Buffer size */
struct semaphore sem; /* Semaphore for locking image */
@@ -114,9 +114,9 @@ typedef struct {
} driver_stats_t;
static driver_stats_t statistics;
-struct cdev *vme_user_cdev; /* Character device */
-struct class *vme_user_sysfs_class; /* Sysfs class */
-struct device *vme_user_bridge; /* Pointer to the bridge device */
+static struct cdev *vme_user_cdev; /* Character device */
+static struct class *vme_user_sysfs_class; /* Sysfs class */
+static struct device *vme_user_bridge; /* Pointer to bridge device */
static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
@@ -129,13 +129,14 @@ static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
-static ssize_t vme_user_read(struct file *, char *, size_t, loff_t *);
-static ssize_t vme_user_write(struct file *, const char *, size_t, loff_t *);
+static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vme_user_write(struct file *, const char __user *, size_t,
+ loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-static int __init vme_user_probe(struct device *, int, int);
-static int __exit vme_user_remove(struct device *, int, int);
+static int __devinit vme_user_probe(struct device *, int, int);
+static int __devexit vme_user_remove(struct device *, int, int);
static struct file_operations vme_user_fops = {
.open = vme_user_open,
@@ -246,7 +247,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
* page) transfers will lock the user space buffer into memory and then
* transfer the data directly from the user space buffers out to VME.
*/
-static ssize_t resource_from_user(unsigned int minor, const char *buf,
+static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t retval;
@@ -277,7 +278,7 @@ static ssize_t resource_from_user(unsigned int minor, const char *buf,
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
size_t count, loff_t *ppos)
{
- void __iomem *image_ptr;
+ void *image_ptr;
ssize_t retval;
image_ptr = image[minor].kern_buf + *ppos;
@@ -293,10 +294,10 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
return retval;
}
-static ssize_t buffer_from_user(unsigned int minor, const char *buf,
+static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
size_t count, loff_t *ppos)
{
- void __iomem *image_ptr;
+ void *image_ptr;
size_t retval;
image_ptr = image[minor].kern_buf + *ppos;
@@ -312,7 +313,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char *buf,
return retval;
}
-static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
+static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
@@ -356,8 +357,8 @@ static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
return retval;
}
-static ssize_t vme_user_write(struct file *file, const char *buf, size_t count,
- loff_t *ppos)
+static ssize_t vme_user_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
ssize_t retval;
@@ -455,6 +456,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
unsigned int minor = MINOR(inode->i_rdev);
int retval;
dma_addr_t pci_addr;
+ void __user *argp = (void __user *)arg;
statistics.ioctls++;
@@ -470,11 +472,11 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
* to userspace as they are
*/
retval = vme_master_get(image[minor].resource,
- &(master.enable), &(master.vme_addr),
- &(master.size), &(master.aspace),
- &(master.cycle), &(master.dwidth));
+ &master.enable, &master.vme_addr,
+ &master.size, &master.aspace,
+ &master.cycle, &master.dwidth);
- copied = copy_to_user((char *)arg, &master,
+ copied = copy_to_user(argp, &master,
sizeof(struct vme_master));
if (copied != 0) {
printk(KERN_WARNING "Partial copy to "
@@ -487,8 +489,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
case VME_SET_MASTER:
- copied = copy_from_user(&master, (char *)arg,
- sizeof(master));
+ copied = copy_from_user(&master, argp, sizeof(master));
if (copied != 0) {
printk(KERN_WARNING "Partial copy from "
"userspace\n");
@@ -514,11 +515,11 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
* to userspace as they are
*/
retval = vme_slave_get(image[minor].resource,
- &(slave.enable), &(slave.vme_addr),
- &(slave.size), &pci_addr, &(slave.aspace),
- &(slave.cycle));
+ &slave.enable, &slave.vme_addr,
+ &slave.size, &pci_addr, &slave.aspace,
+ &slave.cycle);
- copied = copy_to_user((char *)arg, &slave,
+ copied = copy_to_user(argp, &slave,
sizeof(struct vme_slave));
if (copied != 0) {
printk(KERN_WARNING "Partial copy to "
@@ -531,8 +532,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
case VME_SET_SLAVE:
- copied = copy_from_user(&slave, (char *)arg,
- sizeof(slave));
+ copied = copy_from_user(&slave, argp, sizeof(slave));
if (copied != 0) {
printk(KERN_WARNING "Partial copy from "
"userspace\n");
@@ -596,7 +596,7 @@ static void buf_unalloc(int num)
static struct vme_driver vme_user_driver = {
.name = driver_name,
.probe = vme_user_probe,
- .remove = vme_user_remove,
+ .remove = __devexit_p(vme_user_remove),
};
@@ -611,6 +611,7 @@ static int __init vme_user_init(void)
if (bus_num == 0) {
printk(KERN_ERR "%s: No cards, skipping registration\n",
driver_name);
+ retval = -ENODEV;
goto err_nocard;
}
@@ -629,6 +630,7 @@ static int __init vme_user_init(void)
if (ids == NULL) {
printk(KERN_ERR "%s: Unable to allocate ID table\n",
driver_name);
+ retval = -ENOMEM;
goto err_id;
}
@@ -652,7 +654,6 @@ static int __init vme_user_init(void)
return retval;
- vme_unregister_driver(&vme_user_driver);
err_reg:
kfree(ids);
err_id:
@@ -665,7 +666,8 @@ err_nocard:
* as practical. We will therefore reserve the buffers and request the images
* here so that we don't have to do it later.
*/
-static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
+static int __devinit vme_user_probe(struct device *dev, int cur_bus,
+ int cur_slot)
{
int i, err;
char name[12];
@@ -683,7 +685,7 @@ static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
for (i = 0; i < VME_DEVS; i++) {
image[i].kern_buf = NULL;
image[i].pci_buf = 0;
- sema_init(&(image[i].sem), 1);
+ sema_init(&image[i].sem, 1);
image[i].device = NULL;
image[i].resource = NULL;
image[i].users = 0;
@@ -727,7 +729,7 @@ static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
}
image[i].size_buf = PCI_BUF_SIZE;
image[i].kern_buf = vme_alloc_consistent(image[i].resource,
- image[i].size_buf, &(image[i].pci_buf));
+ image[i].size_buf, &image[i].pci_buf);
if (image[i].kern_buf == NULL) {
printk(KERN_WARNING "Unable to allocate memory for "
"buffer\n");
@@ -828,8 +830,8 @@ err_master:
err_slave:
while (i > SLAVE_MINOR) {
i--;
- vme_slave_free(image[i].resource);
buf_unalloc(i);
+ vme_slave_free(image[i].resource);
}
err_class:
cdev_del(vme_user_cdev);
@@ -840,7 +842,8 @@ err_dev:
return err;
}
-static int __exit vme_user_remove(struct device *dev, int cur_bus, int cur_slot)
+static int __devexit vme_user_remove(struct device *dev, int cur_bus,
+ int cur_slot)
{
int i;
@@ -849,13 +852,15 @@ static int __exit vme_user_remove(struct device *dev, int cur_bus, int cur_slot)
device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
class_destroy(vme_user_sysfs_class);
- for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
+ for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
kfree(image[i].kern_buf);
+ vme_master_free(image[i].resource);
+ }
for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
- vme_slave_free(image[i].resource);
buf_unalloc(i);
+ vme_slave_free(image[i].resource);
}
/* Unregister device driver */
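The vme_user changes above add __user annotations to the read/write/ioctl paths and funnel the ioctl copies through a single void __user *argp cast. Like __iomem, __user is a sparse annotation: such pointers may only be accessed with copy_to_user()/copy_from_user() (or get_user()/put_user()), never dereferenced directly. A minimal sketch of the pattern, with struct demo_cfg and the command numbers as placeholders:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_cfg {			/* placeholder configuration record */
	u32 enable;
	u64 base;
};

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct demo_cfg cfg;

	switch (cmd) {
	case 0x01:			/* placeholder for a "get" command */
		cfg.enable = 1;
		cfg.base = 0;
		if (copy_to_user(argp, &cfg, sizeof(cfg)))
			return -EFAULT;
		return 0;
	case 0x02:			/* placeholder for a "set" command */
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			return -EFAULT;
		/* ... apply cfg to the hardware here ... */
		return 0;
	}
	return -ENOTTY;
}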
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index 093fbffbf557..d9fc8644376e 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -245,7 +245,7 @@ struct vme_resource *vme_slave_request(struct device *dev,
}
/* Loop through slave resources */
- list_for_each(slave_pos, &(bridge->slave_resources)) {
+ list_for_each(slave_pos, &bridge->slave_resources) {
slave_image = list_entry(slave_pos,
struct vme_slave_resource, list);
@@ -255,17 +255,17 @@ struct vme_resource *vme_slave_request(struct device *dev,
}
/* Find an unlocked and compatible image */
- mutex_lock(&(slave_image->mtx));
+ mutex_lock(&slave_image->mtx);
if (((slave_image->address_attr & address) == address) &&
((slave_image->cycle_attr & cycle) == cycle) &&
(slave_image->locked == 0)) {
slave_image->locked = 1;
- mutex_unlock(&(slave_image->mtx));
+ mutex_unlock(&slave_image->mtx);
allocated_image = slave_image;
break;
}
- mutex_unlock(&(slave_image->mtx));
+ mutex_unlock(&slave_image->mtx);
}
/* No free image */
@@ -278,15 +278,15 @@ struct vme_resource *vme_slave_request(struct device *dev,
goto err_alloc;
}
resource->type = VME_SLAVE;
- resource->entry = &(allocated_image->list);
+ resource->entry = &allocated_image->list;
return resource;
err_alloc:
/* Unlock image */
- mutex_lock(&(slave_image->mtx));
+ mutex_lock(&slave_image->mtx);
slave_image->locked = 0;
- mutex_unlock(&(slave_image->mtx));
+ mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
return NULL;
@@ -369,12 +369,12 @@ void vme_slave_free(struct vme_resource *resource)
}
/* Unlock image */
- mutex_lock(&(slave_image->mtx));
+ mutex_lock(&slave_image->mtx);
if (slave_image->locked == 0)
printk(KERN_ERR "Image is already free\n");
slave_image->locked = 0;
- mutex_unlock(&(slave_image->mtx));
+ mutex_unlock(&slave_image->mtx);
/* Free up resource memory */
kfree(resource);
@@ -401,7 +401,7 @@ struct vme_resource *vme_master_request(struct device *dev,
}
/* Loop through master resources */
- list_for_each(master_pos, &(bridge->master_resources)) {
+ list_for_each(master_pos, &bridge->master_resources) {
master_image = list_entry(master_pos,
struct vme_master_resource, list);
@@ -411,18 +411,18 @@ struct vme_resource *vme_master_request(struct device *dev,
}
/* Find an unlocked and compatible image */
- spin_lock(&(master_image->lock));
+ spin_lock(&master_image->lock);
if (((master_image->address_attr & address) == address) &&
((master_image->cycle_attr & cycle) == cycle) &&
((master_image->width_attr & dwidth) == dwidth) &&
(master_image->locked == 0)) {
master_image->locked = 1;
- spin_unlock(&(master_image->lock));
+ spin_unlock(&master_image->lock);
allocated_image = master_image;
break;
}
- spin_unlock(&(master_image->lock));
+ spin_unlock(&master_image->lock);
}
/* Check to see if we found a resource */
@@ -437,16 +437,16 @@ struct vme_resource *vme_master_request(struct device *dev,
goto err_alloc;
}
resource->type = VME_MASTER;
- resource->entry = &(allocated_image->list);
+ resource->entry = &allocated_image->list;
return resource;
kfree(resource);
err_alloc:
/* Unlock image */
- spin_lock(&(master_image->lock));
+ spin_lock(&master_image->lock);
master_image->locked = 0;
- spin_unlock(&(master_image->lock));
+ spin_unlock(&master_image->lock);
err_image:
err_bus:
return NULL;
@@ -628,12 +628,12 @@ void vme_master_free(struct vme_resource *resource)
}
/* Unlock image */
- spin_lock(&(master_image->lock));
+ spin_lock(&master_image->lock);
if (master_image->locked == 0)
printk(KERN_ERR "Image is already free\n");
master_image->locked = 0;
- spin_unlock(&(master_image->lock));
+ spin_unlock(&master_image->lock);
/* Free up resource memory */
kfree(resource);
@@ -662,7 +662,7 @@ struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
}
/* Loop through DMA resources */
- list_for_each(dma_pos, &(bridge->dma_resources)) {
+ list_for_each(dma_pos, &bridge->dma_resources) {
dma_ctrlr = list_entry(dma_pos,
struct vme_dma_resource, list);
@@ -672,16 +672,16 @@ struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
}
/* Find an unlocked and compatible controller */
- mutex_lock(&(dma_ctrlr->mtx));
+ mutex_lock(&dma_ctrlr->mtx);
if (((dma_ctrlr->route_attr & route) == route) &&
(dma_ctrlr->locked == 0)) {
dma_ctrlr->locked = 1;
- mutex_unlock(&(dma_ctrlr->mtx));
+ mutex_unlock(&dma_ctrlr->mtx);
allocated_ctrlr = dma_ctrlr;
break;
}
- mutex_unlock(&(dma_ctrlr->mtx));
+ mutex_unlock(&dma_ctrlr->mtx);
}
/* Check to see if we found a resource */
@@ -694,15 +694,15 @@ struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
goto err_alloc;
}
resource->type = VME_DMA;
- resource->entry = &(allocated_ctrlr->list);
+ resource->entry = &allocated_ctrlr->list;
return resource;
err_alloc:
/* Unlock image */
- mutex_lock(&(dma_ctrlr->mtx));
+ mutex_lock(&dma_ctrlr->mtx);
dma_ctrlr->locked = 0;
- mutex_unlock(&(dma_ctrlr->mtx));
+ mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
return NULL;
@@ -729,9 +729,9 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
printk(KERN_ERR "Unable to allocate memory for new dma list\n");
return NULL;
}
- INIT_LIST_HEAD(&(dma_list->entries));
+ INIT_LIST_HEAD(&dma_list->entries);
dma_list->parent = ctrlr;
- mutex_init(&(dma_list->mtx));
+ mutex_init(&dma_list->mtx);
return dma_list;
}
@@ -880,14 +880,14 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
return -EINVAL;
}
- if (!mutex_trylock(&(list->mtx))) {
+ if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List already submitted\n");
return -EINVAL;
}
retval = bridge->dma_list_add(list, src, dest, count);
- mutex_unlock(&(list->mtx));
+ mutex_unlock(&list->mtx);
return retval;
}
@@ -903,11 +903,11 @@ int vme_dma_list_exec(struct vme_dma_list *list)
return -EINVAL;
}
- mutex_lock(&(list->mtx));
+ mutex_lock(&list->mtx);
retval = bridge->dma_list_exec(list);
- mutex_unlock(&(list->mtx));
+ mutex_unlock(&list->mtx);
return retval;
}
@@ -923,7 +923,7 @@ int vme_dma_list_free(struct vme_dma_list *list)
return -EINVAL;
}
- if (!mutex_trylock(&(list->mtx))) {
+ if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List in use\n");
return -EINVAL;
}
@@ -935,10 +935,10 @@ int vme_dma_list_free(struct vme_dma_list *list)
retval = bridge->dma_list_empty(list);
if (retval) {
printk(KERN_ERR "Unable to empty link-list entries\n");
- mutex_unlock(&(list->mtx));
+ mutex_unlock(&list->mtx);
return retval;
}
- mutex_unlock(&(list->mtx));
+ mutex_unlock(&list->mtx);
kfree(list);
return retval;
@@ -956,20 +956,20 @@ int vme_dma_free(struct vme_resource *resource)
ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
- if (!mutex_trylock(&(ctrlr->mtx))) {
+ if (!mutex_trylock(&ctrlr->mtx)) {
printk(KERN_ERR "Resource busy, can't free\n");
return -EBUSY;
}
- if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
+ if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
printk(KERN_WARNING "Resource still processing transfers\n");
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
return -EBUSY;
}
ctrlr->locked = 0;
- mutex_unlock(&(ctrlr->mtx));
+ mutex_unlock(&ctrlr->mtx);
return 0;
}
@@ -1013,10 +1013,10 @@ int vme_irq_request(struct device *dev, int level, int statid,
return -EINVAL;
}
- mutex_lock(&(bridge->irq_mtx));
+ mutex_lock(&bridge->irq_mtx);
if (bridge->irq[level - 1].callback[statid].func) {
- mutex_unlock(&(bridge->irq_mtx));
+ mutex_unlock(&bridge->irq_mtx);
printk(KERN_WARNING "VME Interrupt already taken\n");
return -EBUSY;
}
@@ -1028,7 +1028,7 @@ int vme_irq_request(struct device *dev, int level, int statid,
/* Enable IRQ level */
bridge->irq_set(bridge, level, 1, 1);
- mutex_unlock(&(bridge->irq_mtx));
+ mutex_unlock(&bridge->irq_mtx);
return 0;
}
@@ -1054,7 +1054,7 @@ void vme_irq_free(struct device *dev, int level, int statid)
return;
}
- mutex_lock(&(bridge->irq_mtx));
+ mutex_lock(&bridge->irq_mtx);
bridge->irq[level - 1].count--;
@@ -1065,7 +1065,7 @@ void vme_irq_free(struct device *dev, int level, int statid)
bridge->irq[level - 1].callback[statid].func = NULL;
bridge->irq[level - 1].callback[statid].priv_data = NULL;
- mutex_unlock(&(bridge->irq_mtx));
+ mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
@@ -1111,7 +1111,7 @@ struct vme_resource *vme_lm_request(struct device *dev)
}
/* Loop through DMA resources */
- list_for_each(lm_pos, &(bridge->lm_resources)) {
+ list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
@@ -1122,14 +1122,14 @@ struct vme_resource *vme_lm_request(struct device *dev)
}
/* Find an unlocked controller */
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
if (lm->locked == 0) {
lm->locked = 1;
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
allocated_lm = lm;
break;
}
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
}
/* Check to see if we found a resource */
@@ -1142,15 +1142,15 @@ struct vme_resource *vme_lm_request(struct device *dev)
goto err_alloc;
}
resource->type = VME_LM;
- resource->entry = &(allocated_lm->list);
+ resource->entry = &allocated_lm->list;
return resource;
err_alloc:
/* Unlock image */
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
lm->locked = 0;
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
err_lm:
err_bus:
return NULL;
@@ -1270,7 +1270,7 @@ void vme_lm_free(struct vme_resource *resource)
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- mutex_lock(&(lm->mtx));
+ mutex_lock(&lm->mtx);
/* XXX
* Check to see that there aren't any callbacks still attached, if
@@ -1279,7 +1279,7 @@ void vme_lm_free(struct vme_resource *resource)
lm->locked = 0;
- mutex_unlock(&(lm->mtx));
+ mutex_unlock(&lm->mtx);
kfree(resource);
}
@@ -1326,7 +1326,7 @@ static int vme_alloc_bus_num(void)
static void vme_free_bus_num(int bus)
{
mutex_lock(&vme_bus_num_mtx);
- vme_bus_numbers |= ~(0x1 << bus);
+ vme_bus_numbers &= ~(0x1 << bus);
mutex_unlock(&vme_bus_num_mtx);
}
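The hunk above corrects a bitmask operation: OR-ing with the complement sets every bit except the one being freed, so a single vme_free_bus_num() call would mark all other bus numbers as allocated. A minimal sketch of the difference, assuming (as vme_alloc_bus_num() implies) that vme_bus_numbers is a bitmap where bit N set means bus N is in use; the function name is illustrative only and not part of the patch:

	/* illustrative only, not part of the patch */
	static void demo_free_bus_bit(void)
	{
		unsigned int bus_map = 0x06;	/* buses 1 and 2 allocated */

		bus_map |= ~(0x1 << 1);		/* old code: 0xffffffff, every bus now looks "in use" */

		bus_map = 0x06;
		bus_map &= ~(0x1 << 1);		/* fixed code: 0x04, bus 1 freed, bus 2 still allocated */
		(void)bus_map;
	}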
@@ -1343,11 +1343,11 @@ int vme_register_bridge(struct vme_bridge *bridge)
* specification.
*/
for (i = 0; i < VME_SLOTS_MAX; i++) {
- dev = &(bridge->dev[i]);
+ dev = &bridge->dev[i];
memset(dev, 0, sizeof(struct device));
dev->parent = bridge->parent;
- dev->bus = &(vme_bus_type);
+ dev->bus = &vme_bus_type;
/*
* We save a pointer to the bridge in platform_data so that we
* can get to it later. We keep driver_data for use by the
@@ -1366,7 +1366,7 @@ int vme_register_bridge(struct vme_bridge *bridge)
i = VME_SLOTS_MAX;
err_reg:
while (i > -1) {
- dev = &(bridge->dev[i]);
+ dev = &bridge->dev[i];
device_unregister(dev);
}
vme_free_bus_num(bridge->num);
@@ -1381,7 +1381,7 @@ void vme_unregister_bridge(struct vme_bridge *bridge)
for (i = 0; i < VME_SLOTS_MAX; i++) {
- dev = &(bridge->dev[i]);
+ dev = &bridge->dev[i];
device_unregister(dev);
}
vme_free_bus_num(bridge->num);
@@ -1418,7 +1418,7 @@ static int vme_calc_slot(struct device *dev)
/* Determine slot number */
num = 0;
while (num < VME_SLOTS_MAX) {
- if (&(bridge->dev[num]) == dev)
+ if (&bridge->dev[num] == dev)
break;
num++;
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index b653ec02e1fc..4c6ec31b01db 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -20,7 +20,7 @@ struct vme_master_resource {
vme_cycle_t cycle_attr;
vme_width_t width_attr;
struct resource bus_resource;
- void *kern_base;
+ void __iomem *kern_base;
};
struct vme_slave_resource {
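Annotating kern_base in struct vme_master_resource as void __iomem * marks the master window mapping as I/O memory, which lets sparse flag direct dereferences; such pointers are expected to go through the MMIO accessors. A hedged sketch of the expected access pattern, assuming kern_base was obtained from ioremap() by the bridge driver (not shown in this hunk) and the 0x10 offset is purely illustrative:

	/* illustrative only: access an __iomem pointer via the MMIO helpers */
	void __iomem *base = master_image->kern_base;	/* assumed set up by the bridge driver */
	u32 val = ioread32(base + 0x10);		/* read register at offset 0x10 */
	iowrite32(val | 0x1, base + 0x10);		/* write the modified value back */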
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 32d095c4d51c..951a3a8ddcb2 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -2058,7 +2058,7 @@ bool CARDbSoftwareReset (void *pDeviceHandler)
QWORD CARDqGetTSFOffset (unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
{
QWORD qwTSFOffset;
- unsigned short wRxBcnTSFOffst= 0;;
+ unsigned short wRxBcnTSFOffst= 0;
HIDWORD(qwTSFOffset) = 0;
LODWORD(qwTSFOffset) = 0;
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 92e33999054b..5e425d1476b8 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -2073,7 +2073,7 @@ int iwctl_giwencodeext(struct net_device *dev,
struct iw_point *wrq,
char *extra)
{
- return -EOPNOTSUPP;;
+ return -EOPNOTSUPP;
}
int iwctl_siwmlme(struct net_device *dev,
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 805164bed7e4..744799cfe832 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -216,7 +216,7 @@ WPA2vParseRSN (
m = *((unsigned short *) &(pRSN->abyRSN[4]));
if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
- pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
+ pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
j = 0;
pbyOUI = &(pRSN->abyRSN[8+4*m]);
for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
@@ -235,7 +235,7 @@ WPA2vParseRSN (
pBSSNode->wAKMSSAuthCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
- n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
+ n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
pBSSNode->sRSNCapObj.bRSNCapExist = true;
pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *) &(pRSN->abyRSN[8+4*m+4*n]));
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index e5add2046375..0d11147f91c1 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -963,7 +963,7 @@ BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
break;
case ANT_RXB:
pDevice->byBBRxConf &= 0xFE;
- pDevice->byBBRxConf |= 0x02;;
+ pDevice->byBBRxConf |= 0x02;
break;
}
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 8de21aac1bff..a49053bd7c65 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -1092,7 +1092,7 @@ CARDbChannelSwitch (
pDevice->sMgmtObj.uCurrChannel = byNewChannel;
bResult = CARDbSetMediaChannel(pDevice, byNewChannel);
- return(bResult);
+ return bResult;
}
pDevice->byChannelSwitchCount = byCount;
pDevice->byNewChannel = byNewChannel;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 1f9d29636803..f4fb0c6e4eac 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -1608,8 +1608,8 @@ void RXvMngWorkItem(void *Context)
}
}
- pDevice->bIsRxMngWorkItemQueued = FALSE;
- spin_unlock_irq(&pDevice->lock);
+ pDevice->bIsRxMngWorkItemQueued = FALSE;
+ spin_unlock_irq(&pDevice->lock);
}
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 0004be8e3957..2121205a912b 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1883,7 +1883,7 @@ int iwctl_giwencodeext(struct net_device *dev,
struct iw_point *wrq,
char *extra)
{
- return -EOPNOTSUPP;;
+ return -EOPNOTSUPP;
}
int iwctl_siwmlme(struct net_device *dev,
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index 0c12fd36d0f9..e8c1b35e8128 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -192,7 +192,7 @@ BOOL PSbConsiderPowerDown(void *hDeviceContext,
// check if already in Doze mode
ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
if ( (byData & PSCTL_PS) != 0 )
- return TRUE;;
+ return TRUE;
if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
// check if in TIM wake period
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index bbdc127a987d..8f18578a5903 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -68,8 +68,7 @@
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index a6bd533f9577..0715636cb9cb 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -214,13 +214,14 @@ void TKIPvMixKey(
/* Phase 1, step 2 */
for (i=0; i<8; i++) {
j = 2*(i & 1);
- p1k[0] = (p1k[0] + tkip_sbox( (p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536 )) % 65536;
- p1k[1] = (p1k[1] + tkip_sbox( (p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536 )) % 65536;
- p1k[2] = (p1k[2] + tkip_sbox( (p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536 )) % 65536;
- p1k[3] = (p1k[3] + tkip_sbox( (p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536 )) % 65536;
- p1k[4] = (p1k[4] + tkip_sbox( (p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536 )) % 65536;
+ p1k[0] = (p1k[0] + tkip_sbox((p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536)) % 65536;
+ p1k[1] = (p1k[1] + tkip_sbox((p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536)) % 65536;
+ p1k[2] = (p1k[2] + tkip_sbox((p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536)) % 65536;
+ p1k[3] = (p1k[3] + tkip_sbox((p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536)) % 65536;
+ p1k[4] = (p1k[4] + tkip_sbox((p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536)) % 65536;
p1k[4] = (p1k[4] + i) % 65536;
}
+
/* Phase 2, Step 1 */
ppk0 = p1k[0];
ppk1 = p1k[1];
@@ -230,19 +231,19 @@ void TKIPvMixKey(
ppk5 = (p1k[4] + tsc2) % 65536;
/* Phase2, Step 2 */
- ppk0 = ppk0 + tkip_sbox( (ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
- ppk1 = ppk1 + tkip_sbox( (ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
- ppk2 = ppk2 + tkip_sbox( (ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
- ppk3 = ppk3 + tkip_sbox( (ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
- ppk4 = ppk4 + tkip_sbox( (ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
- ppk5 = ppk5 + tkip_sbox( (ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
-
- ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
- ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
- ppk2 = ppk2 + rotr1(ppk1);
- ppk3 = ppk3 + rotr1(ppk2);
- ppk4 = ppk4 + rotr1(ppk3);
- ppk5 = ppk5 + rotr1(ppk4);
+ ppk0 = ppk0 + tkip_sbox((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
+ ppk1 = ppk1 + tkip_sbox((ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
+ ppk2 = ppk2 + tkip_sbox((ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
+ ppk3 = ppk3 + tkip_sbox((ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
+ ppk4 = ppk4 + tkip_sbox((ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
+ ppk5 = ppk5 + tkip_sbox((ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
+
+ ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
+ ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
+ ppk2 = ppk2 + rotr1(ppk1);
+ ppk3 = ppk3 + rotr1(ppk2);
+ ppk4 = ppk4 + rotr1(ppk3);
+ ppk5 = ppk5 + rotr1(ppk4);
/* Phase 2, Step 3 */
pbyRC4Key[0] = (tsc2 >> 8) % 256;
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index 6d13190885d1..d4f3f7530ee4 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -215,7 +215,7 @@ WPA2vParseRSN (
m = *((PWORD) &(pRSN->abyRSN[4]));
if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
- pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+ pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));
j = 0;
pbyOUI = &(pRSN->abyRSN[8+4*m]);
for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(BYTE)); i++) {
@@ -234,7 +234,7 @@ WPA2vParseRSN (
pBSSNode->wAKMSSAuthCount = (WORD)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
- n = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+ n = *((PWORD) &(pRSN->abyRSN[6+4*m]));
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
pBSSNode->sRSNCapObj.bRSNCapExist = TRUE;
pBSSNode->sRSNCapObj.wRSNCap = *((PWORD) &(pRSN->abyRSN[8+4*m+4*n]));
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
index a6780296888f..ad0c61db9937 100644
--- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
+++ b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
@@ -2127,10 +2127,7 @@ void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
*/
void *cy_as_hal_alloc(uint32_t cnt)
{
- void *ret_p;
-
- ret_p = kmalloc(cnt, GFP_ATOMIC);
- return ret_p;
+ return kmalloc(cnt, GFP_ATOMIC);
}
/*
@@ -2150,10 +2147,7 @@ void cy_as_hal_free(void *mem_p)
*/
void *cy_as_hal_c_b_alloc(uint32_t cnt)
{
- void *ret_p;
-
- ret_p = kmalloc(cnt, GFP_ATOMIC);
- return ret_p;
+ return kmalloc(cnt, GFP_ATOMIC);
}
/*
diff --git a/drivers/staging/winbond/Makefile b/drivers/staging/winbond/Makefile
index 79fa2271a0c1..081d48db04cb 100644
--- a/drivers/staging/winbond/Makefile
+++ b/drivers/staging/winbond/Makefile
@@ -1,6 +1,5 @@
w35und-y := \
mds.o \
- mlmetxrx.o \
mto.o \
phy_calibration.o \
reg.o \
diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h
index 2b87a0007319..d7b3aca5ddeb 100644
--- a/drivers/staging/winbond/core.h
+++ b/drivers/staging/winbond/core.h
@@ -4,7 +4,7 @@
#include <linux/wireless.h>
#include <linux/types.h>
-#include "wbhal_s.h"
+#include "wbhal.h"
#include "mto.h"
#include "mac_structures.h"
diff --git a/drivers/staging/winbond/mac_structures.h b/drivers/staging/winbond/mac_structures.h
index ed3df2964065..76c63c74d50c 100644
--- a/drivers/staging/winbond/mac_structures.h
+++ b/drivers/staging/winbond/mac_structures.h
@@ -21,23 +21,11 @@
#ifndef _MAC_Structures_H_
#define _MAC_Structures_H_
-#include <linux/skbuff.h>
-
-/*=========================================================
-// Some miscellaneous definitions
-//-----*/
-#define MAX_CHANNELS 30
#define MAC_ADDR_LENGTH 6
-#define MAX_WEP_KEY_SIZE 16 /* 128 bits */
-#define MAX_802_11_FRAGMENT_NUMBER 10 /* By spec */
/* ========================================================
// 802.11 Frame define
//----- */
-#define MASK_PROTOCOL_VERSION_TYPE 0x0F
-#define MASK_FRAGMENT_NUMBER 0x000F
-#define SEQUENCE_NUMBER_SHIFT 4
-#define DIFFER_11_TO_3 18
#define DOT_11_MAC_HEADER_SIZE 24
#define DOT_11_SNAP_SIZE 6
#define DOT_11_DURATION_OFFSET 2
@@ -47,15 +35,9 @@
#define DOT_11_TYPE_OFFSET 30
#define DOT_11_DATA_OFFSET 24
#define DOT_11_DA_OFFSET 4
-#define DOT_3_TYPE_ARP 0x80F3
-#define DOT_3_TYPE_IPX 0x8137
-#define DOT_3_TYPE_OFFSET 12
-
-#define ETHERNET_HEADER_SIZE 14
#define MAX_ETHERNET_PACKET_SIZE 1514
-
/* ----- management : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
#define MAC_SUBTYPE_MNGMNT_ASSOC_REQUEST 0x00
#define MAC_SUBTYPE_MNGMNT_ASSOC_RESPONSE 0x10
@@ -69,129 +51,6 @@
#define MAC_SUBTYPE_MNGMNT_AUTHENTICATION 0xB0
#define MAC_SUBTYPE_MNGMNT_DEAUTHENTICATION 0xC0
-/* ----- control : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
-#define MAC_SUBTYPE_CONTROL_PSPOLL 0xA4
-#define MAC_SUBTYPE_CONTROL_RTS 0xB4
-#define MAC_SUBTYPE_CONTROL_CTS 0xC4
-#define MAC_SUBTYPE_CONTROL_ACK 0xD4
-#define MAC_SUBTYPE_CONTROL_CFEND 0xE4
-#define MAC_SUBTYPE_CONTROL_CFEND_CFACK 0xF4
-
-/* ----- data : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
-#define MAC_SUBTYPE_DATA 0x08
-#define MAC_SUBTYPE_DATA_CFACK 0x18
-#define MAC_SUBTYPE_DATA_CFPOLL 0x28
-#define MAC_SUBTYPE_DATA_CFACK_CFPOLL 0x38
-#define MAC_SUBTYPE_DATA_NULL 0x48
-#define MAC_SUBTYPE_DATA_CFACK_NULL 0x58
-#define MAC_SUBTYPE_DATA_CFPOLL_NULL 0x68
-#define MAC_SUBTYPE_DATA_CFACK_CFPOLL_NULL 0x78
-
-/* ----- Frame Type of Bits (2, 3) */
-#define MAC_TYPE_MANAGEMENT 0x00
-#define MAC_TYPE_CONTROL 0x04
-#define MAC_TYPE_DATA 0x08
-
-/* ----- definitions for Management Frame Element ID (1 BYTE) */
-#define ELEMENT_ID_SSID 0
-#define ELEMENT_ID_SUPPORTED_RATES 1
-#define ELEMENT_ID_FH_PARAMETER_SET 2
-#define ELEMENT_ID_DS_PARAMETER_SET 3
-#define ELEMENT_ID_CF_PARAMETER_SET 4
-#define ELEMENT_ID_TIM 5
-#define ELEMENT_ID_IBSS_PARAMETER_SET 6
-/* 7~15 reserverd */
-#define ELEMENT_ID_CHALLENGE_TEXT 16
-/* 17~31 reserved for challenge text extension */
-/* 32~255 reserved */
-/*-- 11G -- */
-#define ELEMENT_ID_ERP_INFORMATION 42
-#define ELEMENT_ID_EXTENDED_SUPPORTED_RATES 50
-
-/* -- WPA -- */
-
-#define ELEMENT_ID_RSN_WPA 221
-#ifdef _WPA2_
-#define ELEMENT_ID_RSN_WPA2 48
-#endif /* endif WPA2 */
-
-#define WLAN_MAX_PAIRWISE_CIPHER_SUITE_COUNT ((u16) 6)
-#define WLAN_MAX_AUTH_KEY_MGT_SUITE_LIST_COUNT ((u16) 2)
-
-/* ===================================================================
-* Reason Code (Table 18): indicate the reason of DisAssoc, DeAuthen
-* length of ReasonCode is 2 Octs.
-* =================================================================== */
-#define REASON_REASERED 0
-#define REASON_UNSPECIDIED 1
-#define REASON_PREAUTH_INVALID 2
-#define DEAUTH_REASON_LEFT_BSS 3
-#define DISASS_REASON_AP_INACTIVE 4
-#define DISASS_REASON_AP_BUSY 5
-#define REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
-#define REASON_CLASS3_FRAME_FROM_NONASSO_STA 7
-#define DISASS_REASON_LEFT_BSS 8
-#define REASON_NOT_AUTH_YET 9
-/* 802.11i define */
-#define REASON_INVALID_IE 13
-#define REASON_MIC_ERROR 14
-#define REASON_4WAY_HANDSHAKE_TIMEOUT 15
-#define REASON_GROUPKEY_UPDATE_TIMEOUT 16
-#define REASON_IE_DIFF_4WAY_ASSOC 17
-#define REASON_INVALID_MULTICAST_CIPHER 18
-#define REASON_INVALID_UNICAST_CIPHER 19
-#define REASON_INVALID_AKMP 20
-#define REASON_UNSUPPORTED_RSNIE_VERSION 21
-#define REASON_INVALID_RSNIE_CAPABILITY 22
-#define REASON_802_1X_AUTH_FAIL 23
-#define REASON_CIPHER_REJECT_PER_SEC_POLICY 14
-
-/*
-//===========================================================
-// enum_MMPDUResultCode --
-// Status code (2 Octs) in the MMPDU's frame body. Table.19
-//
-//===========================================================
-enum enum_MMPDUResultCode
-{
-// SUCCESS = 0, // Redefined
- UNSPECIFIED_FAILURE = 1,
-
- // 2 - 9 Reserved
-
- NOT_SUPPROT_CAPABILITIES = 10,
-
- //REASSOCIATION_DENIED
- //
- REASSOC_DENIED_UNABLE_CFM_ASSOC_EXIST = 11,
-
- //ASSOCIATION_DENIED_NOT_IN_STANDARD
- //
- ASSOC_DENIED_REASON_NOT_IN_STANDARD = 12,
- PEER_NOT_SUPPORT_AUTH_ALGORITHM = 13,
- AUTH_SEQNUM_OUT_OF_EXPECT = 14,
- AUTH_REJECT_REASON_CHALLENGE_FAIL = 15,
- AUTH_REJECT_REASON_WAIT_TIMEOUT = 16,
- ASSOC_DENIED_REASON_AP_BUSY = 17,
- ASSOC_DENIED_REASON_NOT_SUPPORT_BASIC_RATE = 18
-} WB_MMPDURESULTCODE, *PWB_MMPDURESULTCODE;
-*/
-
-#define RATE_BITMAP_1M 1
-#define RATE_BITMAP_2M 2
-#define RATE_BITMAP_5dot5M 5
-#define RATE_BITMAP_6M 6
-#define RATE_BITMAP_9M 9
-#define RATE_BITMAP_11M 11
-#define RATE_BITMAP_12M 12
-#define RATE_BITMAP_18M 18
-#define RATE_BITMAP_22M 22
-#define RATE_BITMAP_24M 24
-#define RATE_BITMAP_33M 17
-#define RATE_BITMAP_36M 19
-#define RATE_BITMAP_48M 25
-#define RATE_BITMAP_54M 28
-
#define RATE_AUTO 0
#define RATE_1M 2
#define RATE_2M 4
@@ -209,408 +68,4 @@ enum enum_MMPDUResultCode
#define RATE_54M 108
#define RATE_MAX 255
-/* CAPABILITY */
-#define CAPABILITY_ESS_BIT 0x0001
-#define CAPABILITY_IBSS_BIT 0x0002
-#define CAPABILITY_CF_POLL_BIT 0x0004
-#define CAPABILITY_CF_POLL_REQ_BIT 0x0008
-#define CAPABILITY_PRIVACY_BIT 0x0010
-#define CAPABILITY_SHORT_PREAMBLE_BIT 0x0020
-#define CAPABILITY_PBCC_BIT 0x0040
-#define CAPABILITY_CHAN_AGILITY_BIT 0x0080
-#define CAPABILITY_SHORT_SLOT_TIME_BIT 0x0400
-#define CAPABILITY_DSSS_OFDM_BIT 0x2000
-
-
-struct Capability_Information_Element {
- union {
- u16 __attribute__ ((packed)) wValue;
- #ifdef _BIG_ENDIAN_ /* 20060926 add by anson's endian */
- struct _Capability {
- /* -- 11G -- */
- u8 Reserved3:2;
- u8 DSSS_OFDM:1;
- u8 Reserved2:2;
- u8 Short_Slot_Time:1;
- u8 Reserved1:2;
- u8 Channel_Agility:1;
- u8 PBCC:1;
- u8 ShortPreamble:1;
- u8 CF_Privacy:1;
- u8 CF_Poll_Request:1;
- u8 CF_Pollable:1;
- u8 IBSS:1;
- u8 ESS:1;
- } __attribute__ ((packed)) Capability;
- #else
- struct _Capability {
- u8 ESS:1;
- u8 IBSS:1;
- u8 CF_Pollable:1;
- u8 CF_Poll_Request:1;
- u8 CF_Privacy:1;
- u8 ShortPreamble:1;
- u8 PBCC:1;
- u8 Channel_Agility:1;
- u8 Reserved1:2;
- /* -- 11G -- */
- u8 Short_Slot_Time:1;
- u8 Reserved2:2;
- u8 DSSS_OFDM:1;
- u8 Reserved3:2;
- } __attribute__ ((packed)) Capability;
- #endif
- } __attribute__ ((packed)) ;
-} __attribute__ ((packed));
-
-struct FH_Parameter_Set_Element {
- u8 Element_ID;
- u8 Length;
- u8 Dwell_Time[2];
- u8 Hop_Set;
- u8 Hop_Pattern;
- u8 Hop_Index;
-};
-
-struct DS_Parameter_Set_Element {
- u8 Element_ID;
- u8 Length;
- u8 Current_Channel;
-};
-
-struct Supported_Rates_Element {
- u8 Element_ID;
- u8 Length;
- u8 SupportedRates[8];
-} __attribute__ ((packed));
-
-struct SSID_Element {
- u8 Element_ID;
- u8 Length;
- u8 SSID[32];
-} __attribute__ ((packed)) ;
-
-struct CF_Parameter_Set_Element {
- u8 Element_ID;
- u8 Length;
- u8 CFP_Count;
- u8 CFP_Period;
- u8 CFP_MaxDuration[2]; /* in Time Units */
- u8 CFP_DurRemaining[2]; /* in time units */
-};
-
-struct TIM_Element {
- u8 Element_ID;
- u8 Length;
- u8 DTIM_Count;
- u8 DTIM_Period;
- u8 Bitmap_Control;
- u8 Partial_Virtual_Bitmap[251];
-};
-
-struct IBSS_Parameter_Set_Element {
- u8 Element_ID;
- u8 Length;
- u8 ATIM_Window[2];
-};
-
-struct Challenge_Text_Element {
- u8 Element_ID;
- u8 Length;
- u8 Challenge_Text[253];
-};
-
-struct PHY_Parameter_Set_Element {
-/* int aSlotTime; */
-/* int aSifsTime; */
- s32 aCCATime;
- s32 aRxTxTurnaroundTime;
- s32 aTxPLCPDelay;
- s32 RxPLCPDelay;
- s32 aRxTxSwitchTime;
- s32 aTxRampOntime;
- s32 aTxRampOffTime;
- s32 aTxRFDelay;
- s32 aRxRFDelay;
- s32 aAirPropagationTime;
- s32 aMACProcessingDelay;
- s32 aPreambleLength;
- s32 aPLCPHeaderLength;
- s32 aMPDUDurationFactor;
- s32 aMPDUMaxLength;
-/* int aCWmin; */
-/* int aCWmax; */
-};
-
-/* -- 11G -- */
-struct ERP_Information_Element {
- u8 Element_ID;
- u8 Length;
- #ifdef _BIG_ENDIAN_ /* 20060926 add by anson's endian */
- u8 Reserved:5; /* 20060926 add by anson */
- u8 Barker_Preamble_Mode:1;
- u8 Use_Protection:1;
- u8 NonERP_Present:1;
- #else
- u8 NonERP_Present:1;
- u8 Use_Protection:1;
- u8 Barker_Preamble_Mode:1;
- u8 Reserved:5;
- #endif
-};
-
-struct Extended_Supported_Rates_Element {
- u8 Element_ID;
- u8 Length;
- u8 ExtendedSupportedRates[255];
-} __attribute__ ((packed));
-
-/* WPA(802.11i draft 3.0) */
-#define VERSION_WPA 1
-#ifdef _WPA2_
-#define VERSION_WPA2 1
-#endif /* end def _WPA2_ */
-/* WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type */
-#define OUI_WPA 0x00F25000
-#ifdef _WPA2_
-/* for wpa2 change to 0x00ACOF04 by Ws 26/04/04 */
-#define OUI_WPA2 0x00AC0F00
-#endif /* end def _WPA2_ */
-
-#define OUI_WPA_ADDITIONAL 0x01
-#define WLAN_MIN_RSN_WPA_LENGTH 6 /* added by ws 09/10/04 */
-#ifdef _WPA2_
-#define WLAN_MIN_RSN_WPA2_LENGTH 2 /* Fix to 2 09/14/05 */
-#endif /* end def _WPA2_ */
-
-#define oui_wpa (u32)(OUI_WPA|OUI_WPA_ADDITIONAL)
-
-#define WPA_OUI_BIG ((u32) 0x01F25000)/* added by ws 09/23/04 */
-#define WPA_OUI_LITTLE ((u32) 0x01F25001)/* added by ws 09/23/04 */
-/* 20061108 For WPS. It's little endian. Big endian is 0x0050F204 */
-#define WPA_WPS_OUI cpu_to_le32(0x04F25000)
-
-/* -----WPA2----- */
-#ifdef _WPA2_
-#define WPA2_OUI_BIG ((u32)0x01AC0F00)
-#define WPA2_OUI_LITTLE ((u32)0x01AC0F01)
-#endif /* end def _WPA2_ */
-
-/* Authentication suite */
-#define OUI_AUTH_WPA_NONE 0x00 /* for WPA_NONE */
-#define OUI_AUTH_8021X 0x01
-#define OUI_AUTH_PSK 0x02
-/* Cipher suite */
-#define OUI_CIPHER_GROUP_KEY 0x00 /* added by ws 05/21/04 */
-#define OUI_CIPHER_WEP_40 0x01
-#define OUI_CIPHER_TKIP 0x02
-#define OUI_CIPHER_CCMP 0x04
-#define OUI_CIPHER_WEP_104 0x05
-
-struct suite_selector{
- union{
- u8 Value[4];
- struct _SUIT_ {
- u8 OUI[3];
- u8 Type;
- } SuitSelector;
- };
-};
-
-/* -- WPA -- */
-struct RSN_Information_Element{
- u8 Element_ID;
- u8 Length;
- /* WPA version 2.0 additional field, and should be 00:50:F2:01 */
- struct suite_selector OuiWPAAdditional;
- u16 Version;
- struct suite_selector GroupKeySuite;
- u16 PairwiseKeySuiteCount;
- struct suite_selector PairwiseKeySuite[1];
-} __attribute__ ((packed));
-struct RSN_Auth_Sub_Information_Element {
- u16 AuthKeyMngtSuiteCount;
- struct suite_selector AuthKeyMngtSuite[1];
-} __attribute__ ((packed));
-
-/* -- WPA2 -- */
-struct RSN_Capability_Element {
- union {
- u16 __attribute__ ((packed)) wValue;
- #ifdef _BIG_ENDIAN_ /* 20060927 add by anson's endian */
- struct _RSN_Capability {
- u16 __attribute__ ((packed)) Reserved2:8; /* 20051201 */
- u16 __attribute__ ((packed)) Reserved1:2;
- u16 __attribute__ ((packed)) GTK_Replay_Counter:2;
- u16 __attribute__ ((packed)) PTK_Replay_Counter:2;
- u16 __attribute__ ((packed)) No_Pairwise:1;
- u16 __attribute__ ((packed)) Pre_Auth:1;
- } __attribute__ ((packed)) RSN_Capability;
- #else
- struct _RSN_Capability {
- u16 __attribute__ ((packed)) Pre_Auth:1;
- u16 __attribute__ ((packed)) No_Pairwise:1;
- u16 __attribute__ ((packed)) PTK_Replay_Counter:2;
- u16 __attribute__ ((packed)) GTK_Replay_Counter:2;
- u16 __attribute__ ((packed)) Reserved1:2;
- u16 __attribute__ ((packed)) Reserved2:8; /* 20051201 */
- } __attribute__ ((packed)) RSN_Capability;
- #endif
-
- } __attribute__ ((packed)) ;
-} __attribute__ ((packed)) ;
-
-#ifdef _WPA2_
-struct pmkid {
- u8 pValue[16];
-};
-
-struct WPA2_RSN_Information_Element {
- u8 Element_ID;
- u8 Length;
- u16 Version;
- struct suite_selector GroupKeySuite;
- u16 PairwiseKeySuiteCount;
- struct suite_selector PairwiseKeySuite[1];
-
-} __attribute__ ((packed));
-
-struct WPA2_RSN_Auth_Sub_Information_Element {
- u16 AuthKeyMngtSuiteCount;
- struct suite_selector AuthKeyMngtSuite[1];
-} __attribute__ ((packed));
-
-
-struct PMKID_Information_Element {
- u16 PMKID_Count;
- struct pmkid pmkid[16];
-} __attribute__ ((packed));
-
-#endif /* enddef _WPA2_ */
-/*============================================================
-// MAC Frame structure (different type) and subfield structure
-//============================================================*/
-struct MAC_frame_control {
-/* a combination of the [Protocol Version, Control Type, Control Subtype]*/
- u8 mac_frame_info;
-/* 20060927 add by anson's endian */
- #ifdef _BIG_ENDIAN_
- u8 order:1;
- u8 WEP:1;
- u8 more_data:1;
- u8 pwr_mgt:1;
- u8 retry:1;
- u8 more_frag:1;
- u8 from_ds:1;
- u8 to_ds:1;
- #else
- u8 to_ds:1;
- u8 from_ds:1;
- u8 more_frag:1;
- u8 retry:1;
- u8 pwr_mgt:1;
- u8 more_data:1;
- u8 WEP:1;
- u8 order:1;
- #endif
-} __attribute__ ((packed));
-
-struct Management_Frame {
-/* 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0 */
- struct MAC_frame_control frame_control;
- u16 duration;
- u8 DA[MAC_ADDR_LENGTH]; /* Addr1 */
- u8 SA[MAC_ADDR_LENGTH]; /* Addr2 */
- u8 BSSID[MAC_ADDR_LENGTH]; /* Addr3 */
- u16 Sequence_Control;
- /* Management Frame Body <= 325 bytes */
- /* FCS 4 bytes */
-} __attribute__ ((packed));
-
-/* SW-MAC don't Tx/Rx Control-Frame, HW-MAC do it. */
-struct Control_Frame {
-/* ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0 */
- struct MAC_frame_control frame_control;
- u16 duration;
- u8 RA[MAC_ADDR_LENGTH];
- u8 TA[MAC_ADDR_LENGTH];
- u16 FCS;
-} __attribute__ ((packed));
-
-struct Data_Frame {
- struct MAC_frame_control frame_control;
- u16 duration;
- u8 Addr1[MAC_ADDR_LENGTH];
- u8 Addr2[MAC_ADDR_LENGTH];
- u8 Addr3[MAC_ADDR_LENGTH];
- u16 Sequence_Control;
- u8 Addr4[MAC_ADDR_LENGTH]; /* only exist when ToDS=FromDS=1 */
- /* Data Frame Body <= 2312 */
- /* FCS */
-} __attribute__ ((packed));
-
-struct Disassociation_Frame_Body {
- u16 reasonCode;
-} __attribute__ ((packed));
-
-struct Association_Request_Frame_Body {
- u16 capability_information;
- u16 listenInterval;
- u8 Current_AP_Address[MAC_ADDR_LENGTH];/* for reassociation only */
- /* SSID (2+32 bytes) */
- /* Supported_Rates (2+8 bytes) */
-} __attribute__ ((packed));
-
-struct Association_Response_Frame_Body {
- u16 capability_information;
- u16 statusCode;
- u16 Association_ID;
- struct Supported_Rates_Element supportedRates;
-} __attribute__ ((packed));
-
-/*struct Reassociation_Request_Frame_Body
-{
- u16 capability_information;
- u16 listenInterval;
- u8 Current_AP_Address[MAC_ADDR_LENGTH];
- // SSID (2+32 bytes)
- // Supported_Rates (2+8 bytes)
-};*/
-/* eliminated by WS 07/22/04 comboined with associateion request frame. */
-
-struct Reassociation_Response_Frame_Body {
- u16 capability_information;
- u16 statusCode;
- u16 Association_ID;
- struct Supported_Rates_Element supportedRates;
-} __attribute__ ((packed));
-
-struct Deauthentication_Frame_Body {
- u16 reasonCode;
-} __attribute__ ((packed));
-
-
-struct Probe_Response_Frame_Body {
- u16 Timestamp;
- u16 Beacon_Interval;
- u16 Capability_Information;
- /* SSID
- // Supported_Rates
- // PHY parameter Set (DS Parameters)
- // CF parameter Set
- // IBSS parameter Set */
-} __attribute__ ((packed));
-
-struct Authentication_Frame_Body {
- u16 algorithmNumber;
- u16 sequenceNumber;
- u16 statusCode;
- /* NB: don't include ChallengeText in this structure
- // struct Challenge_Text_Element sChallengeTextElement;
- // wkchen added */
-} __attribute__ ((packed));
-
-
#endif /* _MAC_Structure_H_ */
-
-
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 9217762b1814..9cfea94bcea5 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -1,9 +1,7 @@
#include "mds_f.h"
-#include "mlmetxrx_f.h"
#include "mto.h"
-#include "sysdef.h"
-#include "wbhal_f.h"
-#include "wblinux_f.h"
+#include "wbhal.h"
+#include "wb35tx_f.h"
unsigned char
Mds_initial(struct wbsoft_priv *adapter)
@@ -17,11 +15,6 @@ Mds_initial(struct wbsoft_priv *adapter)
return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
}
-void
-Mds_Destroy(struct wbsoft_priv *adapter)
-{
-}
-
static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *buffer)
{
struct T00_descriptor *pT00;
@@ -350,9 +343,7 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *
ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
pDes->TxRate = ctmp1;
- #ifdef _PE_TX_DUMP_
- printk("Tx rate =%x\n", ctmp1);
- #endif
+ pr_debug("Tx rate =%x\n", ctmp1);
pT01->T01_modulation_type = (ctmp1%3) ? 0 : 1;
@@ -404,6 +395,44 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *
}
+static void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
+{
+ desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
+ desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
+ desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
+ desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
+ desc->buffer_total_size += adapter->sMlmeFrame.len;
+ desc->buffer_number++;
+ desc->Type = adapter->sMlmeFrame.DataType;
+}
+
+static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
+{
+ int i;
+
+ /* Reclaim the data buffer */
+ for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
+ if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
+ break;
+ }
+ if (adapter->sMlmeFrame.TxMMPDUInUse[i])
+ adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
+ else {
+ /* Something wrong
+ PD43 Add debug code here??? */
+ }
+}
+
+static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
+{
+ /* Reclaim the data buffer */
+ adapter->sMlmeFrame.len = 0;
+ MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
+
+ /* Return resource */
+ adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
+}
+
void
Mds_Tx(struct wbsoft_priv *adapter)
{
@@ -430,9 +459,7 @@ Mds_Tx(struct wbsoft_priv *adapter)
do {
FillIndex = pMds->TxFillIndex;
if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
-#ifdef _PE_TX_DUMP_
- printk("[Mds_Tx] Tx Owner is H/W.\n");
-#endif
+ pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
break;
}
@@ -476,9 +503,7 @@ Mds_Tx(struct wbsoft_priv *adapter)
/* For speed up Key setting */
if (pTxDes->EapFix) {
-#ifdef _PE_TX_DUMP_
- printk("35: EPA 4th frame detected. Size = %d\n", PacketSize);
-#endif
+ pr_debug("35: EPA 4th frame detected. Size = %d\n", PacketSize);
pHwData->IsKeyPreSet = 1;
}
@@ -492,11 +517,6 @@ Mds_Tx(struct wbsoft_priv *adapter)
XmitBufSize += CurrentSize;
XmitBufAddress += CurrentSize;
-#ifdef _IBSS_BEACON_SEQ_STICK_
- if ((XmitBufAddress[DOT_11_DA_OFFSET+8] & 0xfc) != MAC_SUBTYPE_MNGMNT_PROBE_REQUEST) /* +8 for USB hdr */
-#endif
- pMds->TxToggle = true;
-
/* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
MLME_SendComplete(adapter, 0, true);
@@ -567,9 +587,7 @@ Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pT02)
pHwData->tx_retry_count[RetryCount] += RetryCount;
else
pHwData->tx_retry_count[7] += RetryCount;
- #ifdef _PE_STATE_DUMP_
- printk("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
- #endif
+ pr_debug("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
pHwData->dto_tx_frag_count += (RetryCount+1);
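The conversions in the mds.c hunks above replace printk() calls wrapped in private #ifdef switches with pr_debug(), which compiles to nothing unless DEBUG is defined for the file or dynamic debug is enabled, so the ad-hoc _PE_*_DUMP_ configuration symbols become unnecessary. A small sketch of the pattern, mirroring the Tx-rate message from the diff:

	/* before: message only built when the driver's private switch is defined */
	#ifdef _PE_TX_DUMP_
		printk("Tx rate =%x\n", ctmp1);
	#endif

	/* after: same message, gated by the kernel's standard debug machinery */
	pr_debug("Tx rate =%x\n", ctmp1);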
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 7f68deae6d04..ce8be079e957 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,11 +1,10 @@
#ifndef __WINBOND_MDS_F_H
#define __WINBOND_MDS_F_H
-#include "wbhal_s.h"
+#include "wbhal.h"
#include "core.h"
unsigned char Mds_initial(struct wbsoft_priv *adapter);
-void Mds_Destroy(struct wbsoft_priv *adapter);
void Mds_Tx(struct wbsoft_priv *adapter);
void Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pt02);
void Mds_MpduProcess(struct wbsoft_priv *adapter, struct wb35_descriptor *prxdes);
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index e2de4bd23b4d..eeedf0186365 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -107,10 +107,6 @@ struct wb35_mds {
u8 TxRate[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)][2]; /* [0] current tx rate, [1] fall back rate */
u8 TxInfo[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)]; /*Store information for callback function */
- /* for scanning mechanism */
- u8 TxToggle; /* It is TRUE if there are tx activities in some time interval */
- u8 Reserved_[3];
-
/* ---- for Tx Parameter */
u16 TxFragmentThreshold; /* For frame body only */
u16 TxRTSThreshold;
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
deleted file mode 100644
index 7425a23f12e8..000000000000
--- a/drivers/staging/winbond/mlmetxrx.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* ============================================================================
- Module Name:
- MLMETxRx.C
-
- Description:
- The interface between MDS (MAC Data Service) and MLME.
-
- Revision History:
- --------------------------------------------------------------------------
- 200209 UN20 Jennifer Xu
- Initial Release
- 20021108 PD43 Austin Liu
- 20030117 PD43 Austin Liu
- Deleted MLMEReturnPacket and MLMEProcThread()
-
- Copyright (c) 1996-2002 Winbond Electronics Corp. All Rights Reserved.
-============================================================================ */
-#include "sysdef.h"
-
-#include "mds_f.h"
-
-void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
-{
- desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
- desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
- desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
- desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
- desc->buffer_total_size += adapter->sMlmeFrame.len;
- desc->buffer_number++;
- desc->Type = adapter->sMlmeFrame.DataType;
-}
-
-static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
-{
- int i;
-
- /* Reclaim the data buffer */
- for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
- if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
- break;
- }
- if (adapter->sMlmeFrame.TxMMPDUInUse[i])
- adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
- else {
- /* Something wrong
- PD43 Add debug code here??? */
- }
-}
-
-void
-MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
-{
- /* Reclaim the data buffer */
- adapter->sMlmeFrame.len = 0;
- MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
-
- /* Return resource */
- adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
-}
-
-
-
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
deleted file mode 100644
index 012507fc49e3..000000000000
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* ================================================================
-// MLMETxRx.H --
-//
-// Functions defined in MLMETxRx.c.
-//
-// Copyright (c) 2002 Winbond Electrics Corp. All Rights Reserved.
-//================================================================ */
-#ifndef _MLMETXRX_H
-#define _MLMETXRX_H
-
-#include "core.h"
-
-void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes);
-
-void
-MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
- unsigned char SendOK);
-
-#ifdef _IBSS_BEACON_SEQ_STICK_
-s8 SendBCNullData(struct wbsoft_priv *adapter, u16 wIdx);
-#endif
-
-#endif
diff --git a/drivers/staging/winbond/mto.c b/drivers/staging/winbond/mto.c
index 9cd212783d61..c03e5010ca34 100644
--- a/drivers/staging/winbond/mto.c
+++ b/drivers/staging/winbond/mto.c
@@ -17,9 +17,10 @@
* ============================================================================
*/
-#include "sysdef.h"
#include "sme_api.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
/* Declare SQ3 to rate and fragmentation threshold table */
/* Declare fragmentation thresholds table */
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index 2b375ba3812a..09844db64fe9 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -10,9 +10,10 @@
*/
/****************** INCLUDE FILES SECTION ***********************************/
-#include "sysdef.h"
#include "phy_calibration.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
/****************** DEBUG CONSTANT AND MACRO SECTION ************************/
diff --git a/drivers/staging/winbond/phy_calibration.h b/drivers/staging/winbond/phy_calibration.h
index 303203148839..84f6e840a47a 100644
--- a/drivers/staging/winbond/phy_calibration.h
+++ b/drivers/staging/winbond/phy_calibration.h
@@ -1,7 +1,7 @@
#ifndef __WINBOND_PHY_CALIBRATION_H
#define __WINBOND_PHY_CALIBRATION_H
-#include "wbhal_f.h"
+#include "wbhal.h"
#define REG_AGC_CTRL1 0x1000
#define REG_AGC_CTRL2 0x1004
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index 990f9d4bdbbd..1b38d6d225c9 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -1,5 +1,6 @@
-#include "sysdef.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
/*
* ====================================================
@@ -1010,9 +1011,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
case RF_AIROHA_7230:
/* Start to fill RF parameters, PLL_ON should be pulled low. */
Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON low\n");
- #endif
+ pr_debug("* PLL_ON low\n");
number = ARRAY_SIZE(al7230_rf_data_24);
Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
break;
@@ -1098,9 +1097,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
case RF_AIROHA_7230:
/* RF parameters have filled completely, PLL_ON should be pulled high */
Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON high\n");
- #endif
+ pr_debug("* PLL_ON high\n");
/* 2.4GHz */
ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
@@ -1115,9 +1112,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
/* 5GHz */
Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON low\n");
- #endif
+ pr_debug("* PLL_ON low\n");
number = ARRAY_SIZE(al7230_rf_data_50);
Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
@@ -1127,9 +1122,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
msleep(5);
Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON high\n");
- #endif
+ pr_debug("* PLL_ON high\n");
ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
@@ -1795,9 +1788,7 @@ void RFSynthesizer_SwitchingChannel(struct hw_data *pHwData, struct chan_info C
/* Write to register. number must less and equal than 16 */
Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, number, NO_INCREMENT);
- #ifdef _PE_STATE_DUMP_
- printk("Band changed\n");
- #endif
+ pr_debug("Band changed\n");
}
if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 14 */
@@ -2073,11 +2064,7 @@ void Mxx_initial(struct hw_data *pHwData)
*/
/* M00 bit set */
- #ifdef _IBSS_BEACON_SEQ_STICK_
- reg->M00_MacControl = 0; /* Solve beacon sequence number stop by software */
- #else
reg->M00_MacControl = 0x80000000; /* Solve beacon sequence number stop by hardware */
- #endif
/* M24 disable enter power save, BB RxOn and enable NAV attack */
reg->M24_MacControl = 0x08040042;
@@ -2336,13 +2323,6 @@ void EEPROMTxVgaAdjust(struct hw_data *pHwData)
pHwData->TxVgaFor50[32].TxVgaValue = pTxVga[17] - stmp * 2 / 4;
pHwData->TxVgaFor50[31].TxVgaValue = pTxVga[17] - stmp * 3 / 4;
}
-
- #ifdef _PE_STATE_DUMP_
- printk(" TxVgaFor24 :\n");
- DataDmp((u8 *)pHwData->TxVgaFor24, 14 , 0);
- printk(" TxVgaFor50 :\n");
- DataDmp((u8 *)pHwData->TxVgaFor50, 70 , 0);
- #endif
}
void BBProcessor_RateChanging(struct hw_data *pHwData, u8 rate)
diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h
deleted file mode 100644
index d0d71f69bc8c..000000000000
--- a/drivers/staging/winbond/sysdef.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Winbond WLAN System Configuration defines */
-
-#ifndef SYS_DEF_H
-#define SYS_DEF_H
-
-#include <linux/delay.h>
-
-#define WB_LINUX
-#define WB_LINUX_WPA_PSK
-
-#define _USE_FALLBACK_RATE_
-
-#define _WPA2_
-
-#ifndef _WPA_PSK_DEBUG
-#undef _WPA_PSK_DEBUG
-#endif
-
-/* debug print options, mark what debug you don't need */
-
-#ifdef FULL_DEBUG
-#define _PE_STATE_DUMP_
-#define _PE_TX_DUMP_
-#define _PE_RX_DUMP_
-#define _PE_OID_DUMP_
-#define _PE_DTO_DUMP_
-#define _PE_REG_DUMP_
-#define _PE_USB_INI_DUMP_
-#endif
-
-#endif
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index 770722385eeb..42ae61014522 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -1,4 +1,3 @@
-#include "sysdef.h"
#include "wb35reg_f.h"
#include <linux/usb.h>
@@ -140,8 +139,8 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
/* Sync IoCallDriver */
reg->EP0vm_state = VM_RUNNING;
- ret = usb_control_msg(pHwData->WbUsb.udev,
- usb_sndctrlpipe(pHwData->WbUsb.udev, 0),
+ ret = usb_control_msg(pHwData->udev,
+ usb_sndctrlpipe(pHwData->udev, 0),
0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
reg->EP0vm_state = VM_STOP;
@@ -150,9 +149,7 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
-#ifdef _PE_REG_DUMP_
- printk("EP0 Write register usb message sending error\n");
-#endif
+ pr_debug("EP0 Write register usb message sending error\n");
pHwData->SurpriseRemove = 1;
return false;
}
@@ -305,8 +302,8 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
msleep(10);
reg->EP0vm_state = VM_RUNNING;
- ret = usb_control_msg(pHwData->WbUsb.udev,
- usb_rcvctrlpipe(pHwData->WbUsb.udev, 0),
+ ret = usb_control_msg(pHwData->udev,
+ usb_rcvctrlpipe(pHwData->udev, 0),
0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0, RegisterNo, pltmp, 4, HZ * 100);
@@ -320,9 +317,7 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
-#ifdef _PE_REG_DUMP_
- printk("EP0 Read register usb message sending error\n");
-#endif
+ pr_debug("EP0 Read register usb message sending error\n");
pHwData->SurpriseRemove = 1;
return false;
}
@@ -432,8 +427,8 @@ void Wb35Reg_EP0VM(struct hw_data *pHwData)
if (reg_queue->DIRECT == 1) /* output */
pBuffer = &reg_queue->VALUE;
- usb_fill_control_urb(urb, pHwData->WbUsb.udev,
- REG_DIRECTION(pHwData->WbUsb.udev, reg_queue),
+ usb_fill_control_urb(urb, pHwData->udev,
+ REG_DIRECTION(pHwData->udev, reg_queue),
(u8 *)dr, pBuffer, cpu_to_le16(dr->wLength),
Wb35Reg_EP0VM_complete, (void *)pHwData);
@@ -442,9 +437,7 @@ void Wb35Reg_EP0VM(struct hw_data *pHwData)
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
-#ifdef _PE_REG_DUMP_
- printk("EP0 Irp sending error\n");
-#endif
+ pr_debug("EP0 Irp sending error\n");
goto cleanup;
}
return;
@@ -479,9 +472,7 @@ void Wb35Reg_EP0VM_complete(struct urb *urb)
spin_unlock_irq(&reg->EP0VM_spin_lock);
if (reg->EP0VM_status) {
-#ifdef _PE_REG_DUMP_
- printk("EP0 IoCompleteRoutine return error\n");
-#endif
+ pr_debug("EP0 IoCompleteRoutine return error\n");
reg->EP0vm_state = VM_STOP;
pHwData->SurpriseRemove = 1;
} else {
@@ -526,9 +517,7 @@ void Wb35Reg_destroy(struct hw_data *pHwData)
usb_free_urb(urb);
kfree(reg_queue);
} else {
-#ifdef _PE_REG_DUMP_
- printk("EP0 queue release error\n");
-#endif
+ pr_debug("EP0 queue release error\n");
}
spin_lock_irq(&reg->EP0VM_spin_lock);
diff --git a/drivers/staging/winbond/wb35reg_f.h b/drivers/staging/winbond/wb35reg_f.h
index bf23c1084199..95dc98096845 100644
--- a/drivers/staging/winbond/wb35reg_f.h
+++ b/drivers/staging/winbond/wb35reg_f.h
@@ -1,7 +1,7 @@
#ifndef __WINBOND_WB35REG_F_H
#define __WINBOND_WB35REG_F_H
-#include "wbhal_s.h"
+#include "wbhal.h"
/*
* ====================================
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index 4eff009444b8..eb274ffdd1ba 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -5,6 +5,8 @@
#include <linux/types.h>
#include <asm/atomic.h>
+struct hw_data;
+
/* =========================================================================
*
* HAL setting function
@@ -49,11 +51,7 @@
#define DEFAULT_CWMAX 1023 /* (M2C) CWmax. Its value is in the range 0-1023. */
#define DEFAULT_AID 1 /* (M34) AID. Its value is in the range 1-2007. */
-#ifdef _USE_FALLBACK_RATE_
#define DEFAULT_RATE_RETRY_LIMIT 2 /* (M38) as named */
-#else
-#define DEFAULT_RATE_RETRY_LIMIT 7 /* (M38) as named */
-#endif
#define DEFAULT_LONG_RETRY_LIMIT 7 /* (M38) LongRetryLimit. Its value is in the range 0-15. */
#define DEFAULT_SHORT_RETRY_LIMIT 7 /* (M38) ShortRetryLimit. Its value is in the range 0-15. */
@@ -168,4 +166,75 @@ struct wb35_reg {
u32 SQ3_filter[MAX_SQ3_FILTER_SIZE];
u32 SQ3_index;
};
+
+/* =====================================================================
+ * Function declaration
+ * =====================================================================
+ */
+void hal_remove_mapping_key(struct hw_data *hw_data, u8 *mac_addr);
+void hal_remove_default_key(struct hw_data *hw_data, u32 index);
+unsigned char hal_set_mapping_key(struct hw_data *adapter, u8 *mac_addr,
+ u8 null_key, u8 wep_on, u8 *tx_tsc,
+ u8 *rx_tsc, u8 key_type, u8 key_len,
+ u8 *key_data);
+unsigned char hal_set_default_key(struct hw_data *adapter, u8 index,
+ u8 null_key, u8 wep_on, u8 *tx_tsc,
+ u8 *rx_tsc, u8 key_type, u8 key_len,
+ u8 *key_data);
+void hal_clear_all_default_key(struct hw_data *hw_data);
+void hal_clear_all_group_key(struct hw_data *hw_data);
+void hal_clear_all_mapping_key(struct hw_data *hw_data);
+void hal_clear_all_key(struct hw_data *hw_data);
+void hal_set_power_save_mode(struct hw_data *hw_data, unsigned char power_save,
+ unsigned char wakeup, unsigned char dtim);
+void hal_get_power_save_mode(struct hw_data *hw_data, u8 *in_pwr_save);
+void hal_set_slot_time(struct hw_data *hw_data, u8 type);
+
+#define hal_set_atim_window(_A, _ATM)
+
+void hal_start_bss(struct hw_data *hw_data, u8 mac_op_mode);
+
+/* 0:BSS STA 1:IBSS STA */
+void hal_join_request(struct hw_data *hw_data, u8 bss_type);
+
+void hal_stop_sync_bss(struct hw_data *hw_data);
+void hal_resume_sync_bss(struct hw_data *hw_data);
+void hal_set_aid(struct hw_data *hw_data, u16 aid);
+void hal_set_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_get_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_set_listen_interval(struct hw_data *hw_data, u16 listen_interval);
+void hal_set_cap_info(struct hw_data *hw_data, u16 capability_info);
+void hal_set_ssid(struct hw_data *hw_data, u8 *ssid, u8 ssid_len);
+void hal_start_tx0(struct hw_data *hw_data);
+
+#define hal_get_cwmin(_A) ((_A)->cwmin)
+
+void hal_set_cwmax(struct hw_data *hw_data, u16 cwin_max);
+
+#define hal_get_cwmax(_A) ((_A)->cwmax)
+
+void hal_set_rsn_wpa(struct hw_data *hw_data, u32 *rsn_ie_bitmap,
+ u32 *rsn_oui_type , unsigned char desired_auth_mode);
+void hal_set_connect_info(struct hw_data *hw_data, unsigned char bo_connect);
+u8 hal_get_est_sq3(struct hw_data *hw_data, u8 count);
+void hal_descriptor_indicate(struct hw_data *hw_data,
+ struct wb35_descriptor *des);
+u8 hal_get_antenna_number(struct hw_data *hw_data);
+u32 hal_get_bss_pk_cnt(struct hw_data *hw_data);
+
+#define hal_get_region_from_EEPROM(_A) ((_A)->reg.EEPROMRegion)
+#define hal_get_tx_buffer(_A, _B) Wb35Tx_get_tx_buffer(_A, _B)
+#define hal_software_set(_A) (_A->SoftwareSet)
+#define hal_driver_init_OK(_A) (_A->IsInitOK)
+#define hal_rssi_boundary_high(_A) (_A->RSSI_high)
+#define hal_rssi_boundary_low(_A) (_A->RSSI_low)
+#define hal_scan_interval(_A) (_A->Scan_Interval)
+
+#define PHY_DEBUG(msg, args...)
+
+/* return 100ms count */
+#define hal_get_time_count(_P) (_P->time_count / 10)
+
+#define hal_ibss_disconnect(_A) (hal_stop_sync_bss(_A))
+
#endif
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index 448514aada44..f118eeba396a 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include "core.h"
-#include "sysdef.h"
#include "wb35rx_f.h"
static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int PacketSize)
@@ -109,10 +108,7 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
/* Basic check for Rx length. Is length valid? */
if (PacketSize > MAX_PACKET_SIZE) {
-#ifdef _PE_RX_DUMP_
- printk("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
-#endif
-
+ pr_debug("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->Ep3ErrorCount2++;
break;
@@ -174,7 +170,7 @@ static void Wb35Rx_Complete(struct urb *urb)
/* The IRP is completed */
pWb35Rx->EP3vm_state = VM_COMPLETED;
- if (pHwData->SurpriseRemove || pHwData->HwStop) /* Must be here, or RxBufferId is invalid */
+ if (pHwData->SurpriseRemove) /* Must be here, or RxBufferId is invalid */
goto error;
if (pWb35Rx->rx_halt)
@@ -186,9 +182,7 @@ static void Wb35Rx_Complete(struct urb *urb)
/* The URB is completed, check the result */
if (pWb35Rx->EP3VM_status != 0) {
-#ifdef _PE_USB_STATE_DUMP_
- printk("EP3 IoCompleteRoutine return error\n");
-#endif
+ pr_debug("EP3 IoCompleteRoutine return error\n");
pWb35Rx->EP3vm_state = VM_STOP;
goto error;
}
@@ -239,7 +233,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
u32 RxBufferId;
/* Issuing URB */
- if (pHwData->SurpriseRemove || pHwData->HwStop)
+ if (pHwData->SurpriseRemove)
goto error;
if (pWb35Rx->rx_halt)
@@ -249,9 +243,7 @@ static void Wb35Rx(struct ieee80211_hw *hw)
RxBufferId = pWb35Rx->RxBufferId;
if (!pWb35Rx->RxOwner[RxBufferId]) {
/* It's impossible to run here. */
-#ifdef _PE_RX_DUMP_
- printk("Rx driver fifo unavailable\n");
-#endif
+ pr_debug("Rx driver fifo unavailable\n");
goto error;
}
@@ -268,8 +260,8 @@ static void Wb35Rx(struct ieee80211_hw *hw)
}
pRxBufferAddress = pWb35Rx->pDRx;
- usb_fill_bulk_urb(urb, pHwData->WbUsb.udev,
- usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
+ usb_fill_bulk_urb(urb, pHwData->udev,
+ usb_rcvbulkpipe(pHwData->udev, 3),
pRxBufferAddress, MAX_USB_RX_BUFFER,
Wb35Rx_Complete, hw);
@@ -337,9 +329,7 @@ void Wb35Rx_stop(struct hw_data *pHwData)
/* Canceling the Irp if already sends it out. */
if (pWb35Rx->EP3vm_state == VM_RUNNING) {
usb_unlink_urb(pWb35Rx->RxUrb); /* Only use unlink, let Wb35Rx_destroy to free them */
-#ifdef _PE_RX_DUMP_
- printk("EP3 Rx stop\n");
-#endif
+ pr_debug("EP3 Rx stop\n");
}
}
@@ -355,8 +345,6 @@ void Wb35Rx_destroy(struct hw_data *pHwData)
if (pWb35Rx->RxUrb)
usb_free_urb(pWb35Rx->RxUrb);
-#ifdef _PE_RX_DUMP_
- printk("Wb35Rx_destroy OK\n");
-#endif
+ pr_debug("Wb35Rx_destroy OK\n");
}
diff --git a/drivers/staging/winbond/wb35rx_f.h b/drivers/staging/winbond/wb35rx_f.h
index 98acce517d90..1fdf65ea6041 100644
--- a/drivers/staging/winbond/wb35rx_f.h
+++ b/drivers/staging/winbond/wb35rx_f.h
@@ -2,7 +2,7 @@
#define __WINBOND_WB35RX_F_H
#include <net/mac80211.h>
-#include "wbhal_s.h"
+#include "wbhal.h"
//====================================
// Interface function declare
diff --git a/drivers/staging/winbond/wb35tx.c b/drivers/staging/winbond/wb35tx.c
index 2a9d05557678..44fc3fe79b79 100644
--- a/drivers/staging/winbond/wb35tx.c
+++ b/drivers/staging/winbond/wb35tx.c
@@ -13,7 +13,6 @@
#include "wb35tx_f.h"
#include "mds_f.h"
-#include "sysdef.h"
unsigned char
Wb35Tx_get_tx_buffer(struct hw_data * pHwData, u8 **pBuffer)
@@ -41,7 +40,7 @@ static void Wb35Tx_complete(struct urb * pUrb)
pWb35Tx->TxSendIndex++;
pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
- if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+ if (pHwData->SurpriseRemove) // Let WbWlanHalt to handle surprise remove
goto error;
if (pWb35Tx->tx_halt)
@@ -74,7 +73,7 @@ static void Wb35Tx(struct wbsoft_priv *adapter)
u32 SendIndex;
- if (pHwData->SurpriseRemove || pHwData->HwStop)
+ if (pHwData->SurpriseRemove)
goto cleanup;
if (pWb35Tx->tx_halt)
@@ -89,8 +88,8 @@ static void Wb35Tx(struct wbsoft_priv *adapter)
//
// Issuing URB
//
- usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
- usb_sndbulkpipe(pHwData->WbUsb.udev, 4),
+ usb_fill_bulk_urb(pUrb, pHwData->udev,
+ usb_sndbulkpipe(pHwData->udev, 4),
pTxBufferAddress, pMds->TxBufferSize[ SendIndex ],
Wb35Tx_complete, adapter);
@@ -153,16 +152,12 @@ void Wb35Tx_stop(struct hw_data * pHwData)
// Try to cancel the Irp of EP2
if (pWb35Tx->EP2vm_state == VM_RUNNING)
usb_unlink_urb( pWb35Tx->Tx2Urb ); // Only use unlink, let Wb35Tx_destroy free them
- #ifdef _PE_TX_DUMP_
- printk("EP2 Tx stop\n");
- #endif
+ pr_debug("EP2 Tx stop\n");
// Try to cancel the Irp of EP4
if (pWb35Tx->EP4vm_state == VM_RUNNING)
usb_unlink_urb( pWb35Tx->Tx4Urb ); // Only use unlink, let Wb35Tx_destroy free them
- #ifdef _PE_TX_DUMP_
- printk("EP4 Tx stop\n");
- #endif
+ pr_debug("EP4 Tx stop\n");
}
//======================================================
@@ -182,9 +177,7 @@ void Wb35Tx_destroy(struct hw_data * pHwData)
if (pWb35Tx->Tx2Urb)
usb_free_urb( pWb35Tx->Tx2Urb );
- #ifdef _PE_TX_DUMP_
- printk("Wb35Tx_destroy OK\n");
- #endif
+ pr_debug("Wb35Tx_destroy OK\n");
}
void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 TimeCount)
@@ -222,7 +215,7 @@ static void Wb35Tx_EP2VM_complete(struct urb * pUrb)
pWb35Tx->EP2VM_status = pUrb->status;
// For Linux 2.4. Interrupt will always trigger
- if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+ if (pHwData->SurpriseRemove) // Let WbWlanHalt to handle surprise remove
goto error;
if (pWb35Tx->tx_halt)
@@ -263,7 +256,7 @@ static void Wb35Tx_EP2VM(struct wbsoft_priv *adapter)
u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
int retv;
- if (pHwData->SurpriseRemove || pHwData->HwStop)
+ if (pHwData->SurpriseRemove)
goto error;
if (pWb35Tx->tx_halt)
@@ -272,16 +265,14 @@ static void Wb35Tx_EP2VM(struct wbsoft_priv *adapter)
//
// Issuing URB
//
- usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
+ usb_fill_int_urb( pUrb, pHwData->udev, usb_rcvintpipe(pHwData->udev,2),
pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, adapter, 32);
pWb35Tx->EP2vm_state = VM_RUNNING;
retv = usb_submit_urb(pUrb, GFP_ATOMIC);
if (retv < 0) {
- #ifdef _PE_TX_DUMP_
- printk("EP2 Tx Irp sending error\n");
- #endif
+ pr_debug("EP2 Tx Irp sending error\n");
goto error;
}
diff --git a/drivers/staging/winbond/wb35tx_f.h b/drivers/staging/winbond/wb35tx_f.h
index 1d3b515f83bc..018fd35e815d 100644
--- a/drivers/staging/winbond/wb35tx_f.h
+++ b/drivers/staging/winbond/wb35tx_f.h
@@ -2,7 +2,6 @@
#define __WINBOND_WB35TX_F_H
#include "core.h"
-#include "wbhal_f.h"
/*
* ====================================
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal.h
index 821a1b3f1301..39e84a0d9729 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal.h
@@ -342,9 +342,6 @@ struct wb35_descriptor { /* Skip length = 8 DWORD */
void *buffer_address[MAX_DESCRIPTOR_BUFFER_INDEX];
};
-
-#define DEFAULT_NULL_PACKET_COUNT 180000 /* 180 seconds */
-
#define MAX_TXVGA_EEPROM 9 /* How many word(u16) of EEPROM will be used for TxVGA */
#define MAX_RF_PARAMETER 32
@@ -359,7 +356,6 @@ struct txvga_for_50 {
* ==============================================
*/
-#include "wbusb_s.h"
#include "wb35reg_s.h"
#include "wb35tx_s.h"
#include "wb35rx_s.h"
@@ -440,7 +436,7 @@ struct hw_data {
* Variable for each module
* ==================================================
*/
- struct wb_usb WbUsb; /* Need WbUsb.h */
+ struct usb_device *udev;
struct wb35_reg reg; /* Need Wb35Reg.h */
struct wb35_tx Wb35Tx; /* Need Wb35Tx.h */
struct wb35_rx Wb35Rx; /* Need Wb35Rx.h */
@@ -510,16 +506,8 @@ struct hw_data {
u32 RxByteCountLast;
u32 TxByteCountLast;
- atomic_t SurpriseRemoveCount;
-
/* For global timer */
u32 time_count; /* TICK_TIME_100ms 1 = 100ms */
-
- /* For error recover */
- u32 HwStop;
-
- /* For avoid AP disconnect */
- u32 NullPacketCount;
};
#endif
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
deleted file mode 100644
index fc78c14ae583..000000000000
--- a/drivers/staging/winbond/wbhal_f.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * =====================================================================
- * Device related include
- * =====================================================================
-*/
-#include "wb35reg_f.h"
-#include "wb35tx_f.h"
-#include "wb35rx_f.h"
-
-#include "core.h"
-
-/* =====================================================================
- * Function declaration
- * =====================================================================
- */
-void hal_remove_mapping_key(struct hw_data *hw_data, u8 *mac_addr);
-void hal_remove_default_key(struct hw_data *hw_data, u32 index);
-unsigned char hal_set_mapping_key(struct hw_data *adapter, u8 *mac_addr,
- u8 null_key, u8 wep_on, u8 *tx_tsc,
- u8 *rx_tsc, u8 key_type, u8 key_len,
- u8 *key_data);
-unsigned char hal_set_default_key(struct hw_data *adapter, u8 index,
- u8 null_key, u8 wep_on, u8 *tx_tsc,
- u8 *rx_tsc, u8 key_type, u8 key_len,
- u8 *key_data);
-void hal_clear_all_default_key(struct hw_data *hw_data);
-void hal_clear_all_group_key(struct hw_data *hw_data);
-void hal_clear_all_mapping_key(struct hw_data *hw_data);
-void hal_clear_all_key(struct hw_data *hw_data);
-void hal_set_power_save_mode(struct hw_data *hw_data, unsigned char power_save,
- unsigned char wakeup, unsigned char dtim);
-void hal_get_power_save_mode(struct hw_data *hw_data, u8 *in_pwr_save);
-void hal_set_slot_time(struct hw_data *hw_data, u8 type);
-
-#define hal_set_atim_window(_A, _ATM)
-
-void hal_start_bss(struct hw_data *hw_data, u8 mac_op_mode);
-
-/* 0:BSS STA 1:IBSS STA */
-void hal_join_request(struct hw_data *hw_data, u8 bss_type);
-
-void hal_stop_sync_bss(struct hw_data *hw_data);
-void hal_resume_sync_bss(struct hw_data *hw_data);
-void hal_set_aid(struct hw_data *hw_data, u16 aid);
-void hal_set_bssid(struct hw_data *hw_data, u8 *bssid);
-void hal_get_bssid(struct hw_data *hw_data, u8 *bssid);
-void hal_set_listen_interval(struct hw_data *hw_data, u16 listen_interval);
-void hal_set_cap_info(struct hw_data *hw_data, u16 capability_info);
-void hal_set_ssid(struct hw_data *hw_data, u8 *ssid, u8 ssid_len);
-void hal_start_tx0(struct hw_data *hw_data);
-
-#define hal_get_cwmin(_A) ((_A)->cwmin)
-
-void hal_set_cwmax(struct hw_data *hw_data, u16 cwin_max);
-
-#define hal_get_cwmax(_A) ((_A)->cwmax)
-
-void hal_set_rsn_wpa(struct hw_data *hw_data, u32 *rsn_ie_bitmap,
- u32 *rsn_oui_type , unsigned char desired_auth_mode);
-void hal_set_connect_info(struct hw_data *hw_data, unsigned char bo_connect);
-u8 hal_get_est_sq3(struct hw_data *hw_data, u8 count);
-void hal_descriptor_indicate(struct hw_data *hw_data,
- struct wb35_descriptor *des);
-u8 hal_get_antenna_number(struct hw_data *hw_data);
-u32 hal_get_bss_pk_cnt(struct hw_data *hw_data);
-
-#define hal_get_region_from_EEPROM(_A) ((_A)->reg.EEPROMRegion)
-#define hal_get_tx_buffer(_A, _B) Wb35Tx_get_tx_buffer(_A, _B)
-#define hal_software_set(_A) (_A->SoftwareSet)
-#define hal_driver_init_OK(_A) (_A->IsInitOK)
-#define hal_rssi_boundary_high(_A) (_A->RSSI_high)
-#define hal_rssi_boundary_low(_A) (_A->RSSI_low)
-#define hal_scan_interval(_A) (_A->Scan_Interval)
-
-#define PHY_DEBUG(msg, args...)
-
-/* return 100ms count */
-#define hal_get_time_count(_P) (_P->time_count / 10)
-#define hal_detect_error(_P) (_P->WbUsb.DetectCount)
-
-#define hal_ibss_disconnect(_A) (hal_stop_sync_bss(_A))
diff --git a/drivers/staging/winbond/wblinux_f.h b/drivers/staging/winbond/wblinux_f.h
deleted file mode 100644
index 0a9d214f7187..000000000000
--- a/drivers/staging/winbond/wblinux_f.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __WBLINUX_F_H
-#define __WBLINUX_F_H
-
-#include "core.h"
-#include "mds_s.h"
-
-/*
- * ====================================================================
- * Copyright (c) 1996-2004 Winbond Electronic Corporation
- *
- * wblinux_f.h
- * ====================================================================
- */
-int wb35_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void wb35_set_multicast(struct net_device *netdev);
-struct net_device_stats *wb35_netdev_stats(struct net_device *netdev);
-#endif
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3f60cf7e6ec1..2163d60c2eaf 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -14,10 +14,11 @@
#include "core.h"
#include "mds_f.h"
-#include "mlmetxrx_f.h"
#include "mto.h"
-#include "wbhal_f.h"
-#include "wblinux_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "wb35tx_f.h"
+#include "wb35rx_f.h"
MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
MODULE_LICENSE("GPL");
@@ -181,10 +182,7 @@ static void hal_set_current_channel_ex(struct hw_data *pHwData, struct chan_info
RFSynthesizer_SwitchingChannel(pHwData, channel); /* Switch channel */
pHwData->Channel = channel.ChanNo;
pHwData->band = channel.band;
-#ifdef _PE_STATE_DUMP_
- printk("Set channel is %d, band =%d\n", pHwData->Channel,
- pHwData->band);
-#endif
+ pr_debug("Set channel is %d, band =%d\n", pHwData->Channel, pHwData->band);
reg->M28_MacControl &= ~0xff; /* Clean channel information field */
reg->M28_MacControl |= channel.ChanNo;
Wb35Reg_WriteWithCallbackValue(pHwData, 0x0828, reg->M28_MacControl,
@@ -339,10 +337,8 @@ static void hal_stop(struct hw_data *pHwData)
static unsigned char hal_idle(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
- struct wb_usb *pWbUsb = &pHwData->WbUsb;
- if (!pHwData->SurpriseRemove
- && (pWbUsb->DetectCount || reg->EP0vm_state != VM_STOP))
+ if (!pHwData->SurpriseRemove && reg->EP0vm_state != VM_STOP)
return false;
return true;
@@ -608,15 +604,6 @@ static void hal_led_control(unsigned long data)
}
break;
}
-
- /* Active send null packet to avoid AP disconnect */
- if (pHwData->LED_LinkOn) {
- pHwData->NullPacketCount += TimeInterval;
- if (pHwData->NullPacketCount >=
- DEFAULT_NULL_PACKET_COUNT) {
- pHwData->NullPacketCount = 0;
- }
- }
}
pHwData->time_count += TimeInterval;
@@ -651,13 +638,6 @@ static int hal_init_hardware(struct ieee80211_hw *hw)
SoftwareSet = hal_software_set(pHwData);
-#ifdef Vendor2
- /* Try to make sure the EEPROM contain */
- SoftwareSet >>= 8;
- if (SoftwareSet != 0x82)
- return false;
-#endif
-
Wb35Rx_start(hw);
Wb35Tx_EP2VM_start(priv);
@@ -734,9 +714,7 @@ static int wb35_hw_init(struct ieee80211_hw *hw)
}
priv->sLocalPara.bAntennaNo = hal_get_antenna_number(pHwData);
-#ifdef _PE_STATE_DUMP_
- printk("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo);
-#endif
+ pr_debug("Driver init, antenna no = %d\n", priv->sLocalPara.bAntennaNo);
hal_get_hw_radio_off(pHwData);
/* Waiting for HAL setting OK */
@@ -769,7 +747,6 @@ static int wb35_probe(struct usb_interface *intf,
struct usb_host_interface *interface;
struct ieee80211_hw *dev;
struct wbsoft_priv *priv;
- struct wb_usb *pWbUsb;
int nr, err;
u32 ltmp;
@@ -800,16 +777,13 @@ static int wb35_probe(struct usb_interface *intf,
priv = dev->priv;
- pWbUsb = &priv->sHwData.WbUsb;
- pWbUsb->udev = udev;
+ priv->sHwData.udev = udev;
interface = intf->cur_altsetting;
endpoint = &interface->endpoint[0].desc;
- if (endpoint[2].wMaxPacketSize == 512) {
+ if (endpoint[2].wMaxPacketSize == 512)
printk("[w35und] Working on USB 2.0\n");
- pWbUsb->IsUsb20 = 1;
- }
err = wb35_hw_init(dev);
if (err)
@@ -860,13 +834,9 @@ static void hal_halt(struct hw_data *pHwData)
static void wb35_hw_halt(struct wbsoft_priv *adapter)
{
- Mds_Destroy(adapter);
-
/* Turn off Rx and Tx hardware ability */
hal_stop(&adapter->sHwData);
-#ifdef _PE_USB_INI_DUMP_
- printk("[w35und] Hal_stop O.K.\n");
-#endif
+ pr_debug("[w35und] Hal_stop O.K.\n");
/* Waiting Irp completed */
msleep(100);
diff --git a/drivers/staging/winbond/wbusb_s.h b/drivers/staging/winbond/wbusb_s.h
deleted file mode 100644
index 8961ae594c4e..000000000000
--- a/drivers/staging/winbond/wbusb_s.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* =========================================================
- * Copyright (c) 1996-2004 Winbond Electronic Corporation
- *
- * Module Name:
- * wbusb_s.h
- * =========================================================
- */
-#ifndef __WINBOND_WBUSB_S_H
-#define __WINBOND_WBUSB_S_H
-
-#include <linux/types.h>
-
-struct wb_usb {
- u32 IsUsb20;
- struct usb_device *udev;
- u32 DetectCount;
-};
-#endif
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 020b17adee2d..28ae9dd1b44e 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -458,7 +458,7 @@ void __devexit wl_pci_remove(struct pci_dev *pdev)
return;
}
- dev = (struct net_device *)pci_get_drvdata( pdev );
+ dev = pci_get_drvdata( pdev );
if( dev == NULL ) {
DBG_ERROR( DbgInfo, "Could not retrieve net_device structure\n" );
return;
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index 7a1337db7aa1..a459e48c7bf0 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -248,7 +248,7 @@ void parse_config(struct net_device *dev)
} else {
DBG_TRACE(DbgInfo, "F/W image file found\n");
#define DHF_ALLOC_SIZE 96000 /* just below 96K, let's hope it suffices for now and for the future */
- cp = (char *)vmalloc(DHF_ALLOC_SIZE);
+ cp = vmalloc(DHF_ALLOC_SIZE);
if (cp == NULL) {
DBG_ERROR(DbgInfo, "error in vmalloc\n");
} else {
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
index e4c8804ac37d..9b833b30ae62 100644
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ b/drivers/staging/wlags49_h2/wl_sysfs.c
@@ -42,7 +42,7 @@ static ssize_t show_tallies(struct device *d, struct device_attribute *attr,
CFG_HERMES_TALLIES_STRCT tallies;
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(dev)) {
wl_lock(lp, &flags);
@@ -102,7 +102,7 @@ static ssize_t show_tallies(struct device *d, struct device_attribute *attr,
}
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 83879f9a0b7d..146f3651b6f2 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -148,7 +148,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on SNAP */
e_snap =
- (struct wlan_snap *) skb_push(skb, sizeof(struct wlan_snap));
+ (struct wlan_snap *) skb_push(skb,
+ sizeof(struct wlan_snap));
e_snap->type = htons(proto);
if (ethconv == WLAN_ETHCONV_8021h
&& p80211_stt_findproto(proto)) {
@@ -161,7 +162,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on llc */
e_llc =
- (struct wlan_llc *) skb_push(skb, sizeof(struct wlan_llc));
+ (struct wlan_llc *) skb_push(skb,
+ sizeof(struct wlan_llc));
e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */
e_llc->ssap = 0xAA;
e_llc->ctl = 0x03;
@@ -297,10 +299,12 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
- } else if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 1)) {
+ } else if ((WLAN_GET_FC_TODS(fc) == 0)
+ && (WLAN_GET_FC_FROMDS(fc) == 1)) {
memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
- } else if ((WLAN_GET_FC_TODS(fc) == 1) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
+ } else if ((WLAN_GET_FC_TODS(fc) == 1)
+ && (WLAN_GET_FC_FROMDS(fc) == 0)) {
memcpy(daddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
} else {
@@ -349,7 +353,8 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
e_llc = (struct wlan_llc *) (skb->data + payload_offset);
e_snap =
- (struct wlan_snap *) (skb->data + payload_offset + sizeof(struct wlan_llc));
+ (struct wlan_snap *) (skb->data + payload_offset +
+ sizeof(struct wlan_llc));
/* Test for the various encodings */
if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
@@ -372,9 +377,11 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
- } else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
- && (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
- && (e_llc->ctl == 0x03)
+ } else if ((payload_length >= sizeof(struct wlan_llc) +
+ sizeof(struct wlan_snap))
+ &&(e_llc->dsap == 0xaa)
+ && (e_llc->ssap == 0xaa)
+ && (e_llc->ctl == 0x03)
&&
(((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0)
&& (ethconv == WLAN_ETHCONV_8021h)
@@ -406,21 +413,25 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
- } else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
- && (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
- && (e_llc->ctl == 0x03)) {
+ } else if ((payload_length >= sizeof(struct wlan_llc) +
+ sizeof(struct wlan_snap))
+ &&(e_llc->dsap == 0xaa)
+ && (e_llc->ssap == 0xaa)
+ && (e_llc->ctl == 0x03)) {
pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
- /* it's an 802.1h frame || (an RFC1042 && protocol is not in STT) */
- /* build a DIXII + RFC894 */
+ /* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
+ build a DIXII + RFC894 */
/* Test for an overlength frame */
- if ((payload_length - sizeof(struct wlan_llc) - sizeof(struct wlan_snap))
- > netdev->mtu) {
+ if ((payload_length - sizeof(struct wlan_llc) -
+ sizeof(struct wlan_snap))
+ > netdev->mtu) {
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
printk(KERN_ERR "DIXII frame too large (%ld > %d)\n",
- (long int)(payload_length - sizeof(struct wlan_llc) -
- sizeof(struct wlan_snap)), netdev->mtu);
+ (long int)(payload_length -
+ sizeof(struct wlan_llc) -
+ sizeof(struct wlan_snap)), netdev->mtu);
return 1;
}
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index eca0391c676f..ea493aa74f00 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -66,12 +66,14 @@
#define P80211_FRMMETA_MAGIC 0x802110
#define P80211SKB_FRMMETA(s) \
- (((((struct p80211_frmmeta *)((s)->cb))->magic) == P80211_FRMMETA_MAGIC) ? \
+ (((((struct p80211_frmmeta *)((s)->cb))->magic) == \
+ P80211_FRMMETA_MAGIC) ? \
((struct p80211_frmmeta *)((s)->cb)) : \
(NULL))
#define P80211SKB_RXMETA(s) \
- (P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : ((struct p80211_rxmeta *)(NULL)))
+ (P80211SKB_FRMMETA((s)) ? P80211SKB_FRMMETA((s))->rx : \
+ ((struct p80211_rxmeta *)(NULL)))
struct p80211_rxmeta {
struct wlandevice *wlandev;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index b7b4a733b467..b0af292bc7e3 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -301,7 +301,8 @@ static void p80211netdev_rx_bh(unsigned long arg)
if (memcmp
(hdr->a1, wlandev->netdev->dev_addr,
ETH_ALEN) != 0) {
- /* but reject anything else that isn't multicast */
+ /* but reject anything else that
+ isn't multicast */
if (!(hdr->a1[0] & 0x01)) {
dev_kfree_skb(skb);
continue;
@@ -770,7 +771,8 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
}
/* Allocate and initialize the struct device */
- netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d", ether_setup);
+ netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
+ ether_setup);
if (netdev == NULL) {
printk(KERN_ERR "Failed to alloc netdev.\n");
wlan_free_wiphy(wiphy);
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1ec33740f10f..85884176b661 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -94,11 +94,11 @@ typedef struct net_device netdevice_t;
#endif
/*--- NSD Capabilities Flags ------------------------------*/
-#define P80211_NSDCAP_HARDWAREWEP 0x01 /* hardware wep engine */
-#define P80211_NSDCAP_SHORT_PREAMBLE 0x10 /* hardware supports */
-#define P80211_NSDCAP_HWFRAGMENT 0x80 /* nsd handles frag/defrag */
-#define P80211_NSDCAP_AUTOJOIN 0x100 /* nsd does autojoin */
-#define P80211_NSDCAP_NOSCAN 0x200 /* nsd can scan */
+#define P80211_NSDCAP_HARDWAREWEP 0x01 /* hardware wep engine */
+#define P80211_NSDCAP_SHORT_PREAMBLE 0x10 /* hardware supports */
+#define P80211_NSDCAP_HWFRAGMENT 0x80 /* nsd handles frag/defrag */
+#define P80211_NSDCAP_AUTOJOIN 0x100 /* nsd does autojoin */
+#define P80211_NSDCAP_NOSCAN 0x200 /* nsd can scan */
/* Received frame statistics */
typedef struct p80211_frmrx_t {
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 41a99c59c6c5..9dec8596f451 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -141,14 +141,14 @@
#define P80211DID_LSB_ITEM (12)
#define P80211DID_LSB_INDEX (18)
#define P80211DID_LSB_ISTABLE (26)
-#define P80211DID_LSB_ACCESS (27)
+#define P80211DID_LSB_ACCESS (27)
#define P80211DID_MASK_SECTION (0x0000003fUL)
#define P80211DID_MASK_GROUP (0x0000003fUL)
#define P80211DID_MASK_ITEM (0x0000003fUL)
#define P80211DID_MASK_INDEX (0x000000ffUL)
#define P80211DID_MASK_ISTABLE (0x00000001UL)
-#define P80211DID_MASK_ACCESS (0x00000003UL)
+#define P80211DID_MASK_ACCESS (0x00000003UL)
#define P80211DID_MK(a, m, l) ((((u32)(a)) & (m)) << (l))
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 04514a85d101..6675c8226cef 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -213,8 +213,8 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
u16 wordbuf[17];
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CNFROAMINGMODE,
- HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
+ HFA384x_RID_CNFROAMINGMODE,
+ HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
if (result) {
printk(KERN_ERR
"setconfig(ROAMINGMODE) failed. result=%d\n",
@@ -258,8 +258,8 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
}
/* ibss options */
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CREATEIBSS,
- HFA384x_CREATEIBSS_JOINCREATEIBSS);
+ HFA384x_RID_CREATEIBSS,
+ HFA384x_CREATEIBSS_JOINCREATEIBSS);
if (result) {
printk(KERN_ERR "Failed to set CREATEIBSS.\n");
msg->resultcode.data =
@@ -416,7 +416,8 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
#define REQBASICRATE(N) \
if ((count >= N) && DOT11_RATE5_ISBASIC_GET(item->supprates[(N)-1])) { \
req->basicrate ## N .data = item->supprates[(N)-1]; \
- req->basicrate ## N .status = P80211ENUM_msgitem_status_data_ok; \
+ req->basicrate ## N .status = \
+ P80211ENUM_msgitem_status_data_ok; \
}
REQBASICRATE(1);
@@ -431,7 +432,8 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
#define REQSUPPRATE(N) \
if (count >= N) { \
req->supprate ## N .data = item->supprates[(N)-1]; \
- req->supprate ## N .status = P80211ENUM_msgitem_status_data_ok; \
+ req->supprate ## N .status = \
+ P80211ENUM_msgitem_status_data_ok; \
}
REQSUPPRATE(1);
@@ -1102,7 +1104,7 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
result = hfa384x_drvr_disable(hw, 0);
if (result) {
pr_debug
- ("failed to disable port 0 after sniffing, result=%d\n",
+ ("failed to disable port 0 after sniffing, result=%d\n",
result);
goto failed;
}
@@ -1137,7 +1139,7 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
result = hfa384x_drvr_enable(hw, 0);
if (result) {
pr_debug
- ("failed to enable port to presniff setting, result=%d\n",
+ ("failed to enable port to presniff setting, result=%d\n",
result);
goto failed;
}
@@ -1161,7 +1163,7 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
&(hw->presniff_port_type));
if (result) {
pr_debug
- ("failed to read porttype, result=%d\n",
+ ("failed to read porttype, result=%d\n",
result);
goto failed;
}
@@ -1171,7 +1173,7 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
&(hw->presniff_wepflags));
if (result) {
pr_debug
- ("failed to read wepflags, result=%d\n",
+ ("failed to read wepflags, result=%d\n",
result);
goto failed;
}
@@ -1238,8 +1240,8 @@ int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
if (result) {
pr_debug
- ("failed to set wepflags=0x%04x, result=%d\n",
- word, result);
+ ("failed to set wepflags=0x%04x, result=%d\n",
+ word, result);
goto failed;
}
}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 4f73d095c3ac..ee008e5a0cbc 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -472,9 +472,11 @@ unsigned char XGIfb_query_north_bridge_space(struct xgi_hw_device_info *pXGIhw_e
break;
}
- pdev = pci_find_device(PCI_VENDOR_ID_SI, nbridge_id, pdev);
- if (pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_SI, nbridge_id, pdev);
+ if (pdev) {
valid_pdev = 1;
+ pci_dev_put(pdev);
+ }
}
if (!valid_pdev) {
@@ -2178,8 +2180,7 @@ static int XGIfb_heap_init(void)
#ifndef AGPOFF
if (XGIfb_queuemode == AGP_CMD_QUEUE) {
- agp_info = vmalloc(sizeof(*agp_info));
- memset((void *)agp_info, 0x00, sizeof(*agp_info));
+ agp_info = vzalloc(sizeof(*agp_info));
agp_copy_info(agp_info);
agp_backend_acquire();
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 7016fdd2509f..e19b932492e1 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -3954,8 +3954,8 @@ void XGI_GetCRT2ResInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
- if ((((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA))
- && (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+ if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+ (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
return 1;
return 0;
@@ -8773,7 +8773,7 @@ unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
if (pVBInfo->IF_DEF_LVDS == 0) {
CRT2Index = CRT2Index >> 6; /* for LCD */
- if (((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) { /*301b*/
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
if (pVBInfo->LCDResInfo != Panel1024x768)
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index 3fdbb8ada827..b64406739d05 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
@@ -187,7 +187,7 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
slindex = get_index_for_insert(block->size);
flindex = slindex / BITS_PER_LONG;
- block->link.prev_page = 0;
+ block->link.prev_page = NULL;
block->link.prev_offset = 0;
block->link.next_page = pool->freelist[slindex].page;
block->link.next_offset = pool->freelist[slindex].offset;
@@ -217,7 +217,7 @@ static void remove_block_head(struct xv_pool *pool,
pool->freelist[slindex].page = block->link.next_page;
pool->freelist[slindex].offset = block->link.next_offset;
- block->link.prev_page = 0;
+ block->link.prev_page = NULL;
block->link.prev_offset = 0;
if (!pool->freelist[slindex].page) {
@@ -232,7 +232,7 @@ static void remove_block_head(struct xv_pool *pool,
*/
tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
pool->freelist[slindex].offset, KM_USER1);
- tmpblock->link.prev_page = 0;
+ tmpblock->link.prev_page = NULL;
tmpblock->link.prev_offset = 0;
put_ptr_atomic(tmpblock, KM_USER1);
}
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index d0e9e0207539..4bd8cbdaee76 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
if (zram_test_flag(zram, index, ZRAM_ZERO)) {
handle_zero_page(page);
+ index++;
continue;
}
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
pr_debug("Read before write: sector=%lu, size=%u",
(ulong)(bio->bi_sector), bio->bi_size);
/* Do nothing */
+ index++;
continue;
}
/* Page is stored uncompressed since it's incompressible */
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
handle_uncompressed_page(zram, page, index);
+ index++;
continue;
}
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
mutex_unlock(&zram->lock);
zram_stat_inc(&zram->stats.pages_zero);
zram_set_flag(zram, index, ZRAM_ZERO);
+ index++;
continue;
}
@@ -527,7 +531,7 @@ int zram_init_device(struct zram *zram)
}
num_pages = zram->disksize >> PAGE_SHIFT;
- zram->table = vmalloc(num_pages * sizeof(*zram->table));
+ zram->table = vzalloc(num_pages * sizeof(*zram->table));
if (!zram->table) {
pr_err("Error allocating zram address table\n");
/* To prevent accessing table entries during cleanup */
@@ -535,7 +539,6 @@ int zram_init_device(struct zram *zram)
ret = -ENOMEM;
goto fail;
}
- memset(zram->table, 0, num_pages * sizeof(*zram->table));
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 000000000000..2fac3be209ac
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
+
+menuconfig TARGET_CORE
+ tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+ depends on SCSI && BLOCK
+ select CONFIGFS_FS
+ default n
+ help
+ Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+ control path for target_core_mod. This includes built-in TCM RAMDISK
+ subsystem logic for virtual LUN 0 access
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+ tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ help
+ Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+ access to Linux/Block devices using BIO
+
+config TCM_FILEIO
+ tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+ help
+ Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+ access to Linux/VFS struct file or struct block_device
+
+config TCM_PSCSI
+ tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ help
+ Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+ passthrough access to Linux/SCSI device
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 000000000000..973bb190ef57
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,23 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
+
+target_core_mod-y := target_core_configfs.o \
+ target_core_device.o \
+ target_core_fabric_configfs.o \
+ target_core_fabric_lib.o \
+ target_core_hba.o \
+ target_core_pr.o \
+ target_core_alua.o \
+ target_core_scdb.o \
+ target_core_tmr.o \
+ target_core_tpg.o \
+ target_core_transport.o \
+ target_core_cdb.o \
+ target_core_ua.o \
+ target_core_rd.o
+
+obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
+obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 000000000000..2c5fcfed5934
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+ * Filename: target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port, int explict, int offline);
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+ struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_port *port;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+ Target port group descriptor */
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ /*
+ * PREF: Preferred target port bit, determine if this
+ * bit should be set for port group.
+ */
+ if (tg_pt_gp->tg_pt_gp_pref)
+ buf[off] = 0x80;
+ /*
+ * Set the ASYMMETRIC ACCESS State
+ */
+ buf[off++] |= (atomic_read(
+ &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+ /*
+ * Set supported ASYMMETRIC ACCESS State bits
+ */
+ buf[off] = 0x80; /* T_SUP */
+ buf[off] |= 0x40; /* O_SUP */
+ buf[off] |= 0x8; /* U_SUP */
+ buf[off] |= 0x4; /* S_SUP */
+ buf[off] |= 0x2; /* AN_SUP */
+ buf[off++] |= 0x1; /* AO_SUP */
+ /*
+ * TARGET PORT GROUP
+ */
+ buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+ buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+ off++; /* Skip over Reserved */
+ /*
+ * STATUS CODE
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+ /*
+ * Vendor Specific field
+ */
+ buf[off++] = 0x00;
+ /*
+ * TARGET PORT COUNT
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+ rd_len += 8;
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = tg_pt_gp_mem->tg_pt;
+ /*
+ * Start Target Port descriptor format
+ *
+ * See spc4r17 section 6.2.7 Table 247
+ */
+ off += 2; /* Skip over Obsolete */
+ /*
+ * Set RELATIVE TARGET PORT IDENTIFIER
+ */
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ rd_len += 4;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ buf[0] = ((rd_len >> 24) & 0xff);
+ buf[1] = ((rd_len >> 16) & 0xff);
+ buf[2] = ((rd_len >> 8) & 0xff);
+ buf[3] = (rd_len & 0xff);
+
+ return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
+ struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+ u32 len = 4; /* Skip over RESERVED area in header */
+ int alua_access_state, primary = 0, rc;
+ u16 tg_pt_id, rtpi;
+
+ if (!(l_port))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ /*
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * for the local tg_pt_gp.
+ */
+ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+ if (!(l_tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+ if (!(l_tg_pt_gp)) {
+ spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+ spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ if (!(rc)) {
+ printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+ " while TPGS_EXPLICT_ALUA is disabled\n");
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
+ while (len < cmd->data_length) {
+ alua_access_state = (ptr[0] & 0x0f);
+ /*
+ * Check the received ALUA access state, and determine if
+ * the state is a primary or secondary target port asymmetric
+ * access state.
+ */
+ rc = core_alua_check_transition(alua_access_state, &primary);
+ if (rc != 0) {
+ /*
+ * If the SET TARGET PORT GROUPS attempts to establish
+ * an invalid combination of target port asymmetric
+ * access states or attempts to establish an
+ * unsupported target port asymmetric access state,
+ * then the command shall be terminated with CHECK
+ * CONDITION status, with the sense key set to ILLEGAL
+ * REQUEST, and the additional sense code set to INVALID
+ * FIELD IN PARAMETER LIST.
+ */
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ rc = -1;
+ /*
+ * If the ASYMMETRIC ACCESS STATE field (see table 267)
+ * specifies a primary target port asymmetric access state,
+ * then the TARGET PORT GROUP OR TARGET PORT field specifies
+ * a primary target port group for which the primary target
+ * port asymmetric access state shall be changed. If the
+ * ASYMMETRIC ACCESS STATE field specifies a secondary target
+ * port asymmetric access state, then the TARGET PORT GROUP OR
+ * TARGET PORT field specifies the relative target port
+ * identifier (see 3.1.120) of the target port for which the
+ * secondary target port asymmetric access state shall be
+ * changed.
+ */
+ if (primary) {
+ tg_pt_id = ((ptr[2] << 8) & 0xff00);
+ tg_pt_id |= (ptr[3] & 0xff);
+ /*
+ * Locate the matching target port group ID from
+ * the global tg_pt_gp list
+ */
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp,
+ &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+
+ if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+ continue;
+
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ rc = core_alua_do_port_transition(tg_pt_gp,
+ dev, l_port, nacl,
+ alua_access_state, 1);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ break;
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * If no matching target port group ID can be located,
+ * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+ */
+ if (rc != 0)
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ } else {
+ /*
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+ * the Target Port in question for the incoming
+ * SET_TARGET_PORT_GROUPS op.
+ */
+ rtpi = ((ptr[2] << 8) & 0xff00);
+ rtpi |= (ptr[3] & 0xff);
+ /*
+ * Locate the matching relative target port identifier
+ * for the struct se_device storage object.
+ */
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(port, &dev->dev_sep_list,
+ sep_list) {
+ if (port->sep_rtpi != rtpi)
+ continue;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ spin_unlock(&dev->se_port_lock);
+
+ rc = core_alua_set_tg_pt_secondary_state(
+ tg_pt_gp_mem, port, 1, 1);
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+ /*
+ * If no matching relative target port identifier can
+ * be located, throw an exception with ASCQ:
+ * INVALID_PARAMETER_LIST
+ */
+ if (rc != 0)
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+
+ ptr += 4;
+ len += 4;
+ }
+
+ return 0;
+}
+
+static inline int core_alua_state_nonoptimized(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ int nonop_delay_msecs,
+ u8 *alua_ascq)
+{
+ /*
+ * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+ * later to determine if processing of this cmd needs to be
+ * temporarily delayed for the Active/NonOptimized primary access state.
+ */
+ cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+ cmd->alua_nonop_delay = nonop_delay_msecs;
+ return 0;
+}
+
+static inline int core_alua_state_standby(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+ * spc4r17 section 5.9.2.4.4
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case LOG_SELECT:
+ case LOG_SENSE:
+ case MODE_SELECT:
+ case MODE_SENSE:
+ case REPORT_LUNS:
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int core_alua_state_unavailable(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+ * spc4r17 section 5.9.2.4.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int core_alua_state_transition(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+ * spc4r17 section 5.9.2.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case MAINTENANCE_IN:
+ switch (cdb[1]) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ return 1;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer(). This function is assigned to
+ * struct t10_alua *->state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and an invalid cdb field
+ */
+static int core_alua_state_check(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u8 *alua_ascq)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_port *port = lun->lun_sep;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ int out_alua_state, nonop_delay_msecs;
+
+ if (!(port))
+ return 0;
+ /*
+ * First, check for a struct se_port specific secondary ALUA target port
+ * access state: OFFLINE
+ */
+ if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+ *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+ printk(KERN_INFO "ALUA: Got secondary offline status for local"
+ " target port\n");
+ *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+ return 1;
+ }
+ /*
+ * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+ * ALUA target port group, to obtain current ALUA access state.
+ * Otherwise look for the underlying struct se_device association with
+ * a ALUA logical unit group.
+ */
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ /*
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * statement so the compiler knows explicitly to check this case first.
+ * For the Optimized ALUA access state case, we want to process the
+ * incoming fabric cmd ASAP.
+ */
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ return 0;
+
+ switch (out_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ return core_alua_state_nonoptimized(cmd, cdb,
+ nonop_delay_msecs, alua_ascq);
+ case ALUA_ACCESS_STATE_STANDBY:
+ return core_alua_state_standby(cmd, cdb, alua_ascq);
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return core_alua_state_transition(cmd, cdb, alua_ascq);
+ /*
+ * OFFLINE is a secondary ALUA target port group access state, that is
+ * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+ */
+ case ALUA_ACCESS_STATE_OFFLINE:
+ default:
+ printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+ out_alua_state);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change requests.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+ switch (state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ case ALUA_ACCESS_STATE_STANDBY:
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_OFFLINE:
+ /*
+ * OFFLINE state is defined as a secondary target port
+ * asymmetric access state.
+ */
+ *primary = 0;
+ break;
+ default:
+ printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+ return -1;
+ }
+
+ return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+ switch (state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ return "Active/Optimized";
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_STANDBY:
+ return "Standby";
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ return "Unavailable";
+ case ALUA_ACCESS_STATE_OFFLINE:
+ return "Offline";
+ default:
+ return "Unknown";
+ }
+
+ return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+ switch (status) {
+ case ALUA_STATUS_NONE:
+ return "None";
+ case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+ return "Altered by Explict STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+ return "Altered by Implict ALUA";
+ default:
+ return "Unknown";
+ }
+
+ return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths.
+ */
+int core_alua_check_nonop_delay(
+ struct se_cmd *cmd)
+{
+ if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+ return 0;
+ if (in_interrupt())
+ return 0;
+ /*
+ * The ALUA Active/NonOptimized access state delay can be disabled
+ * via configfs with a value of zero.
+ */
+ if (!(cmd->alua_nonop_delay))
+ return 0;
+ /*
+ * struct se_cmd->alua_nonop_delay gets set by a target port group
+ * defined interval in core_alua_state_nonoptimized()
+ */
+ msleep_interruptible(cmd->alua_nonop_delay);
+ return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
+ *
+ */
+static int core_alua_write_tpg_metadata(
+ const char *path,
+ unsigned char *md_buf,
+ u32 md_buf_len)
+{
+ mm_segment_t old_fs;
+ struct file *file;
+ struct iovec iov[1];
+ int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+ memset(iov, 0, sizeof(struct iovec));
+
+ file = filp_open(path, flags, 0600);
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+ path);
+ return -ENODEV;
+ }
+
+ iov[0].iov_base = &md_buf[0];
+ iov[0].iov_len = md_buf_len;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+ set_fs(old_fs);
+
+ if (ret < 0) {
+ printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+ filp_close(file, NULL);
+ return -EIO;
+ }
+ filp_close(file, NULL);
+
+ return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int primary_state,
+ unsigned char *md_buf)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_wwn *wwn = &su_dev->t10_wwn;
+ char path[ALUA_METADATA_PATH_LEN];
+ int len;
+
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+ len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ "tg_pt_gp_id=%hu\n"
+ "alua_access_state=0x%02x\n"
+ "alua_access_status=0x%02x\n",
+ tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_alua_access_status);
+
+ snprintf(path, ALUA_METADATA_PATH_LEN,
+ "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+ return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ struct se_port *l_port,
+ struct se_node_acl *nacl,
+ unsigned char *md_buf,
+ int new_state,
+ int explict)
+{
+ struct se_dev_entry *se_deve;
+ struct se_lun_acl *lacl;
+ struct se_port *port;
+ struct t10_alua_tg_pt_gp_member *mem;
+ int old_state = 0;
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_TRANSITION);
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = mem->tg_pt;
+ /*
+ * After an implicit target port asymmetric access state
+ * change, a device server shall establish a unit attention
+ * condition for the initiator port associated with every I_T
+ * nexus with the additional sense code set to ASYMMETRIC
+ * ACCESS STATE CHANGED.
+ *
+ * After an explicit target port asymmetric access state
+ * change, a device server shall establish a unit attention
+ * condition with the additional sense code set to ASYMMETRIC
+ * ACCESS STATE CHANGED for the initiator port associated with
+ * every I_T nexus other than the I_T nexus on which the SET
+ * TARGET PORT GROUPS command was received.
+ */
+ atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_for_each_entry(se_deve, &port->sep_alua_list,
+ alua_port_list) {
+ lacl = se_deve->se_lun_acl;
+ /*
+ * se_deve->se_lun_acl pointer may be NULL for an
+ * entry created without explicit Node+MappedLUN ACLs
+ */
+ if (!(lacl))
+ continue;
+
+ if (explict &&
+ (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+ (l_port != NULL) && (l_port == port))
+ continue;
+
+ core_scsi3_ua_allocate(lacl->se_lun_nacl,
+ se_deve->mapped_lun, 0x2A,
+ ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+ }
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ /*
+ * Update the ALUA metadata buf that has been allocated in
+ * core_alua_do_port_transition(), this metadata will be written
+ * to struct file.
+ *
+ * Note that there is the case where we do not want to update the
+ * metadata when the saved metadata is being parsed in userspace
+ * when setting the existing port access state and access status.
+ *
+ * Also note that the failure to write out the ALUA metadata to
+ * struct file does NOT affect the actual ALUA transition.
+ */
+ if (tg_pt_gp->tg_pt_gp_write_metadata) {
+ mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp,
+ new_state, md_buf);
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+ }
+ /*
+ * Set the current primary ALUA access state to the requested new state
+ */
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+ printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " from primary access state %s to %s\n", (explict) ? "explict" :
+ "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+ core_alua_dump_state(new_state));
+
+ return 0;
+}
+
+int core_alua_do_port_transition(
+ struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+ struct se_device *l_dev,
+ struct se_port *l_port,
+ struct se_node_acl *l_nacl,
+ int new_state,
+ int explict)
+{
+ struct se_device *dev;
+ struct se_port *port;
+ struct se_subsystem_dev *su_dev;
+ struct se_node_acl *nacl;
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ unsigned char *md_buf;
+ int primary;
+
+ if (core_alua_check_transition(new_state, &primary) != 0)
+ return -EINVAL;
+
+ md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+ if (!(md_buf)) {
+ printk("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
+
+ local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+ spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = local_lu_gp_mem->lu_gp;
+ atomic_inc(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+ /*
+ * For storage objects that are members of the 'default_lu_gp',
+ * we only do the transition on the passed *l_tg_pt_gp, and not
+ * on all of the matching target port group IDs in default_lu_gp.
+ */
+ if (!(lu_gp->lu_gp_id)) {
+ /*
+ * core_alua_do_transition_tg_pt() will always return
+ * success.
+ */
+ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+ md_buf, new_state, explict);
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ kfree(md_buf);
+ return 0;
+ }
+ /*
+ * For all other LU groups aside from 'default_lu_gp', walk all of
+ * the associated storage objects looking for a matching target port
+ * group ID from the local target port group.
+ */
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+ lu_gp_mem_list) {
+
+ dev = lu_gp_mem->lu_gp_mem_dev;
+ su_dev = dev->se_sub_dev;
+ atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp,
+ &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+ /*
+			 * If the target port asymmetric access state is
+			 * changed for any target port group accessible via
+			 * a logical unit within a LU group, the asymmetric
+			 * access states for the same target port group
+			 * accessible via other logical units in that LU
+			 * group will also change.
+ */
+ if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+ continue;
+
+ if (l_tg_pt_gp == tg_pt_gp) {
+ port = l_port;
+ nacl = l_nacl;
+ } else {
+ port = NULL;
+ nacl = NULL;
+ }
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+ * core_alua_do_transition_tg_pt() will always return
+ * success.
+ */
+ core_alua_do_transition_tg_pt(tg_pt_gp, port,
+ nacl, md_buf, new_state, explict);
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+ core_alua_dump_state(new_state));
+
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ smp_mb__after_atomic_dec();
+ kfree(md_buf);
+ return 0;
+}
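+
+/*
+ * Note on the reference counting in the walk above: each lu_gp_mem and
+ * tg_pt_gp has its ref_cnt incremented before the corresponding list lock
+ * is dropped, which keeps the entry pinned while
+ * core_alua_do_transition_tg_pt() runs without the lock held; the count is
+ * dropped again once the lock has been re-taken.
+ */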
+
+/*
+ * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port,
+ unsigned char *md_buf,
+ u32 md_buf_len)
+{
+ struct se_portal_group *se_tpg = port->sep_tpg;
+ char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ int len;
+
+ memset(path, 0, ALUA_METADATA_PATH_LEN);
+ memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+ len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+ TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+
+ if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+ snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+ len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ "alua_tg_pt_status=0x%02x\n",
+ atomic_read(&port->sep_tg_pt_secondary_offline),
+ port->sep_tg_pt_secondary_stat);
+
+ snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+ TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+ port->sep_lun->unpacked_lun);
+
+ return core_alua_write_tpg_metadata(path, md_buf, len);
+}
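+
+/*
+ * As an illustration only: with a hypothetical fabric name "iscsi", a
+ * target WWN of "iqn.2003-01.org.example:target" at TPGT 1 and an
+ * unpacked_lun of 0, the path built above would be roughly
+ *
+ *   /var/target/alua/iscsi/iqn.2003-01.org.example:target+1/lun_0
+ *
+ * and the file would contain the alua_tg_pt_offline and alua_tg_pt_status
+ * key=value pairs formatted by the snprintf() calls above.
+ */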
+
+static int core_alua_set_tg_pt_secondary_state(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct se_port *port,
+ int explict,
+ int offline)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ unsigned char *md_buf;
+ u32 md_buf_len;
+ int trans_delay_msecs;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (!(tg_pt_gp)) {
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_ERR "Unable to complete secondary state"
+ " transition\n");
+ return -1;
+ }
+ trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+ /*
+ * Set the secondary ALUA target port access state to OFFLINE
+ * or release the previously secondary state for struct se_port
+ */
+ if (offline)
+ atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+ else
+ atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+ md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+ port->sep_tg_pt_secondary_stat = (explict) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+ printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " to secondary access state: %s\n", (explict) ? "explict" :
+ "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ /*
+ * Do the optional transition delay after we set the secondary
+ * ALUA access state.
+ */
+ if (trans_delay_msecs != 0)
+ msleep_interruptible(trans_delay_msecs);
+ /*
+ * See if we need to update the ALUA fabric port metadata for
+ * secondary state and status
+ */
+ if (port->sep_tg_pt_secondary_write_md) {
+ md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+ if (!(md_buf)) {
+ printk(KERN_ERR "Unable to allocate md_buf for"
+ " secondary ALUA access metadata\n");
+ return -1;
+ }
+ mutex_lock(&port->sep_tg_pt_md_mutex);
+ core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+ md_buf, md_buf_len);
+ mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+ kfree(md_buf);
+ }
+
+ return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+ struct t10_alua_lu_gp *lu_gp;
+
+ lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+ if (!(lu_gp)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+ INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+ spin_lock_init(&lu_gp->lu_gp_lock);
+ atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+ if (def_group) {
+		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+ lu_gp->lu_gp_valid_id = 1;
+ se_global->alua_lu_gps_count++;
+ }
+
+ return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+ struct t10_alua_lu_gp *lu_gp_tmp;
+ u16 lu_gp_id_tmp;
+ /*
+	 * The lu_gp->lu_gp_id may only be set once.
+ */
+ if (lu_gp->lu_gp_valid_id) {
+ printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+ " ignoring request\n");
+ return -1;
+ }
+
+ spin_lock(&se_global->lu_gps_lock);
+ if (se_global->alua_lu_gps_count == 0x0000ffff) {
+ printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+ " 0x0000ffff reached\n");
+ spin_unlock(&se_global->lu_gps_lock);
+ kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+ return -1;
+ }
+again:
+ lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+ se_global->alua_lu_gps_counter++;
+
+ list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+ if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+ if (!(lu_gp_id))
+ goto again;
+
+ printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+ " already exists, ignoring request\n",
+ lu_gp_id);
+ spin_unlock(&se_global->lu_gps_lock);
+ return -1;
+ }
+ }
+
+ lu_gp->lu_gp_id = lu_gp_id_tmp;
+ lu_gp->lu_gp_valid_id = 1;
+ list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+ se_global->alua_lu_gps_count++;
+ spin_unlock(&se_global->lu_gps_lock);
+
+ return 0;
+}
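+
+/*
+ * ID selection above: a caller supplied lu_gp_id of 0 requests automatic
+ * allocation from alua_lu_gps_counter, retrying via the again: label until
+ * an unused ID is found, while a non-zero lu_gp_id must not collide with an
+ * existing group or the request is rejected.  core_alua_set_tg_pt_gp_id()
+ * below follows the same scheme for target port group IDs.
+ */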
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
+ lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+ if (!(lu_gp_mem)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+ spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+ atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+ lu_gp_mem->lu_gp_mem_dev = dev;
+ dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+ return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+ struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+ /*
+ * Once we have reached this point, config_item_put() has
+ * already been called from target_core_alua_drop_lu_gp().
+ *
+ * Here, we remove the *lu_gp from the global list so that
+ * no associations can be made while we are releasing
+ * struct t10_alua_lu_gp.
+ */
+ spin_lock(&se_global->lu_gps_lock);
+ atomic_set(&lu_gp->lu_gp_shutdown, 1);
+ list_del(&lu_gp->lu_gp_list);
+ se_global->alua_lu_gps_count--;
+ spin_unlock(&se_global->lu_gps_lock);
+ /*
+ * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+ * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+ * released with core_alua_put_lu_gp_from_name()
+ */
+ while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+ cpu_relax();
+ /*
+ * Release reference to struct t10_alua_lu_gp * from all associated
+ * struct se_device.
+ */
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+ &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+ if (lu_gp_mem->lu_gp_assoc) {
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp->lu_gp_members--;
+ lu_gp_mem->lu_gp_assoc = 0;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+ /*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+ */
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ if (lu_gp != se_global->default_lu_gp)
+ __core_alua_attach_lu_gp_mem(lu_gp_mem,
+ se_global->default_lu_gp);
+ else
+ lu_gp_mem->lu_gp = NULL;
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return;
+
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem))
+ return;
+
+ while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+ cpu_relax();
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ spin_lock(&lu_gp->lu_gp_lock);
+ if (lu_gp_mem->lu_gp_assoc) {
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp->lu_gp_members--;
+ lu_gp_mem->lu_gp_assoc = 0;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+ lu_gp_mem->lu_gp = NULL;
+ }
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+ struct t10_alua_lu_gp *lu_gp;
+ struct config_item *ci;
+
+ spin_lock(&se_global->lu_gps_lock);
+ list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+ if (!(lu_gp->lu_gp_valid_id))
+ continue;
+ ci = &lu_gp->lu_gp_group.cg_item;
+ if (!(strcmp(config_item_name(ci), name))) {
+ atomic_inc(&lu_gp->lu_gp_ref_cnt);
+ spin_unlock(&se_global->lu_gps_lock);
+ return lu_gp;
+ }
+ }
+ spin_unlock(&se_global->lu_gps_lock);
+
+ return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&se_global->lu_gps_lock);
+ atomic_dec(&lu_gp->lu_gp_ref_cnt);
+ spin_unlock(&se_global->lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_attach_lu_gp_mem(
+ struct t10_alua_lu_gp_member *lu_gp_mem,
+ struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&lu_gp->lu_gp_lock);
+ lu_gp_mem->lu_gp = lu_gp;
+ lu_gp_mem->lu_gp_assoc = 1;
+ list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+ lu_gp->lu_gp_members++;
+ spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_drop_lu_gp_mem(
+ struct t10_alua_lu_gp_member *lu_gp_mem,
+ struct t10_alua_lu_gp *lu_gp)
+{
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_del(&lu_gp_mem->lu_gp_mem_list);
+ lu_gp_mem->lu_gp = NULL;
+ lu_gp_mem->lu_gp_assoc = 0;
+ lu_gp->lu_gp_members--;
+ spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+ struct se_subsystem_dev *su_dev,
+ const char *name,
+ int def_group)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+ tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+ if (!(tg_pt_gp)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+ mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+ spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+ atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+ tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+ tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+ ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ /*
+	 * Enable both explicit and implicit ALUA support by default
+ */
+ tg_pt_gp->tg_pt_gp_alua_access_type =
+ TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ /*
+ * Set the default Active/NonOptimized Delay in milliseconds
+ */
+ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+ tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+ if (def_group) {
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ tg_pt_gp->tg_pt_gp_id =
+ T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ tg_pt_gp->tg_pt_gp_valid_id = 1;
+ T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+ &T10_ALUA(su_dev)->tg_pt_gps_list);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ }
+
+ return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ u16 tg_pt_gp_id)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+ u16 tg_pt_gp_id_tmp;
+ /*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
+ */
+ if (tg_pt_gp->tg_pt_gp_valid_id) {
+ printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+ " ignoring request\n");
+ return -1;
+ }
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+ printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+ " 0x0000ffff reached\n");
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+ return -1;
+ }
+again:
+ tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+ T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+
+ list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+ if (!(tg_pt_gp_id))
+ goto again;
+
+ printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+ " exists, ignoring request\n", tg_pt_gp_id);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ return -1;
+ }
+ }
+
+ tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+ tg_pt_gp->tg_pt_gp_valid_id = 1;
+ list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+ &T10_ALUA(su_dev)->tg_pt_gps_list);
+ T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+ struct se_port *port)
+{
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+ GFP_KERNEL);
+ if (!(tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+ tg_pt_gp_mem->tg_pt = port;
+ port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+ atomic_set(&port->sep_tg_pt_gp_active, 1);
+
+ return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+ /*
+ * Once we have reached this point, config_item_put() has already
+ * been called from target_core_alua_drop_tg_pt_gp().
+ *
+ * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+ * can be made while we are releasing struct t10_alua_tg_pt_gp.
+ */
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_del(&tg_pt_gp->tg_pt_gp_list);
+ T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ /*
+	 * Allow a struct t10_alua_tg_pt_gp * referenced by
+ * core_alua_get_tg_pt_gp_by_name() in
+ * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+ * to be released with core_alua_put_tg_pt_gp_from_name().
+ */
+ while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+ cpu_relax();
+ /*
+ * Release reference to struct t10_alua_tg_pt_gp from all associated
+ * struct se_port.
+ */
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+ &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+ if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members--;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ /*
+		 * tg_pt_gp_mem is associated with a single
+ * se_port->sep_alua_tg_pt_gp_mem, and is released via
+ * core_alua_free_tg_pt_gp_mem().
+ *
+ * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+ * default_tg_pt_gp.
+ */
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ } else
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem))
+ return;
+
+ while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+ cpu_relax();
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members--;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ }
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+ struct se_subsystem_dev *su_dev,
+ const char *name)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct config_item *ci;
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ tg_pt_gp_list) {
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ continue;
+ ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+ if (!(strcmp(config_item_name(ci), name))) {
+ atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ return tg_pt_gp;
+ }
+ }
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+ return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+ spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+ list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+ &tg_pt_gp->tg_pt_gp_mem_list);
+ tg_pt_gp->tg_pt_gp_members++;
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+ tg_pt_gp_mem->tg_pt_gp = NULL;
+ tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ tg_pt_gp->tg_pt_gp_members--;
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct config_item *tg_pt_ci;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ ssize_t len = 0;
+
+ if (alua->alua_type != SPC3_ALUA_EMULATED)
+ return len;
+
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem))
+ return len;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+ len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+ " %hu\nTG Port Primary Access State: %s\nTG Port "
+ "Primary Access Status: %s\nTG Port Secondary Access"
+ " State: %s\nTG Port Secondary Access Status: %s\n",
+ config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(atomic_read(
+ &tg_pt_gp->tg_pt_gp_alua_access_state)),
+ core_alua_dump_status(
+ tg_pt_gp->tg_pt_gp_alua_access_status),
+ (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+ "Offline" : "None",
+ core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+ }
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+ struct se_port *port,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *tpg;
+ struct se_lun *lun;
+ struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char buf[TG_PT_GROUP_NAME_BUF];
+ int move = 0;
+
+ tpg = port->sep_tpg;
+ lun = port->sep_lun;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+ printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+ " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item));
+ return -EINVAL;
+ }
+
+ if (count > TG_PT_GROUP_NAME_BUF) {
+ printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+ return -EINVAL;
+ }
+ memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+ memcpy(buf, page, count);
+ /*
+ * Any ALUA target port group alias besides "NULL" means we will be
+ * making a new group association.
+ */
+ if (strcmp(strstrip(buf), "NULL")) {
+ /*
+ * core_alua_get_tg_pt_gp_by_name() will increment reference to
+ * struct t10_alua_tg_pt_gp. This reference is released with
+ * core_alua_put_tg_pt_gp_from_name() below.
+ */
+ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+ strstrip(buf));
+ if (!(tg_pt_gp_new))
+ return -ENODEV;
+ }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem)) {
+ if (tg_pt_gp_new)
+ core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+ printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if ((tg_pt_gp)) {
+ /*
+ * Clearing an existing tg_pt_gp association, and replacing
+ * with the default_tg_pt_gp.
+ */
+ if (!(tg_pt_gp_new)) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+ " %s/tpgt_%hu/%s from ALUA Target Port Group:"
+ " alua/%s, ID: %hu back to"
+ " default_tg_pt_gp\n",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item),
+ config_item_name(
+ &tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id);
+
+ __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ return count;
+ }
+ /*
+ * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+ */
+ __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+ move = 1;
+ }
+ /*
+ * Associate tg_pt_gp_mem with tg_pt_gp_new.
+ */
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+ " Target Port Group: alua/%s, ID: %hu\n", (move) ?
+ "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item),
+ config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+ tg_pt_gp_new->tg_pt_gp_id);
+
+ core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+ return count;
+}
+
+ssize_t core_alua_show_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+ return sprintf(page, "Implict and Explict\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+ return sprintf(page, "Implict\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+ return sprintf(page, "Explict\n");
+ else
+ return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_access_type\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+ printk(KERN_ERR "Illegal value for alua_access_type:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ if (tmp == 3)
+ tg_pt_gp->tg_pt_gp_alua_access_type =
+ TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ else if (tmp == 2)
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ else if (tmp == 1)
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ else
+ tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+ return count;
+}
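+
+/*
+ * The alua_access_type attribute accepts 0 (none), 1 (implicit only),
+ * 2 (explicit only) or 3 (implicit and explicit).  A value of 3 matches
+ * the default configured in core_alua_allocate_tg_pt_gp(), and
+ * core_alua_show_access_type() reports the current setting back in the
+ * same terms.
+ */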
+
+ssize_t core_alua_show_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+ return -EINVAL;
+ }
+ if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+ printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+ " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+ ALUA_MAX_NONOP_DELAY_MSECS);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+ return -EINVAL;
+ }
+ if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+ printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+ " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+ ALUA_MAX_TRANS_DELAY_MSECS);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return sprintf(page, "%d\n",
+ atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned long tmp;
+ int ret;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+ tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+ if (!(tg_pt_gp_mem)) {
+ printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+ lun->lun_sep, 0, (int)tmp);
+ if (ret < 0)
+ return -EINVAL;
+
+ return count;
+}
+
+ssize_t core_alua_show_secondary_status(
+ struct se_lun *lun,
+ char *page)
+{
+ return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+ return -EINVAL;
+ }
+ if ((tmp != ALUA_STATUS_NONE) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+ tmp);
+ return -EINVAL;
+ }
+ lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+ return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+ struct se_lun *lun,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+ return -EINVAL;
+ }
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+ return count;
+}
+
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+	 * of the underlying SCSI hardware. In Linux/SCSI terms, this can
+ * cause a problem because libata and some SATA RAID HBAs appear
+ * under Linux/SCSI, but emulate SCSI logic themselves.
+ */
+ if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+ alua->alua_type = SPC_ALUA_PASSTHROUGH;
+ alua->alua_state_check = &core_alua_state_check_nop;
+ printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+ " emulation\n", TRANSPORT(dev)->name);
+ return 0;
+ }
+ /*
+ * If SPC-3 or above is reported by real or emulated struct se_device,
+ * use emulated ALUA.
+ */
+ if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+ " device\n", TRANSPORT(dev)->name);
+ /*
+		 * Associate this struct se_device with the default ALUA
+ * LUN Group.
+ */
+ lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+ if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
+ return -1;
+
+ alua->alua_type = SPC3_ALUA_EMULATED;
+ alua->alua_state_check = &core_alua_state_check;
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ __core_alua_attach_lu_gp_mem(lu_gp_mem,
+ se_global->default_lu_gp);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+ " core/alua/lu_gps/default_lu_gp\n",
+ TRANSPORT(dev)->name);
+ } else {
+ alua->alua_type = SPC2_ALUA_DISABLED;
+ alua->alua_state_check = &core_alua_state_check_nop;
+ printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+ " device\n", TRANSPORT(dev)->name);
+ }
+
+ return 0;
+}
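+
+/*
+ * Summary of the three outcomes above: pSCSI passthrough devices without
+ * emulate_alua (or any caller passing force_pt) get SPC_ALUA_PASSTHROUGH
+ * with the no-op state check, SCSI-3 capable devices get
+ * SPC3_ALUA_EMULATED and are attached to the default ALUA LU group, and
+ * everything else falls back to SPC2_ALUA_DISABLED.
+ */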
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 000000000000..c86f97a081ed
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA 0x00
+#define TPGS_IMPLICT_ALUA 0x10
+#define TPGS_EXPLICT_ALUA 0x20
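+
+/*
+ * These values occupy the TPGS field of the standard INQUIRY response
+ * (byte 5) and may be OR'ed together when a target port group supports
+ * both modes, as set up in core_alua_allocate_tg_pt_gp() and reported by
+ * target_fill_alua_data().
+ */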
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
+#define ALUA_ACCESS_STATE_STANDBY 0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_OFFLINE 0xe
+#define ALUA_ACCESS_STATE_TRANSITION 0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE 0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
+#define ASCQ_04H_ALUA_OFFLINE 0x12
+
+/*
+ * Used as the default for the Active/NonOptimized delay (in milliseconds).
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
+#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
+/*
+ * Used for the implicit and explicit ALUA transition delay, which is
+ * disabled by default and is intended for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
+#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN 512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int core_emulate_report_target_port_groups(struct se_cmd *);
+extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+ struct se_device *, struct se_port *,
+ struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+ struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+ struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+ struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+ struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+ struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+ size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+ char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+ const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+ size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+ const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+ char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+ const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 000000000000..366080baf474
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+ /*
+ * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+ */
+ buf[5] = 0x80;
+
+ /*
+	 * Set the TPGS field for the explicit and/or implicit ALUA access
+	 * type and operation.
+ *
+ * See spc4r17 section 6.4.2 Table 135
+ */
+ if (!port)
+ return;
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (tg_pt_gp)
+ buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+
+ /*
+ * Make sure we at least have 6 bytes of INQUIRY response
+ * payload going back for EVPD=0
+ */
+ if (cmd->data_length < 6) {
+ printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ " too small for EVPD=0\n", cmd->data_length);
+ return -1;
+ }
+
+ buf[0] = dev->transport->get_device_type(dev);
+ if (buf[0] == TYPE_TAPE)
+ buf[1] = 0x80;
+ buf[2] = dev->transport->get_device_rev(dev);
+
+ /*
+ * Enable SCCS and TPGS fields for Emulated ALUA
+ */
+ if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+ target_fill_alua_data(lun->lun_sep, buf);
+
+ if (cmd->data_length < 8) {
+ buf[4] = 1; /* Set additional length to 1 */
+ return 0;
+ }
+
+ buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+
+ /*
+	 * Do not include vendor, product, revision info in INQUIRY
+ * response payload for cdbs with a small allocation length.
+ */
+ if (cmd->data_length < 36) {
+ buf[4] = 3; /* Set additional length to 3 */
+ return 0;
+ }
+
+ snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+ snprintf((unsigned char *)&buf[16], 16, "%s",
+ &DEV_T10_WWN(dev)->model[0]);
+ snprintf((unsigned char *)&buf[32], 4, "%s",
+ &DEV_T10_WWN(dev)->revision[0]);
+ buf[4] = 31; /* Set additional length to 31 */
+ return 0;
+}
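+
+/*
+ * Rough layout of the standard INQUIRY payload built above: byte 0 carries
+ * the peripheral device type, byte 1 the RMB bit for TYPE_TAPE, byte 2 the
+ * SPC revision, byte 4 the ADDITIONAL LENGTH (truncated for short
+ * allocation lengths), byte 7 advertises Sync and CmdQue, and bytes 8-35
+ * hold the "LIO-ORG" vendor string plus the configured model and revision.
+ */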
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ buf[1] = 0x00;
+ if (cmd->data_length < 8)
+ return 0;
+
+ buf[4] = 0x0;
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = 3;
+ buf[5] = 0x80;
+ buf[6] = 0x83;
+ buf[7] = 0x86;
+ }
+
+ return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ u16 len = 0;
+
+ buf[1] = 0x80;
+ if (dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ u32 unit_serial_len;
+
+ unit_serial_len =
+ strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ unit_serial_len++; /* For NULL Terminator */
+
+ if (((len + 4) + unit_serial_len) > cmd->data_length) {
+ len += unit_serial_len;
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff);
+ return 0;
+ }
+ len += sprintf((unsigned char *)&buf[4], "%s",
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ len++; /* Extra Byte for NULL Terminator */
+ buf[3] = len;
+ }
+ return 0;
+}
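+
+/*
+ * The Unit Serial Number page above only emits a serial once one has been
+ * set via the vpd_unit_serial ConfigFS attribute
+ * (SDF_EMULATED_VPD_UNIT_SERIAL); otherwise the page is returned with a
+ * zero PAGE LENGTH.  When the allocation length is too small, only the
+ * required length is reported back in bytes 2 and 3.
+ */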
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_lun *lun = SE_LUN(cmd);
+ struct se_port *port = NULL;
+ struct se_portal_group *tpg = NULL;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ unsigned char binary, binary_new;
+ unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+ u32 prod_len;
+ u32 unit_serial_len, off = 0;
+ int i;
+ u16 len = 0, id_len;
+
+ buf[1] = 0x83;
+ off = 4;
+
+ /*
+ * NAA IEEE Registered Extended Assigned designator format, see
+ * spc4r17 section 7.7.3.6.5
+ *
+ * We depend upon a target_core_mod/ConfigFS provided
+ * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+ * value in order to return the NAA id.
+ */
+ if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+ goto check_t10_vend_desc;
+
+ if (off + 20 > cmd->data_length)
+ goto check_t10_vend_desc;
+
+ /* CODE SET == Binary */
+ buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+ buf[off] = 0x00;
+
+ /* Identifier/Designator type == NAA identifier */
+ buf[off++] = 0x3;
+ off++;
+
+ /* Identifier/Designator length */
+ buf[off++] = 0x10;
+
+ /*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off++] = (0x6 << 4);
+
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ buf[off++] = 0x01;
+ buf[off++] = 0x40;
+ buf[off] = (0x5 << 4);
+
+ /*
+ * Return ConfigFS Unit Serial Number information for
+ * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+ */
+ binary = transport_asciihex_to_binaryhex(
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ buf[off++] |= (binary & 0xf0) >> 4;
+ for (i = 0; i < 24; i += 2) {
+ binary_new = transport_asciihex_to_binaryhex(
+ &DEV_T10_WWN(dev)->unit_serial[i+2]);
+ buf[off] = (binary & 0x0f) << 4;
+ buf[off++] |= (binary_new & 0xf0) >> 4;
+ binary = binary_new;
+ }
+ len = 20;
+ off = (len + 4);
+
+check_t10_vend_desc:
+ /*
+ * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+ */
+ id_len = 8; /* For Vendor field */
+ prod_len = 4; /* For VPD Header */
+ prod_len += 8; /* For Vendor field */
+ prod_len += strlen(prod);
+ prod_len++; /* For : */
+
+ if (dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ unit_serial_len =
+ strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ unit_serial_len++; /* For NULL Terminator */
+
+ if ((len + (id_len + 4) +
+ (prod_len + unit_serial_len)) >
+ cmd->data_length) {
+ len += (prod_len + unit_serial_len);
+ goto check_port;
+ }
+ id_len += sprintf((unsigned char *)&buf[off+12],
+ "%s:%s", prod,
+ &DEV_T10_WWN(dev)->unit_serial[0]);
+ }
+ buf[off] = 0x2; /* ASCII */
+ buf[off+1] = 0x1; /* T10 Vendor ID */
+ buf[off+2] = 0x0;
+ memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+ /* Extra Byte for NULL Terminator */
+ id_len++;
+ /* Identifier Length */
+ buf[off+3] = id_len;
+ /* Header size for Designation descriptor */
+ len += (id_len + 4);
+ off += (id_len + 4);
+ /*
+ * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+ */
+check_port:
+ port = lun->lun_sep;
+ if (port) {
+ struct t10_alua_lu_gp *lu_gp;
+ u32 padding, scsi_name_len;
+ u16 lu_gp_id = 0;
+ u16 tg_pt_gp_id = 0;
+ u16 tpgt;
+
+ tpg = port->sep_tpg;
+ /*
+		 * Relative target port identifier, see spc4r17
+ * section 7.7.3.7
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_tpgi;
+ }
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+ buf[off++] |= 0x4;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ /* Skip over Obsolete field in RTPI payload
+ * in Table 472 */
+ off += 2;
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Target port group identifier, see spc4r17
+ * section 7.7.3.8
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+check_tpgi:
+ if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+ SPC3_ALUA_EMULATED)
+ goto check_scsi_name;
+
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_lu_gp;
+ }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ goto check_lu_gp;
+
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ if (!(tg_pt_gp)) {
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ goto check_lu_gp;
+ }
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == Target port group identifier */
+ buf[off++] |= 0x5;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+ buf[off++] = (tg_pt_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * Logical Unit Group identifier, see spc4r17
+ * section 7.7.3.8
+ */
+check_lu_gp:
+ if (((len + 4) + 8) > cmd->data_length) {
+ len += 8;
+ goto check_scsi_name;
+ }
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem))
+ goto check_scsi_name;
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if (!(lu_gp)) {
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+ goto check_scsi_name;
+ }
+ lu_gp_id = lu_gp->lu_gp_id;
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ buf[off++] |= 0x1; /* CODE SET == Binary */
+ /* DESIGNATOR TYPE == Logical Unit Group identifier */
+ buf[off++] |= 0x6;
+ off++; /* Skip over Reserved */
+ buf[off++] = 4; /* DESIGNATOR LENGTH */
+ off += 2; /* Skip over Reserved Field */
+ buf[off++] = ((lu_gp_id >> 8) & 0xff);
+ buf[off++] = (lu_gp_id & 0xff);
+ len += 8; /* Header size + Designation descriptor */
+ /*
+ * SCSI name string designator, see spc4r17
+ * section 7.7.3.11
+ *
+ * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+ * section 7.5.1 Table 362
+ */
+check_scsi_name:
+ scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+ scsi_name_len += 10;
+ /* Check for 4-byte padding */
+ padding = ((-scsi_name_len) & 3);
+ if (padding != 0)
+ scsi_name_len += padding;
+ /* Header size + Designation descriptor */
+ scsi_name_len += 4;
+
+ if (((len + 4) + scsi_name_len) > cmd->data_length) {
+ len += scsi_name_len;
+ goto set_len;
+ }
+ buf[off] =
+ (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+ buf[off] |= 0x10;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and an iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+ */
+ tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+ scsi_name_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ if (padding)
+ scsi_name_len += padding;
+
+ buf[off-1] = scsi_name_len;
+ off += scsi_name_len;
+ /* Header size + Designation descriptor */
+ len += (scsi_name_len + 4);
+ }
+set_len:
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+ return 0;
+}
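+
+/*
+ * The Device Identification page built above can carry up to six
+ * designators: an NAA IEEE Registered Extended identifier derived from the
+ * unit serial, a T10 vendor ID descriptor, and, when a struct se_port is
+ * present, the relative target port identifier, target port group, logical
+ * unit group and SCSI name string descriptors, each of which is skipped
+ * individually when the allocation length runs out.
+ */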
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+ if (cmd->data_length < 60)
+ return 0;
+
+ buf[1] = 0x86;
+ buf[2] = 0x3c;
+ /* Set HEADSUP, ORDSUP, SIMPSUP */
+ buf[5] = 0x07;
+
+ /* If WriteCache emulation is enabled, set V_SUP */
+ if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+ buf[6] = 0x01;
+ return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ int have_tp = 0;
+
+ /*
+ * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+ * different page length for Thin Provisioning.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ have_tp = 1;
+
+ if (cmd->data_length < (0x10 + 4)) {
+ printk(KERN_INFO "Received data_length: %u"
+ " too small for EVPD 0xb0\n",
+ cmd->data_length);
+ return -1;
+ }
+
+ if (have_tp && cmd->data_length < (0x3c + 4)) {
+ printk(KERN_INFO "Received data_length: %u"
+ " too small for TPE=1 EVPD 0xb0\n",
+ cmd->data_length);
+ have_tp = 0;
+ }
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[1] = 0xb0;
+ buf[3] = have_tp ? 0x3c : 0x10;
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+ */
+ put_unaligned_be16(1, &buf[6]);
+
+ /*
+ * Set MAXIMUM TRANSFER LENGTH
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+
+ /*
+ * Set OPTIMAL TRANSFER LENGTH
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+
+ /*
+ * Exit now if we don't support TP or the initiator sent a too
+ * short buffer.
+ */
+ if (!have_tp || cmd->data_length < (0x3c + 4))
+ return 0;
+
+ /*
+ * Set MAXIMUM UNMAP LBA COUNT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+
+ /*
+ * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+ &buf[24]);
+
+ /*
+ * Set OPTIMAL UNMAP GRANULARITY
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+
+ /*
+ * UNMAP GRANULARITY ALIGNMENT
+ */
+ put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+ &buf[32]);
+ if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+ buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+ return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ /*
+ * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+ *
+ * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+ * zero, then the page length shall be set to 0004h. If the DP bit
+ * is set to one, then the page length shall be set to the value
+ * defined in table 162.
+ */
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[1] = 0xb2;
+
+ /*
+ * Set Hardcoded length mentioned above for DP=0
+ */
+ put_unaligned_be16(0x0004, &buf[2]);
+
+ /*
+ * The THRESHOLD EXPONENT field indicates the threshold set size in
+ * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent)).
+ *
+ * Note that this is currently set to 0x00 as mkp says it will be
+ * changing again. We can enable this once it has settled in T10
+ * and is actually used by Linux/SCSI ML code.
+ */
+ buf[4] = 0x00;
+
+ /*
+ * A TPU bit set to one indicates that the device server supports
+ * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+ * that the device server does not support the UNMAP command.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+ buf[5] = 0x80;
+
+ /*
+ * A TPWS bit set to one indicates that the device server supports
+ * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+ * A TPWS bit set to zero indicates that the device server does not
+ * support the use of the WRITE SAME (16) command to unmap LBAs.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+ buf[5] |= 0x40;
+
+ return 0;
+}
+
+static int
+target_emulate_inquiry(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned char *cdb = cmd->t_task->t_task_cdb;
+
+ if (!(cdb[1] & 0x1))
+ return target_emulate_inquiry_std(cmd);
+
+ /*
+ * Make sure we at least have 4 bytes of INQUIRY response
+ * payload for 0x00 going back for EVPD=1. Note that 0x80
+ * and 0x83 will check for enough payload data length and
+ * jump to set_len: label when there is not enough inquiry EVPD
+ * payload length left for the next outgoing EVPD metadata
+ */
+ if (cmd->data_length < 4) {
+ printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ " too small for EVPD=1\n", cmd->data_length);
+ return -1;
+ }
+ buf[0] = dev->transport->get_device_type(dev);
+
+ switch (cdb[2]) {
+ case 0x00:
+ return target_emulate_evpd_00(cmd, buf);
+ case 0x80:
+ return target_emulate_evpd_80(cmd, buf);
+ case 0x83:
+ return target_emulate_evpd_83(cmd, buf);
+ case 0x86:
+ return target_emulate_evpd_86(cmd, buf);
+ case 0xb0:
+ return target_emulate_evpd_b0(cmd, buf);
+ case 0xb2:
+ return target_emulate_evpd_b2(cmd, buf);
+ default:
+ printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+target_emulate_readcapacity(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ u32 blocks = dev->transport->get_blocks(dev);
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+ buf[2] = (blocks >> 8) & 0xff;
+ buf[3] = blocks & 0xff;
+ buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+ buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+ buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+ buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+ /*
+ * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+ return 0;
+}
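
The eight-byte READ CAPACITY (10) payload built above is two big-endian 32-bit fields: RETURNED LOGICAL BLOCK ADDRESS followed by BLOCK LENGTH IN BYTES. An equivalent packing using the put_unaligned_be32() helper the function already relies on (sketch only; pack_readcap10() and report_max are illustrative names, not part of this patch):

/* Pack the READ CAPACITY (10) response; sketch of the layout used above. */
static void pack_readcap10(unsigned char *buf, u32 blocks, u32 block_size,
			   int report_max)
{
	/* 0xFFFFFFFF tells the initiator to fall back to READ CAPACITY (16) */
	put_unaligned_be32(report_max ? 0xFFFFFFFF : blocks, &buf[0]);
	put_unaligned_be32(block_size, &buf[4]);
}
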
+
+static int
+target_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+ buf[2] = (blocks >> 40) & 0xff;
+ buf[3] = (blocks >> 32) & 0xff;
+ buf[4] = (blocks >> 24) & 0xff;
+ buf[5] = (blocks >> 16) & 0xff;
+ buf[6] = (blocks >> 8) & 0xff;
+ buf[7] = blocks & 0xff;
+ buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+ buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+ buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+ buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+ /*
+ * Set the Thin Provisioning Enable bit in READ CAPACITY (16) byte 14,
+ * per sbc3r22, if emulate_tpu or emulate_tpws is enabled.
+ */
+ if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ buf[14] = 0x80;
+
+ return 0;
+}
+
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+ p[0] = 0x01;
+ p[1] = 0x0a;
+
+ return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+ p[0] = 0x0a;
+ p[1] = 0x0a;
+ p[2] = 2;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+ *
+ * 00b: The logical unit shall clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when a
+ * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+ * status.
+ *
+ * 10b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall not establish a unit attention condition when
+ * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+ * CONFLICT status.
+ *
+ * 11b: The logical unit shall not clear any unit attention condition
+ * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+ * status and shall establish a unit attention condition for the
+ * initiator port associated with the I_T nexus on which the BUSY,
+ * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+ * Depending on the status, the additional sense code shall be set to
+ * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+ * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+ * command, a unit attention condition shall be established only once
+ * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+ * of the number of commands completed with one of those status codes.
+ */
+ p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ /*
+ * From spc4r17, section 7.4.6 Control mode Page
+ *
+ * Task Aborted Status (TAS) bit set to zero.
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+ p[8] = 0xff;
+ p[9] = 0xff;
+ p[11] = 30;
+
+ return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+ p[0] = 0x08;
+ p[1] = 0x12;
+ if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+ p[2] = 0x04; /* Write Cache Enable */
+ p[12] = 0x20; /* Disabled Read Ahead */
+
+ return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+ /*
+ * I believe that the WP bit (bit 7) in the mode header is the same for
+ * all device types..
+ */
+ switch (type) {
+ case TYPE_DISK:
+ case TYPE_TAPE:
+ default:
+ buf[0] |= 0x80; /* WP bit */
+ break;
+ }
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+ switch (type) {
+ case TYPE_DISK:
+ buf[0] |= 0x10; /* DPOFUA bit */
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+target_emulate_modesense(struct se_cmd *cmd, int ten)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ char *cdb = cmd->t_task->t_task_cdb;
+ unsigned char *rbuf = cmd->t_task->t_task_buf;
+ int type = dev->transport->get_device_type(dev);
+ int offset = (ten) ? 8 : 4;
+ int length = 0;
+ unsigned char buf[SE_MODE_PAGE_BUF];
+
+ memset(buf, 0, SE_MODE_PAGE_BUF);
+
+ switch (cdb[2] & 0x3f) {
+ case 0x01:
+ length = target_modesense_rwrecovery(&buf[offset]);
+ break;
+ case 0x08:
+ length = target_modesense_caching(dev, &buf[offset]);
+ break;
+ case 0x0a:
+ length = target_modesense_control(dev, &buf[offset]);
+ break;
+ case 0x3f:
+ length = target_modesense_rwrecovery(&buf[offset]);
+ length += target_modesense_caching(dev, &buf[offset+length]);
+ length += target_modesense_control(dev, &buf[offset+length]);
+ break;
+ default:
+ printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+ cdb[2] & 0x3f);
+ return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ }
+ offset += length;
+
+ if (ten) {
+ offset -= 2;
+ buf[0] = (offset >> 8) & 0xff;
+ buf[1] = offset & 0xff;
+
+ if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ target_modesense_write_protect(&buf[3], type);
+
+ if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ target_modesense_dpofua(&buf[3], type);
+
+ if ((offset + 2) > cmd->data_length)
+ offset = cmd->data_length;
+
+ } else {
+ offset -= 1;
+ buf[0] = offset & 0xff;
+
+ if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ target_modesense_write_protect(&buf[2], type);
+
+ if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ target_modesense_dpofua(&buf[2], type);
+
+ if ((offset + 1) > cmd->data_length)
+ offset = cmd->data_length;
+ }
+ memcpy(rbuf, buf, offset);
+
+ return 0;
+}
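
The offset arithmetic above follows the SPC mode parameter headers: MODE SENSE (6) uses a 4-byte header with a one-byte MODE DATA LENGTH at byte 0 and the device-specific parameter (WP/DPOFUA) at byte 2, while MODE SENSE (10) uses an 8-byte header with a two-byte length at bytes 0-1 and the device-specific parameter at byte 3. A sketch spelling the two layouts out (struct names are illustrative, not from this patch):

/* SPC mode parameter headers consulted by the offsets above (sketch). */
struct mode_param_header6 {
	u8 mode_data_length;		/* bytes following this field */
	u8 medium_type;
	u8 device_specific;		/* WP (0x80) / DPOFUA (0x10) */
	u8 block_descriptor_length;
} __attribute__((packed));

struct mode_param_header10 {
	__be16 mode_data_length;	/* bytes following this field */
	u8 medium_type;
	u8 device_specific;		/* WP (0x80) / DPOFUA (0x10) */
	u8 reserved[2];
	__be16 block_descriptor_length;
} __attribute__((packed));
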
+
+static int
+target_emulate_request_sense(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task->t_task_cdb;
+ unsigned char *buf = cmd->t_task->t_task_buf;
+ u8 ua_asc = 0, ua_ascq = 0;
+
+ if (cdb[1] & 0x01) {
+ printk(KERN_ERR "REQUEST_SENSE description emulation not"
+ " supported\n");
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+ if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+ /*
+ * CURRENT ERROR, UNIT ATTENTION
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ /*
+ * Make sure request data length is enough for additional
+ * sense data.
+ */
+ if (cmd->data_length <= 18) {
+ buf[7] = 0x00;
+ return 0;
+ }
+ /*
+ * The Additional Sense Code (ASC) from the UNIT ATTENTION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+ buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+ buf[7] = 0x0A;
+ } else {
+ /*
+ * CURRENT ERROR, NO SENSE
+ */
+ buf[0] = 0x70;
+ buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+ /*
+ * Make sure request data length is enough for additional
+ * sense data.
+ */
+ if (cmd->data_length <= 18) {
+ buf[7] = 0x00;
+ return 0;
+ }
+ /*
+ * NO ADDITIONAL SENSE INFORMATION
+ */
+ buf[SPC_ASC_KEY_OFFSET] = 0x00;
+ buf[7] = 0x0A;
+ }
+
+ return 0;
+}
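
The buffer filled in here is SPC fixed-format sense data: response code 0x70, the sense key at SPC_SENSE_KEY_OFFSET, the ADDITIONAL SENSE LENGTH at byte 7, and the ASC/ASCQ pair at SPC_ASC_KEY_OFFSET/SPC_ASCQ_KEY_OFFSET. A standalone sketch with the conventional numeric offsets written out (assuming the usual SPC values 2, 12 and 13 for those macros, which are defined elsewhere in this series):

/* Fixed-format sense data, byte offsets per SPC (illustrative sketch). */
static void fill_fixed_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
{
	buf[0] = 0x70;		/* current error, fixed format */
	buf[2] = key;		/* SENSE KEY */
	buf[7] = 0x0a;		/* ADDITIONAL SENSE LENGTH (bytes 8..17 follow) */
	buf[12] = asc;		/* ADDITIONAL SENSE CODE */
	buf[13] = ascq;		/* ADDITIONAL SENSE CODE QUALIFIER */
}
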
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_unmap(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
+ sector_t lba;
+ unsigned int size = cmd->data_length, range;
+ int ret, offset;
+ unsigned short dl, bd_dl;
+
+ /* First UNMAP block descriptor starts at 8 byte offset */
+ offset = 8;
+ size -= 8;
+ /* dl and bd_dl come from the parameter list header, not the CDB */
+ dl = get_unaligned_be16(&buf[0]);
+ bd_dl = get_unaligned_be16(&buf[2]);
+ ptr = &buf[offset];
+ printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+ while (size) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+ ret);
+ return -1;
+ }
+
+ ptr += 16;
+ size -= 16;
+ }
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+}
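
The loop above walks the UNMAP parameter list: an 8-byte header (UNMAP DATA LENGTH at bytes 0-1, UNMAP BLOCK DESCRIPTOR DATA LENGTH at bytes 2-3, bytes 4-7 reserved) followed by 16-byte block descriptors, each carrying a 64-bit starting LBA and a 32-bit block count. A sketch of how a test tool might build a single-descriptor payload of that shape (build_unmap_payload() is an illustrative helper, not part of this patch):

/* Build a one-descriptor UNMAP parameter list into 'buf' (>= 24 bytes). */
static int build_unmap_payload(unsigned char *buf, u64 lba, u32 nr_blocks)
{
	const u16 bd_dl = 16;			/* one 16-byte block descriptor */
	const u16 dl = 2 + 4 + bd_dl;		/* bytes following the DL field */

	memset(buf, 0, 8 + bd_dl);
	put_unaligned_be16(dl, &buf[0]);	/* UNMAP DATA LENGTH */
	put_unaligned_be16(bd_dl, &buf[2]);	/* BLOCK DESCRIPTOR DATA LENGTH */
	put_unaligned_be64(lba, &buf[8]);	/* UNMAP LOGICAL BLOCK ADDRESS */
	put_unaligned_be32(nr_blocks, &buf[16]);/* NUMBER OF LOGICAL BLOCKS */

	return 8 + bd_dl;
}
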
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_write_same(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ sector_t lba = cmd->t_task->t_task_lba;
+ unsigned int range;
+ int ret;
+
+ range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+
+ printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
+ (unsigned long long)lba, range);
+
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ printk(KERN_ERR "blkdev_issue_discard() failed for WRITE_SAME\n");
+ return -1;
+ }
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+}
+
+int
+transport_emulate_control_cdb(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned short service_action;
+ int ret = 0;
+
+ switch (cmd->t_task->t_task_cdb[0]) {
+ case INQUIRY:
+ ret = target_emulate_inquiry(cmd);
+ break;
+ case READ_CAPACITY:
+ ret = target_emulate_readcapacity(cmd);
+ break;
+ case MODE_SENSE:
+ ret = target_emulate_modesense(cmd, 0);
+ break;
+ case MODE_SENSE_10:
+ ret = target_emulate_modesense(cmd, 1);
+ break;
+ case SERVICE_ACTION_IN:
+ switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ ret = target_emulate_readcapacity_16(cmd);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+ cmd->t_task->t_task_cdb[1] & 0x1f);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ break;
+ case REQUEST_SENSE:
+ ret = target_emulate_request_sense(cmd);
+ break;
+ case UNMAP:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+ dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_unmap(task);
+ break;
+ case WRITE_SAME_16:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_write_same(task);
+ break;
+ case VARIABLE_LENGTH_CMD:
+ service_action =
+ get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+ switch (service_action) {
+ case WRITE_SAME_32:
+ if (!dev->transport->do_discard) {
+ printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+ " supported for: %s\n",
+ dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_write_same(task);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+ " 0x%02x\n", service_action);
+ break;
+ }
+ break;
+ case SYNCHRONIZE_CACHE:
+ case 0x91: /* SYNCHRONIZE_CACHE_16: */
+ if (!dev->transport->do_sync_cache) {
+ printk(KERN_ERR
+ "SYNCHRONIZE_CACHE emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ dev->transport->do_sync_cache(task);
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ case ERASE:
+ case REZERO_UNIT:
+ case SEEK_10:
+ case SPACE:
+ case START_STOP:
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case WRITE_FILEMARKS:
+ break;
+ default:
+ printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+ cmd->t_task->t_task_cdb[0], dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+
+ if (ret < 0)
+ return ret;
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
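
For reference, the two service-action cases in the dispatcher above pull their sub-opcode from different CDB positions; a tiny sketch of those accessors (names are illustrative, not part of this patch):

/* SERVICE ACTION IN (16), opcode 0x9e: service action in cdb[1] bits 4..0 */
static inline u8 sai_service_action(const unsigned char *cdb)
{
	return cdb[1] & 0x1f;
}

/* VARIABLE LENGTH CDB, opcode 0x7f: service action in cdb[8..9], big-endian */
static inline u16 varlen_service_action(const unsigned char *cdb)
{
	return get_unaligned_be16(&cdb[8]);
}
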
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 000000000000..caf8dc18ee0a
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3240 @@
+/*******************************************************************************
+ * Filename: target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+static struct list_head g_tf_list;
+static struct mutex g_tf_lock;
+
+struct target_core_configfs_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(void *, char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+ " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+ utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+ .show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+ .ca_owner = THIS_MODULE,
+ .ca_name = "version",
+ .ca_mode = S_IRUGO,
+};
+
+static struct target_fabric_configfs *target_core_get_fabric(
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+
+ if (!(name))
+ return NULL;
+
+ mutex_lock(&g_tf_lock);
+ list_for_each_entry(tf, &g_tf_list, tf_list) {
+ if (!(strcmp(tf->tf_name, name))) {
+ atomic_inc(&tf->tf_access_cnt);
+ mutex_unlock(&g_tf_lock);
+ return tf;
+ }
+ }
+ mutex_unlock(&g_tf_lock);
+
+ return NULL;
+}
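
target_core_get_fabric() takes a usage reference by bumping tf_access_cnt under g_tf_lock; the matching decrement happens directly in target_core_deregister_fabric() below. Purely to make that pairing explicit, a hypothetical "put" helper (not present in this patch) would amount to:

/* Hypothetical counterpart to target_core_get_fabric(), for illustration. */
static void target_core_put_fabric(struct target_fabric_configfs *tf)
{
	atomic_dec(&tf->tf_access_cnt);
}
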
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+ struct config_group *group,
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+ int ret;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+ " %s\n", group, name);
+ /*
+ * Ensure that TCM subsystem plugins are loaded at this point for
+ * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
+ * LUN symlinks.
+ */
+ if (transport_subsystem_check_init() < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Below are some hardcoded request_module() calls to automatically
+ * load fabric modules when the following is called:
+ *
+ * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+ *
+ * Note that this does not limit which TCM fabric module can be
+ * registered, but simply provides auto loading logic for known TCM
+ * fabric modules when mkdir(2) is called with a matching name.
+ */
+ if (!(strncmp(name, "iscsi", 5))) {
+ /*
+ * Automatically load the LIO Target fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/iscsi
+ */
+ ret = request_module("iscsi_target_mod");
+ if (ret < 0) {
+ printk(KERN_ERR "request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else if (!(strncmp(name, "loopback", 8))) {
+ /*
+ * Automatically load the tcm_loop fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/loopback
+ */
+ ret = request_module("tcm_loop");
+ if (ret < 0) {
+ printk(KERN_ERR "request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ tf = target_core_get_fabric(name);
+ if (!(tf)) {
+ printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+ name);
+ return ERR_PTR(-EINVAL);
+ }
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+ " %s\n", tf->tf_name);
+ /*
+ * On a successful target_core_get_fabric() lookup, the returned
+ * struct target_fabric_configfs *tf will contain a usage reference.
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+ &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+ tf->tf_group.default_groups = tf->tf_default_groups;
+ tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+ tf->tf_group.default_groups[1] = NULL;
+
+ config_group_init_type_name(&tf->tf_group, name,
+ &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+ &TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+ " %s\n", tf->tf_group.cg_item.ci_name);
+ /*
+ * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+ */
+ tf->tf_ops.tf_subsys = tf->tf_subsys;
+ tf->tf_fabric = &tf->tf_group.cg_item;
+ printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+ " for %s\n", name);
+
+ return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct target_fabric_configfs *tf = container_of(
+ to_config_group(item), struct target_fabric_configfs, tf_group);
+ struct config_group *tf_group;
+ struct config_item *df_item;
+ int i;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+ " tf list\n", config_item_name(item));
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+ " %s\n", tf->tf_name);
+ atomic_dec(&tf->tf_access_cnt);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+ " tf->tf_fabric for %s\n", tf->tf_name);
+ tf->tf_fabric = NULL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+ " %s\n", config_item_name(item));
+
+ tf_group = &tf->tf_group;
+ for (i = 0; tf_group->default_groups[i]; i++) {
+ df_item = &tf_group->default_groups[i]->cg_item;
+ tf_group->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+ .make_group = &target_core_register_fabric,
+ .drop_item = &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/config/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+ &target_core_item_attr_version,
+ NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+ .ct_item_ops = &target_core_fabric_item_ops,
+ .ct_group_ops = &target_core_fabric_group_ops,
+ .ct_attrs = target_core_fabric_item_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "target",
+ .ci_type = &target_core_fabrics_item,
+ },
+ },
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+ &target_core_fabrics,
+ NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the passed struct module pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ * into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+ struct module *fabric_mod,
+ const char *name)
+{
+ struct target_fabric_configfs *tf;
+
+ if (!(fabric_mod)) {
+ printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
+ return NULL;
+ }
+ if (!(name)) {
+ printk(KERN_ERR "Unable to locate passed fabric name\n");
+ return NULL;
+ }
+ if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+ printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+ "_NAME_SIZE\n", name);
+ return NULL;
+ }
+
+ tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+ if (!(tf))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&tf->tf_list);
+ atomic_set(&tf->tf_access_cnt, 0);
+ /*
+ * Setup the default generic struct config_item_type's (cits) in
+ * struct target_fabric_configfs->tf_cit_tmpl
+ */
+ tf->tf_module = fabric_mod;
+ target_fabric_setup_cits(tf);
+
+ tf->tf_subsys = target_core_subsystem[0];
+ snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+ mutex_lock(&g_tf_lock);
+ list_add_tail(&tf->tf_list, &g_tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+ ">>>>>>>>>>>>>>\n");
+ printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+ " %s\n", tf, tf->tf_name);
+ return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
+
+/*
+ * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+ struct target_fabric_configfs *tf)
+{
+ mutex_lock(&g_tf_lock);
+ list_del(&tf->tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+ struct target_fabric_configfs *tf)
+{
+ struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+ if (!(tfo->get_fabric_name)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_fabric_proto_ident)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_wwn)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_tag)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_default_depth)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_pr_transport_id)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_pr_transport_id_len)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode_cache)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_demo_mode_write_protect)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_check_prod_mode_write_protect)) {
+ printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_alloc_fabric_acl)) {
+ printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_release_fabric_acl)) {
+ printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->tpg_get_inst_index)) {
+ printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->release_cmd_to_pool)) {
+ printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->release_cmd_direct)) {
+ printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->shutdown_session)) {
+ printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->close_session)) {
+ printk(KERN_ERR "Missing tfo->close_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->stop_session)) {
+ printk(KERN_ERR "Missing tfo->stop_session()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fall_back_to_erl0)) {
+ printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->sess_logged_in)) {
+ printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->sess_get_index)) {
+ printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->write_pending)) {
+ printk(KERN_ERR "Missing tfo->write_pending()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->write_pending_status)) {
+ printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->set_default_node_attributes)) {
+ printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_task_tag)) {
+ printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_cmd_state)) {
+ printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->new_cmd_failure)) {
+ printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_data_in)) {
+ printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_status)) {
+ printk(KERN_ERR "Missing tfo->queue_status()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->queue_tm_rsp)) {
+ printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->set_fabric_sense_len)) {
+ printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->get_fabric_sense_len)) {
+ printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->is_state_remove)) {
+ printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->pack_lun)) {
+ printk(KERN_ERR "Missing tfo->pack_lun()\n");
+ return -EINVAL;
+ }
+ /*
+ * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+ * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+ * target_core_fabric_configfs.c WWN+TPG group context code.
+ */
+ if (!(tfo->fabric_make_wwn)) {
+ printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_drop_wwn)) {
+ printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_make_tpg)) {
+ printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+ return -EINVAL;
+ }
+ if (!(tfo->fabric_drop_tpg)) {
+ printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Called 2nd from fabric module with returned parameter of
+ * struct target_fabric_configfs * from target_fabric_configfs_init().
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned. Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+ struct target_fabric_configfs *tf)
+{
+ struct config_group *su_group;
+ int ret;
+
+ if (!(tf)) {
+ printk(KERN_ERR "Unable to locate target_fabric_configfs"
+ " pointer\n");
+ return -EINVAL;
+ }
+ if (!(tf->tf_subsys)) {
+ printk(KERN_ERR "Unable to locate target struct configfs_subsystem"
+ " pointer\n");
+ return -EINVAL;
+ }
+ su_group = &tf->tf_subsys->su_group;
+ if (!(su_group)) {
+ printk(KERN_ERR "Unable to locate target struct config_group"
+ " pointer\n");
+ return -EINVAL;
+ }
+ ret = target_fabric_tf_ops_check(tf);
+ if (ret < 0)
+ return ret;
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+ ">>>>>>>>>>\n");
+ return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
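
Taken together with target_fabric_configfs_init() above, the expected calling sequence for a fabric module is: init, fill in tf->tf_ops with every callback checked by target_fabric_tf_ops_check(), then register, and free on failure. A heavily abridged sketch of that flow ("demo" and the stub below are made-up names; the several dozen remaining mandatory callbacks are elided):

static struct target_fabric_configfs *demo_fabric_tf;

static char *demo_get_fabric_name(void)
{
	return "demo";
}

static int __init demo_fabric_register(void)
{
	struct target_fabric_configfs *tf;
	int ret;

	tf = target_fabric_configfs_init(THIS_MODULE, "demo");
	if (IS_ERR_OR_NULL(tf))
		return -ENOMEM;

	tf->tf_ops.get_fabric_name = demo_get_fabric_name;
	/* ... plus every other op listed in target_fabric_tf_ops_check() ... */

	ret = target_fabric_configfs_register(tf);
	if (ret < 0) {
		target_fabric_configfs_free(tf);
		return ret;
	}
	demo_fabric_tf = tf;
	return 0;
}
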
+
+void target_fabric_configfs_deregister(
+ struct target_fabric_configfs *tf)
+{
+ struct config_group *su_group;
+ struct configfs_subsystem *su;
+
+ if (!(tf)) {
+ printk(KERN_ERR "Unable to locate passed target_fabric_"
+ "configfs\n");
+ return;
+ }
+ su = tf->tf_subsys;
+ if (!(su)) {
+ printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+ " pointer\n");
+ return;
+ }
+ su_group = &tf->tf_subsys->su_group;
+ if (!(su_group)) {
+ printk(KERN_ERR "Unable to locate target struct config_group"
+ " pointer\n");
+ return;
+ }
+
+ printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+ ">>>>>>>>>>>>\n");
+ mutex_lock(&g_tf_lock);
+ if (atomic_read(&tf->tf_access_cnt)) {
+ mutex_unlock(&g_tf_lock);
+ printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+ tf->tf_name);
+ BUG();
+ }
+ list_del(&tf->tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+ " %s\n", tf->tf_name);
+ tf->tf_module = NULL;
+ tf->tf_subsys = NULL;
+ kfree(tf);
+
+ printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+ ">>>>>\n");
+ return;
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name) \
+static ssize_t target_core_dev_show_attr_##_name( \
+ struct se_dev_attrib *da, \
+ char *page) \
+{ \
+ struct se_device *dev; \
+ struct se_subsystem_dev *se_dev = da->da_sub_dev; \
+ ssize_t rb; \
+ \
+ spin_lock(&se_dev->se_dev_lock); \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ return -ENODEV; \
+ } \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+ spin_unlock(&se_dev->se_dev_lock); \
+ \
+ return rb; \
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name) \
+static ssize_t target_core_dev_store_attr_##_name( \
+ struct se_dev_attrib *da, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct se_device *dev; \
+ struct se_subsystem_dev *se_dev = da->da_sub_dev; \
+ unsigned long val; \
+ int ret; \
+ \
+ spin_lock(&se_dev->se_dev_lock); \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ return -ENODEV; \
+ } \
+ ret = strict_strtoul(page, 0, &val); \
+ if (ret < 0) { \
+ spin_unlock(&se_dev->se_dev_lock); \
+ printk(KERN_ERR "strict_strtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = se_dev_set_##_name(dev, (u32)val); \
+ spin_unlock(&se_dev->se_dev_lock); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_DEV_ATTRIB(_name) \
+DEF_DEV_ATTRIB_SHOW(_name); \
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name) \
+DEF_DEV_ATTRIB_SHOW(_name);
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode) \
+static struct target_core_dev_attrib_attribute \
+ target_core_dev_attrib_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_show_attr_##_name, \
+ target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name); \
+static struct target_core_dev_attrib_attribute \
+ target_core_dev_attrib_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_dev_show_attr_##_name);
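
To make the pattern concrete, DEF_DEV_ATTRIB(block_size) together with SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR) below expands to the following show routine and attribute definition (the store side is analogous: strict_strtoul() on the input, then se_dev_set_block_size()):

static ssize_t target_core_dev_show_attr_block_size(
	struct se_dev_attrib *da,
	char *page)
{
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = da->da_sub_dev;
	ssize_t rb;

	spin_lock(&se_dev->se_dev_lock);
	dev = se_dev->se_dev_ptr;
	if (!(dev)) {
		spin_unlock(&se_dev->se_dev_lock);
		return -ENODEV;
	}
	rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->block_size);
	spin_unlock(&se_dev->se_dev_lock);

	return rb;
}

static struct target_core_dev_attrib_attribute
		target_core_dev_attrib_block_size =
	__CONFIGFS_EATTR(block_size, S_IRUGO | S_IWUSR,
			target_core_dev_show_attr_block_size,
			target_core_dev_store_attr_block_size);
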
+
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(task_timeout);
+SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+ &target_core_dev_attrib_emulate_dpo.attr,
+ &target_core_dev_attrib_emulate_fua_write.attr,
+ &target_core_dev_attrib_emulate_fua_read.attr,
+ &target_core_dev_attrib_emulate_write_cache.attr,
+ &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &target_core_dev_attrib_emulate_tas.attr,
+ &target_core_dev_attrib_emulate_tpu.attr,
+ &target_core_dev_attrib_emulate_tpws.attr,
+ &target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_hw_block_size.attr,
+ &target_core_dev_attrib_block_size.attr,
+ &target_core_dev_attrib_hw_max_sectors.attr,
+ &target_core_dev_attrib_max_sectors.attr,
+ &target_core_dev_attrib_optimal_sectors.attr,
+ &target_core_dev_attrib_hw_queue_depth.attr,
+ &target_core_dev_attrib_queue_depth.attr,
+ &target_core_dev_attrib_task_timeout.attr,
+ &target_core_dev_attrib_max_unmap_lba_count.attr,
+ &target_core_dev_attrib_max_unmap_block_desc_count.attr,
+ &target_core_dev_attrib_unmap_granularity.attr,
+ &target_core_dev_attrib_unmap_granularity_alignment.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+ .show_attribute = target_core_dev_attrib_attr_show,
+ .store_attribute = target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+ .ct_item_ops = &target_core_dev_attrib_ops,
+ .ct_attrs = target_core_dev_attrib_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/* Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode) \
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_wwn_show_attr_##_name, \
+ target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name); \
+do { \
+ static struct target_core_dev_wwn_attribute \
+ target_core_dev_wwn_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_dev_wwn_show_attr_##_name); \
+} while (0);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+ struct t10_wwn *t10_wwn,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+ &t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+ unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+ /*
+ * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+ * from the struct scsi_device level firmware, do not allow
+ * VPD Unit Serial to be emulated.
+ *
+ * Note this struct scsi_device could also be emulating VPD
+ * information from its drivers/scsi LLD. But for now we assume
+ * it is doing 'the right thing' wrt a world wide unique
+ * VPD Unit Serial Number that OS dependent multipath can depend on.
+ */
+ if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+ printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+ " Unit Serial, ignoring request\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+ printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+ " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+ return -EOVERFLOW;
+ }
+ /*
+ * Check to see if any active $FABRIC_MOD exports exist. If they
+ * do exist, fail here as changing this information on the fly
+ * (underneath the initiator side OS dependent multipath code)
+ * could cause negative effects.
+ */
+ dev = su_dev->se_dev_ptr;
+ if ((dev)) {
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "Unable to set VPD Unit Serial while"
+ " active %d $FABRIC_MOD exports exist\n",
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -EINVAL;
+ }
+ }
+ /*
+ * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+ *
+ * Also, strip any newline added from the userspace
+ * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+ */
+ memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+ snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+ snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+ "%s", strstrip(buf));
+ su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+ " %s\n", su_dev->t10_wwn.unit_serial);
+
+ return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+ struct t10_wwn *t10_wwn,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+ struct se_device *dev;
+ struct t10_vpd *vpd;
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ ssize_t len = 0;
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+ spin_lock(&t10_wwn->t10_vpd_lock);
+ list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+ if (!(vpd->protocol_identifier_set))
+ continue;
+
+ transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+ if ((len + strlen(buf) > PAGE_SIZE))
+ break;
+
+ len += sprintf(page+len, "%s", buf);
+ }
+ spin_unlock(&t10_wwn->t10_vpd_lock);
+
+ return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
+static ssize_t target_core_dev_wwn_show_attr_##_name( \
+ struct t10_wwn *t10_wwn, \
+ char *page) \
+{ \
+ struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
+ struct se_device *dev; \
+ struct t10_vpd *vpd; \
+ unsigned char buf[VPD_TMP_BUF_SIZE]; \
+ ssize_t len = 0; \
+ \
+ dev = se_dev->se_dev_ptr; \
+ if (!(dev)) \
+ return -ENODEV; \
+ \
+ spin_lock(&t10_wwn->t10_vpd_lock); \
+ list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
+ if (vpd->association != _assoc) \
+ continue; \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ \
+ memset(buf, 0, VPD_TMP_BUF_SIZE); \
+ transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+ if ((len + strlen(buf) > PAGE_SIZE)) \
+ break; \
+ len += sprintf(page+len, "%s", buf); \
+ } \
+ spin_unlock(&t10_wwn->t10_vpd_lock); \
+ \
+ return len; \
+}
+
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+ struct t10_wwn *t10_wwn,
+ const char *page,
+ size_t count)
+{
+ return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+ &target_core_dev_wwn_vpd_unit_serial.attr,
+ &target_core_dev_wwn_vpd_protocol_identifier.attr,
+ &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+ &target_core_dev_wwn_vpd_assoc_target_port.attr,
+ &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+ .show_attribute = target_core_dev_wwn_attr_show,
+ .store_attribute = target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+ .ct_item_ops = &target_core_dev_wwn_ops,
+ .ct_attrs = target_core_dev_wwn_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/* Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode) \
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_dev_pr_show_attr_##_name, \
+ target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name); \
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+ struct se_device *dev,
+ char *page,
+ ssize_t *len)
+{
+ struct se_node_acl *se_nacl;
+ struct t10_pr_registration *pr_reg;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return *len;
+ }
+ se_nacl = pr_reg->pr_reg_nacl;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+ TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+ struct se_device *dev,
+ char *page,
+ ssize_t *len)
+{
+ struct se_node_acl *se_nacl;
+
+ spin_lock(&dev->dev_reservation_lock);
+ se_nacl = dev->dev_reserved_node_acl;
+ if (!(se_nacl)) {
+ *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return *len;
+ }
+ *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+ TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ ssize_t len = 0;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ switch (T10_RES(su_dev)->res_type) {
+ case SPC3_PERSISTENT_RESERVATIONS:
+ target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+ page, &len);
+ break;
+ case SPC2_RESERVATIONS:
+ target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+ page, &len);
+ break;
+ case SPC_PASSTHROUGH:
+ len += sprintf(page+len, "Passthrough\n");
+ break;
+ default:
+ len += sprintf(page+len, "Unknown\n");
+ break;
+ }
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct t10_pr_registration *pr_reg;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ /*
+ * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+ * Basic PERSISTENT RESERVE OUT parameter list, page 290
+ */
+ if (pr_reg->pr_reg_all_tg_pt)
+ len = sprintf(page, "SPC-3 Reservation: All Target"
+ " Ports registration\n");
+ else
+ len = sprintf(page, "SPC-3 Reservation: Single"
+ " Target Port registration\n");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct se_node_acl *se_nacl;
+ struct se_lun *lun;
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg;
+ struct target_core_fabric_ops *tfo;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ se_nacl = pr_reg->pr_reg_nacl;
+ se_tpg = se_nacl->se_tpg;
+ lun = pr_reg->pr_reg_tg_pt_lun;
+ tfo = TPG_TFO(se_tpg);
+
+ len += sprintf(page+len, "SPC-3 Reservation: %s"
+ " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+ tfo->tpg_get_wwn(se_tpg));
+ len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+ " Identifier Tag: %hu %s Portal Group Tag: %hu"
+ " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+ tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+ tfo->get_fabric_name(), lun->unpacked_lun);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct target_core_fabric_ops *tfo;
+ struct t10_pr_registration *pr_reg;
+ unsigned char buf[384];
+ char i_buf[PR_REG_ISID_ID_LEN];
+ ssize_t len = 0;
+ int reg_count = 0, prf_isid;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+
+ memset(buf, 0, 384);
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+ tfo->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+ &i_buf[0] : "", pr_reg->pr_res_key,
+ pr_reg->pr_res_generation);
+
+ if ((len + strlen(buf) > PAGE_SIZE))
+ break;
+
+ len += sprintf(page+len, "%s", buf);
+ reg_count++;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ if (!(reg_count))
+ len += sprintf(page+len, "None\n");
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ struct se_device *dev;
+ struct t10_pr_registration *pr_reg;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return len;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (!(pr_reg)) {
+ len = sprintf(page, "No SPC-3 Reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ return len;
+ }
+ len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ ssize_t len = 0;
+
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ switch (T10_RES(su_dev)->res_type) {
+ case SPC3_PERSISTENT_RESERVATIONS:
+ len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+ break;
+ case SPC2_RESERVATIONS:
+ len = sprintf(page, "SPC2_RESERVATIONS\n");
+ break;
+ case SPC_PASSTHROUGH:
+ len = sprintf(page, "SPC_PASSTHROUGH\n");
+ break;
+ default:
+ len = sprintf(page, "UNKNOWN\n");
+ break;
+ }
+
+ return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "APTPL Bit Status: %s\n",
+ (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+ struct se_subsystem_dev *su_dev,
+ char *page)
+{
+ if (!(su_dev->se_dev_ptr))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+ Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+ Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+ Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+ Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_initiator_fabric, "initiator_fabric=%s"},
+ {Opt_initiator_node, "initiator_node=%s"},
+ {Opt_initiator_sid, "initiator_sid=%s"},
+ {Opt_sa_res_key, "sa_res_key=%s"},
+ {Opt_res_holder, "res_holder=%d"},
+ {Opt_res_type, "res_type=%d"},
+ {Opt_res_scope, "res_scope=%d"},
+ {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+ {Opt_mapped_lun, "mapped_lun=%d"},
+ {Opt_target_fabric, "target_fabric=%s"},
+ {Opt_target_node, "target_node=%s"},
+ {Opt_tpgt, "tpgt=%d"},
+ {Opt_port_rtpi, "port_rtpi=%d"},
+ {Opt_target_lun, "target_lun=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+ struct se_subsystem_dev *su_dev,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev;
+ unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
+ unsigned char *isid = NULL;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long long tmp_ll;
+ u64 sa_res_key = 0;
+ u32 mapped_lun = 0, target_lun = 0;
+ int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+ u16 port_rpti = 0, tpgt = 0;
+ u8 type = 0, scope;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_INFO "Unable to process APTPL metadata while"
+ " active fabric exports exist\n");
+ return -EINVAL;
+ }
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_initiator_fabric:
+ i_fabric = match_strdup(&args[0]);
+ break;
+ case Opt_initiator_node:
+ i_port = match_strdup(&args[0]);
+ if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+ printk(KERN_ERR "APTPL metadata initiator_node="
+ " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+ PR_APTPL_MAX_IPORT_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_initiator_sid:
+ isid = match_strdup(&args[0]);
+ if (strlen(isid) > PR_REG_ISID_LEN) {
+ printk(KERN_ERR "APTPL metadata initiator_sid="
+ " exceeds PR_REG_ISID_LEN: %d\n",
+ PR_REG_ISID_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_sa_res_key:
+ arg_p = match_strdup(&args[0]);
+ ret = strict_strtoull(arg_p, 0, &tmp_ll);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoull() failed for"
+ " sa_res_key=\n");
+ goto out;
+ }
+ sa_res_key = (u64)tmp_ll;
+ break;
+ /*
+ * PR APTPL Metadata for Reservation
+ */
+ case Opt_res_holder:
+ match_int(args, &arg);
+ res_holder = arg;
+ break;
+ case Opt_res_type:
+ match_int(args, &arg);
+ type = (u8)arg;
+ break;
+ case Opt_res_scope:
+ match_int(args, &arg);
+ scope = (u8)arg;
+ break;
+ case Opt_res_all_tg_pt:
+ match_int(args, &arg);
+ all_tg_pt = (int)arg;
+ break;
+ case Opt_mapped_lun:
+ match_int(args, &arg);
+ mapped_lun = (u32)arg;
+ break;
+ /*
+ * PR APTPL Metadata for Target Port
+ */
+ case Opt_target_fabric:
+ t_fabric = match_strdup(&args[0]);
+ break;
+ case Opt_target_node:
+ t_port = match_strdup(&args[0]);
+ if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+ printk(KERN_ERR "APTPL metadata target_node="
+ " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+ PR_APTPL_MAX_TPORT_LEN);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case Opt_tpgt:
+ match_int(args, &arg);
+ tpgt = (u16)arg;
+ break;
+ case Opt_port_rtpi:
+ match_int(args, &arg);
+ port_rpti = (u16)arg;
+ break;
+ case Opt_target_lun:
+ match_int(args, &arg);
+ target_lun = (u32)arg;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!(i_port) || !(t_port) || !(sa_res_key)) {
+ printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (res_holder && !(type)) {
+ printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+ " holder\n", type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+ i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+ res_holder, all_tg_pt, type);
+out:
+ kfree(orig);
+ return (ret == 0) ? count : ret;
+}
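
For reference, the store routine above expects one comma-separated line built from the tokens table: at minimum initiator_node=, target_node= and a non-zero sa_res_key=, plus the reservation and target-port fields when res_holder=1. A hypothetical example of such a line (all identifiers and values are made up):

	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,initiator_sid=10,sa_res_key=0x1234abcd,res_holder=1,res_type=3,mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.org.example:target,tpgt=1,port_rtpi=1,target_lun=0
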
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+ &target_core_dev_pr_res_holder.attr,
+ &target_core_dev_pr_res_pr_all_tgt_pts.attr,
+ &target_core_dev_pr_res_pr_generation.attr,
+ &target_core_dev_pr_res_pr_holder_tg_port.attr,
+ &target_core_dev_pr_res_pr_registered_i_pts.attr,
+ &target_core_dev_pr_res_pr_type.attr,
+ &target_core_dev_pr_res_type.attr,
+ &target_core_dev_pr_res_aptpl_active.attr,
+ &target_core_dev_pr_res_aptpl_metadata.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+ .show_attribute = target_core_dev_pr_attr_show,
+ .store_attribute = target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+ .ct_item_ops = &target_core_dev_pr_ops,
+ .ct_attrs = target_core_dev_pr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_pr_cit */
+
+/* Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+ int bl = 0;
+ ssize_t read_bytes = 0;
+
+ if (!(se_dev->se_dev_ptr))
+ return -ENODEV;
+
+ transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+ read_bytes += bl;
+ read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "info",
+ .ca_mode = S_IRUGO },
+ .show = target_core_show_dev_info,
+ .store = NULL,
+};
+
+static ssize_t target_core_store_dev_control(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+
+ if (!(se_dev->se_dev_su_ptr)) {
+ printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
+ "_dev_su_ptr\n");
+ return -EINVAL;
+ }
+
+ return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "control",
+ .ca_mode = S_IWUSR },
+ .show = NULL,
+ .store = target_core_store_dev_control,
+};
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+ if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+ return 0;
+
+ return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ ssize_t read_bytes;
+
+ if (count > (SE_DEV_ALIAS_LEN-1)) {
+ printk(KERN_ERR "alias count: %d exceeds"
+ " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+ SE_DEV_ALIAS_LEN-1);
+ return -EINVAL;
+ }
+
+ se_dev->su_dev_flags |= SDF_USING_ALIAS;
+ read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+ "%s", page);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&se_dev->se_dev_group.cg_item),
+ se_dev->se_dev_alias);
+
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "alias",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_alias,
+ .store = target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+ return 0;
+
+ return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ ssize_t read_bytes;
+
+ if (count > (SE_UDEV_PATH_LEN-1)) {
+ printk(KERN_ERR "udev_path count: %d exceeds"
+ " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+ SE_UDEV_PATH_LEN-1);
+ return -EINVAL;
+ }
+
+ se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+ read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+ "%s", page);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&se_dev->se_dev_group.cg_item),
+ se_dev->se_dev_udev_path);
+
+ return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "udev_path",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_udev_path,
+ .store = target_core_store_dev_udev_path,
+};
+
+static ssize_t target_core_store_dev_enable(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_device *dev;
+ struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_subsystem_api *t = hba->transport;
+ char *ptr;
+
+ ptr = strstr(page, "1");
+ if (!(ptr)) {
+ printk(KERN_ERR "For dev_enable ops, only valid value"
+ " is \"1\"\n");
+ return -EINVAL;
+ }
+ if ((se_dev->se_dev_ptr)) {
+ printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+ " object\n");
+ return -EEXIST;
+ }
+
+ if (t->check_configfs_dev_params(hba, se_dev) < 0)
+ return -EINVAL;
+
+ dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+ if (!(dev) || IS_ERR(dev))
+ return -EINVAL;
+
+ se_dev->se_dev_ptr = dev;
+ printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+ " %p\n", se_dev->se_dev_ptr);
+
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "enable",
+ .ca_mode = S_IWUSR },
+ .show = NULL,
+ .store = target_core_store_dev_enable,
+};
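+
+/*
+ * Together with the "control" and "info" attributes above, a typical
+ * (illustrative; the control string is subsystem plugin specific)
+ * sequence for bringing up a storage object is:
+ *
+ *   echo "<plugin specific parameters>" > $CONFIGFS/core/$HBA/$DEV/control
+ *   echo 1 > $CONFIGFS/core/$HBA/$DEV/enable
+ *   cat $CONFIGFS/core/$HBA/$DEV/info
+ *
+ * The control store calls ->set_configfs_dev_params(), and the enable
+ * store calls ->create_virtdevice() exactly once per storage object.
+ */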
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+ struct se_device *dev;
+ struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct config_item *lu_ci;
+ struct t10_alua_lu_gp *lu_gp;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ ssize_t len = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+ return len;
+
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem)) {
+ printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ " pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ lu_ci = &lu_gp->lu_gp_group.cg_item;
+ len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+ config_item_name(lu_ci), lu_gp->lu_gp_id);
+ }
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ return len;
+}
+
+static ssize_t target_core_store_alua_lu_gp(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev;
+ struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_hba *hba = su_dev->se_dev_hba;
+ struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ unsigned char buf[LU_GROUP_NAME_BUF];
+ int move = 0;
+
+ dev = su_dev->se_dev_ptr;
+ if (!(dev))
+ return -ENODEV;
+
+ if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+ printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item));
+ return -EINVAL;
+ }
+ if (count > LU_GROUP_NAME_BUF) {
+ printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+ return -EINVAL;
+ }
+ memset(buf, 0, LU_GROUP_NAME_BUF);
+ memcpy(buf, page, count);
+ /*
+ * Any ALUA logical unit alias besides "NULL" means we will be
+ * making a new group association.
+ */
+ if (strcmp(strstrip(buf), "NULL")) {
+ /*
+ * core_alua_get_lu_gp_by_name() will increment reference to
+ * struct t10_alua_lu_gp. This reference is released with
+ * core_alua_put_lu_gp_from_name() below.
+ */
+ lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+ if (!(lu_gp_new))
+ return -ENODEV;
+ }
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!(lu_gp_mem)) {
+ if (lu_gp_new)
+ core_alua_put_lu_gp_from_name(lu_gp_new);
+ printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ " pointer\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+ lu_gp = lu_gp_mem->lu_gp;
+ if ((lu_gp)) {
+ /*
+ * Clearing an existing lu_gp association, and replacing
+ * with NULL
+ */
+ if (!(lu_gp_new)) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+ " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+ " %hu\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ lu_gp->lu_gp_id);
+
+ __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ return count;
+ }
+ /*
+ * Removing existing association of lu_gp_mem with lu_gp
+ */
+ __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+ move = 1;
+ }
+ /*
+ * Associate lu_gp_mem with lu_gp_new.
+ */
+ __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+ spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+ " core/alua/lu_gps/%s, ID: %hu\n",
+ (move) ? "Moving" : "Adding",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+ lu_gp_new->lu_gp_id);
+
+ core_alua_put_lu_gp_from_name(lu_gp_new);
+ return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "alua_lu_gp",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_alua_lu_gp,
+ .store = target_core_store_alua_lu_gp,
+};
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+ &target_core_attr_dev_info.attr,
+ &target_core_attr_dev_control.attr,
+ &target_core_attr_dev_alias.attr,
+ &target_core_attr_dev_udev_path.attr,
+ &target_core_attr_dev_enable.attr,
+ &target_core_attr_dev_alua_lu_gp.attr,
+ NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+ struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+ struct se_subsystem_dev, se_dev_group);
+ struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ struct se_subsystem_api *t = hba->transport;
+ struct config_group *dev_cg = &se_dev->se_dev_group;
+
+ kfree(dev_cg->default_groups);
+ /*
+ * This pointer will be set when the storage object is enabled with:
+ * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
+ */
+ if (se_dev->se_dev_ptr) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ "virtual_device() for se_dev_ptr: %p\n",
+ se_dev->se_dev_ptr);
+
+ se_free_virtual_device(se_dev->se_dev_ptr, hba);
+ } else {
+ /*
+ * Release struct se_subsystem_dev->se_dev_su_ptr..
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ "device() for se_dev_su_ptr: %p\n",
+ se_dev->se_dev_su_ptr);
+
+ t->free_device(se_dev->se_dev_su_ptr);
+ }
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ "_dev_t: %p\n", se_dev);
+ kfree(se_dev);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(item), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_core_configfs_attribute *tc_attr = container_of(
+ attr, struct target_core_configfs_attribute, attr);
+
+ if (!(tc_attr->show))
+ return -EINVAL;
+
+ return tc_attr->show((void *)se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(item), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_core_configfs_attribute *tc_attr = container_of(
+ attr, struct target_core_configfs_attribute, attr);
+
+ if (!(tc_attr->store))
+ return -EINVAL;
+
+ return tc_attr->store((void *)se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+ .release = target_core_dev_release,
+ .show_attribute = target_core_dev_show,
+ .store_attribute = target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+ .ct_item_ops = &target_core_dev_item_ops,
+ .ct_attrs = lio_core_dev_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
+static struct target_core_alua_lu_gp_attribute \
+ target_core_alua_lu_gp_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_alua_lu_gp_show_attr_##_name, \
+ target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
+static struct target_core_alua_lu_gp_attribute \
+ target_core_alua_lu_gp_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+ struct t10_alua_lu_gp *lu_gp,
+ char *page)
+{
+ if (!(lu_gp->lu_gp_valid_id))
+ return 0;
+
+ return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+ struct t10_alua_lu_gp *lu_gp,
+ const char *page,
+ size_t count)
+{
+ struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+ unsigned long lu_gp_id;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &lu_gp_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " lu_gp_id\n", ret);
+ return -EINVAL;
+ }
+ if (lu_gp_id > 0x0000ffff) {
+ printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+ " 0x0000ffff\n", lu_gp_id);
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+ if (ret < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s to ID: %hu\n",
+ config_item_name(&alua_lu_gp_cg->cg_item),
+ lu_gp->lu_gp_id);
+
+ return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+ struct t10_alua_lu_gp *lu_gp,
+ char *page)
+{
+ struct se_device *dev;
+ struct se_hba *hba;
+ struct se_subsystem_dev *su_dev;
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+ ssize_t len = 0, cur_len;
+ unsigned char buf[LU_GROUP_NAME_BUF];
+
+ memset(buf, 0, LU_GROUP_NAME_BUF);
+
+ spin_lock(&lu_gp->lu_gp_lock);
+ list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+ dev = lu_gp_mem->lu_gp_mem_dev;
+ su_dev = dev->se_sub_dev;
+ hba = su_dev->se_dev_hba;
+
+ cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+ config_item_name(&hba->hba_group.cg_item),
+ config_item_name(&su_dev->se_dev_group.cg_item));
+ cur_len++; /* Extra byte for NULL terminator */
+
+ if ((cur_len + len) > PAGE_SIZE) {
+ printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ "_members buffer\n");
+ break;
+ }
+ memcpy(page+len, buf, cur_len);
+ len += cur_len;
+ }
+ spin_unlock(&lu_gp->lu_gp_lock);
+
+ return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+ &target_core_alua_lu_gp_lu_gp_id.attr,
+ &target_core_alua_lu_gp_members.attr,
+ NULL,
+};
+
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+ struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+ struct t10_alua_lu_gp, lu_gp_group);
+
+ core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+ .release = target_core_alua_lu_gp_release,
+ .show_attribute = target_core_alua_lu_gp_attr_show,
+ .store_attribute = target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+ .ct_item_ops = &target_core_alua_lu_gp_ops,
+ .ct_attrs = target_core_alua_lu_gp_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua_lu_gp *lu_gp;
+ struct config_group *alua_lu_gp_cg = NULL;
+ struct config_item *alua_lu_gp_ci = NULL;
+
+ lu_gp = core_alua_allocate_lu_gp(name, 0);
+ if (IS_ERR(lu_gp))
+ return NULL;
+
+ alua_lu_gp_cg = &lu_gp->lu_gp_group;
+ alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+ config_group_init_type_name(alua_lu_gp_cg, name,
+ &target_core_alua_lu_gp_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s\n",
+ config_item_name(alua_lu_gp_ci));
+
+ return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+ struct t10_alua_lu_gp, lu_gp_group);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+ " Group: core/alua/lu_gps/%s, ID: %hu\n",
+ config_item_name(item), lu_gp->lu_gp_id);
+ /*
+ * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+ * -> target_core_alua_lu_gp_release()
+ */
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+ .make_group = &target_core_alua_create_lu_gp,
+ .drop_item = &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+ .ct_item_ops = NULL,
+ .ct_group_ops = &target_core_alua_lu_gps_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
+static struct target_core_alua_tg_pt_gp_attribute \
+ target_core_alua_tg_pt_gp_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_alua_tg_pt_gp_show_attr_##_name, \
+ target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
+static struct target_core_alua_tg_pt_gp_attribute \
+ target_core_alua_tg_pt_gp_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ unsigned long tmp;
+ int new_state, ret;
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+ printk(KERN_ERR "Unable to do implict ALUA on non valid"
+ " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ return -EINVAL;
+ }
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk("Unable to extract new ALUA access state from"
+ " %s\n", page);
+ return -EINVAL;
+ }
+ new_state = (int)tmp;
+
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+ printk(KERN_ERR "Unable to process implict configfs ALUA"
+ " transition while TPGS_IMPLICT_ALUA is diabled\n");
+ return -EINVAL;
+ }
+
+ ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+ NULL, NULL, new_state, 0);
+ return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%s\n",
+ core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int new_status, ret;
+
+ if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+ printk(KERN_ERR "Unable to do set ALUA access status on non"
+ " valid tg_pt_gp ID: %hu\n",
+ tg_pt_gp->tg_pt_gp_valid_id);
+ return -EINVAL;
+ }
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract new ALUA access status"
+ " from %s\n", page);
+ return -EINVAL;
+ }
+ new_status = (int)tmp;
+
+ if ((new_status != ALUA_STATUS_NONE) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+ new_status);
+ return -EINVAL;
+ }
+
+ tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for alua_write_metadata:"
+ " %lu\n", tmp);
+ return -EINVAL;
+ }
+ tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ return 0;
+
+ return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ const char *page,
+ size_t count)
+{
+ struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+ unsigned long tg_pt_gp_id;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " tg_pt_gp_id\n", ret);
+ return -EINVAL;
+ }
+ if (tg_pt_gp_id > 0x0000ffff) {
+ printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+ " 0x0000ffff\n", tg_pt_gp_id);
+ return -EINVAL;
+ }
+
+ ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+ if (ret < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+ "core/alua/tg_pt_gps/%s to ID: %hu\n",
+ config_item_name(&alua_tg_pt_gp_cg->cg_item),
+ tg_pt_gp->tg_pt_gp_id);
+
+ return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ char *page)
+{
+ struct se_port *port;
+ struct se_portal_group *tpg;
+ struct se_lun *lun;
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ ssize_t len = 0, cur_len;
+ unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+ memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+ tg_pt_gp_mem_list) {
+ port = tg_pt_gp_mem->tg_pt;
+ tpg = port->sep_tpg;
+ lun = port->sep_lun;
+
+ cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+ "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ config_item_name(&lun->lun_group.cg_item));
+ cur_len++; /* Extra byte for NULL terminator */
+
+ if ((cur_len + len) > PAGE_SIZE) {
+ printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ "_members buffer\n");
+ break;
+ }
+ memcpy(page+len, buf, cur_len);
+ len += cur_len;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+ return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+ tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+ &target_core_alua_tg_pt_gp_alua_access_state.attr,
+ &target_core_alua_tg_pt_gp_alua_access_status.attr,
+ &target_core_alua_tg_pt_gp_alua_access_type.attr,
+ &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+ &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+ &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+ &target_core_alua_tg_pt_gp_preferred.attr,
+ &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+ &target_core_alua_tg_pt_gp_members.attr,
+ NULL,
+};
+
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+ struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+ .release = target_core_alua_tg_pt_gp_release,
+ .show_attribute = target_core_alua_tg_pt_gp_attr_show,
+ .store_attribute = target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+ .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
+ .ct_attrs = target_core_alua_tg_pt_gp_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua *alua = container_of(group, struct t10_alua,
+ alua_tg_pt_gps_group);
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+ struct config_group *alua_tg_pt_gp_cg = NULL;
+ struct config_item *alua_tg_pt_gp_ci = NULL;
+
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+ if (!(tg_pt_gp))
+ return NULL;
+
+ alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+ alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+ config_group_init_type_name(alua_tg_pt_gp_cg, name,
+ &target_core_alua_tg_pt_gp_cit);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+ " Group: alua/tg_pt_gps/%s\n",
+ config_item_name(alua_tg_pt_gp_ci));
+
+ return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+ struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+ " Group: alua/tg_pt_gps/%s, ID: %hu\n",
+ config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+ /*
+ * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+ * -> target_core_alua_tg_pt_gp_release().
+ */
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+ .make_group = &target_core_alua_create_tg_pt_gp,
+ .drop_item = &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+ .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua. There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+ .ct_item_ops = NULL,
+ .ct_attrs = NULL,
+ .ct_owner = THIS_MODULE,
+};
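+
+/*
+ * Illustrative use of the layout described above (paths relative to
+ * /sys/kernel/config/target; group names are examples only):
+ *
+ *   mkdir core/alua/lu_gps/my_lu_gp
+ *   echo 1 > core/alua/lu_gps/my_lu_gp/lu_gp_id
+ *   echo my_lu_gp > core/$HBA/$DEV/alua_lu_gp
+ *   echo NULL > core/$HBA/$DEV/alua_lu_gp      (drops the association)
+ *
+ * Per device ALUA Target Port Groups live under core/$HBA/$DEV/alua/ and
+ * are created via target_core_alua_create_tg_pt_gp() above.
+ */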
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+ struct config_group *group,
+ const char *name)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ struct se_subsystem_dev *se_dev;
+ struct se_subsystem_api *t;
+ struct config_item *hba_ci = &group->cg_item;
+ struct se_hba *hba = item_to_hba(hba_ci);
+ struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+
+ if (mutex_lock_interruptible(&hba->hba_access_mutex))
+ return NULL;
+
+ /*
+ * Locate the struct se_subsystem_api from parent's struct se_hba.
+ */
+ t = hba->transport;
+
+ se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+ if (!se_dev) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_subsystem_dev\n");
+ goto unlock;
+ }
+ INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_reservation.registration_lock);
+ spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock_init(&se_dev->se_dev_lock);
+ se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_wwn.t10_sub_dev = se_dev;
+ se_dev->t10_alua.t10_sub_dev = se_dev;
+ se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+ se_dev->se_dev_hba = hba;
+ dev_cg = &se_dev->se_dev_group;
+
+ dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ GFP_KERNEL);
+ if (!(dev_cg->default_groups))
+ goto out;
+ /*
+ * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+ * for ->allocate_virtdevice()
+ *
+ * se_dev->se_dev_ptr will be set after ->create_virtdevice()
+ * has been called successfully in the next level up in the
+ * configfs tree for device object's struct config_group.
+ */
+ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+ if (!(se_dev->se_dev_su_ptr)) {
+ printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ " from allocate_virtdevice()\n");
+ goto out;
+ }
+ spin_lock(&se_global->g_device_lock);
+ list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
+ spin_unlock(&se_global->g_device_lock);
+
+ config_group_init_type_name(&se_dev->se_dev_group, name,
+ &target_core_dev_cit);
+ config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+ &target_core_dev_attrib_cit);
+ config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+ &target_core_dev_pr_cit);
+ config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+ &target_core_dev_wwn_cit);
+ config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+ "alua", &target_core_alua_tg_pt_gps_cit);
+ dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+ dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+ dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+ dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+ dev_cg->default_groups[4] = NULL;
+ /*
+ * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+ */
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+ if (!(tg_pt_gp))
+ goto out;
+
+ tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(tg_pt_gp_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+ "default_groups\n");
+ goto out;
+ }
+
+ config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+ "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+ tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+ tg_pt_gp_cg->default_groups[1] = NULL;
+ T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+ " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+ mutex_unlock(&hba->hba_access_mutex);
+ return &se_dev->se_dev_group;
+out:
+ if (T10_ALUA(se_dev)->default_tg_pt_gp) {
+ core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+ T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ }
+ if (tg_pt_gp_cg)
+ kfree(tg_pt_gp_cg->default_groups);
+ if (dev_cg)
+ kfree(dev_cg->default_groups);
+ if (se_dev->se_dev_su_ptr)
+ t->free_device(se_dev->se_dev_su_ptr);
+ kfree(se_dev);
+unlock:
+ mutex_unlock(&hba->hba_access_mutex);
+ return NULL;
+}
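+
+/*
+ * The default groups wired up above give each storage object a configfs
+ * layout of roughly (sketch of what this function creates):
+ *
+ *   core/$HBA/$DEV/
+ *     attrib/                  (target_core_dev_attrib_cit)
+ *     pr/                      (target_core_dev_pr_cit)
+ *     wwn/                     (target_core_dev_wwn_cit)
+ *     alua/default_tg_pt_gp/   (target_core_alua_tg_pt_gp_cit)
+ */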
+
+static void target_core_drop_subdev(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+ struct se_subsystem_dev, se_dev_group);
+ struct se_hba *hba;
+ struct se_subsystem_api *t;
+ struct config_item *df_item;
+ struct config_group *dev_cg, *tg_pt_gp_cg;
+ int i;
+
+ hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+ mutex_lock(&hba->hba_access_mutex);
+ t = hba->transport;
+
+ spin_lock(&se_global->g_device_lock);
+ list_del(&se_dev->g_se_dev_list);
+ spin_unlock(&se_global->g_device_lock);
+
+ tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+ df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+ tg_pt_gp_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(tg_pt_gp_cg->default_groups);
+ /*
+ * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
+ * directly from target_core_alua_tg_pt_gp_release().
+ */
+ T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+
+ dev_cg = &se_dev->se_dev_group;
+ for (i = 0; dev_cg->default_groups[i]; i++) {
+ df_item = &dev_cg->default_groups[i]->cg_item;
+ dev_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ /*
+ * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+ * from target_core_dev_item_ops->release() ->target_core_dev_release().
+ */
+ config_item_put(item);
+ mutex_unlock(&hba->hba_access_mutex);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+ .make_group = target_core_make_subdev,
+ .drop_item = target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode) \
+static struct target_core_hba_attribute \
+ target_core_hba_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_core_hba_show_attr_##_name, \
+ target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name) \
+static struct target_core_hba_attribute \
+ target_core_hba_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+ struct se_hba *hba,
+ char *page)
+{
+ return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+ hba->hba_id, hba->transport->name,
+ TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+ char *page)
+{
+ int hba_mode = 0;
+
+ if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+ hba_mode = 1;
+
+ return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+ const char *page, size_t count)
+{
+ struct se_subsystem_api *transport = hba->transport;
+ unsigned long mode_flag;
+ int ret;
+
+ if (transport->pmode_enable_hba == NULL)
+ return -EINVAL;
+
+ ret = strict_strtoul(page, 0, &mode_flag);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+ return -EINVAL;
+ }
+
+ spin_lock(&hba->device_lock);
+ if (!(list_empty(&hba->hba_dev_list))) {
+ printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+ spin_unlock(&hba->device_lock);
+ return -EINVAL;
+ }
+ spin_unlock(&hba->device_lock);
+
+ ret = transport->pmode_enable_hba(hba, mode_flag);
+ if (ret < 0)
+ return -EINVAL;
+ if (ret > 0)
+ hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+ else if (ret == 0)
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+ return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
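+
+/*
+ * A minimal sketch of using the two HBA attributes (paths illustrative):
+ *
+ *   cat $CONFIGFS/core/$HBA/hba_info
+ *   echo 1 > $CONFIGFS/core/$HBA/hba_mode
+ *
+ * The hba_mode store only succeeds while the HBA has no active devices
+ * and the subsystem plugin implements ->pmode_enable_hba().
+ */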
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static void target_core_hba_release(struct config_item *item)
+{
+ struct se_hba *hba = container_of(to_config_group(item),
+ struct se_hba, hba_group);
+ core_delete_hba(hba);
+}
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+ &target_core_hba_hba_info.attr,
+ &target_core_hba_hba_mode.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+ .release = target_core_hba_release,
+ .show_attribute = target_core_hba_attr_show,
+ .store_attribute = target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+ .ct_item_ops = &target_core_hba_item_ops,
+ .ct_group_ops = &target_core_hba_group_ops,
+ .ct_attrs = target_core_hba_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+ struct config_group *group,
+ const char *name)
+{
+ char *se_plugin_str, *str, *str2;
+ struct se_hba *hba;
+ char buf[TARGET_CORE_NAME_MAX_LEN];
+ unsigned long plugin_dep_id = 0;
+ int ret;
+
+ memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+ if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+ printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+ " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+ TARGET_CORE_NAME_MAX_LEN);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+ snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+ str = strstr(buf, "_");
+ if (!(str)) {
+ printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+ return ERR_PTR(-EINVAL);
+ }
+ se_plugin_str = buf;
+ /*
+ * Special case for subsystem plugins that have "_" in their names.
+ * Namely rd_direct and rd_mcp..
+ */
+ str2 = strstr(str+1, "_");
+ if ((str2)) {
+ *str2 = '\0'; /* Terminate for *se_plugin_str */
+ str2++; /* Skip to start of plugin dependent ID */
+ str = str2;
+ } else {
+ *str = '\0'; /* Terminate for *se_plugin_str */
+ str++; /* Skip to start of plugin dependent ID */
+ }
+
+ ret = strict_strtoul(str, 0, &plugin_dep_id);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoul() returned %d for"
+ " plugin_dep_id\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * Load up TCM subsystem plugins if they have not already been loaded.
+ */
+ if (transport_subsystem_check_init() < 0)
+ return ERR_PTR(-EINVAL);
+
+ hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+ if (IS_ERR(hba))
+ return ERR_CAST(hba);
+
+ config_group_init_type_name(&hba->hba_group, name,
+ &target_core_hba_cit);
+
+ return &hba->hba_group;
+}
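+
+/*
+ * HBA group names follow $SUBSYSTEM_PLUGIN_$HOST_ID as parsed above, so
+ * a hypothetical set of mkdir calls might look like (plugin names other
+ * than rd_direct/rd_mcp are examples only):
+ *
+ *   mkdir /sys/kernel/config/target/core/pscsi_0
+ *   mkdir /sys/kernel/config/target/core/rd_mcp_1
+ *
+ * The trailing number becomes the plugin_dep_id passed to core_alloc_hba().
+ */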
+
+static void target_core_call_delhbafromtarget(
+ struct config_group *group,
+ struct config_item *item)
+{
+ /*
+ * core_delete_hba() is called from target_core_hba_item_ops->release()
+ * -> target_core_hba_release()
+ */
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+ .make_group = target_core_call_addhbatotarget,
+ .drop_item = target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+ .ct_item_ops = NULL,
+ .ct_group_ops = &target_core_group_ops,
+ .ct_attrs = NULL,
+ .ct_owner = THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_hba_cit */
+
+static int target_core_init_configfs(void)
+{
+ struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+ struct config_group *lu_gp_cg = NULL;
+ struct configfs_subsystem *subsys;
+ struct t10_alua_lu_gp *lu_gp;
+ int ret;
+
+ printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+ " Engine: %s on %s/%s on "UTS_RELEASE"\n",
+ TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+ subsys = target_core_subsystem[0];
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+
+ INIT_LIST_HEAD(&g_tf_list);
+ mutex_init(&g_tf_lock);
+ init_scsi_index_table();
+ ret = init_se_global();
+ if (ret < 0)
+ return -1;
+ /*
+ * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+ * and ALUA Logical Unit Group and Target Port Group infrastructure.
+ */
+ target_cg = &subsys->su_group;
+ target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(target_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&se_global->target_core_hbagroup,
+ "core", &target_core_cit);
+ target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+ target_cg->default_groups[1] = NULL;
+ /*
+ * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+ */
+ hba_cg = &se_global->target_core_hbagroup;
+ hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(hba_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+ goto out_global;
+ }
+ config_group_init_type_name(&se_global->alua_group,
+ "alua", &target_core_alua_cit);
+ hba_cg->default_groups[0] = &se_global->alua_group;
+ hba_cg->default_groups[1] = NULL;
+ /*
+ * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+ * groups under /sys/kernel/config/target/core/alua/
+ */
+ alua_cg = &se_global->alua_group;
+ alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(alua_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&se_global->alua_lu_gps_group,
+ "lu_gps", &target_core_alua_lu_gps_cit);
+ alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+ alua_cg->default_groups[1] = NULL;
+ /*
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+ if (IS_ERR(lu_gp))
+ goto out_global;
+
+ lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!(lu_gp_cg->default_groups)) {
+ printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+ goto out_global;
+ }
+
+ config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+ &target_core_alua_lu_gp_cit);
+ lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+ lu_gp_cg->default_groups[1] = NULL;
+ se_global->default_lu_gp = lu_gp;
+ /*
+ * Register the target_core_mod subsystem with configfs.
+ */
+ ret = configfs_register_subsystem(subsys);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ ret, subsys->su_group.cg_item.ci_namebuf);
+ goto out_global;
+ }
+ printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+ " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+ " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+ /*
+ * Register built-in RAMDISK subsystem logic for virtual LUN 0
+ */
+ ret = rd_module_init();
+ if (ret < 0)
+ goto out;
+
+ if (core_dev_setup_virtual_lun0() < 0)
+ goto out;
+
+ return 0;
+
+out:
+ configfs_unregister_subsystem(subsys);
+ core_dev_release_virtual_lun0();
+ rd_module_exit();
+out_global:
+ if (se_global->default_lu_gp) {
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+ }
+ if (lu_gp_cg)
+ kfree(lu_gp_cg->default_groups);
+ if (alua_cg)
+ kfree(alua_cg->default_groups);
+ if (hba_cg)
+ kfree(hba_cg->default_groups);
+ kfree(target_cg->default_groups);
+ release_se_global();
+ return -1;
+}
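+
+/*
+ * After a successful target_core_init_configfs() the default tree built
+ * by the groups registered above is:
+ *
+ *   /sys/kernel/config/target/
+ *     core/
+ *       alua/
+ *         lu_gps/
+ *           default_lu_gp/
+ */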
+
+static void target_core_exit_configfs(void)
+{
+ struct configfs_subsystem *subsys;
+ struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+ struct config_item *item;
+ int i;
+
+ se_global->in_shutdown = 1;
+ subsys = target_core_subsystem[0];
+
+ lu_gp_cg = &se_global->alua_lu_gps_group;
+ for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+ item = &lu_gp_cg->default_groups[i]->cg_item;
+ lu_gp_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(lu_gp_cg->default_groups);
+ lu_gp_cg->default_groups = NULL;
+
+ alua_cg = &se_global->alua_group;
+ for (i = 0; alua_cg->default_groups[i]; i++) {
+ item = &alua_cg->default_groups[i]->cg_item;
+ alua_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(alua_cg->default_groups);
+ alua_cg->default_groups = NULL;
+
+ hba_cg = &se_global->target_core_hbagroup;
+ for (i = 0; hba_cg->default_groups[i]; i++) {
+ item = &hba_cg->default_groups[i]->cg_item;
+ hba_cg->default_groups[i] = NULL;
+ config_item_put(item);
+ }
+ kfree(hba_cg->default_groups);
+ hba_cg->default_groups = NULL;
+ /*
+ * We expect subsys->su_group.default_groups to be released
+ * by configfs subsystem provider logic..
+ */
+ configfs_unregister_subsystem(subsys);
+ kfree(subsys->su_group.default_groups);
+
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+
+ printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+ " Infrastructure\n");
+
+ core_dev_release_virtual_lun0();
+ rd_module_exit();
+ release_se_global();
+
+ return;
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 000000000000..5da051a07fa3
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1693 @@
+/*******************************************************************************
+ * Filename: target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
+int transport_get_lun_for_cmd(
+ struct se_cmd *se_cmd,
+ unsigned char *cdb,
+ u32 unpacked_lun)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun = NULL;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ unsigned long flags;
+ int read_only = 0;
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ deve = se_cmd->se_deve =
+ &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (se_cmd) {
+ deve->total_cmds++;
+ deve->total_bytes += se_cmd->data_length;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ if (deve->lun_flags &
+ TRANSPORT_LUNFLAGS_READ_ONLY) {
+ read_only = 1;
+ goto out;
+ }
+ deve->write_bytes += se_cmd->data_length;
+ } else if (se_cmd->data_direction ==
+ DMA_FROM_DEVICE) {
+ deve->read_bytes += se_cmd->data_length;
+ }
+ }
+ deve->deve_cmds++;
+
+ se_lun = se_cmd->se_lun = deve->se_lun;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ }
+out:
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ if (!se_lun) {
+ if (read_only) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ return -1;
+ } else {
+ /*
+ * Use the se_portal_group->tpg_virt_lun0 to allow for
+ * REPORT_LUNS, et al to be returned when no active
+ * MappedLUN=0 exists for this Initiator Port.
+ */
+ if (unpacked_lun != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ return -1;
+ }
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+#if 0
+ printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
+ CMD_TFO(se_cmd)->get_fabric_name());
+#endif
+ se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->orig_fe_lun = 0;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ }
+ }
+ /*
+ * Determine if the struct se_lun is online.
+ */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+ if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+
+ {
+ struct se_device *dev = se_lun->lun_se_dev;
+ spin_lock(&dev->stats_lock);
+ dev->num_cmds++;
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ dev->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ dev->read_bytes += se_cmd->data_length;
+ spin_unlock(&dev->stats_lock);
+ }
+
+ /*
+ * Add the struct se_cmd to the struct se_lun's cmd list. This list is used
+ * for tracking state of struct se_cmds during LUN shutdown events.
+ */
+ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+ list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+ atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+#if 0
+ printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
+ CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+#endif
+ spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_cmd);
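+
+/*
+ * A minimal, hypothetical fabric-side sketch of how the export above is
+ * expected to be used: after building a struct se_cmd for an incoming
+ * I/O, the fabric module calls
+ *
+ *   ret = transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun);
+ *
+ * and, when ret < 0, returns a CHECK_CONDITION built from the
+ * se_cmd->scsi_sense_reason value set above instead of executing the CDB.
+ */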
+
+int transport_get_lun_for_tmr(
+ struct se_cmd *se_cmd,
+ u32 unpacked_lun)
+{
+ struct se_device *dev = NULL;
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun = NULL;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ deve = se_cmd->se_deve =
+ &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
+ dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+ }
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ if (!se_lun) {
+ printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ " Access for 0x%08x\n",
+ CMD_TFO(se_cmd)->get_fabric_name(),
+ unpacked_lun);
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+ /*
+ * Determine if the struct se_lun is online.
+ */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+ if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -1;
+ }
+
+ spin_lock(&dev->se_tmr_lock);
+ list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
+ spin_unlock(&dev->se_tmr_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_tmr);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+ struct se_node_acl *nacl,
+ u16 rtpi)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_port *port;
+ struct se_portal_group *tpg = nacl->se_tpg;
+ u32 i;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ lun = deve->se_lun;
+ if (!(lun)) {
+ printk(KERN_ERR "%s device entries device pointer is"
+ " NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ port = lun->lun_sep;
+ if (!(port)) {
+ printk(KERN_ERR "%s device entries device pointer is"
+ " NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ if (port->sep_rtpi != rtpi)
+ continue;
+
+ atomic_inc(&deve->pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return deve;
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return NULL;
+}
+
+int core_free_device_list_for_node(
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ u32 i;
+
+ if (!nacl->device_list)
+ return 0;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ if (!deve->se_lun) {
+ printk(KERN_ERR "%s device entries device pointer is"
+ " NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+ lun = deve->se_lun;
+
+ spin_unlock_irq(&nacl->device_list_lock);
+ core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ spin_lock_irq(&nacl->device_list_lock);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ kfree(nacl->device_list);
+ nacl->device_list = NULL;
+
+ return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+ struct se_dev_entry *deve;
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+ deve->deve_cmds--;
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ return;
+}
+
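+/*
+ * Update the READ-ONLY/READ-WRITE access flags of an existing MappedLUN
+ * entry under nacl->device_list_lock.
+ */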
+void core_update_device_list_access(
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[mapped_lun];
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ return;
+}
+
+/* core_update_device_list_for_node():
+ *
+ *
+ */
+int core_update_device_list_for_node(
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl,
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg,
+ int enable)
+{
+ struct se_port *port = lun->lun_sep;
+ struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+ int trans = 0;
+ /*
+ * If the MappedLUN entry is being disabled, the entry in
+ * port->sep_alua_list must be removed now before clearing the
+ * struct se_dev_entry pointers below as logic in
+ * core_alua_do_transition_tg_pt() depends on these being present.
+ */
+ if (!(enable)) {
+ /*
+ * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
+ */
+ spin_lock_bh(&port->sep_alua_lock);
+ list_del(&deve->alua_port_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+ }
+
+ spin_lock_irq(&nacl->device_list_lock);
+ if (enable) {
+ /*
+		 * Check if the call is handling demo mode -> explicit LUN ACL
+		 * transition. This transition must be for the same struct se_lun
+		 * + mapped_lun that was set up in demo mode..
+ */
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (deve->se_lun_acl != NULL) {
+ printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+ " LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -1;
+ }
+ if (deve->se_lun != lun) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" not match passed struct se_lun for demo"
+					" mode -> explicit LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -1;
+ }
+ deve->se_lun_acl = lun_acl;
+ trans = 1;
+ } else {
+ deve->se_lun = lun;
+ deve->se_lun_acl = lun_acl;
+ deve->mapped_lun = mapped_lun;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+ }
+
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ }
+
+ if (trans) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return 0;
+ }
+ deve->creation_time = get_jiffies_64();
+ deve->attach_count++;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ return 0;
+ }
+ /*
+ * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
+ * PR operation to complete.
+ */
+ spin_unlock_irq(&nacl->device_list_lock);
+ while (atomic_read(&deve->pr_ref_count) != 0)
+ cpu_relax();
+ spin_lock_irq(&nacl->device_list_lock);
+ /*
+ * Disable struct se_dev_entry LUN ACL mapping
+ */
+ core_scsi3_ua_release_all(deve);
+ deve->se_lun = NULL;
+ deve->se_lun_acl = NULL;
+ deve->lun_flags = 0;
+ deve->creation_time = 0;
+ deve->attach_count--;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+ return 0;
+}
+
+/* core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+ struct se_node_acl *nacl;
+ struct se_dev_entry *deve;
+ u32 i;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+ if (lun != deve->se_lun)
+ continue;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ core_update_device_list_for_node(lun, NULL,
+ deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+ nacl, tpg, 0);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return;
+}
+
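+/*
+ * Allocate a struct se_port for a new fabric export of *dev and assign
+ * it the next unused, non-zero RELATIVE TARGET PORT IDENTIFIER.
+ */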
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+ struct se_port *port, *port_tmp;
+
+ port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+ if (!(port)) {
+ printk(KERN_ERR "Unable to allocate struct se_port\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&port->sep_alua_list);
+ INIT_LIST_HEAD(&port->sep_list);
+ atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+ spin_lock_init(&port->sep_alua_lock);
+ mutex_init(&port->sep_tg_pt_md_mutex);
+
+ spin_lock(&dev->se_port_lock);
+ if (dev->dev_port_count == 0x0000ffff) {
+ printk(KERN_WARNING "Reached dev->dev_port_count =="
+ " 0x0000ffff\n");
+ spin_unlock(&dev->se_port_lock);
+ return NULL;
+ }
+again:
+ /*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+ * Here is the table from spc4r17 section 7.7.3.8.
+ *
+ * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+ *
+ * Code Description
+ * 0h Reserved
+ * 1h Relative port 1, historically known as port A
+ * 2h Relative port 2, historically known as port B
+ * 3h to FFFFh Relative port 3 through 65 535
+ */
+ port->sep_rtpi = dev->dev_rpti_counter++;
+ if (!(port->sep_rtpi))
+ goto again;
+
+ list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+ /*
+		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
+ * for 16-bit wrap..
+ */
+ if (port->sep_rtpi == port_tmp->sep_rtpi)
+ goto again;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return port;
+}
+
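+/*
+ * Bind a newly allocated struct se_port to its fabric TPG and LUN, add
+ * it to the device's dev_sep_list, and attach it to the default ALUA
+ * Target Port Group when SPC-3 ALUA emulation is enabled.
+ */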
+static void core_export_port(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_port *port,
+ struct se_lun *lun)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
+ spin_lock(&dev->se_port_lock);
+ spin_lock(&lun->lun_sep_lock);
+ port->sep_tpg = tpg;
+ port->sep_lun = lun;
+ lun->lun_sep = port;
+ spin_unlock(&lun->lun_sep_lock);
+
+ list_add_tail(&port->sep_list, &dev->dev_sep_list);
+ spin_unlock(&dev->se_port_lock);
+
+ if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+ tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+ if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+ printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+ "_gp_member_t\n");
+ return;
+ }
+ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ T10_ALUA(su_dev)->default_tg_pt_gp);
+ spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+ " Group: alua/default_tg_pt_gp\n",
+ TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+ }
+
+ dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ * Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+{
+ /*
+ * Wait for any port reference for PR ALL_TG_PT=1 operation
+ * to complete in __core_scsi3_alloc_registration()
+ */
+ spin_unlock(&dev->se_port_lock);
+	while (atomic_read(&port->sep_tg_pt_ref_cnt))
+ cpu_relax();
+ spin_lock(&dev->se_port_lock);
+
+ core_alua_free_tg_pt_gp_mem(port);
+
+ list_del(&port->sep_list);
+ dev->dev_port_count--;
+ kfree(port);
+
+ return;
+}
+
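+/*
+ * Export *dev through *tpg as *lun: allocate a struct se_port, start the
+ * device, take a device export object reference, and wire up the new
+ * port via core_export_port().
+ */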
+int core_dev_export(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ struct se_port *port;
+
+ port = core_alloc_port(dev);
+ if (!(port))
+ return -1;
+
+ lun->lun_se_dev = dev;
+ se_dev_start(dev);
+
+ atomic_inc(&dev->dev_export_obj.obj_access_count);
+ core_export_port(dev, tpg, port, lun);
+ return 0;
+}
+
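+/*
+ * Reverse core_dev_export(): drop the export object reference, release
+ * the struct se_port, and stop the device before clearing lun->lun_se_dev.
+ */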
+void core_dev_unexport(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ struct se_port *port = lun->lun_sep;
+
+ spin_lock(&lun->lun_sep_lock);
+ if (lun->lun_se_dev == NULL) {
+ spin_unlock(&lun->lun_sep_lock);
+ return;
+ }
+ spin_unlock(&lun->lun_sep_lock);
+
+ spin_lock(&dev->se_port_lock);
+ atomic_dec(&dev->dev_export_obj.obj_access_count);
+ core_release_port(dev, port);
+ spin_unlock(&dev->se_port_lock);
+
+ se_dev_stop(dev);
+ lun->lun_se_dev = NULL;
+}
+
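+/*
+ * Emulate REPORT LUNS: walk the session NodeACL's device_list, append
+ * each accessible mapped LUN to the response payload, and finally set
+ * the 4-byte LUN LIST LENGTH header (see SPC-2 r20 section 7.19).
+ */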
+int transport_core_report_lun_response(struct se_cmd *se_cmd)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *se_lun;
+ struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_task *se_task;
+ unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8;
+ u64 i, lun;
+
+ list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+ break;
+
+ if (!(se_task)) {
+ printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+ * coming via a target_core_mod PASSTHROUGH op, and not through
+ * a $FABRIC_MOD. In that case, report LUN=0 only.
+ */
+ if (!(se_sess)) {
+ lun = 0;
+ buf[offset++] = ((lun >> 56) & 0xff);
+ buf[offset++] = ((lun >> 48) & 0xff);
+ buf[offset++] = ((lun >> 40) & 0xff);
+ buf[offset++] = ((lun >> 32) & 0xff);
+ buf[offset++] = ((lun >> 24) & 0xff);
+ buf[offset++] = ((lun >> 16) & 0xff);
+ buf[offset++] = ((lun >> 8) & 0xff);
+ buf[offset++] = (lun & 0xff);
+ lun_count = 1;
+ goto done;
+ }
+
+ spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &SE_NODE_ACL(se_sess)->device_list[i];
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+ se_lun = deve->se_lun;
+ /*
+ * We determine the correct LUN LIST LENGTH even once we
+ * have reached the initial allocation length.
+ * See SPC2-R20 7.19.
+ */
+ lun_count++;
+ if ((cdb_offset + 8) >= se_cmd->data_length)
+ continue;
+
+ lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
+ buf[offset++] = ((lun >> 56) & 0xff);
+ buf[offset++] = ((lun >> 48) & 0xff);
+ buf[offset++] = ((lun >> 40) & 0xff);
+ buf[offset++] = ((lun >> 32) & 0xff);
+ buf[offset++] = ((lun >> 24) & 0xff);
+ buf[offset++] = ((lun >> 16) & 0xff);
+ buf[offset++] = ((lun >> 8) & 0xff);
+ buf[offset++] = (lun & 0xff);
+ cdb_offset += 8;
+ }
+ spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+ /*
+ * See SPC3 r07, page 159.
+ */
+done:
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* se_release_device_for_hba():
+ *
+ *
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+ (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+ se_dev_stop(dev);
+
+ if (dev->dev_ptr) {
+ kthread_stop(dev->process_thread);
+ if (dev->transport->free_device)
+ dev->transport->free_device(dev->dev_ptr);
+ }
+
+ spin_lock(&hba->device_lock);
+ list_del(&dev->dev_list);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ kfree(dev->dev_status_queue_obj);
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+
+ return;
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+ struct t10_vpd *vpd, *vpd_tmp;
+
+ spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+ list_for_each_entry_safe(vpd, vpd_tmp,
+ &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+ list_del(&vpd->vpd_list);
+ kfree(vpd);
+ }
+ spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+
+ return;
+}
+
+/*
+ * Called with struct se_hba->device_lock held.
+ */
+void se_clear_dev_ports(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ struct se_port *sep, *sep_tmp;
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+ spin_unlock(&dev->se_port_lock);
+ spin_unlock(&hba->device_lock);
+
+ lun = sep->sep_lun;
+ tpg = sep->sep_tpg;
+ spin_lock(&lun->lun_sep_lock);
+ if (lun->lun_se_dev == NULL) {
+ spin_unlock(&lun->lun_sep_lock);
+ continue;
+ }
+ spin_unlock(&lun->lun_sep_lock);
+
+ core_dev_del_lun(tpg, lun->unpacked_lun);
+
+ spin_lock(&hba->device_lock);
+ spin_lock(&dev->se_port_lock);
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return;
+}
+
+/* se_free_virtual_device():
+ *
+ * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+ spin_lock(&hba->device_lock);
+ se_clear_dev_ports(dev);
+ spin_unlock(&hba->device_lock);
+
+ core_alua_free_lu_gp_mem(dev);
+ se_release_device_for_hba(dev);
+
+ return 0;
+}
+
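+/*
+ * Take a device object reference; the first reference moves a
+ * DEACTIVATED (or OFFLINE_DEACTIVATED) device to the corresponding
+ * ACTIVATED state.
+ */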
+static void se_dev_start(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ spin_lock(&hba->device_lock);
+ atomic_inc(&dev->dev_obj.obj_access_count);
+ if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+ if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+ } else if (dev->dev_status &
+ TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+ dev->dev_status &=
+ ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+ }
+ }
+ spin_unlock(&hba->device_lock);
+}
+
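+/*
+ * Drop a device object reference; the last reference moves an
+ * ACTIVATED (or OFFLINE_ACTIVATED) device back to the corresponding
+ * DEACTIVATED state.
+ */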
+static void se_dev_stop(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ spin_lock(&hba->device_lock);
+ atomic_dec(&dev->dev_obj.obj_access_count);
+ if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+ if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+ } else if (dev->dev_status &
+ TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+ dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+ dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+ }
+ }
+ spin_unlock(&hba->device_lock);
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+ int ret;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+ (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+ spin_unlock_irq(&dev->dev_status_lock);
+
+ return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+ int ret;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+ spin_unlock_irq(&dev->dev_status_lock);
+
+ return ret;
+}
+
+void se_dev_set_default_attribs(
+ struct se_device *dev,
+ struct se_dev_limits *dev_limits)
+{
+ struct queue_limits *limits = &dev_limits->limits;
+
+ DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
+ DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
+ DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
+ DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
+ DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
+ DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
+ DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
+ DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ /*
+ * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
+ * iblock_create_virtdevice() from struct queue_limits values
+ * if blk_queue_discard()==1
+ */
+ DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ /*
+ * block_size is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
+ DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+ /*
+ * max_sectors is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
+ DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+ /*
+ * Set optimal_sectors from max_sectors, which can be lowered via
+ * configfs.
+ */
+ DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+ /*
+ * queue_depth is based on subsystem plugin dependent requirements.
+ */
+ DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
+ DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
+{
+ if (task_timeout > DA_TASK_TIMEOUT_MAX) {
+		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
+ " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
+ return -1;
+ } else {
+ DEV_ATTRIB(dev)->task_timeout = task_timeout;
+ printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+ dev, task_timeout);
+ }
+
+ return 0;
+}
+
+int se_dev_set_max_unmap_lba_count(
+ struct se_device *dev,
+ u32 max_unmap_lba_count)
+{
+ DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+ printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+ dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+ return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+ struct se_device *dev,
+ u32 max_unmap_block_desc_count)
+{
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+ printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+ dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+ return 0;
+}
+
+int se_dev_set_unmap_granularity(
+ struct se_device *dev,
+ u32 unmap_granularity)
+{
+ DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+ printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+ dev, DEV_ATTRIB(dev)->unmap_granularity);
+ return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+ struct se_device *dev,
+ u32 unmap_granularity_alignment)
+{
+ DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+ printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+ dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+ return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->dpo_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_dpo = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+ " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+ return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_write_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_fua_write = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_fua_write);
+ return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_read_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_fua_read = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_fua_read);
+ return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ if (TRANSPORT(dev)->write_cache_emulated == NULL) {
+ printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
+ return -1;
+ }
+ if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
+ printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_write_cache = flag;
+ printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_write_cache);
+ return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1) && (flag != 2)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " UA_INTRLCK_CTRL while dev_export_obj: %d count"
+ " exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+ printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+
+ return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+ " dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ DEV_ATTRIB(dev)->emulate_tas = flag;
+ printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+ printk(KERN_ERR "Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ DEV_ATTRIB(dev)->emulate_tpu = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+ */
+ if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+ printk(KERN_ERR "Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ DEV_ATTRIB(dev)->emulate_tpws = flag;
+ printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
+ printk(KERN_ERR "Illegal value %d\n", flag);
+ return -1;
+ }
+ DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+ printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+ (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+ return 0;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+ u32 orig_queue_depth = dev->queue_depth;
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+ " dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ if (!(queue_depth)) {
+ printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+ "_depth\n", dev);
+ return -1;
+ }
+
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+ printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+ " exceeds TCM/SE_Device TCQ: %u\n",
+ dev, queue_depth,
+ DEV_ATTRIB(dev)->hw_queue_depth);
+ return -1;
+ }
+ } else {
+ if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
+ if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+ printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, queue_depth,
+ DEV_ATTRIB(dev)->hw_queue_depth);
+ return -1;
+ }
+ }
+ }
+
+ DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+ if (queue_depth > orig_queue_depth)
+ atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+ else if (queue_depth < orig_queue_depth)
+ atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+
+ printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+ dev, queue_depth);
+ return 0;
+}
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+ int force = 0; /* Force setting for VDEVS */
+
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " max_sectors while dev_export_obj: %d count exists\n",
+ dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+ if (!(max_sectors)) {
+ printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+ " max_sectors\n", dev);
+ return -1;
+ }
+ if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+ " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+ DA_STATUS_MAX_SECTORS_MIN);
+ return -1;
+ }
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than TCM/SE_Device max_sectors:"
+ " %u\n", dev, max_sectors,
+ DEV_ATTRIB(dev)->hw_max_sectors);
+ return -1;
+ }
+ } else {
+ if (!(force) && (max_sectors >
+ DEV_ATTRIB(dev)->hw_max_sectors)) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than TCM/SE_Device max_sectors"
+ ": %u, use force=1 to override.\n", dev,
+ max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
+ return -1;
+ }
+ if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ " greater than DA_STATUS_MAX_SECTORS_MAX:"
+ " %u\n", dev, max_sectors,
+ DA_STATUS_MAX_SECTORS_MAX);
+ return -1;
+ }
+ }
+
+ DEV_ATTRIB(dev)->max_sectors = max_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
+ dev, max_sectors);
+ return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ " optimal_sectors while dev_export_obj: %d count exists\n",
+ dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -EINVAL;
+ }
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+ " changed for TCM/pSCSI\n", dev);
+ return -EINVAL;
+ }
+ if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+ printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+ " greater than max_sectors: %u\n", dev,
+ optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+ return -EINVAL;
+ }
+
+ DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+ printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+ dev, optimal_sectors);
+ return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+ " while dev_export_obj: %d count exists\n", dev,
+ atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -1;
+ }
+
+ if ((block_size != 512) &&
+ (block_size != 1024) &&
+ (block_size != 2048) &&
+ (block_size != 4096)) {
+		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
+ " for SE device, must be 512, 1024, 2048 or 4096\n",
+ dev, block_size);
+ return -1;
+ }
+
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+			" Physical Device, use Linux/SCSI to change the"
+			" block_size of the underlying hardware\n", dev);
+ return -1;
+ }
+
+ DEV_ATTRIB(dev)->block_size = block_size;
+ printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+ dev, block_size);
+ return 0;
+}
+
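+/* core_dev_add_lun():
+ *
+ * Activate *dev as a new LUN on *tpg and, when the fabric runs in demo
+ * mode, add the LUN to every dynamically generated NodeACL as well.
+ */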
+struct se_lun *core_dev_add_lun(
+ struct se_portal_group *tpg,
+ struct se_hba *hba,
+ struct se_device *dev,
+ u32 lun)
+{
+ struct se_lun *lun_p;
+ u32 lun_access = 0;
+
+ if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+ printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+ atomic_read(&dev->dev_access_obj.obj_access_count));
+ return NULL;
+ }
+
+ lun_p = core_tpg_pre_addlun(tpg, lun);
+ if ((IS_ERR(lun_p)) || !(lun_p))
+ return NULL;
+
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+ if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
+ return NULL;
+
+ printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+ /*
+ * Update LUN maps for dynamically added initiators when
+ * generate_node_acl is enabled.
+ */
+ if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+ struct se_node_acl *acl;
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (acl->dynamic_node_acl) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+ core_tpg_add_node_to_devs(acl, tpg);
+ spin_lock_bh(&tpg->acl_node_lock);
+ }
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+ }
+
+ return lun_p;
+}
+
+/* core_dev_del_lun():
+ *
+ *
+ */
+int core_dev_del_lun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun)
+{
+ struct se_lun *lun;
+ int ret = 0;
+
+ lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
+ if (!(lun))
+ return ret;
+
+ core_tpg_post_dellun(tpg, lun);
+
+ printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+ " device object\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
+ TPG_TFO(tpg)->get_fabric_name());
+
+ return 0;
+}
+
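+/*
+ * Return the struct se_lun for unpacked_lun only while it is still in
+ * TRANSPORT_LUN_STATUS_FREE, i.e. before it has been activated on *tpg.
+ */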
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+ "_PER_TPG-1: %u for Target Portal Group: %hu\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+/* core_dev_get_lun():
+ *
+ *
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+ "_TPG-1: %u for Target Portal Group: %hu\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return NULL;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ u32 mapped_lun,
+ char *initiatorname,
+ int *ret)
+{
+ struct se_lun_acl *lacl;
+ struct se_node_acl *nacl;
+
+ if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+ printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ *ret = -EOVERFLOW;
+ return NULL;
+ }
+ nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if (!(nacl)) {
+ *ret = -EINVAL;
+ return NULL;
+ }
+ lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+ if (!(lacl)) {
+ printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+ *ret = -ENOMEM;
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&lacl->lacl_list);
+ lacl->mapped_lun = mapped_lun;
+ lacl->se_lun_nacl = nacl;
+ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+
+ return lacl;
+}
+
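+/* core_dev_add_initiator_node_lun_acl():
+ *
+ * Attach a MappedLUN ACL to an active LUN, demoting the requested access
+ * to READ-ONLY when the underlying LUN is read-only, and re-check any
+ * APTPL pre-registrations for the new mapping.
+ */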
+int core_dev_add_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun_acl *lacl,
+ u32 unpacked_lun,
+ u32 lun_access)
+{
+ struct se_lun *lun;
+ struct se_node_acl *nacl;
+
+ lun = core_dev_get_lun(tpg, unpacked_lun);
+ if (!(lun)) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %hu, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return -EINVAL;
+ }
+
+ nacl = lacl->se_lun_nacl;
+ if (!(nacl))
+ return -EINVAL;
+
+ if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+ (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+ lacl->se_lun = lun;
+
+ if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+ lun_access, nacl, tpg, 1) < 0)
+ return -EINVAL;
+
+ spin_lock(&lun->lun_acl_lock);
+ list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+ atomic_inc(&lun->lun_acl_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&lun->lun_acl_lock);
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
+ " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+ lacl->initiatorname);
+ /*
+ * Check to see if there are any existing persistent reservation APTPL
+ * pre-registrations that need to be enabled for this LUN ACL..
+ */
+ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+ return 0;
+}
+
+/* core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ struct se_lun_acl *lacl)
+{
+ struct se_node_acl *nacl;
+
+ nacl = lacl->se_lun_nacl;
+ if (!(nacl))
+ return -EINVAL;
+
+ spin_lock(&lun->lun_acl_lock);
+ list_del(&lacl->lacl_list);
+ atomic_dec(&lun->lun_acl_count);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&lun->lun_acl_lock);
+
+ core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+ lacl->se_lun = NULL;
+
+ printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+ " InitiatorNode: %s Mapped LUN: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ lacl->initiatorname, lacl->mapped_lun);
+
+ return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
+ struct se_lun_acl *lacl)
+{
+	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+ " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ TPG_TFO(tpg)->get_fabric_name(),
+ lacl->initiatorname, lacl->mapped_lun);
+
+ kfree(lacl);
+}
+
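+/*
+ * Create the internal "rd_dr" HBA and "virt_lun0" virtual device that
+ * provide the global default LUN 0 (se_global->g_lun0_dev).
+ */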
+int core_dev_setup_virtual_lun0(void)
+{
+ struct se_hba *hba;
+ struct se_device *dev;
+ struct se_subsystem_dev *se_dev = NULL;
+ struct se_subsystem_api *t;
+ char buf[16];
+ int ret;
+
+ hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+ if (IS_ERR(hba))
+ return PTR_ERR(hba);
+
+ se_global->g_lun0_hba = hba;
+ t = hba->transport;
+
+ se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+ if (!(se_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_subsystem_dev\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_reservation.registration_lock);
+ spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock_init(&se_dev->se_dev_lock);
+ se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_wwn.t10_sub_dev = se_dev;
+ se_dev->t10_alua.t10_sub_dev = se_dev;
+ se_dev->se_dev_attrib.da_sub_dev = se_dev;
+ se_dev->se_dev_hba = hba;
+
+ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+ if (!(se_dev->se_dev_su_ptr)) {
+ printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ " from allocate_virtdevice()\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ se_global->g_lun0_su_dev = se_dev;
+
+ memset(buf, 0, 16);
+ sprintf(buf, "rd_pages=8");
+ t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+ dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+ if (!(dev) || IS_ERR(dev)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ se_dev->se_dev_ptr = dev;
+ se_global->g_lun0_dev = dev;
+
+ return 0;
+out:
+ se_global->g_lun0_su_dev = NULL;
+ kfree(se_dev);
+ if (se_global->g_lun0_hba) {
+ core_delete_hba(se_global->g_lun0_hba);
+ se_global->g_lun0_hba = NULL;
+ }
+ return ret;
+}
+
+
+void core_dev_release_virtual_lun0(void)
+{
+ struct se_hba *hba = se_global->g_lun0_hba;
+ struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+
+ if (!(hba))
+ return;
+
+ if (se_global->g_lun0_dev)
+ se_free_virtual_device(se_global->g_lun0_dev, hba);
+
+ kfree(su_dev);
+ core_delete_hba(hba);
+}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 000000000000..b65d1c8e7740
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,1034 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010 Rising Tide Systems
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
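+/*
+ * TF_CIT_SETUP() generates a target_fabric_setup_$NAME_cit() function
+ * that fills in one config_item_type of the fabric's tf_cit_tmpl with
+ * the given item operations, group operations and attribute array.
+ */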
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{ \
+ struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
+ struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = _attrs; \
+ cit->ct_owner = tf->tf_module; \
+ printk("Setup generic %s\n", __stringify(_name)); \
+}
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+ struct config_item *lun_acl_ci,
+ struct config_item *lun_ci)
+{
+ struct se_dev_entry *deve;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg;
+ struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+ int ret = 0, lun_access;
+ /*
+ * Ensure that the source port exists
+ */
+ if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
+ printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+ "_tpg does not exist\n");
+ return -EINVAL;
+ }
+ se_tpg = lun->lun_sep->sep_tpg;
+
+ nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+ tpg_ci = &nacl_ci->ci_group->cg_item;
+ wwn_ci = &tpg_ci->ci_group->cg_item;
+ tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+ wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+ /*
+ * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+ */
+ if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+ printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+ config_item_name(wwn_ci));
+ return -EINVAL;
+ }
+ if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+ printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+ " TPGT: %s\n", config_item_name(wwn_ci),
+ config_item_name(tpg_ci));
+ return -EINVAL;
+ }
+ /*
+ * If this struct se_node_acl was dynamically generated with
+ * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which will be write protected (READ-ONLY) when
+ * tpg_1/attrib/demo_mode_write_protect=1
+ */
+ spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+ deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+ lun_access = deve->lun_flags;
+ else
+ lun_access =
+ (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+ se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+ TRANSPORT_LUNFLAGS_READ_WRITE;
+ spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+ /*
+ * Determine the actual mapped LUN value user wants..
+ *
+ * This value is what the SCSI Initiator actually sees the
+ * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
+ */
+ ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+ lun->unpacked_lun, lun_access);
+
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int target_fabric_mappedlun_unlink(
+ struct config_item *lun_acl_ci,
+ struct config_item *lun_ci)
+{
+ struct se_lun *lun;
+ struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+ struct se_lun_acl, se_lun_group);
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+ struct se_portal_group *se_tpg;
+ /*
+ * Determine if the underlying MappedLUN has already been released..
+ */
+ if (!(deve->se_lun))
+ return 0;
+
+ lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+ se_tpg = lun->lun_sep->sep_tpg;
+
+ core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+ return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_fabric_mappedlun_show_##_name, \
+ target_fabric_mappedlun_store_##_name);
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+ struct se_lun_acl *lacl,
+ char *page)
+{
+ struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+ struct se_dev_entry *deve;
+ ssize_t len;
+
+ spin_lock_irq(&se_nacl->device_list_lock);
+ deve = &se_nacl->device_list[lacl->mapped_lun];
+ len = sprintf(page, "%d\n",
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+ 1 : 0);
+ spin_unlock_irq(&se_nacl->device_list_lock);
+
+ return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+ struct se_lun_acl *lacl,
+ const char *page,
+ size_t count)
+{
+ struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ unsigned long op;
+
+ if (strict_strtoul(page, 0, &op))
+ return -EINVAL;
+
+ if ((op != 1) && (op != 0))
+ return -EINVAL;
+
+ core_update_device_list_access(lacl->mapped_lun, (op) ?
+ TRANSPORT_LUNFLAGS_READ_ONLY :
+ TRANSPORT_LUNFLAGS_READ_WRITE,
+ lacl->se_lun_nacl);
+
+ printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+ " Mapped LUN: %u Write Protect bit to %s\n",
+ TPG_TFO(se_tpg)->get_fabric_name(),
+ lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+ return count;
+
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+ struct se_lun_acl *lacl = container_of(to_config_group(item),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+ core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+ &target_fabric_mappedlun_write_protect.attr,
+ NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+ .release = target_fabric_mappedlun_release,
+ .show_attribute = target_fabric_mappedlun_attr_show,
+ .store_attribute = target_fabric_mappedlun_attr_store,
+ .allow_link = target_fabric_mappedlun_link,
+ .drop_link = target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+ target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+ .show_attribute = target_fabric_nacl_attrib_attr_show,
+ .store_attribute = target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+ .show_attribute = target_fabric_nacl_auth_attr_show,
+ .store_attribute = target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+ .show_attribute = target_fabric_nacl_param_attr_show,
+ .store_attribute = target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl = container_of(group,
+ struct se_node_acl, acl_group);
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_lun_acl *lacl;
+ struct config_item *acl_ci;
+ char *buf;
+ unsigned long mapped_lun;
+ int ret = 0;
+
+ acl_ci = &group->cg_item;
+ if (!(acl_ci)) {
+		printk(KERN_ERR "Unable to locate acl_ci\n");
+ return NULL;
+ }
+
+ buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate memory for name buf\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ snprintf(buf, strlen(name) + 1, "%s", name);
+ /*
+ * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+ */
+ if (strstr(buf, "lun_") != buf) {
+ printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+ " name: %s\n", buf, name);
+ ret = -EINVAL;
+ goto out;
+ }
+ /*
+ * Determine the Mapped LUN value. This is what the SCSI Initiator
+ * Port will actually see.
+ */
+ if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+ config_item_name(acl_ci), &ret);
+ if (!(lacl))
+ goto out;
+
+ config_group_init_type_name(&lacl->se_lun_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+
+ kfree(buf);
+ return &lacl->se_lun_group;
+out:
+ kfree(buf);
+ return ERR_PTR(ret);
+}
+
+static void target_fabric_drop_mappedlun(
+ struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+ struct se_node_acl *se_nacl = container_of(to_config_group(item),
+ struct se_node_acl, acl_group);
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+ .release = target_fabric_nacl_base_release,
+ .show_attribute = target_fabric_nacl_base_attr_show,
+ .store_attribute = target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+ .make_group = target_fabric_make_mappedlun,
+ .drop_item = target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+ &target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_acl_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_node_acl *se_nacl;
+ struct config_group *nacl_cg;
+
+ if (!(tf->tf_ops.fabric_make_nodeacl)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+ if (IS_ERR(se_nacl))
+ return ERR_PTR(PTR_ERR(se_nacl));
+
+ nacl_cg = &se_nacl->acl_group;
+ nacl_cg->default_groups = se_nacl->acl_default_groups;
+ nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+ nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+ nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+ nacl_cg->default_groups[3] = NULL;
+
+ config_group_init_type_name(&se_nacl->acl_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ config_group_init_type_name(&se_nacl->acl_param_group, "param",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+
+ return &se_nacl->acl_group;
+}
+
+static void target_fabric_drop_nodeacl(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_node_acl *se_nacl = container_of(to_config_group(item),
+ struct se_node_acl, acl_group);
+ struct config_item *df_item;
+ struct config_group *nacl_cg;
+ int i;
+
+ nacl_cg = &se_nacl->acl_group;
+ for (i = 0; nacl_cg->default_groups[i]; i++) {
+ df_item = &nacl_cg->default_groups[i]->cg_item;
+ nacl_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ /*
+ * struct se_node_acl free is done in target_fabric_nacl_base_release()
+ */
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+ .make_group = target_fabric_make_nodeacl,
+ .drop_item = target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static void target_fabric_np_base_release(struct config_item *item)
+{
+ struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+ struct se_tpg_np, tpg_np_group);
+ struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+ .release = target_fabric_np_base_release,
+ .show_attribute = target_fabric_np_base_attr_show,
+ .store_attribute = target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_np_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ struct se_tpg_np *se_tpg_np;
+
+ if (!(tf->tf_ops.fabric_make_np)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+ if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+ return ERR_PTR(-EINVAL);
+
+ se_tpg_np->tpg_np_parent = se_tpg;
+ config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+ return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+ struct config_group *group,
+ struct config_item *item)
+{
+ /*
+ * struct se_tpg_np is released via target_fabric_np_base_release()
+ */
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+ .make_group = &target_fabric_make_np,
+ .drop_item = &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode) \
+static struct target_fabric_port_attribute target_fabric_port_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ target_fabric_port_show_attr_##_name, \
+ target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTOR_RO(_name) \
+ __CONFIGFS_EATTR_RO(_name, \
+ target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+ struct se_lun *lun,
+ char *page)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+ struct se_lun *lun,
+ const char *page,
+ size_t count)
+{
+ if (!(lun))
+ return -ENODEV;
+
+ if (!(lun->lun_sep))
+ return -ENODEV;
+
+ return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+ &target_fabric_port_alua_tg_pt_gp.attr,
+ &target_fabric_port_alua_tg_pt_offline.attr,
+ &target_fabric_port_alua_tg_pt_status.attr,
+ &target_fabric_port_alua_tg_pt_write_md.attr,
+ NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+ struct config_item *lun_ci,
+ struct config_item *se_dev_ci)
+{
+ struct config_item *tpg_ci;
+ struct se_device *dev;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_lun *lun_p;
+ struct se_portal_group *se_tpg;
+ struct se_subsystem_dev *se_dev = container_of(
+ to_config_group(se_dev_ci), struct se_subsystem_dev,
+ se_dev_group);
+ struct target_fabric_configfs *tf;
+ int ret;
+
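+	/*
+	 * Walk up from the lun_$LUN_NUMBER item through the "lun" group to
+	 * locate the config_item of the owning TPG, and from there the
+	 * se_portal_group and its registered target_fabric_configfs.
+	 */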
+ tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+ se_tpg = container_of(to_config_group(tpg_ci),
+ struct se_portal_group, tpg_group);
+ tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ if (lun->lun_se_dev != NULL) {
+ printk(KERN_ERR "Port Symlink already exists\n");
+ return -EEXIST;
+ }
+
+ dev = se_dev->se_dev_ptr;
+ if (!(dev)) {
+ printk(KERN_ERR "Unable to locate struct se_device pointer from"
+ " %s\n", config_item_name(se_dev_ci));
+ ret = -ENODEV;
+ goto out;
+ }
+
+ lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+ lun->unpacked_lun);
+ if ((IS_ERR(lun_p)) || !(lun_p)) {
+ printk(KERN_ERR "core_dev_add_lun() failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (tf->tf_ops.fabric_post_link) {
+ /*
+ * Call the optional fabric_post_link() to allow a
+ * fabric module to setup any additional state once
+		 * core_dev_add_lun() has been called.
+ */
+ tf->tf_ops.fabric_post_link(se_tpg, lun);
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static int target_fabric_port_unlink(
+ struct config_item *lun_ci,
+ struct config_item *se_dev_ci)
+{
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
+ struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ if (tf->tf_ops.fabric_pre_unlink) {
+ /*
+ * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+ * core_dev_del_lun() is called.
+ */
+ tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+ }
+
+ core_dev_del_lun(se_tpg, lun->unpacked_lun);
+ return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+ .show_attribute = target_fabric_port_attr_show,
+ .store_attribute = target_fabric_port_attr_store,
+ .allow_link = target_fabric_port_link,
+ .drop_link = target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_lun *lun;
+ struct se_portal_group *se_tpg = container_of(group,
+ struct se_portal_group, tpg_lun_group);
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+ unsigned long unpacked_lun;
+
+ if (strstr(name, "lun_") != name) {
+ printk(KERN_ERR "Unable to locate \'_\" in"
+ " \"lun_$LUN_NUMBER\"\n");
+ return ERR_PTR(-EINVAL);
+ }
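+	/*
+	 * Skip the 4 byte "lun_" prefix and convert the remaining characters
+	 * into the unpacked LUN number.
+	 */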
+ if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+ if (!(lun))
+ return ERR_PTR(-EINVAL);
+
+ config_group_init_type_name(&lun->lun_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+
+ return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+ struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+ .make_group = &target_fabric_make_lun,
+ .drop_item = &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+ .show_attribute = target_fabric_tpg_attrib_attr_show,
+ .store_attribute = target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+ .show_attribute = target_fabric_tpg_param_attr_show,
+ .store_attribute = target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static void target_fabric_tpg_release(struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+ .release = target_fabric_tpg_release,
+ .show_attribute = target_fabric_tpg_attr_show,
+ .store_attribute = target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+ struct config_group *group,
+ const char *name)
+{
+ struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+ struct se_portal_group *se_tpg;
+
+ if (!(tf->tf_ops.fabric_make_tpg)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+ if (!(se_tpg) || IS_ERR(se_tpg))
+ return ERR_PTR(-EINVAL);
+ /*
+ * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+ */
+ se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+ se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+ se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+ se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+ se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+ se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+ se_tpg->tpg_group.default_groups[5] = NULL;
+
+ config_group_init_type_name(&se_tpg->tpg_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+ &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+ &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+ &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+ &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+ &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+ return &se_tpg->tpg_group;
+}
+
+static void target_fabric_drop_tpg(
+ struct config_group *group,
+ struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct config_group *tpg_cg = &se_tpg->tpg_group;
+ struct config_item *df_item;
+ int i;
+ /*
+ * Release default groups, but do not release tpg_cg->default_groups
+ * memory as it is statically allocated at se_tpg->tpg_default_groups.
+ */
+ for (i = 0; tpg_cg->default_groups[i]; i++) {
+ df_item = &tpg_cg->default_groups[i]->cg_item;
+ tpg_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+
+ config_item_put(item);
+}
+
+static void target_fabric_release_wwn(struct config_item *item)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item),
+ struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+ .release = target_fabric_release_wwn,
+};
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+ .make_group = target_fabric_make_tpg,
+ .drop_item = target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+ NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+ struct config_group *group,
+ const char *name)
+{
+ struct target_fabric_configfs *tf = container_of(group,
+ struct target_fabric_configfs, tf_group);
+ struct se_wwn *wwn;
+
+ if (!(tf->tf_ops.fabric_make_wwn)) {
+ printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+ if (!(wwn) || IS_ERR(wwn))
+ return ERR_PTR(-EINVAL);
+
+ wwn->wwn_tf = tf;
+ config_group_init_type_name(&wwn->wwn_group, name,
+ &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+
+ return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+ struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+ .make_group = target_fabric_make_wwn,
+ .drop_item = target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+ .show_attribute = target_fabric_wwn_attr_show,
+ .store_attribute = target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+ tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+ .show_attribute = target_fabric_discovery_attr_show,
+ .store_attribute = target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+ target_fabric_setup_discovery_cit(tf);
+ target_fabric_setup_wwn_cit(tf);
+ target_fabric_setup_tpg_cit(tf);
+ target_fabric_setup_tpg_base_cit(tf);
+ target_fabric_setup_tpg_port_cit(tf);
+ target_fabric_setup_tpg_lun_cit(tf);
+ target_fabric_setup_tpg_np_cit(tf);
+ target_fabric_setup_tpg_np_base_cit(tf);
+ target_fabric_setup_tpg_attrib_cit(tf);
+ target_fabric_setup_tpg_param_cit(tf);
+ target_fabric_setup_tpg_nacl_cit(tf);
+ target_fabric_setup_tpg_nacl_base_cit(tf);
+ target_fabric_setup_tpg_nacl_attrib_cit(tf);
+ target_fabric_setup_tpg_nacl_auth_cit(tf);
+ target_fabric_setup_tpg_nacl_param_cit(tf);
+ target_fabric_setup_tpg_mappedlun_cit(tf);
+
+ return 0;
+}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 000000000000..26285644e4de
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * Filename: target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * Return a SAS Serial SCSI Protocol identifier for loopback operations
+ * This is defined in section 7.5.1 Table 362 in spc4r17
+ */
+ return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ unsigned char binary, *ptr;
+ int i;
+ u32 off = 4;
+ /*
+ * Set PROTOCOL IDENTIFIER to 6h for SAS
+ */
+ buf[0] = 0x06;
+ /*
+ * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+ * over SAS Serial SCSI Protocol
+ */
+	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
+
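+	/*
+	 * Convert the 16 ASCII hex characters of the NAA WWN into 8 binary
+	 * bytes, starting at byte 4 of the TransportID.
+	 */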
+ for (i = 0; i < 16; i += 2) {
+ binary = transport_asciihex_to_binaryhex(&ptr[i]);
+ buf[off++] = binary;
+ }
+ /*
+ * The SAS Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
+
+u32 sas_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+ * over SAS Serial SCSI Protocol
+ *
+ * The SAS Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+ * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+ * for initiator ports using SCSI over SAS Serial SCSI Protocol
+ *
+	 * The TransportID for a SAS Initiator Port is a fixed 24 bytes in
+	 * length, and SAS does not contain an I_T nexus identifier, so we
+	 * return with *port_nexus_ptr set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * The FC Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ unsigned char binary, *ptr;
+ int i;
+ u32 off = 8;
+ /*
+ * PROTOCOL IDENTIFIER is 0h for FCP-2
+ *
+ * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+ * SCSI over Fibre Channel
+ *
+ * We convert the ASCII formatted N Port name into a binary
+ * encoded TransportID.
+ */
+ ptr = &se_nacl->initiatorname[0];
+
+ for (i = 0; i < 24; ) {
+ if (!(strncmp(&ptr[i], ":", 1))) {
+ i++;
+ continue;
+ }
+ binary = transport_asciihex_to_binaryhex(&ptr[i]);
+ buf[off++] = binary;
+ i += 2;
+ }
+ /*
+ * The FC Transport ID is a hardcoded 24-byte length
+ */
+ return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
+
+char *fc_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+	 * The TransportID for an FC N Port is a fixed 24 bytes in length,
+	 * and FC does not contain an I_T nexus identifier, so we return
+	 * with *port_nexus_ptr set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * This value is defined for "Internet SCSI (iSCSI)"
+ * in spc4r17 section 7.5.1 Table 362
+ */
+ return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ u32 off = 4, padding = 0;
+ u16 len = 0;
+
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+ */
+ buf[0] = 0x05;
+ /*
+ * From spc4r17 Section 7.5.4.6: TransportID for initiator
+ * ports using SCSI over iSCSI.
+ *
+ * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+ * shall contain the iSCSI name of an iSCSI initiator node (see
+ * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+ * null character terminates the ISCSI NAME field without regard for
+ * the specified length of the iSCSI TransportID or the contents of
+ * the ADDITIONAL LENGTH field.
+ */
+ len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+ /*
+ * Add Extra byte for NULL terminator
+ */
+ len++;
+ /*
+	 * If an ISID is present with the registration and *format_code == 1,
+	 * use the iSCSI Initiator port TransportID format.
+	 *
+	 * Otherwise use the iSCSI Initiator device TransportID format, which
+	 * does not contain the ASCII encoded iSCSI Initiator ISID value
+	 * provided by the iSCSI Initiator during the iSCSI login process.
+ */
+ if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+ /*
+ * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+ * format.
+ */
+ buf[0] |= 0x40;
+ /*
+ * From spc4r17 Section 7.5.4.6: TransportID for initiator
+ * ports using SCSI over iSCSI. Table 390
+ *
+ * The SEPARATOR field shall contain the five ASCII
+ * characters ",i,0x".
+ *
+ * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+ * field shall contain the iSCSI initiator session identifier
+ * (see RFC 3720) in the form of ASCII characters that are the
+ * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character terminates
+		 * the ISCSI INITIATOR SESSION ID field.
+		 */
+ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+ buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+ buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+ buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+ buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+ len += 5;
+ buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+ buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+ buf[off+len] = '\0'; off++;
+ len += 7;
+ }
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+ * in the TransportID. The additional length shall be at least 20 and
+ * shall be a multiple of four.
+ */
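+	/* (-len) & 3 yields the bytes needed to pad len up to a multiple of 4 */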
+ padding = ((-len) & 3);
+ if (padding != 0)
+ len += padding;
+
+ buf[2] = ((len >> 8) & 0xff);
+ buf[3] = (len & 0xff);
+ /*
+ * Increment value for total payload + header length for
+ * full status descriptor
+ */
+ len += 4;
+
+ return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
+
+u32 iscsi_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ u32 len = 0, padding = 0;
+
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ len = strlen(se_nacl->initiatorname);
+ /*
+ * Add extra byte for NULL terminator
+ */
+ len++;
+ /*
+ * If there is ISID present with the registration, use format code:
+ * 01b: iSCSI Initiator port TransportID format
+ *
+ * If there is not an active iSCSI session, use format code:
+ * 00b: iSCSI Initiator device TransportID format
+ */
+ if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+ len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+ *format_code = 1;
+ } else
+ *format_code = 0;
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+ * in the TransportID. The additional length shall be at least 20 and
+ * shall be a multiple of four.
+ */
+ padding = ((-len) & 3);
+ if (padding != 0)
+ len += padding;
+ /*
+ * Increment value for total payload + header length for
+ * full status descriptor
+ */
+ len += 4;
+
+ return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
+
+char *iscsi_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ char *p;
+ u32 tid_len, padding;
+ int i;
+ u16 add_len;
+ u8 format_code = (buf[0] & 0xc0);
+ /*
+ * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+ *
+ * TransportID for initiator ports using SCSI over iSCSI,
+ * from Table 388 -- iSCSI TransportID formats.
+ *
+ * 00b Initiator port is identified using the world wide unique
+ * SCSI device name of the iSCSI initiator
+ * device containing the initiator port (see table 389).
+ * 01b Initiator port is identified using the world wide unique
+	 *     initiator port identifier (see table 390).
+	 * 10b to 11b Reserved
+ */
+ if ((format_code != 0x00) && (format_code != 0x40)) {
+ printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+ " Initiator Transport ID\n", format_code);
+ return NULL;
+ }
+ /*
+ * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+ */
+ if (out_tid_len != NULL) {
+		/* ADDITIONAL LENGTH is a big-endian two byte field at bytes 2-3 */
+		add_len = ((buf[2] & 0xff) << 8);
+		add_len |= (buf[3] & 0xff);
+
+ tid_len = strlen((char *)&buf[4]);
+ tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+ tid_len += 1; /* Add one byte for NULL terminator */
+ padding = ((-tid_len) & 3);
+ if (padding != 0)
+ tid_len += padding;
+
+ if ((add_len + 4) != tid_len) {
+ printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+ "does not match calculated tid_len: %u,"
+ " using tid_len instead\n", add_len+4, tid_len);
+ *out_tid_len = tid_len;
+ } else
+ *out_tid_len = (add_len + 4);
+ }
+ /*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+ * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+ * format.
+ */
+ if (format_code == 0x40) {
+ p = strstr((char *)&buf[4], ",i,0x");
+ if (!(p)) {
+ printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+ " for Initiator port identifier: %s\n",
+ (char *)&buf[4]);
+ return NULL;
+ }
+ *p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+ *port_nexus_ptr = p;
+ /*
+ * Go ahead and do the lower case conversion of the received
+ * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+ * iscsi_target.c:lio_sess_get_initiator_sid()
+ */
+ for (i = 0; i < 12; i++) {
+ if (isdigit(*p)) {
+ p++;
+ continue;
+ }
+ *p = tolower(*p);
+ p++;
+ }
+ }
+
+ return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 000000000000..0aaca885668f
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
+/*******************************************************************************
+ * Filename: target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_file.h"
+
+#if 1
+#define DEBUG_FD_CACHE(x...) printk(x)
+#else
+#define DEBUG_FD_CACHE(x...)
+#endif
+
+#if 1
+#define DEBUG_FD_FUA(x...) printk(x)
+#else
+#define DEBUG_FD_FUA(x...)
+#endif
+
+static struct se_subsystem_api fileio_template;
+
+/* fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct fd_host *fd_host;
+
+ fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+ if (!(fd_host)) {
+ printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+ return -1;
+ }
+
+ fd_host->fd_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) fd_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+ TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+ " Target Core with TCQ Depth: %d MaxSectors: %u\n",
+ hba->hba_id, fd_host->fd_host_id,
+ atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+
+ return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+ struct fd_host *fd_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+ " Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+ kfree(fd_host);
+ hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct fd_dev *fd_dev;
+ struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+
+ fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+ if (!(fd_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+ return NULL;
+ }
+
+ fd_dev->fd_host = fd_host;
+
+ printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+
+ return fd_dev;
+}
+
+/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ char *dev_p = NULL;
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct queue_limits *limits;
+ struct fd_dev *fd_dev = (struct fd_dev *) p;
+ struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ mm_segment_t old_fs;
+ struct file *file;
+ struct inode *inode = NULL;
+ int dev_flags = 0, flags;
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
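+	/*
+	 * getname() normally copies from a user pointer, so temporarily widen
+	 * the address limit to allow it to accept the kernel buffer in
+	 * fd_dev->fd_dev_name.
+	 */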
+ old_fs = get_fs();
+ set_fs(get_ds());
+ dev_p = getname(fd_dev->fd_dev_name);
+ set_fs(old_fs);
+
+ if (IS_ERR(dev_p)) {
+ printk(KERN_ERR "getname(%s) failed: %lu\n",
+ fd_dev->fd_dev_name, IS_ERR(dev_p));
+ goto fail;
+ }
+#if 0
+ if (di->no_create_file)
+ flags = O_RDWR | O_LARGEFILE;
+ else
+ flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#else
+ flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#endif
+/* flags |= O_DIRECT; */
+ /*
+	 * If fd_buffered_io=1 has not been set explicitly (the default),
+ * use O_SYNC to force FILEIO writes to disk.
+ */
+ if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+ flags |= O_SYNC;
+
+ file = filp_open(dev_p, flags, 0600);
+
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ goto fail;
+ }
+ fd_dev->fd_file = file;
+ /*
+ * If using a block backend with this struct file, we extract
+ * fd_dev->fd_[block,dev]_size from struct block_device.
+ *
+ * Otherwise, we use the passed fd_size= from configfs
+ */
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ struct request_queue *q;
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = bdev_get_queue(inode->i_bdev);
+ limits = &dev_limits.limits;
+ limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+ limits->max_hw_sectors = queue_max_hw_sectors(q);
+ limits->max_sectors = queue_max_sectors(q);
+ /*
+ * Determine the number of bytes from i_size_read() minus
+ * one (1) logical sector from underlying struct block_device
+ */
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+ fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+ fd_dev->fd_block_size);
+
+ printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+ " block_device blocks: %llu logical_block_size: %d\n",
+ fd_dev->fd_dev_size,
+ div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+ fd_dev->fd_block_size);
+ } else {
+ if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+ printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+ " parameter, and no backing struct"
+ " block_device\n");
+ goto fail;
+ }
+
+ limits = &dev_limits.limits;
+ limits->logical_block_size = FD_BLOCKSIZE;
+ limits->max_hw_sectors = FD_MAX_SECTORS;
+ limits->max_sectors = FD_MAX_SECTORS;
+ fd_dev->fd_block_size = FD_BLOCKSIZE;
+ }
+
+ dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+ dev = transport_add_device_to_core_hba(hba, &fileio_template,
+ se_dev, dev_flags, (void *)fd_dev,
+ &dev_limits, "FILEIO", FD_VERSION);
+ if (!(dev))
+ goto fail;
+
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+ printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+ " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+ putname(dev_p);
+ return dev;
+fail:
+ if (fd_dev->fd_file) {
+ filp_close(fd_dev->fd_file, NULL);
+ fd_dev->fd_file = NULL;
+ }
+ putname(dev_p);
+ return NULL;
+}
+
+/* fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+ struct fd_dev *fd_dev = (struct fd_dev *) p;
+
+ if (fd_dev->fd_file) {
+ filp_close(fd_dev->fd_file, NULL);
+ fd_dev->fd_file = NULL;
+ }
+
+ kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+ return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(struct se_cmd *cmd)
+{
+ struct fd_request *fd_req;
+
+ fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+ if (!(fd_req)) {
+ printk(KERN_ERR "Unable to allocate struct fd_request\n");
+ return NULL;
+ }
+
+ fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+
+ return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+ struct file *fd = req->fd_dev->fd_file;
+ struct scatterlist *sg = task->task_sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ int ret = 0, i;
+
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+ if (!(iov)) {
+ printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+ return -1;
+ }
+
+ for (i = 0; i < task->task_sg_num; i++) {
+ iov[i].iov_len = sg[i].length;
+ iov[i].iov_base = sg_virt(&sg[i]);
+ }
+
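+	/*
+	 * vfs_readv() expects user-space iovecs; switch the address limit so
+	 * the kernel iovec array built above is accepted.
+	 */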
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+ set_fs(old_fs);
+
+ kfree(iov);
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+ * the expected virt_size for struct file w/o a backing struct
+ * block_device.
+ */
+ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (ret < 0 || ret != task->task_size) {
+ printk(KERN_ERR "vfs_readv() returned %d,"
+ " expecting %d for S_ISBLK\n", ret,
+ (int)task->task_size);
+ return -1;
+ }
+ } else {
+ if (ret < 0) {
+ printk(KERN_ERR "vfs_readv() returned %d for non"
+ " S_ISBLK\n", ret);
+ return -1;
+ }
+ }
+
+ return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+ struct file *fd = req->fd_dev->fd_file;
+ struct scatterlist *sg = task->task_sg;
+ struct iovec *iov;
+ mm_segment_t old_fs;
+ loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ int ret, i = 0;
+
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+ if (!(iov)) {
+ printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+ return -1;
+ }
+
+ for (i = 0; i < task->task_sg_num; i++) {
+ iov[i].iov_len = sg[i].length;
+ iov[i].iov_base = sg_virt(&sg[i]);
+ }
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+ set_fs(old_fs);
+
+ kfree(iov);
+
+ if (ret < 0 || ret != task->task_size) {
+ printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+ return -1;
+ }
+
+ return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+ loff_t start, end;
+ int ret;
+
+ /*
+ * If the Immediate bit is set, queue up the GOOD response
+ * for this SYNCHRONIZE_CACHE op
+ */
+ if (immed)
+ transport_complete_sync_cache(cmd, 1);
+
+ /*
+ * Determine if we will be flushing the entire device.
+ */
+ if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+ start = 0;
+ end = LLONG_MAX;
+ } else {
+ start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+ if (cmd->data_length)
+ end = start + cmd->data_length;
+ else
+ end = LLONG_MAX;
+ }
+
+ ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ if (ret != 0)
+ printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+ if (!immed)
+ transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int fd_emulated_write_cache(struct se_device *dev)
+{
+ return 1;
+}
+
+static int fd_emulated_dpo(struct se_device *dev)
+{
+ return 0;
+}
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int fd_emulated_fua_write(struct se_device *dev)
+{
+ return 1;
+}
+
+static int fd_emulated_fua_read(struct se_device *dev)
+{
+ return 0;
+}
+
+/*
+ * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+ * LBA range basis.
+ */
+static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+ loff_t end = start + task->task_size;
+ int ret;
+
+ DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+ task->task_lba, task->task_size);
+
+ ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ if (ret != 0)
+ printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ int ret = 0;
+
+ /*
+ * Call vectorized fileio functions to map struct scatterlist
+ * physical memory addresses to struct iovec virtual memory.
+ */
+ if (task->task_data_direction == DMA_FROM_DEVICE) {
+ ret = fd_do_readv(task);
+ } else {
+ ret = fd_do_writev(task);
+
+ if (ret > 0 &&
+ DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
+ DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+ T_TASK(cmd)->t_tasks_fua) {
+ /*
+ * We might need to be a bit smarter here
+ * and return some sense data to let the initiator
+			 * know that the FUA WRITE cache sync failed.
+ */
+ fd_emulate_write_fua(cmd, task);
+ }
+
+ }
+
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+
+ kfree(req);
+}
+
+enum {
+ Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page, ssize_t count)
+{
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
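+	/*
+	 * Parse the comma separated fd_dev_name=, fd_dev_size= and
+	 * fd_buffered_io= options passed in through configfs.
+	 */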
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_fd_dev_name:
+ snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+ "%s", match_strdup(&args[0]));
+ printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+ fd_dev->fd_dev_name);
+ fd_dev->fbd_flags |= FBDF_HAS_PATH;
+ break;
+ case Opt_fd_dev_size:
+ arg_p = match_strdup(&args[0]);
+ ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ if (ret < 0) {
+ printk(KERN_ERR "strict_strtoull() failed for"
+ " fd_dev_size=\n");
+ goto out;
+ }
+ printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
+ case Opt_fd_buffered_io:
+ match_int(args, &arg);
+ if (arg != 1) {
+ printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_INFO "FILEIO: Using buffered I/O"
+ " operations for struct fd_dev\n");
+
+ fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+ struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+
+ if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+ printk(KERN_ERR "Missing fd_dev_name=\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+ (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+ "Buffered" : "Synchronous");
+ return bl;
+}
+
+/* fd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *fd_get_cdb(struct se_task *task)
+{
+ struct fd_request *req = FILE_REQ(task);
+
+ return req->fd_scsi_cdb;
+}
+
+/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/* fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+ DEV_ATTRIB(dev)->block_size);
+
+ return blocks_long;
+}
+
+static struct se_subsystem_api fileio_template = {
+ .name = "fileio",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .attach_hba = fd_attach_hba,
+ .detach_hba = fd_detach_hba,
+ .allocate_virtdevice = fd_allocate_virtdevice,
+ .create_virtdevice = fd_create_virtdevice,
+ .free_device = fd_free_device,
+ .dpo_emulated = fd_emulated_dpo,
+ .fua_write_emulated = fd_emulated_fua_write,
+ .fua_read_emulated = fd_emulated_fua_read,
+ .write_cache_emulated = fd_emulated_write_cache,
+ .alloc_task = fd_alloc_task,
+ .do_task = fd_do_task,
+ .do_sync_cache = fd_emulate_sync_cache,
+ .free_task = fd_free_task,
+ .check_configfs_dev_params = fd_check_configfs_dev_params,
+ .set_configfs_dev_params = fd_set_configfs_dev_params,
+ .show_configfs_dev_params = fd_show_configfs_dev_params,
+ .get_cdb = fd_get_cdb,
+ .get_device_rev = fd_get_device_rev,
+ .get_device_type = fd_get_device_type,
+ .get_blocks = fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+ return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+ transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 000000000000..ef4de2b4bd46
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION "4.0"
+
+#define FD_MAX_DEV_NAME 256
+/* Maximum queuedepth for the FILEIO HBA */
+#define FD_HBA_QUEUE_DEPTH 256
+#define FD_DEVICE_QUEUE_DEPTH 32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE 512
+#define FD_MAX_SECTORS 1024
+
+#define RRF_EMULATE_CDB 0x01
+#define RRF_GOT_LBA 0x02
+
+struct fd_request {
+ struct se_task fd_task;
+ /* SCSI CDB from iSCSI Command PDU */
+ unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ /* FILEIO device */
+ struct fd_dev *fd_dev;
+} ____cacheline_aligned;
+
+#define FBDF_HAS_PATH 0x01
+#define FBDF_HAS_SIZE 0x02
+#define FDBD_USE_BUFFERED_IO 0x04
+
+struct fd_dev {
+ u32 fbd_flags;
+ unsigned char fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+ u32 fd_dev_id;
+ /* Number of SG tables in sg_table_array */
+ u32 fd_table_count;
+ u32 fd_queue_depth;
+ u32 fd_block_size;
+ unsigned long long fd_dev_size;
+ struct file *fd_file;
+ /* FILEIO HBA device is connected to */
+ struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+ u32 fd_host_dev_id_count;
+ /* Unique FILEIO Host ID */
+ u32 fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 000000000000..4bbe8208b241
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,185 @@
+/*******************************************************************************
+ * Filename: target_core_hba.c
+ *
+ * This file contains the iSCSI HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_hba.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+ struct se_subsystem_api *s;
+
+ INIT_LIST_HEAD(&sub_api->sub_api_list);
+
+ mutex_lock(&subsystem_mutex);
+ list_for_each_entry(s, &subsystem_list, sub_api_list) {
+ if (!(strcmp(s->name, sub_api->name))) {
+ printk(KERN_ERR "%p is already registered with"
+ " duplicate name %s, unable to process"
+ " request\n", s, s->name);
+ mutex_unlock(&subsystem_mutex);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+ mutex_unlock(&subsystem_mutex);
+
+ printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+ " %p\n", sub_api->name, sub_api->owner);
+ return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+ mutex_lock(&subsystem_mutex);
+ list_del(&sub_api->sub_api_list);
+ mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+ struct se_subsystem_api *s;
+
+ mutex_lock(&subsystem_mutex);
+ list_for_each_entry(s, &subsystem_list, sub_api_list) {
+ if (!strcmp(s->name, sub_name))
+ goto found;
+ }
+ mutex_unlock(&subsystem_mutex);
+ return NULL;
+found:
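+	/* Pin the backend module before handing the subsystem API to the caller */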
+ if (s->owner && !try_module_get(s->owner))
+ s = NULL;
+ mutex_unlock(&subsystem_mutex);
+ return s;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+ struct se_hba *hba;
+ int ret = 0;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR "Unable to allocate struct se_hba\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&hba->hba_dev_list);
+ spin_lock_init(&hba->device_lock);
+ spin_lock_init(&hba->hba_queue_lock);
+ mutex_init(&hba->hba_access_mutex);
+
+ hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+ hba->hba_flags |= hba_flags;
+
+ atomic_set(&hba->max_queue_depth, 0);
+ atomic_set(&hba->left_queue_depth, 0);
+
+ hba->transport = core_get_backend(plugin_name);
+ if (!hba->transport) {
+ ret = -EINVAL;
+ goto out_free_hba;
+ }
+
+ ret = hba->transport->attach_hba(hba, plugin_dep_id);
+ if (ret < 0)
+ goto out_module_put;
+
+ spin_lock(&se_global->hba_lock);
+ hba->hba_id = se_global->g_hba_id_counter++;
+ list_add_tail(&hba->hba_list, &se_global->g_hba_list);
+ spin_unlock(&se_global->hba_lock);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+ " Core\n", hba->hba_id);
+
+ return hba;
+
+out_module_put:
+ if (hba->transport->owner)
+ module_put(hba->transport->owner);
+ hba->transport = NULL;
+out_free_hba:
+ kfree(hba);
+ return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+ struct se_device *dev, *dev_tmp;
+
+ spin_lock(&hba->device_lock);
+ list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
+
+ se_clear_dev_ports(dev);
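+		/*
+		 * Release device_lock across se_release_device_for_hba() and
+		 * re-take it before continuing the list walk.
+		 */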
+ spin_unlock(&hba->device_lock);
+
+ se_release_device_for_hba(dev);
+
+ spin_lock(&hba->device_lock);
+ }
+ spin_unlock(&hba->device_lock);
+
+ hba->transport->detach_hba(hba);
+
+ spin_lock(&se_global->hba_lock);
+ list_del(&hba->hba_list);
+ spin_unlock(&se_global->hba_lock);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+ " Core\n", hba->hba_id);
+
+ if (hba->transport->owner)
+ module_put(hba->transport->owner);
+
+ hba->transport = NULL;
+ kfree(hba);
+ return 0;
+}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 000000000000..bb0fea5f730c
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
+#ifndef TARGET_CORE_HBA_H
+#define TARGET_CORE_HBA_H
+
+extern struct se_hba *core_alloc_hba(const char *, u32, u32);
+extern int core_delete_hba(struct se_hba *);
+
+#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 000000000000..67f0c09983c8
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,810 @@
+/*******************************************************************************
+ * Filename: target_core_iblock.c
+ *
+ * This file contains the Storage Engine <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_iblock.h"
+
+#if 0
+#define DEBUG_IBLOCK(x...) printk(x)
+#else
+#define DEBUG_IBLOCK(x...)
+#endif
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct iblock_hba *ib_host;
+
+ ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
+ if (!(ib_host)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct iblock_hba\n");
+ return -ENOMEM;
+ }
+
+ ib_host->iblock_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) ib_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
+ " Target Core TCQ Depth: %d\n", hba->hba_id,
+ ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+
+ return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+ struct iblock_hba *ib_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+ " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
+
+ kfree(ib_host);
+ hba->hba_ptr = NULL;
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct iblock_dev *ib_dev = NULL;
+ struct iblock_hba *ib_host = hba->hba_ptr;
+
+ ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+ if (!(ib_dev)) {
+ printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+ return NULL;
+ }
+ ib_dev->ibd_host = ib_host;
+
+ printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
+
+ return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ struct iblock_dev *ib_dev = p;
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct block_device *bd = NULL;
+ struct request_queue *q;
+ struct queue_limits *limits;
+ u32 dev_flags = 0;
+
+ if (!(ib_dev)) {
+ printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+ return 0;
+ }
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ /*
+ * These settings need to be made tunable..
+ */
+ ib_dev->ibd_bio_set = bioset_create(32, 64);
+ if (!(ib_dev->ibd_bio_set)) {
+ printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+ return 0;
+ }
+ printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+ /*
+ * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+	 * has already been set before echo 1 > $HBA/$DEV/enable can run.
+ */
+ printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
+ ib_dev->ibd_udev_path);
+
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+ if (IS_ERR(bd))
+ goto failed;
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = bdev_get_queue(bd);
+ limits = &dev_limits.limits;
+ limits->logical_block_size = bdev_logical_block_size(bd);
+ limits->max_hw_sectors = queue_max_hw_sectors(q);
+ limits->max_sectors = queue_max_sectors(q);
+ dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+
+ ib_dev->ibd_major = MAJOR(bd->bd_dev);
+ ib_dev->ibd_minor = MINOR(bd->bd_dev);
+ ib_dev->ibd_bd = bd;
+
+ dev = transport_add_device_to_core_hba(hba,
+ &iblock_template, se_dev, dev_flags, (void *)ib_dev,
+ &dev_limits, "IBLOCK", IBLOCK_VERSION);
+ if (!(dev))
+ goto failed;
+
+ ib_dev->ibd_depth = dev->queue_depth;
+
+ /*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
+ * in ATA, in which case TPE=1 needs to be reported.
+ */
+ if (blk_queue_discard(bdev_get_queue(bd))) {
+ struct request_queue *q = bdev_get_queue(bd);
+
+ DEV_ATTRIB(dev)->max_unmap_lba_count =
+ q->limits.max_discard_sectors;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code..
+ */
+ DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
+ DEV_ATTRIB(dev)->unmap_granularity =
+ q->limits.discard_granularity;
+ DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ q->limits.discard_alignment;
+
+ printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+ " disabled by default\n");
+ }
+
+ return dev;
+
+failed:
+ if (ib_dev->ibd_bio_set) {
+ bioset_free(ib_dev->ibd_bio_set);
+ ib_dev->ibd_bio_set = NULL;
+ }
+ ib_dev->ibd_bd = NULL;
+ ib_dev->ibd_major = 0;
+ ib_dev->ibd_minor = 0;
+ return NULL;
+}
+
+static void iblock_free_device(void *p)
+{
+ struct iblock_dev *ib_dev = p;
+
+ if (ib_dev->ibd_bd != NULL)
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ if (ib_dev->ibd_bio_set != NULL)
+ bioset_free(ib_dev->ibd_bio_set);
+ kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+ return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(struct se_cmd *cmd)
+{
+ struct iblock_req *ib_req;
+
+ ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!(ib_req)) {
+ printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+ return NULL;
+ }
+
+ ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+ atomic_set(&ib_req->ib_bio_cnt, 0);
+ return &ib_req->ib_task;
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+ struct se_device *dev,
+ struct block_device *bd,
+ struct request_queue *q)
+{
+ unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+ bdev_logical_block_size(bd)) - 1);
+ u32 block_size = bdev_logical_block_size(bd);
+
+ if (block_size == DEV_ATTRIB(dev)->block_size)
+ return blocks_long;
+
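+ /*
+ * The backing device's block size differs from the emulated SCSI
+ * block size, so scale the reported last LBA accordingly (e.g. a
+ * 512 byte backing device exported at 4096 bytes reports 1/8th as
+ * many logical blocks).
+ */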
+ switch (block_size) {
+ case 4096:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 2048:
+ blocks_long <<= 1;
+ break;
+ case 1024:
+ blocks_long <<= 2;
+ break;
+ case 512:
+ blocks_long <<= 3;
+ default:
+ break;
+ }
+ break;
+ case 2048:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 1;
+ break;
+ case 1024:
+ blocks_long <<= 1;
+ break;
+ case 512:
+ blocks_long <<= 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1024:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 2;
+ break;
+ case 2048:
+ blocks_long >>= 1;
+ break;
+ case 512:
+ blocks_long <<= 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 512:
+ switch (DEV_ATTRIB(dev)->block_size) {
+ case 4096:
+ blocks_long >>= 3;
+ break;
+ case 2048:
+ blocks_long >>= 2;
+ break;
+ case 1024:
+ blocks_long >>= 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return blocks_long;
+}
+
+/*
+ * Emulate SYNCHRONIZE_CACHE_*
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+ int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+ sector_t error_sector;
+ int ret;
+
+ /*
+ * If the Immediate bit is set, queue up the GOOD response
+ * for this SYNCHRONIZE_CACHE op
+ */
+ if (immed)
+ transport_complete_sync_cache(cmd, 1);
+
+ /*
+ * blkdev_issue_flush() does not support specifying a range, so
+ * we have to flush the entire cache.
+ */
+ ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
+ if (ret != 0) {
+ printk(KERN_ERR "IBLOCK: blkdev_issue_flush() failed: %d"
+ " error_sector: %llu\n", ret,
+ (unsigned long long)error_sector);
+ }
+
+ if (!immed)
+ transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int iblock_emulated_write_cache(struct se_device *dev)
+{
+ return 1;
+}
+
+static int iblock_emulated_dpo(struct se_device *dev)
+{
+ return 0;
+}
+
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int iblock_emulated_fua_write(struct se_device *dev)
+{
+ return 1;
+}
+
+static int iblock_emulated_fua_read(struct se_device *dev)
+{
+ return 0;
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+ struct se_device *dev = task->task_se_cmd->se_dev;
+ struct iblock_req *req = IBLOCK_REQ(task);
+ struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
+ struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
+ struct bio *bio = req->ib_bio, *nbio = NULL;
+ int rw;
+
+ if (task->task_data_direction == DMA_TO_DEVICE) {
+ /*
+ * Force data to disk if we pretend to not have a volatile
+ * write cache, or the initiator set the Force Unit Access bit.
+ */
+ if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
+ (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+ T_TASK(task->task_se_cmd)->t_tasks_fua))
+ rw = WRITE_FUA;
+ else
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+
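+ /*
+ * Walk the singly linked bio chain built up by iblock_map_task_SG()
+ * and hand each bio off to the block layer.
+ */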
+ while (bio) {
+ nbio = bio->bi_next;
+ bio->bi_next = NULL;
+ DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
+ " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+
+ submit_bio(rw, bio);
+ bio = nbio;
+ }
+
+ if (q->unplug_fn)
+ q->unplug_fn(q);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+ struct iblock_dev *ibd = dev->dev_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ int barrier = 0;
+
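+ /*
+ * Pass the LBA range straight through to the block layer discard
+ * path; no barrier semantics are requested here.
+ */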
+ return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+ struct iblock_req *req = IBLOCK_REQ(task);
+ struct bio *bio, *hbio = req->ib_bio;
+ /*
+ * We only release the bio(s) here if iblock_bio_done() has not called
+ * bio_put() -> iblock_bio_destructor().
+ */
+ while (hbio != NULL) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_put(bio);
+ }
+
+ kfree(req);
+}
+
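+/*
+ * configfs device parameters accepted by the IBLOCK backend in
+ * iblock_set_configfs_dev_params() below.
+ */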
+enum {
+ Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_udev_path, "udev_path=%s"},
+ {Opt_force, "force=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page, ssize_t count)
+{
+ struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
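+ /*
+ * Walk the comma separated list of key=value parameters from configfs.
+ */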
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_udev_path:
+ if (ib_dev->ibd_bd) {
+ printk(KERN_ERR "Unable to set udev_path= while"
+ " ib_dev->ibd_bd exists\n");
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+ "%s", match_strdup(&args[0]));
+ printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+ ib_dev->ibd_udev_path);
+ ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+ break;
+ case Opt_force:
+ match_int(args, &arg);
+ ib_dev->ibd_force = arg;
+ printk(KERN_INFO "IBLOCK: Set force=%d\n",
+ ib_dev->ibd_force);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_check_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev)
+{
+ struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+ if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+ printk(KERN_ERR "Missing udev_path= parameter for IBLOCK\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ char buf[BDEVNAME_SIZE];
+ ssize_t bl = 0;
+
+ if (bd)
+ bl += sprintf(b + bl, "iBlock device: %s",
+ bdevname(bd, buf));
+ if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+ bl += sprintf(b + bl, " UDEV PATH: %s\n",
+ ibd->ibd_udev_path);
+ } else
+ bl += sprintf(b + bl, "\n");
+
+ bl += sprintf(b + bl, " ");
+ if (bd) {
+ bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
+ ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+ "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+ "CLAIMED: IBLOCK" : "CLAIMED: OS");
+ } else {
+ bl += sprintf(b + bl, "Major: %d Minor: %d\n",
+ ibd->ibd_major, ibd->ibd_minor);
+ }
+
+ return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+ struct se_task *task = bio->bi_private;
+ struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+
+ bio_free(bio, ib_dev->ibd_bio_set);
+}
+
+static struct bio *iblock_get_bio(
+ struct se_task *task,
+ struct iblock_req *ib_req,
+ struct iblock_dev *ib_dev,
+ int *ret,
+ sector_t lba,
+ u32 sg_num)
+{
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ if (!(bio)) {
+ printk(KERN_ERR "Unable to allocate memory for bio\n");
+ *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return NULL;
+ }
+
+ DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
+ " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
+ DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+ bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_private = (void *) task;
+ bio->bi_destructor = iblock_bio_destructor;
+ bio->bi_end_io = &iblock_bio_done;
+ bio->bi_sector = lba;
+ atomic_inc(&ib_req->ib_bio_cnt);
+
+ DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
+ DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+ atomic_read(&ib_req->ib_bio_cnt));
+ return bio;
+}
+
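+/*
+ * Map the task's scatterlist into one or more struct bio chained via
+ * bi_next, converting the emulated SCSI LBA into 512 byte block layer
+ * sectors.
+ */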
+static int iblock_map_task_SG(struct se_task *task)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = SE_DEV(cmd);
+ struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+ struct iblock_req *ib_req = IBLOCK_REQ(task);
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct scatterlist *sg;
+ int ret = 0;
+ u32 i, sg_num = task->task_sg_num;
+ sector_t block_lba;
+ /*
+ * Convert the struct se_task SCSI blocksize (which may be larger than
+ * 512 bytes) into Linux/Block 512 byte units for the BIO.
+ */
+ if (DEV_ATTRIB(dev)->block_size == 4096)
+ block_lba = (task->task_lba << 3);
+ else if (DEV_ATTRIB(dev)->block_size == 2048)
+ block_lba = (task->task_lba << 2);
+ else if (DEV_ATTRIB(dev)->block_size == 1024)
+ block_lba = (task->task_lba << 1);
+ else if (DEV_ATTRIB(dev)->block_size == 512)
+ block_lba = task->task_lba;
+ else {
+ printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+ " %u\n", DEV_ATTRIB(dev)->block_size);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+ if (!(bio))
+ return ret;
+
+ ib_req->ib_bio = bio;
+ hbio = tbio = bio;
+ /*
+ * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
+ * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+ */
+ for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
+ DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+ " %p len: %u offset: %u\n", task, bio, sg_page(sg),
+ sg->length, sg->offset);
+again:
+ ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
+ if (ret != sg->length) {
+
+ DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
+ bio->bi_sector);
+ DEBUG_IBLOCK("** task->task_size: %u\n",
+ task->task_size);
+ DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+ bio->bi_max_vecs);
+ DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+ bio->bi_vcnt);
+
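+ /*
+ * The current bio has no room left for this segment, so
+ * allocate another bio from the bio_set, chain it via
+ * bi_next and retry bio_add_page() for the same
+ * scatterlist entry.
+ */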
+ bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
+ block_lba, sg_num);
+ if (!(bio))
+ goto fail;
+
+ tbio = tbio->bi_next = bio;
+ DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+ " list, Going to again\n", bio);
+ goto again;
+ }
+ /* Always in 512 byte units for Linux/Block */
+ block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ sg_num--;
+ DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented"
+ " sg_num to %u\n", task, sg_num);
+ DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
+ " to %llu\n", task, block_lba);
+ DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+ " %u\n", task, bio->bi_vcnt);
+ }
+
+ return 0;
+fail:
+ while (hbio) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_put(bio);
+ }
+ return ret;
+}
+
+static unsigned char *iblock_get_cdb(struct se_task *task)
+{
+ return IBLOCK_REQ(task)->ib_scsi_cdb;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+ struct iblock_dev *ibd = dev->dev_ptr;
+ struct block_device *bd = ibd->ibd_bd;
+ struct request_queue *q = bdev_get_queue(bd);
+
+ return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+ struct se_task *task = bio->bi_private;
+ struct iblock_req *ibr = IBLOCK_REQ(task);
+ /*
+ * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+ */
+ if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+ err = -EIO;
+
+ if (err != 0) {
+ printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+ " err: %d\n", bio, err);
+ /*
+ * Bump the ib_bio_err_cnt and release bio.
+ */
+ atomic_inc(&ibr->ib_bio_err_cnt);
+ smp_mb__after_atomic_inc();
+ bio_put(bio);
+ /*
+ * Wait to complete the task until the last bio has completed.
+ */
+ if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ return;
+
+ ibr->ib_bio = NULL;
+ transport_complete_task(task, 0);
+ return;
+ }
+ DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba, bio->bi_sector, err);
+ /*
+ * bio_put() will call iblock_bio_destructor() to release the bio back
+ * to ibr->ib_bio_set.
+ */
+ bio_put(bio);
+ /*
+ * Wait to complete the task until the last bio has completed.
+ */
+ if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ return;
+ /*
+ * Return GOOD status for task if zero ib_bio_err_cnt exists.
+ */
+ ibr->ib_bio = NULL;
+ transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+}
+
+static struct se_subsystem_api iblock_template = {
+ .name = "iblock",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .map_task_SG = iblock_map_task_SG,
+ .attach_hba = iblock_attach_hba,
+ .detach_hba = iblock_detach_hba,
+ .allocate_virtdevice = iblock_allocate_virtdevice,
+ .create_virtdevice = iblock_create_virtdevice,
+ .free_device = iblock_free_device,
+ .dpo_emulated = iblock_emulated_dpo,
+ .fua_write_emulated = iblock_emulated_fua_write,
+ .fua_read_emulated = iblock_emulated_fua_read,
+ .write_cache_emulated = iblock_emulated_write_cache,
+ .alloc_task = iblock_alloc_task,
+ .do_task = iblock_do_task,
+ .do_discard = iblock_do_discard,
+ .do_sync_cache = iblock_emulate_sync_cache,
+ .free_task = iblock_free_task,
+ .check_configfs_dev_params = iblock_check_configfs_dev_params,
+ .set_configfs_dev_params = iblock_set_configfs_dev_params,
+ .show_configfs_dev_params = iblock_show_configfs_dev_params,
+ .get_cdb = iblock_get_cdb,
+ .get_device_rev = iblock_get_device_rev,
+ .get_device_type = iblock_get_device_type,
+ .get_blocks = iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+ return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+ transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 000000000000..64c1f4d69f76
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION "4.0"
+
+#define IBLOCK_HBA_QUEUE_DEPTH 512
+#define IBLOCK_DEVICE_QUEUE_DEPTH 32
+#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
+#define IBLOCK_MAX_CDBS 16
+#define IBLOCK_LBA_SHIFT 9
+
+struct iblock_req {
+ struct se_task ib_task;
+ unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ atomic_t ib_bio_cnt;
+ atomic_t ib_bio_err_cnt;
+ struct bio *ib_bio;
+ struct iblock_dev *ib_dev;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH 0x01
+#define IBDF_HAS_FORCE 0x02
+
+struct iblock_dev {
+ unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+ int ibd_force;
+ int ibd_major;
+ int ibd_minor;
+ u32 ibd_depth;
+ u32 ibd_flags;
+ struct bio_set *ibd_bio_set;
+ struct block_device *ibd_bd;
+ struct iblock_hba *ibd_host;
+} ____cacheline_aligned;
+
+struct iblock_hba {
+ int iblock_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 000000000000..2521f75362c3
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
+/*******************************************************************************
+ * Filename: target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+ int dest_local_nexus;
+ struct t10_pr_registration *dest_pr_reg;
+ struct se_portal_group *dest_tpg;
+ struct se_node_acl *dest_node_acl;
+ struct se_dev_entry *dest_se_deve;
+ struct list_head dest_list;
+};
+
+int core_pr_dump_initiator_port(
+ struct t10_pr_registration *pr_reg,
+ char *buf,
+ u32 size)
+{
+ if (!(pr_reg->isid_present_at_reg))
+ return 0;
+
+ snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+ return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+ struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
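+ /*
+ * While a legacy SPC-2 reservation is held by another I_T nexus,
+ * only INQUIRY and RELEASE are allowed through; all other CDBs
+ * conflict.
+ */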
+ switch (cdb[0]) {
+ case INQUIRY:
+ case RELEASE:
+ case RELEASE_10:
+ return 0;
+ default:
+ return 1;
+ }
+
+ return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ int ret;
+
+ if (!(sess))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!dev->dev_reserved_node_acl || !sess) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return -1;
+ }
+ if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+static int core_scsi2_reservation_release(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+
+ if (!(sess) || !(tpg))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!dev->dev_reserved_node_acl || !sess) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+
+ if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_res_bin_isid = 0;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return 0;
+}
+
+static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+
+ if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
+ (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
+ printk(KERN_ERR "LongIO and Obsolete Bits set, returning"
+ " ILLEGAL_REQUEST\n");
+ return PYX_TRANSPORT_ILLEGAL_REQUEST;
+ }
+ /*
+ * This is currently the case for target_core_mod passthrough struct se_cmd
+ * ops
+ */
+ if (!(sess) || !(tpg))
+ return 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reserved_node_acl &&
+ (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+ printk(KERN_ERR "SCSI-2 RESERVATION CONFLICT for %s fabric\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ printk(KERN_ERR "Original reserver LUN: %u %s\n",
+ SE_LUN(cmd)->unpacked_lun,
+ dev->dev_reserved_node_acl->initiatorname);
+ printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", SE_LUN(cmd)->unpacked_lun,
+ cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ dev->dev_reserved_node_acl = sess->se_node_acl;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ if (sess->sess_bin_isid != 0) {
+ dev->dev_res_bin_isid = sess->sess_bin_isid;
+ dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+ printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ sess->se_node_acl->initiatorname);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+ struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+/*
+ * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
+ * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
+ * thread context.
+ */
+int core_scsi2_emulate_crh(struct se_cmd *cmd)
+{
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct t10_pr_registration *pr_reg;
+ struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
+ unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+ int conflict = 0;
+
+ if (!(se_sess))
+ return 0;
+
+ if (!(crh))
+ goto after_crh;
+
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+ se_sess);
+ if (pr_reg) {
+ /*
+ * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+ * behavior
+ *
+ * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+ * status, but no reservation shall be established and the
+ * persistent reservation shall not be changed, if the command
+ * is received from a) and b) below.
+ *
+ * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+ * status, but the persistent reservation shall not be released,
+ * if the command is received from a) and b)
+ *
+ * a) An I_T nexus that is a persistent reservation holder; or
+ * b) An I_T nexus that is registered if a registrants only or
+ * all registrants type persistent reservation is present.
+ *
+ * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+ * RELEASE(6) command, or RELEASE(10) command shall be processed
+ * as defined in SPC-2.
+ */
+ if (pr_reg->pr_res_holder) {
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+ }
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+ }
+ core_scsi3_put_pr_reg(pr_reg);
+ conflict = 1;
+ } else {
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has executed a PERSISTENT RESERVE OUT
+ * command with the REGISTER or the REGISTER AND IGNORE
+ * EXISTING KEY service action and is still registered by any
+ * initiator, all RESERVE commands and all RELEASE commands
+ * regardless of initiator shall conflict and shall terminate
+ * with a RESERVATION CONFLICT status.
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
+
+ if (conflict) {
+ printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+ " while active SPC-3 registrations exist,"
+ " returning RESERVATION_CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+after_crh:
+ if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
+ return core_scsi2_reservation_reserve(cmd);
+ else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
+ return core_scsi2_reservation_release(cmd);
+ else
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
+ struct se_dev_entry *se_deve;
+ struct se_session *se_sess = SE_SESS(cmd);
+ int other_cdb = 0, ignore_reg;
+ int registered_nexus = 0, ret = 1; /* Conflict by default */
+ int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+ int we = 0; /* Write Exclusive */
+ int legacy = 0; /* Act like a legacy device and return
+ * RESERVATION CONFLICT on some CDBs */
+ /*
+ * A legacy SPC-2 reservation is being held.
+ */
+ if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+ return core_scsi2_reservation_seq_non_holder(cmd,
+ cdb, pr_reg_type);
+
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Determine if the registration should be ignored due to
+ * non-matching ISIDs in core_scsi3_pr_reservation_check().
+ */
+ ignore_reg = (pr_reg_type & 0x80000000);
+ if (ignore_reg)
+ pr_reg_type &= ~0x80000000;
+
+ switch (pr_reg_type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ we = 1;
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ /*
+ * Some commands are only allowed for the persistent reservation
+ * holder.
+ */
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ we = 1;
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ /*
+ * Some commands are only allowed for registered I_T Nexuses.
+ */
+ reg_only = 1;
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ we = 1;
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ /*
+ * Each registered I_T Nexus is a reservation holder.
+ */
+ all_reg = 1;
+ if ((se_deve->def_pr_registered) && !(ignore_reg))
+ registered_nexus = 1;
+ break;
+ default:
+ return -1;
+ }
+ /*
+ * Referenced from spc4r17 table 45 for *NON* PR holder access
+ */
+ switch (cdb[0]) {
+ case SECURITY_PROTOCOL_IN:
+ if (registered_nexus)
+ return 0;
+ ret = (we) ? 0 : 1;
+ break;
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_ATTRIBUTE:
+ case READ_BUFFER:
+ case RECEIVE_DIAGNOSTIC:
+ if (legacy) {
+ ret = 1;
+ break;
+ }
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case PERSISTENT_RESERVE_OUT:
+ /*
+ * This follows PERSISTENT_RESERVE_OUT service actions that
+ * are allowed in the presence of various reservations.
+ * See spc4r17, table 46
+ */
+ switch (cdb[1] & 0x1f) {
+ case PRO_CLEAR:
+ case PRO_PREEMPT:
+ case PRO_PREEMPT_AND_ABORT:
+ ret = (registered_nexus) ? 0 : 1;
+ break;
+ case PRO_REGISTER:
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ ret = 0;
+ break;
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_RESERVE:
+ ret = 1;
+ break;
+ case PRO_RELEASE:
+ ret = (registered_nexus) ? 0 : 1;
+ break;
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return -1;
+ }
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ ret = 0;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /* Handled by CRH=1 in core_scsi2_emulate_crh() */
+ ret = 0;
+ break;
+ case TEST_UNIT_READY:
+ ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+ break;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_MANAGEMENT_PROTOCOL_IN:
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case MI_REPORT_SUPPORTED_OPERATION_CODES:
+ case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+ if (legacy) {
+ ret = 1;
+ break;
+ }
+ if (registered_nexus) {
+ ret = 0;
+ break;
+ }
+ ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+ break;
+ case MI_REPORT_ALIASES:
+ case MI_REPORT_IDENTIFYING_INFORMATION:
+ case MI_REPORT_PRIORITY:
+ case MI_REPORT_TARGET_PGS:
+ case MI_REPORT_TIMESTAMP:
+ ret = 0; /* Allowed */
+ break;
+ default:
+ printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+ (cdb[1] & 0x1f));
+ return -1;
+ }
+ break;
+ case ACCESS_CONTROL_IN:
+ case ACCESS_CONTROL_OUT:
+ case INQUIRY:
+ case LOG_SENSE:
+ case READ_MEDIA_SERIAL_NUMBER:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ ret = 0; /* Allowed CDBs */
+ break;
+ default:
+ other_cdb = 1;
+ break;
+ }
+ /*
+ * Case where the CDB is explicitly allowed in the above switch
+ * statement.
+ */
+ if (!(ret) && !(other_cdb)) {
+#if 0
+ printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s"
+ " reservation holder\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+ return ret;
+ }
+ /*
+ * Handle initiator ports that are *NOT* holding the
+ * WRITE_EXCLUSIVE_* reservation.
+ */
+ if ((we) && !(registered_nexus)) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /*
+ * Conflict for write exclusive
+ */
+ printk(KERN_INFO "%s Conflict for unregistered nexus"
+ " %s CDB: 0x%02x to %s reservation\n",
+ transport_dump_cmd_direction(cmd),
+ se_sess->se_node_acl->initiatorname, cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+ return 1;
+ } else {
+ /*
+ * Allow non WRITE CDBs for all Write Exclusive
+ * PR TYPEs to pass for registered and
+ * non-registered nexuses NOT holding the reservation.
+ *
+ * We only make noise for the unregistered nexuses,
+ * as we expect registered non-reservation holding
+ * nexuses to issue CDBs.
+ */
+#if 0
+ if (!(registered_nexus)) {
+ printk(KERN_INFO "Allowing implicit CDB: 0x%02x"
+ " for %s reservation on unregistered"
+ " nexus\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+ }
+#endif
+ return 0;
+ }
+ } else if ((reg_only) || (all_reg)) {
+ if (registered_nexus) {
+ /*
+ * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+ * allow commands from registered nexuses.
+ */
+#if 0
+ printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+ return 0;
+ }
+ }
+ printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ " for %s reservation\n", transport_dump_cmd_direction(cmd),
+ (registered_nexus) ? "" : "un",
+ se_sess->se_node_acl->initiatorname, cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 1; /* Conflict by default */
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ u32 prg;
+ /*
+ * PRGeneration field shall contain the value of a 32-bit wrapping
+ * counter maintained by the device server.
+ *
+ * Note that this is done regardless of Active Persist across
+ * Target PowerLoss (APTPL)
+ *
+ * See spc4r17 section 6.3.12 READ_KEYS service action
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ prg = T10_RES(su_dev)->pr_generation++;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+ struct se_cmd *cmd,
+ u32 *pr_reg_type)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ int ret;
+
+ if (!(sess))
+ return 0;
+ /*
+ * A legacy SPC-2 reservation is being held.
+ */
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+ return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (!(dev->dev_pr_res_holder)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+ cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+ if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return -1;
+ }
+ if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return 0;
+ }
+ ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+ sess->sess_bin_isid) ? 0 : -1;
+ /*
+ * Use bit in *pr_reg_type to notify ISID mismatch in
+ * core_scsi3_pr_seq_non_holder().
+ */
+ if (ret != 0)
+ *pr_reg_type |= 0x80000000;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
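+/*
+ * Allocate and initialize a single struct t10_pr_registration for the
+ * passed I_T nexus.  The caller is responsible for adding it to the
+ * device's registration list.
+ */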
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_pr_registration *pr_reg;
+
+ pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ return NULL;
+ }
+
+ pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+ GFP_ATOMIC);
+ if (!(pr_reg->pr_aptpl_buf)) {
+ printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+ atomic_set(&pr_reg->pr_res_holders, 0);
+ pr_reg->pr_reg_nacl = nacl;
+ pr_reg->pr_reg_deve = deve;
+ pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+ pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+ pr_reg->pr_res_key = sa_res_key;
+ pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+ pr_reg->pr_reg_aptpl = aptpl;
+ pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+ /*
+ * If an ISID value for this SCSI Initiator Port exists,
+ * save it to the registration now.
+ */
+ if (isid != NULL) {
+ pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+ snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+ pr_reg->isid_present_at_reg = 1;
+ }
+
+ return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_dev_entry *deve_tmp;
+ struct se_node_acl *nacl_tmp;
+ struct se_port *port, *port_tmp;
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+ int ret;
+ /*
+ * Create a registration for the I_T Nexus upon which the
+ * PROUT REGISTER was received.
+ */
+ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg))
+ return NULL;
+ /*
+ * Return pointer to pr_reg for ALL_TG_PT=0
+ */
+ if (!(all_tg_pt))
+ return pr_reg;
+ /*
+ * Create list of matching SCSI Initiator Port registrations
+ * for ALL_TG_PT=1
+ */
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+ atomic_inc(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_for_each_entry(deve_tmp, &port->sep_alua_list,
+ alua_port_list) {
+ /*
+ * This pointer will be NULL for demo mode MappedLUNs
+ * that have not been made explicit via a ConfigFS
+ * MappedLUN group for the SCSI Initiator Node ACL.
+ */
+ if (!(deve_tmp->se_lun_acl))
+ continue;
+
+ nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+ /*
+ * Skip the matching struct se_node_acl that is allocated
+ * above..
+ */
+ if (nacl == nacl_tmp)
+ continue;
+ /*
+ * Only perform PR registrations for target ports on
+ * the same fabric module on which the REGISTER w/
+ * ALL_TG_PT=1 arrived.
+ */
+ if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+ continue;
+ /*
+ * Look for a matching Initiator Node ACL in ASCII format
+ */
+ if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+ continue;
+
+ atomic_inc(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_bh(&port->sep_alua_lock);
+ /*
+ * Grab a configfs group dependency that is released
+ * for the exception path at label out: below, or upon
+ * completion of adding ALL_TG_PT=1 registrations in
+ * __core_scsi3_add_registration()
+ */
+ ret = core_scsi3_lunacl_depend_item(deve_tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend"
+ "_item() failed\n");
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ goto out;
+ }
+ /*
+ * Located a matching SCSI Initiator Port on a different
+ * port, allocate the pr_reg_atp and attach it to the
+ * pr_reg->pr_reg_atp_list that will be processed once
+ * the original *pr_reg is processed in
+ * __core_scsi3_add_registration()
+ */
+ pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+ nacl_tmp, deve_tmp, NULL,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg_atp)) {
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&deve_tmp->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_lunacl_undepend_item(deve_tmp);
+ goto out;
+ }
+
+ list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+ &pr_reg->pr_reg_atp_list);
+ spin_lock_bh(&port->sep_alua_lock);
+ }
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ spin_lock(&dev->se_port_lock);
+ atomic_dec(&port->sep_tg_pt_ref_cnt);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ return pr_reg;
+out:
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+ }
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+ struct t10_reservation_template *pr_tmpl,
+ u64 sa_res_key,
+ unsigned char *i_port,
+ unsigned char *isid,
+ u32 mapped_lun,
+ unsigned char *t_port,
+ u16 tpgt,
+ u32 target_lun,
+ int res_holder,
+ int all_tg_pt,
+ u8 type)
+{
+ struct t10_pr_registration *pr_reg;
+
+ if (!(i_port) || !(t_port) || !(sa_res_key)) {
+ printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ return -1;
+ }
+
+ pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ return -1;
+ }
+ pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+ if (!(pr_reg->pr_aptpl_buf)) {
+ printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return -1;
+ }
+
+ INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+ INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+ atomic_set(&pr_reg->pr_res_holders, 0);
+ pr_reg->pr_reg_nacl = NULL;
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_res_mapped_lun = mapped_lun;
+ pr_reg->pr_aptpl_target_lun = target_lun;
+ pr_reg->pr_res_key = sa_res_key;
+ pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+ pr_reg->pr_reg_aptpl = 1;
+ pr_reg->pr_reg_tg_pt_lun = NULL;
+ pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+ pr_reg->pr_res_type = type;
+ /*
+ * If an ISID value had been saved in APTPL metadata for this
+ * SCSI Initiator Port, restore it now.
+ */
+ if (isid != NULL) {
+ pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+ snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+ pr_reg->isid_present_at_reg = 1;
+ }
+ /*
+ * Copy the i_port and t_port information from caller.
+ */
+ snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+ snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+ pr_reg->pr_reg_tpgt = tpgt;
+ /*
+ * Set pr_res_holder from caller, the pr_reg who is the reservation
+ * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+ * the Initiator Node LUN ACL from the fabric module is created for
+ * this registration.
+ */
+ pr_reg->pr_res_holder = res_holder;
+
+ list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+ printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+ " metadata\n", (res_holder) ? "+reservation" : "");
+ return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_node_acl *node_acl,
+ struct t10_pr_registration *pr_reg)
+{
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ spin_lock(&dev->dev_reservation_lock);
+ dev->dev_pr_res_holder = pr_reg;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+ " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+ struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ u32 target_lun,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ u16 tpgt;
+
+ memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+ memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+ /*
+ * Copy Initiator Port information from struct se_node_acl
+ */
+ snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+ snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+ TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ /*
+ * Look for the matching registrations+reservation from those
+ * created from APTPL metadata. Note that multiple registrations
+ * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+ * TransportIDs.
+ */
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+ pr_reg_aptpl_list) {
+ if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+ (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+ !(strcmp(pr_reg->pr_tport, t_port)) &&
+ (pr_reg->pr_reg_tpgt == tpgt) &&
+ (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+ pr_reg->pr_reg_nacl = nacl;
+ pr_reg->pr_reg_deve = deve;
+ pr_reg->pr_reg_tg_pt_lun = lun;
+
+ list_del(&pr_reg->pr_reg_aptpl_list);
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+ /*
+ * At this point all of the pointers in *pr_reg will
+ * be setup, so go ahead and add the registration.
+ */
+
+ __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+ /*
+ * If this registration is the reservation holder,
+ * make that happen now..
+ */
+ if (pr_reg->pr_res_holder)
+ core_scsi3_aptpl_reserve(dev, tpg,
+ nacl, pr_reg);
+ /*
+ * Reenable pr_aptpl_active to accept new metadata
+ * updates once the SCSI device is active again..
+ */
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ pr_tmpl->pr_aptpl_active = 1;
+ }
+ }
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+ return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+ struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+
+ if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ return 0;
+
+ return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+ lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+ struct target_core_fabric_ops *tfo,
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg,
+ int register_type)
+{
+ struct se_portal_group *se_tpg = nacl->se_tpg;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+ " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+ "_AND_MOVE" : (register_type == 1) ?
+ "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+ (prf_isid) ? i_buf : "");
+ printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+ tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+ tfo->tpg_get_tag(se_tpg));
+ printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ " Port(s)\n", tfo->get_fabric_name(),
+ (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+ TRANSPORT(dev)->name);
+ printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
+ pr_reg->pr_res_key, pr_reg->pr_res_generation,
+ pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * This function can be called with struct se_device->dev_reservation_lock
+ * held when register_move = 1
+ */
+static void __core_scsi3_add_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg,
+ int register_type,
+ int register_move)
+{
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+
+ /*
+ * Increment PRgeneration counter for struct se_device upon a successful
+ * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+ *
+ * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+ * action, the struct se_device->dev_reservation_lock will already be held,
+ * so we do not call core_scsi3_pr_generation() which grabs the lock
+ * for the REGISTER.
+ */
+ pr_reg->pr_res_generation = (register_move) ?
+ T10_RES(su_dev)->pr_generation++ :
+ core_scsi3_pr_generation(dev);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+ pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+ __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+ */
+ if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+ return;
+ /*
+ * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+ * allocated in __core_scsi3_alloc_registration()
+ */
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+ pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_add_tail(&pr_reg_tmp->pr_reg_list,
+ &pr_tmpl->registration_list);
+ pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+ __core_scsi3_dump_registration(tfo, dev,
+ pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+ register_type);
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Drop configfs group dependency reference from
+ * __core_scsi3_alloc_registration()
+ */
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ }
+}
+
+static int core_scsi3_alloc_registration(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_dev_entry *deve,
+ unsigned char *isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl,
+ int register_type,
+ int register_move)
+{
+ struct t10_pr_registration *pr_reg;
+
+ pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(pr_reg))
+ return -1;
+
+ __core_scsi3_add_registration(dev, nacl, pr_reg,
+ register_type, register_move);
+ return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ unsigned char *isid)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct se_portal_group *tpg;
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+ /*
+ * First look for a matching struct se_node_acl
+ */
+ if (pr_reg->pr_reg_nacl != nacl)
+ continue;
+
+ tpg = pr_reg->pr_reg_nacl->se_tpg;
+ /*
+ * If this registration does NOT contain a fabric provided
+ * ISID, then we have found a match.
+ */
+ if (!(pr_reg->isid_present_at_reg)) {
+ /*
+ * Determine if this SCSI device server requires that
+ * SCSI Initiator TransportIDs w/ ISIDs are enforced
+ * for fabric modules (iSCSI) requiring them.
+ */
+ if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ if (DEV_ATTRIB(dev)->enforce_pr_isids)
+ continue;
+ }
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ return pr_reg;
+ }
+ /*
+ * If the *pr_reg contains a fabric defined ISID for multi-value
+ * SCSI Initiator Port TransportIDs, then we expect a valid
+ * matching ISID to be provided by the local SCSI Initiator Port.
+ */
+ if (!(isid))
+ continue;
+ if (strcmp(isid, pr_reg->pr_reg_isid))
+ continue;
+
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ return pr_reg;
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+ struct se_device *dev,
+ struct se_node_acl *nacl,
+ struct se_session *sess)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+ unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+ if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ memset(&buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+ PR_REG_ISID_LEN);
+ isid_ptr = &buf[0];
+ }
+
+ return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_check_implict_release(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg)
+{
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct t10_pr_registration *pr_res_holder;
+ int ret = 0;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ return ret;
+ }
+ if (pr_res_holder == pr_reg) {
+ /*
+ * Perform an implicit RELEASE if the registration that
+ * is being released is holding the reservation.
+ *
+ * From spc4r17, section 5.7.11.1:
+ *
+ * e) If the I_T nexus is the persistent reservation holder
+ * and the persistent reservation is not an all registrants
+ * type, then a PERSISTENT RESERVE OUT command with REGISTER
+ * service action or REGISTER AND IGNORE EXISTING KEY
+ * service action with the SERVICE ACTION RESERVATION KEY
+ * field set to zero (see 5.7.11.3).
+ */
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+ ret = 1;
+ /*
+ * For 'All Registrants' reservation types, all existing
+ * registrations are still processed as reservation holders
+ * in core_scsi3_pr_seq_non_holder() after the initial
+ * reservation holder is implicitly released here.
+ */
+ } else if (pr_reg->pr_reg_all_tg_pt &&
+ (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_nacl->initiatorname)) &&
+ (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+ " UNREGISTER while existing reservation with matching"
+ " key 0x%016Lx is present from another SCSI Initiator"
+ " Port\n", pr_reg->pr_res_key);
+ ret = -1;
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
+/*
+ * Called with struct t10_reservation_template->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg,
+ struct list_head *preempt_and_abort_list,
+ int dec_holders)
+{
+ struct target_core_fabric_ops *tfo =
+ pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ pr_reg->pr_reg_deve->def_pr_registered = 0;
+ pr_reg->pr_reg_deve->pr_res_key = 0;
+ list_del(&pr_reg->pr_reg_list);
+ /*
+ * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+ * so call core_scsi3_put_pr_reg() to decrement our reference.
+ */
+ if (dec_holders)
+ core_scsi3_put_pr_reg(pr_reg);
+ /*
+ * Wait until all references from any other I_T nexuses for this
+ * *pr_reg have been released. Because list_del() is called above,
+ * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
+ * count back to zero, and we release *pr_reg.
+ */
+ while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+ spin_unlock(&pr_tmpl->registration_lock);
+ printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+ tfo->get_fabric_name());
+ cpu_relax();
+ spin_lock(&pr_tmpl->registration_lock);
+ }
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+ " Node: %s%s\n", tfo->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ " Port(s)\n", tfo->get_fabric_name(),
+ (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+ TRANSPORT(dev)->name);
+ printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+ pr_reg->pr_res_generation);
+
+ if (!(preempt_and_abort_list)) {
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_reg_nacl = NULL;
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return;
+ }
+ /*
+ * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+ * are released once the ABORT_TASK_SET has completed..
+ */
+ list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+ struct se_device *dev,
+ struct se_node_acl *nacl)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+ /*
+ * If the passed se_node_acl matches the reservation holder,
+ * release the reservation.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if ((pr_res_holder != NULL) &&
+ (pr_res_holder->pr_reg_nacl == nacl))
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * Release any registration associated with the struct se_node_acl.
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ if (pr_reg->pr_reg_nacl != nacl)
+ continue;
+
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+}
+
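+/*
+ * Release the SPC-3 PR reservation holder (if any) and free every
+ * registration for the passed struct se_device, including any entries
+ * still pending on the APTPL registration list.
+ */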
+void core_scsi3_free_all_registrations(
+ struct se_device *dev)
+{
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder != NULL) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ pr_res_holder, 0);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ spin_lock(&pr_tmpl->aptpl_reg_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+ pr_reg_aptpl_list) {
+ list_del(&pr_reg->pr_reg_aptpl_list);
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ }
+ spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
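+/*
+ * The configfs_depend_item()/configfs_undepend_item() pairs below pin the
+ * referenced TPG, node ACL and LUN ACL configfs groups so they cannot be
+ * removed via rmdir while SPC-3 PR logic still holds references to them.
+ */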
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &tpg->tpg_group.cg_item);
+
+ atomic_dec(&tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+
+ if (nacl->dynamic_node_acl)
+ return 0;
+
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+ struct se_portal_group *tpg = nacl->se_tpg;
+
+ if (nacl->dynamic_node_acl) {
+ atomic_dec(&nacl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ return;
+ }
+
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &nacl->acl_group.cg_item);
+
+ atomic_dec(&nacl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+ struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_node_acl *nacl;
+ struct se_portal_group *tpg;
+ /*
+ * se_lun_acl is NULL for dynamically generated node ACLs
+ * (nacl->dynamic_node_acl=1), so there is no LUN ACL configfs
+ * group to depend on.
+ */
+ if (!(lun_acl))
+ return 0;
+
+ nacl = lun_acl->se_lun_nacl;
+ tpg = nacl->se_tpg;
+
+ return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ &lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+ struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+ struct se_node_acl *nacl;
+ struct se_portal_group *tpg;
+ /*
+ * se_lun_acl is NULL for dynamically generated node ACLs
+ * (nacl->dynamic_node_acl=1), so there is no LUN ACL configfs
+ * group to undepend.
+ */
+ if (!(lun_acl)) {
+ atomic_dec(&se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ return;
+ }
+ nacl = lun_acl->se_lun_nacl;
+ tpg = nacl->se_tpg;
+
+ configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ &lun_acl->se_lun_group.cg_item);
+
+ atomic_dec(&se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+}
+
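+/*
+ * Handle the SPEC_I_PT=1 case for PROUT REGISTER: walk the list of
+ * TransportIDs in the parameter data, locate the matching target port,
+ * node ACL and se_dev_entry for each one, and allocate (but do not yet
+ * add) a registration per TransportID. Registrations are only added in
+ * the second loop, once every TransportID has been validated.
+ */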
+static int core_scsi3_decode_spec_i_port(
+ struct se_cmd *cmd,
+ struct se_portal_group *tpg,
+ unsigned char *l_isid,
+ u64 sa_res_key,
+ int all_tg_pt,
+ int aptpl)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_port *tmp_port;
+ struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_node_acl *dest_node_acl = NULL;
+ struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+ struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+ struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+ struct list_head tid_dest_list;
+ struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+ struct target_core_fabric_ops *tmp_tf_ops;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+ char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ u32 tpdl, tid_len = 0;
+ int ret, dest_local_nexus, prf_isid;
+ u32 dest_rtpi = 0;
+
+ memset(dest_iport, 0, 64);
+ INIT_LIST_HEAD(&tid_dest_list);
+
+ local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Allocate a struct pr_transport_id_holder, set up the local
+ * node ACL and local_se_deve pointers, and add it to
+ * struct list_head tid_dest_list for registration processing
+ * in the loop over tid_dest_list below.
+ */
+ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+ if (!(tidh_new)) {
+ printk(KERN_ERR "Unable to allocate tidh_new\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ INIT_LIST_HEAD(&tidh_new->dest_list);
+ tidh_new->dest_tpg = tpg;
+ tidh_new->dest_node_acl = se_sess->se_node_acl;
+ tidh_new->dest_se_deve = local_se_deve;
+
+ local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ se_sess->se_node_acl, local_se_deve, l_isid,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(local_pr_reg)) {
+ kfree(tidh_new);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ tidh_new->dest_pr_reg = local_pr_reg;
+ /*
+ * The local I_T nexus does not hold any configfs dependencies,
+ * so we set tidh_new->dest_local_nexus=1 to prevent the
+ * configfs_undepend_item() calls in the tid_dest_list loops below.
+ */
+ tidh_new->dest_local_nexus = 1;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+ /*
+ * For a PERSISTENT RESERVE OUT Specify Initiator Ports (SPEC_I_PT)
+ * payload, first extract the TransportID Parameter Data Length and
+ * make sure the value matches the expected SCSI data transfer length.
+ */
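+ /*
+ * The TransportID parameter data length (tpdl) is a big-endian
+ * 32-bit value occupying bytes 24-27 of the PROUT parameter data.
+ */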
+ tpdl = (buf[24] & 0xff) << 24;
+ tpdl |= (buf[25] & 0xff) << 16;
+ tpdl |= (buf[26] & 0xff) << 8;
+ tpdl |= buf[27] & 0xff;
+
+ if ((tpdl + 28) != cmd->data_length) {
+ printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+ " does not equal CDB data_length: %u\n", tpdl,
+ cmd->data_length);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ /*
+ * Start processing the received transport IDs using the
+ * receiving I_T Nexus portal's fabric dependent methods to
+ * obtain the SCSI Initiator Port/Device Identifiers.
+ */
+ ptr = &buf[28];
+
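+ /*
+ * Each TransportID starts with the PROTOCOL IDENTIFIER in the low
+ * nibble of its first byte; the matching fabric module parses the
+ * remainder into an initiator port name (i_str) and, where the
+ * fabric supports it, a separate initiator port identifier
+ * (iport_ptr).
+ */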
+ while (tpdl > 0) {
+ proto_ident = (ptr[0] & 0x0f);
+ dest_tpg = NULL;
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+ tmp_tpg = tmp_port->sep_tpg;
+ if (!(tmp_tpg))
+ continue;
+ tmp_tf_ops = TPG_TFO(tmp_tpg);
+ if (!(tmp_tf_ops))
+ continue;
+ if (!(tmp_tf_ops->get_fabric_proto_ident) ||
+ !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+ continue;
+ /*
+ * Look for the matching proto_ident provided by
+ * the received TransportID
+ */
+ tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+ if (tmp_proto_ident != proto_ident)
+ continue;
+ dest_rtpi = tmp_port->sep_rtpi;
+
+ i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+ tmp_tpg, (const char *)ptr, &tid_len,
+ &iport_ptr);
+ if (!(i_str))
+ continue;
+
+ atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ ret = core_scsi3_tpg_depend_item(tmp_tpg);
+ if (ret != 0) {
+ printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+ " for tmp_tpg\n");
+ atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ /*
+ * Locate the destination initiator ACL to be registered
+ * from the decoded fabric module specific TransportID
+ * at *i_str.
+ */
+ spin_lock_bh(&tmp_tpg->acl_node_lock);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(
+ tmp_tpg, i_str);
+ if (dest_node_acl) {
+ atomic_inc(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ }
+ spin_unlock_bh(&tmp_tpg->acl_node_lock);
+
+ if (!(dest_node_acl)) {
+ core_scsi3_tpg_undepend_item(tmp_tpg);
+ spin_lock(&dev->se_port_lock);
+ continue;
+ }
+
+ ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+ if (ret != 0) {
+ printk(KERN_ERR "configfs_depend_item() failed"
+ " for dest_node_acl->acl_group\n");
+ atomic_dec(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_tpg_undepend_item(tmp_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+
+ dest_tpg = tmp_tpg;
+ printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+ " %s Port RTPI: %hu\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, dest_rtpi);
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ if (!(dest_tpg)) {
+ printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+ " dest_tpg\n");
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+#if 0
+ printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+ " tid_len: %d for %s + %s\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+ tpdl, tid_len, i_str, iport_ptr);
+#endif
+ if (tid_len > tpdl) {
+ printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+ " %u for Transport ID: %s\n", tid_len, ptr);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ /*
+ * Locate the destination struct se_dev_entry pointer for matching
+ * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+ * Target Port.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+ dest_rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ " from destination RTPI: %hu\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_rtpi);
+
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ " failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ " dest_se_deve mapped_lun: %u\n",
+ TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+ /*
+ * Skip any TransportIDs that already have a registration for
+ * this target port.
+ */
+ pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (pr_reg_e) {
+ core_scsi3_put_pr_reg(pr_reg_e);
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+ continue;
+ }
+ /*
+ * Allocate a struct pr_transport_id_holder and setup
+ * the dest_node_acl and dest_se_deve pointers for the
+ * loop below.
+ */
+ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+ GFP_KERNEL);
+ if (!(tidh_new)) {
+ printk(KERN_ERR "Unable to allocate tidh_new\n");
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ INIT_LIST_HEAD(&tidh_new->dest_list);
+ tidh_new->dest_tpg = dest_tpg;
+ tidh_new->dest_node_acl = dest_node_acl;
+ tidh_new->dest_se_deve = dest_se_deve;
+
+ /*
+ * Allocate, but do NOT add the registration for the
+ * TransportID referenced SCSI Initiator port. This is
+ * done because of the following from spc4r17 in section
+ * 6.14.3 wrt SPEC_I_PT:
+ *
+ * "If a registration fails for any initiator port (e.g., if the
+ * logical unit does not have enough resources available to
+ * hold the registration information), no registrations shall be
+ * made, and the command shall be terminated with
+ * CHECK CONDITION status."
+ *
+ * That means we call __core_scsi3_alloc_registration() here,
+ * and then call __core_scsi3_add_registration() in the
+ * 2nd loop which will never fail.
+ */
+ dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, all_tg_pt, aptpl);
+ if (!(dest_pr_reg)) {
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ kfree(tidh_new);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ tidh_new->dest_pr_reg = dest_pr_reg;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ ptr += tid_len;
+ tpdl -= tid_len;
+ tid_len = 0;
+
+ }
+ /*
+ * Go ahead and create registrations from tid_dest_list for the
+ * SPEC_I_PT provided TransportIDs for the *tidh referenced dest_node_acl
+ * and dest_se_deve.
+ *
+ * The SA Reservation Key from the PROUT is set for the
+ * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1
+ * means that the TransportID Initiator port will be
+ * registered on all of the target ports in the SCSI target device.
+ * ALL_TG_PT=0 means the registration will only be for the
+ * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+ * was received.
+ */
+ list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+ dest_tpg = tidh->dest_tpg;
+ dest_node_acl = tidh->dest_node_acl;
+ dest_se_deve = tidh->dest_se_deve;
+ dest_pr_reg = tidh->dest_pr_reg;
+ dest_local_nexus = tidh->dest_local_nexus;
+
+ list_del(&tidh->dest_list);
+ kfree(tidh);
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+ dest_pr_reg, 0, 0);
+
+ printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+ " registered Transport ID for Node: %s%s Mapped LUN:"
+ " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_node_acl->initiatorname, (prf_isid) ?
+ &i_buf[0] : "", dest_se_deve->mapped_lun);
+
+ if (dest_local_nexus)
+ continue;
+
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ }
+
+ return 0;
+out:
+ /*
+ * For the failure case, release everything from tid_dest_list
+ * including *dest_pr_reg and the configfs dependencies.
+ */
+ list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+ dest_tpg = tidh->dest_tpg;
+ dest_node_acl = tidh->dest_node_acl;
+ dest_se_deve = tidh->dest_se_deve;
+ dest_pr_reg = tidh->dest_pr_reg;
+ dest_local_nexus = tidh->dest_local_nexus;
+
+ list_del(&tidh->dest_list);
+ kfree(tidh);
+ /*
+ * Release any extra ALL_TG_PT=1 registrations for
+ * the SPEC_I_PT=1 case.
+ */
+ list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+ &dest_pr_reg->pr_reg_atp_list,
+ pr_reg_atp_mem_list) {
+ list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+ core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+ }
+
+ kfree(dest_pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+ if (dest_local_nexus)
+ continue;
+
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_tpg);
+ }
+ return ret;
+}
+
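+/*
+ * Each registration is emitted into the APTPL buffer as a key=value block
+ * built from the snprintf() formats below; illustrative values only:
+ *
+ *   PR_REG_START: 0
+ *   initiator_fabric=iSCSI
+ *   initiator_node=iqn.1994-05.com.example:initiator
+ *   sa_res_key=12345
+ *   res_holder=0
+ *   res_all_tg_pt=0
+ *   mapped_lun=0
+ *   target_fabric=iSCSI
+ *   target_node=iqn.2003-01.com.example:target
+ *   tpgt=1
+ *   port_rtpi=1
+ *   target_lun=0
+ *   PR_REG_END: 0
+ *
+ * The reservation holder additionally emits res_type= and res_scope=, and
+ * an initiator_sid= line is included when an ISID was present at
+ * registration time.
+ */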
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
+static int __core_scsi3_update_aptpl_buf(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len,
+ int clear_aptpl_metadata)
+{
+ struct se_lun *lun;
+ struct se_portal_group *tpg;
+ struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char tmp[512], isid_buf[32];
+ ssize_t len = 0;
+ int reg_count = 0;
+
+ memset(buf, 0, pr_aptpl_buf_len);
+ /*
+ * Called to clear metadata once APTPL has been deactivated.
+ */
+ if (clear_aptpl_metadata) {
+ snprintf(buf, pr_aptpl_buf_len,
+ "No Registrations or Reservations\n");
+ return 0;
+ }
+ /*
+ * Walk the registration list..
+ */
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+
+ tmp[0] = '\0';
+ isid_buf[0] = '\0';
+ tpg = pr_reg->pr_reg_nacl->se_tpg;
+ lun = pr_reg->pr_reg_tg_pt_lun;
+ /*
+ * Write out any ISID value to APTPL metadata that was included
+ * in the original registration.
+ */
+ if (pr_reg->isid_present_at_reg)
+ snprintf(isid_buf, 32, "initiator_sid=%s\n",
+ pr_reg->pr_reg_isid);
+ /*
+ * Include special metadata if the pr_reg matches the
+ * reservation holder.
+ */
+ if (dev->dev_pr_res_holder == pr_reg) {
+ snprintf(tmp, 512, "PR_REG_START: %d"
+ "\ninitiator_fabric=%s\n"
+ "initiator_node=%s\n%s"
+ "sa_res_key=%llu\n"
+ "res_holder=1\nres_type=%02x\n"
+ "res_scope=%02x\nres_all_tg_pt=%d\n"
+ "mapped_lun=%u\n", reg_count,
+ TPG_TFO(tpg)->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+ pr_reg->pr_res_key, pr_reg->pr_res_type,
+ pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+ pr_reg->pr_res_mapped_lun);
+ } else {
+ snprintf(tmp, 512, "PR_REG_START: %d\n"
+ "initiator_fabric=%s\ninitiator_node=%s\n%s"
+ "sa_res_key=%llu\nres_holder=0\n"
+ "res_all_tg_pt=%d\nmapped_lun=%u\n",
+ reg_count, TPG_TFO(tpg)->get_fabric_name(),
+ pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+ pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+ pr_reg->pr_res_mapped_lun);
+ }
+
+ if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ printk(KERN_ERR "Unable to update renaming"
+ " APTPL metadata\n");
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+ return -1;
+ }
+ len += sprintf(buf+len, "%s", tmp);
+
+ /*
+ * Include information about the associated SCSI target port.
+ */
+ snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+ "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+ " %d\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ TPG_TFO(tpg)->tpg_get_tag(tpg),
+ lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+ if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+ printk(KERN_ERR "Unable to update renaming"
+ " APTPL metadata\n");
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+ return -1;
+ }
+ len += sprintf(buf+len, "%s", tmp);
+ reg_count++;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ if (!(reg_count))
+ len += sprintf(buf+len, "No Registrations or Reservations");
+
+ return 0;
+}
+
+static int core_scsi3_update_aptpl_buf(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len,
+ int clear_aptpl_metadata)
+{
+ int ret;
+
+ spin_lock(&dev->dev_reservation_lock);
+ ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+ clear_aptpl_metadata);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
+}
+
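+/*
+ * APTPL metadata is persisted to /var/target/pr/aptpl_<unit_serial> so that
+ * registrations and reservations can survive a target restart when the
+ * APTPL bit was set.
+ */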
+/*
+ * Called with struct se_device->aptpl_file_mutex held
+ */
+static int __core_scsi3_write_aptpl_to_file(
+ struct se_device *dev,
+ unsigned char *buf,
+ u32 pr_aptpl_buf_len)
+{
+ struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+ struct file *file;
+ struct iovec iov[1];
+ mm_segment_t old_fs;
+ int flags = O_RDWR | O_CREAT | O_TRUNC;
+ char path[512];
+ int ret;
+
+ memset(iov, 0, sizeof(struct iovec));
+ memset(path, 0, 512);
+
+ if (strlen(&wwn->unit_serial[0]) > 512) {
+ printk(KERN_ERR "WWN value for struct se_device does not fit"
+ " into path buffer\n");
+ return -1;
+ }
+
+ snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+ file = filp_open(path, flags, 0600);
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+ " failed\n", path);
+ return -1;
+ }
+
+ iov[0].iov_base = &buf[0];
+ if (!(pr_aptpl_buf_len))
+ iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+ else
+ iov[0].iov_len = pr_aptpl_buf_len;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+ set_fs(old_fs);
+
+ if (ret < 0) {
+ printk("Error writing APTPL metadata file: %s\n", path);
+ filp_close(file, NULL);
+ return -1;
+ }
+ filp_close(file, NULL);
+
+ return 0;
+}
+
+static int core_scsi3_update_and_write_aptpl(
+ struct se_device *dev,
+ unsigned char *in_buf,
+ u32 in_pr_aptpl_buf_len)
+{
+ unsigned char null_buf[64], *buf;
+ u32 pr_aptpl_buf_len;
+ int ret, clear_aptpl_metadata = 0;
+ /*
+ * Can be called with a NULL pointer from PROUT service action CLEAR
+ */
+ if (!(in_buf)) {
+ memset(null_buf, 0, 64);
+ buf = &null_buf[0];
+ /*
+ * This will clear the APTPL metadata to:
+ * "No Registrations or Reservations" status
+ */
+ pr_aptpl_buf_len = 64;
+ clear_aptpl_metadata = 1;
+ } else {
+ buf = in_buf;
+ pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+ }
+
+ ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+ clear_aptpl_metadata);
+ if (ret != 0)
+ return -1;
+ /*
+ * __core_scsi3_write_aptpl_to_file() will call strlen()
+ * on the passed buf to determine pr_aptpl_buf_len.
+ */
+ ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+ if (ret != 0)
+ return -1;
+
+ return ret;
+}
+
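+/*
+ * Emulate PROUT Service Action REGISTER (and REGISTER_AND_IGNORE_EXISTING_KEY
+ * when ignore_key=1) following the behaviors of spc4r17 Table 47: create,
+ * change or remove the registration for the issuing I_T nexus depending on
+ * res_key/sa_res_key, and update APTPL metadata when aptpl=1.
+ */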
+static int core_scsi3_emulate_pro_register(
+ struct se_cmd *cmd,
+ u64 res_key,
+ u64 sa_res_key,
+ int aptpl,
+ int all_tg_pt,
+ int spec_i_pt,
+ int ignore_key)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ /* Used for APTPL metadata w/ UNREGISTER */
+ unsigned char *pr_aptpl_buf = NULL;
+ unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ int pr_holder = 0, ret = 0, type;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ se_tpg = se_sess->se_tpg;
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
+ if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+ PR_REG_ISID_LEN);
+ isid_ptr = &isid_buf[0];
+ }
+ /*
+ * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+ */
+ pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+ if (!(pr_reg_e)) {
+ if (res_key) {
+ printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+ " for SA REGISTER, returning CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * Do nothing but return GOOD status.
+ */
+ if (!(sa_res_key))
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
+ if (!(spec_i_pt)) {
+ /*
+ * Perform the Service Action REGISTER on the Initiator
+ * Port Endpoint that the PRO was received from on the
+ * Logical Unit of the SCSI device server.
+ */
+ ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ se_sess->se_node_acl, se_deve, isid_ptr,
+ sa_res_key, all_tg_pt, aptpl,
+ ignore_key, 0);
+ if (ret != 0) {
+ printk(KERN_ERR "Unable to allocate"
+ " struct t10_pr_registration\n");
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ } else {
+ /*
+ * Register both the Initiator port that received the PROUT
+ * SA REGISTER + SPEC_I_PT=1 and the SCSI Initiator Ports/Devices
+ * named by the TransportIDs in the parameter list, looping
+ * through the fabric dependent parameter data and calling the
+ * logic of core_scsi3_alloc_registration() for each
+ * TransportID provided SCSI Initiator Port/Device.
+ */
+ ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+ isid_ptr, sa_res_key, all_tg_pt, aptpl);
+ if (ret != 0)
+ return ret;
+ }
+ /*
+ * Nothing left to do for the APTPL=0 case.
+ */
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ " REGISTER\n");
+ return 0;
+ }
+ /*
+ * Locate the newly allocated local I_T Nexus *pr_reg, and
+ * update the APTPL metadata information using its
+ * preallocated *pr_reg->pr_aptpl_buf.
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ se_sess->se_node_acl, se_sess);
+
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+ } else {
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = pr_reg_e;
+ type = pr_reg->pr_res_type;
+
+ if (!(ignore_key)) {
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key,
+ pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ }
+ if (spec_i_pt) {
+ printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+ " set while sa_res_key=0\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * An existing ALL_TG_PT=1 registration being released
+ * must also set ALL_TG_PT=1 in the incoming PROUT.
+ */
+ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+ printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ " registration exists, but ALL_TG_PT=1 bit not"
+ " present in received PROUT\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+ /*
+ * Allocate APTPL metadata buffer used for UNREGISTER ops
+ */
+ if (aptpl) {
+ pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+ GFP_KERNEL);
+ if (!(pr_aptpl_buf)) {
+ printk(KERN_ERR "Unable to allocate"
+ " pr_aptpl_buf\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ }
+ /*
+ * An sa_res_key of zero unregisters the Reservation Key for the
+ * registered I_T Nexus; a non-zero sa_res_key changes the
+ * Reservation Key for the registered I_T Nexus.
+ */
+ if (!(sa_res_key)) {
+ pr_holder = core_scsi3_check_implict_release(
+ SE_DEV(cmd), pr_reg);
+ if (pr_holder < 0) {
+ kfree(pr_aptpl_buf);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ spin_lock(&pr_tmpl->registration_lock);
+ /*
+ * Release all ALL_TG_PT=1 registrations for the matching SCSI
+ * Initiator Port and matching pr_res_key.
+ */
+ if (pr_reg->pr_reg_all_tg_pt) {
+ list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ if (!(pr_reg_p->pr_reg_all_tg_pt))
+ continue;
+
+ if (pr_reg_p->pr_res_key != res_key)
+ continue;
+
+ if (pr_reg == pr_reg_p)
+ continue;
+
+ if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg_p->pr_reg_nacl->initiatorname))
+ continue;
+
+ __core_scsi3_free_registration(dev,
+ pr_reg_p, NULL, 0);
+ }
+ }
+ /*
+ * Release the calling I_T Nexus registration now..
+ */
+ __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+ NULL, 1);
+ /*
+ * From spc4r17, section 5.7.11.3 Unregistering
+ *
+ * If the persistent reservation is a registrants only
+ * type, the device server shall establish a unit
+ * attention condition for the initiator port associated
+ * with every registered I_T nexus except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, with the additional sense code set to
+ * RESERVATIONS RELEASED.
+ */
+ if (pr_holder &&
+ ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+ (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ core_scsi3_ua_allocate(
+ pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for UNREGISTER\n");
+ return 0;
+ }
+
+ ret = core_scsi3_update_and_write_aptpl(dev,
+ &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated"
+ " for UNREGISTER\n");
+ }
+
+ kfree(pr_aptpl_buf);
+ return ret;
+ } else {
+ /*
+ * Increment PRgeneration counter for struct se_device"
+ * upon a successful REGISTER, see spc4r17 section 6.3.2
+ * READ_KEYS service action.
+ */
+ pr_reg->pr_res_generation = core_scsi3_pr_generation(
+ SE_DEV(cmd));
+ pr_reg->pr_res_key = sa_res_key;
+ printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ " Key for %s to: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+ (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+ pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ core_scsi3_put_pr_reg(pr_reg);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for REGISTER\n");
+ return 0;
+ }
+
+ ret = core_scsi3_update_and_write_aptpl(dev,
+ &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ printk("SPC-3 PR: Set APTPL Bit Activated"
+ " for REGISTER\n");
+ }
+
+ kfree(pr_aptpl_buf);
+ core_scsi3_put_pr_reg(pr_reg);
+ }
+ }
+ return 0;
+}
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ return "Write Exclusive Access";
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ return "Exclusive Access";
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ return "Write Exclusive Access, Registrants Only";
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ return "Exclusive Access, Registrants Only";
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ return "Write Exclusive Access, All Registrants";
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ return "Exclusive Access, All Registrants";
+ default:
+ break;
+ }
+
+ return "Unknown SPC-3 PR Type";
+}
+
+static int core_scsi3_pro_reserve(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int ret, prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ se_tpg = se_sess->se_tpg;
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for RESERVE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * An application client creates a persistent reservation by issuing
+ * a PERSISTENT RESERVE OUT command with RESERVE service action through
+ * a registered I_T nexus with the following parameters:
+ * a) RESERVATION KEY set to the value of the reservation key that is
+ * registered with the logical unit for the I_T nexus; and
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+ " does not match existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * From above:
+ * b) TYPE field and SCOPE field set to the persistent reservation
+ * being created.
+ *
+ * Only one persistent reservation is allowed at a time per logical unit
+ * and that persistent reservation has a scope of LU_SCOPE.
+ */
+ if (scope != PR_SCOPE_LU_SCOPE) {
+ printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * See if we have an existing PR reservation holder pointer at
+ * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+ * *pr_res_holder.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if ((pr_res_holder)) {
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If the device server receives a PERSISTENT RESERVE OUT
+ * command from an I_T nexus other than a persistent reservation
+ * holder (see 5.7.10) that attempts to create a persistent
+ * reservation when a persistent reservation already exists for
+ * the logical unit, then the command shall be completed with
+ * RESERVATION CONFLICT status.
+ */
+ if (pr_res_holder != pr_reg) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ " [%s]: %s while reservation already held by"
+ " [%s]: %s, returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If a persistent reservation holder attempts to modify the
+ * type or scope of an existing persistent reservation, the
+ * command shall be completed with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type != type) ||
+ (pr_res_holder->pr_res_scope != scope)) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ " [%s]: %s trying to change TYPE and/or SCOPE,"
+ " while reservation already held by [%s]: %s,"
+ " returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.9: Reserving:
+ *
+ * If the device server receives a PERSISTENT RESERVE OUT
+ * command with RESERVE service action where the TYPE field and
+ * the SCOPE field contain the same values as the existing type
+ * and scope from a persistent reservation holder, it shall not
+ * make any change to the existing persistent reservation and
+ * shall complete the command with GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ /*
+ * Otherwise, our *pr_reg becomes the PR reservation holder for said
+ * TYPE/SCOPE. Also set the received scope and type in *pr_reg.
+ */
+ pr_reg->pr_res_scope = scope;
+ pr_reg->pr_res_type = type;
+ pr_reg->pr_res_holder = 1;
+ dev->dev_pr_res_holder = pr_reg;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ " for RESERVE\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ int ret = 0;
+
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+ break;
+ default:
+ printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+ " 0x%02x\n", type);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+ struct se_device *dev,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int explict)
+{
+ struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Go ahead and release the current PR reservation holder.
+ */
+ dev->dev_pr_res_holder = NULL;
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ tfo->get_fabric_name(), se_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "");
+ /*
+ * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+ */
+ pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
+static int core_scsi3_emulate_pro_release(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ int ret, all_reg = 0;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for RELEASE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing:
+ *
+ * If there is no persistent reservation or in response to a persistent
+ * reservation release request from a registered I_T nexus that is not a
+ * persistent reservation holder (see 5.7.10), the device server shall
+ * do the following:
+ *
+ * a) Not release the persistent reservation, if any;
+ * b) Not remove any registrations; and
+ * c) Complete the command with GOOD status.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ /*
+ * No persistent reservation, return GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ all_reg = 1;
+
+ if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+ /*
+ * Non 'All Registrants' PR Type cases:
+ * a release request from a registered I_T nexus that is not
+ * the persistent reservation holder returns GOOD status.
+ */
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing:
+ *
+ * Only the persistent reservation holder (see 5.7.10) is allowed to
+ * release a persistent reservation.
+ *
+ * An application client releases the persistent reservation by issuing
+ * a PERSISTENT RESERVE OUT command with RELEASE service action through
+ * an I_T nexus that is a persistent reservation holder with the
+ * following parameters:
+ *
+ * a) RESERVATION KEY field set to the value of the reservation key
+ * that is registered with the logical unit for the I_T nexus;
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+ " does not match existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * From spc4r17 Section 5.7.11.2 Releasing and above:
+ *
+ * b) TYPE field and SCOPE field set to match the persistent
+ * reservation being released.
+ */
+ if ((pr_res_holder->pr_res_type != type) ||
+ (pr_res_holder->pr_res_scope != scope)) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+ " reservation from [%s]: %s with different TYPE "
+ "and/or SCOPE while reservation already held by"
+ " [%s]: %s, returning RESERVATION_CONFLICT\n",
+ CMD_TFO(cmd)->get_fabric_name(),
+ se_sess->se_node_acl->initiatorname,
+ TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_holder->pr_reg_nacl->initiatorname);
+
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * In response to a persistent reservation release request from the
+ * persistent reservation holder the device server shall perform a
+ * release by doing the following as an uninterrupted series of actions:
+ * a) Release the persistent reservation;
+ * b) Not remove any registration(s);
+ * c) If the released persistent reservation is a registrants only type
+ * or all registrants type persistent reservation,
+ * the device server shall establish a unit attention condition for
+ * the initiator port associated with every registered
+ * I_T nexus other than the I_T nexus on which the PERSISTENT
+ * RESERVE OUT command with RELEASE service action was received,
+ * with the additional sense code set to RESERVATIONS RELEASED; and
+ * d) If the persistent reservation is of any other type, the device
+ * server shall not establish a unit attention condition.
+ */
+ __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+ pr_reg, 1);
+
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+ (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+ (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+ (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ /*
+ * No UNIT ATTENTION conditions are established for
+ * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS, so
+ * go ahead and check for the APTPL=1 update+write below.
+ */
+ goto write_aptpl;
+ }
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+ pr_reg_list) {
+ /*
+ * Do not establish a UNIT ATTENTION condition
+ * for the calling I_T Nexus
+ */
+ if (pr_reg_p == pr_reg)
+ continue;
+
+ core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg);
+ return 0;
+}
+
+static int core_scsi3_emulate_pro_clear(
+ struct se_cmd *cmd,
+ u64 res_key)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_node_acl *pr_reg_nacl;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+ u32 pr_res_mapped_lun = 0;
+ int calling_it_nexus = 0;
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ se_sess->se_node_acl, se_sess);
+ if (!(pr_reg_n)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for CLEAR\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * From spc4r17 section 5.7.11.6, Clearing:
+ *
+ * Any application client may release the persistent reservation and
+ * remove all registrations from a device server by issuing a
+ * PERSISTENT RESERVE OUT command with CLEAR service action through a
+ * registered I_T nexus with the following parameter:
+ *
+ * a) RESERVATION KEY field set to the value of the reservation key
+ * that is registered with the logical unit for the I_T nexus.
+ */
+ if (res_key != pr_reg_n->pr_res_key) {
+ printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * a) Release the persistent reservation, if any;
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder) {
+ struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ pr_res_holder, 0);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * b) Remove all registration(s) (see spc4r17 5.7.7);
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg, NULL,
+ calling_it_nexus);
+ /*
+ * e) Establish a unit attention condition for the initiator
+ * port associated with every registered I_T nexus other
+ * than the I_T nexus on which the PERSISTENT RESERVE OUT
+ * command with CLEAR service action was received, with the
+ * additional sense code set to RESERVATIONS PREEMPTED.
+ */
+ if (!(calling_it_nexus))
+ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+ 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+ CMD_TFO(cmd)->get_fabric_name());
+
+ if (pr_tmpl->pr_aptpl_active) {
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ " for CLEAR\n");
+ }
+
+ core_scsi3_pr_generation(dev);
+ return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+ struct se_device *dev,
+ struct t10_pr_registration *pr_reg,
+ struct list_head *preempt_and_abort_list,
+ int type,
+ int scope,
+ int abort)
+{
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN];
+ int prf_isid;
+
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Do an implicit RELEASE of the existing reservation.
+ */
+ if (dev->dev_pr_res_holder)
+ __core_scsi3_complete_pro_release(dev, nacl,
+ dev->dev_pr_res_holder, 0);
+
+ dev->dev_pr_res_holder = pr_reg;
+ pr_reg->pr_res_holder = 1;
+ pr_reg->pr_res_type = type;
+ pr_reg->pr_res_scope = scope;
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+ core_scsi3_pr_dump_type(type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+ tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+ nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+ /*
+ * For PREEMPT_AND_ABORT, add the preempting reservation's
+ * struct t10_pr_registration to the list that will be compared
+ * against received CDBs..
+ */
+ if (preempt_and_abort_list)
+ list_add_tail(&pr_reg->pr_reg_abort_list,
+ preempt_and_abort_list);
+}
+
+static void core_scsi3_release_preempt_and_abort(
+ struct list_head *preempt_and_abort_list,
+ struct t10_pr_registration *pr_reg_holder)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+ pr_reg_abort_list) {
+
+ list_del(&pr_reg->pr_reg_abort_list);
+ if (pr_reg_holder == pr_reg)
+ continue;
+ if (pr_reg->pr_res_holder) {
+ printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+ continue;
+ }
+
+ pr_reg->pr_reg_deve = NULL;
+ pr_reg->pr_reg_nacl = NULL;
+ kfree(pr_reg->pr_aptpl_buf);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ }
+}
+
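+/*
+ * Intended for the LUN_RESET path invoked during PREEMPT_AND_ABORT:
+ * returns 0 when the command's pr_res_key matches a registration on
+ * preempt_and_abort_list (i.e. the command belongs to a preempted
+ * I_T nexus), and 1 otherwise.
+ */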
+int core_scsi3_check_cdb_abort_and_preempt(
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *cmd)
+{
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+ pr_reg_abort_list) {
+ if (pr_reg->pr_res_key == cmd->pr_res_key)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int core_scsi3_pro_preempt(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key,
+ u64 sa_res_key,
+ int abort)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve;
+ struct se_node_acl *pr_reg_nacl;
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct list_head preempt_and_abort_list;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ u32 pr_res_mapped_lun = 0;
+ int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+ int prh_type = 0, prh_scope = 0, ret;
+
+ if (!(se_sess))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg_n)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ " PR_REGISTERED *pr_reg for PREEMPT%s\n",
+ (abort) ? "_AND_ABORT" : "");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ if (pr_reg_n->pr_res_key != res_key) {
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ if (scope != PR_SCOPE_LU_SCOPE) {
+ printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ INIT_LIST_HEAD(&preempt_and_abort_list);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (pr_res_holder &&
+ ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+ all_reg = 1;
+
+ if (!(all_reg) && !(sa_res_key)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+ *
+ * If the SERVICE ACTION RESERVATION KEY field does not identify a
+ * persistent reservation holder or there is no persistent reservation
+ * holder (i.e., there is no persistent reservation), then the device
+ * server shall perform a preempt by doing the following in an
+ * uninterrupted series of actions. (See below..)
+ */
+ if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+ /*
+ * No existing or SA Reservation Key matching reservations..
+ *
+ * PROUT SA PREEMPT with All Registrant type reservations are
+ * allowed to be processed without a matching SA Reservation Key
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+ /*
+ * Removing of registrations in non all registrants
+ * type reservations without a matching SA reservation
+ * key.
+ *
+ * a) Remove the registrations for all I_T nexuses
+ * specified by the SERVICE ACTION RESERVATION KEY
+ * field;
+ * b) Ignore the contents of the SCOPE and TYPE fields;
+ * c) Process tasks as defined in 5.7.1; and
+ * d) Establish a unit attention condition for the
+ * initiator port associated with every I_T nexus
+ * that lost its registration other than the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command
+ * was received, with the additional sense code set
+ * to REGISTRATIONS PREEMPTED.
+ */
+ if (!(all_reg)) {
+ if (pr_reg->pr_res_key != sa_res_key)
+ continue;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list :
+ NULL, calling_it_nexus);
+ released_regs++;
+ } else {
+ /*
+ * Case for any existing all registrants type
+ * reservation, follow logic in spc4r17 section
+ * 5.7.11.4 Preempting, Table 52 and Figure 7.
+ *
+ * For a ZERO SA Reservation key, release
+ * all other registrations and do an implicit
+ * release of the active persistent reservation.
+ *
+ * For a non-ZERO SA Reservation key, only
+ * release the matching reservation key from
+ * registrations.
+ */
+ if ((sa_res_key) &&
+ (pr_reg->pr_res_key != sa_res_key))
+ continue;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list :
+ NULL, 0);
+ released_regs++;
+ }
+ if (!(calling_it_nexus))
+ core_scsi3_ua_allocate(pr_reg_nacl,
+ pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+ * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+ * RESERVATION KEY field to a value that does not match any
+ * registered reservation key, then the device server shall
+ * complete the command with RESERVATION CONFLICT status.
+ */
+ if (!(released_regs)) {
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+ * For an existing all registrants type reservation
+ * with a zero SA reservation key, preempt the existing
+ * reservation with the new PR type and scope.
+ */
+ if (pr_res_holder && all_reg && !(sa_res_key)) {
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (abort) ? &preempt_and_abort_list : NULL,
+ type, scope, abort);
+
+ if (abort)
+ core_scsi3_release_preempt_and_abort(
+ &preempt_and_abort_list, pr_reg_n);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg_n->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+ " metadata for PREEMPT%s\n", (abort) ?
+ "_AND_ABORT" : "");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(SE_DEV(cmd));
+ return 0;
+ }
+ /*
+ * The PREEMPTing SA reservation key matches that of the
+ * existing persistent reservation. First, check whether
+ * we are preempting our own reservation.
+ * From spc4r17, section 5.7.11.4.3 Preempting
+ * persistent reservations and registration handling
+ *
+ * If an all registrants persistent reservation is not
+ * present, it is not an error for the persistent
+ * reservation holder to preempt itself (i.e., a
+ * PERSISTENT RESERVE OUT with a PREEMPT service action
+ * or a PREEMPT AND ABORT service action with the
+ * SERVICE ACTION RESERVATION KEY value equal to the
+ * persistent reservation holder's reservation key that
+ * is received from the persistent reservation holder).
+ * In that case, the device server shall establish the
+ * new persistent reservation and maintain the
+ * registration.
+ */
+ prh_type = pr_res_holder->pr_res_type;
+ prh_scope = pr_res_holder->pr_res_scope;
+ /*
+ * If the SERVICE ACTION RESERVATION KEY field identifies a
+ * persistent reservation holder (see 5.7.10), the device
+ * server shall perform a preempt by doing the following as
+ * an uninterrupted series of actions:
+ *
+ * a) Release the persistent reservation for the holder
+ * identified by the SERVICE ACTION RESERVATION KEY field;
+ */
+ if (pr_reg_n != pr_res_holder)
+ __core_scsi3_complete_pro_release(dev,
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0);
+ /*
+ * b) Remove the registrations for all I_T nexuses identified
+ * by the SERVICE ACTION RESERVATION KEY field, except the
+ * I_T nexus that is being used for the PERSISTENT RESERVE
+ * OUT command. If an all registrants persistent reservation
+ * is present and the SERVICE ACTION RESERVATION KEY field
+ * is set to zero, then all registrations shall be removed
+ * except for that of the I_T nexus that is being used for
+ * the PERSISTENT RESERVE OUT command;
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ if (pr_reg->pr_res_key != sa_res_key)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (abort) ? &preempt_and_abort_list : NULL,
+ calling_it_nexus);
+ /*
+ * e) Establish a unit attention condition for the initiator
+ * port associated with every I_T nexus that lost its
+ * persistent reservation and/or registration, with the
+ * additional sense code set to REGISTRATIONS PREEMPTED;
+ */
+ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * c) Establish a persistent reservation for the preempting
+ * I_T nexus using the contents of the SCOPE and TYPE fields;
+ */
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (abort) ? &preempt_and_abort_list : NULL,
+ type, scope, abort);
+ /*
+ * d) Process tasks as defined in 5.7.1;
+ * e) See above..
+ * f) If the type or scope has changed, then for every I_T nexus
+ * whose reservation key was not removed, except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, the device server shall establish a unit
+ * attention condition for the initiator port associated with
+ * that I_T nexus, with the additional sense code set to
+ * RESERVATIONS RELEASED. If the type or scope have not
+ * changed, then no unit attention condition(s) shall be
+ * established for this reason.
+ */
+ if ((prh_type != type) || (prh_scope != scope)) {
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+ pr_reg->pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+	 * Call LUN_RESET logic upon the list of struct t10_pr_registration;
+	 * all received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+ *
+ * From there, core_scsi3_release_preempt_and_abort() will
+ * release every registration in the list (which have already
+ * been removed from the primary pr_reg list), except the
+ * new persistent reservation holder, the calling Initiator Port.
+ */
+ if (abort) {
+ core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+ core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+ pr_reg_n);
+ }
+
+ if (pr_tmpl->pr_aptpl_active) {
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &pr_reg_n->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+ "%s\n", (abort) ? "_AND_ABORT" : "");
+ }
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(SE_DEV(cmd));
+ return 0;
+}
+
+static int core_scsi3_emulate_pro_preempt(
+ struct se_cmd *cmd,
+ int type,
+ int scope,
+ u64 res_key,
+ u64 sa_res_key,
+ int abort)
+{
+ int ret = 0;
+
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ ret = core_scsi3_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, abort);
+ break;
+ default:
+ printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+ " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return ret;
+}
+
+
+static int core_scsi3_emulate_pro_register_and_move(
+ struct se_cmd *cmd,
+ u64 res_key,
+ u64 sa_res_key,
+ int aptpl,
+ int unreg)
+{
+ struct se_session *se_sess = SE_SESS(cmd);
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+ struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+ struct se_port *se_port;
+ struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+ struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+ struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *initiator_str;
+ char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ u32 tid_len, tmp_tid_len;
+ int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+ unsigned short rtpi;
+ unsigned char proto_ident;
+
+ if (!(se_sess) || !(se_lun)) {
+ printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ memset(dest_iport, 0, 64);
+ memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ se_tpg = se_sess->se_tpg;
+ tf_ops = TPG_TFO(se_tpg);
+ se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ /*
+ * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ *
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ se_sess);
+ if (!(pr_reg)) {
+ printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+ " *pr_reg for REGISTER_AND_MOVE\n");
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+	 * The provided reservation key must match the existing reservation key
+ * provided during this initiator's I_T nexus registration.
+ */
+ if (res_key != pr_reg->pr_res_key) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+ " res_key: 0x%016Lx does not match existing SA REGISTER"
+ " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+ /*
+	 * The service action reservation key needs to be non-zero
+ */
+ if (!(sa_res_key)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+ " sa_res_key\n");
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * Determine the Relative Target Port Identifier where the reservation
+ * will be moved to for the TransportID containing SCSI initiator WWN
+ * information.
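+	 *
+	 * In the parameter list below, the RELATIVE TARGET PORT IDENTIFIER
+	 * is carried in bytes 18-19, the TransportID parameter data length
+	 * in bytes 20-23, and the TransportID itself starts at byte 24
+	 * (see spc4r17 section 6.14.4).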
+ */
+ rtpi = (buf[18] & 0xff) << 8;
+ rtpi |= buf[19] & 0xff;
+ tid_len = (buf[20] & 0xff) << 24;
+ tid_len |= (buf[21] & 0xff) << 16;
+ tid_len |= (buf[22] & 0xff) << 8;
+ tid_len |= buf[23] & 0xff;
+
+ if ((tid_len + 24) != cmd->data_length) {
+ printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+ " does not equal CDB data_length: %u\n", tid_len,
+ cmd->data_length);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+ if (se_port->sep_rtpi != rtpi)
+ continue;
+ dest_se_tpg = se_port->sep_tpg;
+ if (!(dest_se_tpg))
+ continue;
+ dest_tf_ops = TPG_TFO(dest_se_tpg);
+ if (!(dest_tf_ops))
+ continue;
+
+ atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&dev->se_port_lock);
+
+ ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+ if (ret != 0) {
+ printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+ " for dest_se_tpg\n");
+ atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ spin_lock(&dev->se_port_lock);
+ break;
+ }
+ spin_unlock(&dev->se_port_lock);
+
+ if (!(dest_se_tpg) || (!dest_tf_ops)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ " fabric ops from Relative Target Port Identifier:"
+ " %hu\n", rtpi);
+ core_scsi3_put_pr_reg(pr_reg);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ proto_ident = (buf[24] & 0x0f);
+#if 0
+ printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+ " 0x%02x\n", proto_ident);
+#endif
+ if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+ " proto_ident: 0x%02x does not match ident: 0x%02x"
+ " from fabric: %s\n", proto_ident,
+ dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+ dest_tf_ops->get_fabric_name());
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+			" contain a valid tpg_parse_pr_out_transport_id"
+ " function pointer\n");
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+ initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+ (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+ if (!(initiator_str)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ " initiator_str from Transport ID\n");
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+ " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+ "port" : "device", initiator_str, (iport_ptr != NULL) ?
+ iport_ptr : "");
+ /*
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action specifies a TransportID that is the same as the initiator port
+ * of the I_T nexus for the command received, then the command shall
+ * be terminated with CHECK CONDITION status, with the sense key set to
+ * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+ * IN PARAMETER LIST.
+ */
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ matching_iname = (!strcmp(initiator_str,
+ pr_reg_nacl->initiatorname)) ? 1 : 0;
+ if (!(matching_iname))
+ goto after_iport_check;
+
+ if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ " matches: %s on received I_T Nexus\n", initiator_str,
+ pr_reg_nacl->initiatorname);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+ printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ " matches: %s %s on received I_T Nexus\n",
+ initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_isid);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+after_iport_check:
+ /*
+ * Locate the destination struct se_node_acl from the received Transport ID
+ */
+ spin_lock_bh(&dest_se_tpg->acl_node_lock);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+ initiator_str);
+ if (dest_node_acl) {
+ atomic_inc(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_inc();
+ }
+ spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+ if (!(dest_node_acl)) {
+ printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ " TransportID%s\n", dest_tf_ops->get_fabric_name(),
+ initiator_str);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+ if (ret != 0) {
+		printk(KERN_ERR "core_scsi3_nodeacl_depend_item() failed for"
+ " dest_node_acl\n");
+ atomic_dec(&dest_node_acl->acl_pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_node_acl = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+ dest_node_acl->initiatorname);
+#endif
+ /*
+ * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+ * PORT IDENTIFIER.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+ if (!(dest_se_deve)) {
+ printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+ if (ret < 0) {
+ printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ atomic_dec(&dest_se_deve->pr_ref_count);
+ smp_mb__after_atomic_dec();
+ dest_se_deve = NULL;
+ ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ goto out;
+ }
+#if 0
+ printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ " ACL for dest_se_deve->mapped_lun: %u\n",
+ dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+ dest_se_deve->mapped_lun);
+#endif
+ /*
+	 * A persistent reservation must already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action.
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!(pr_res_holder)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ " currently held\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ goto out;
+ }
+ /*
+	 * The I_T nexus on which the command was received must be the
+	 * reservation holder.
+ *
+ * From spc4r17 section 5.7.8 Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ */
+ if (pr_res_holder != pr_reg) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ " Nexus is not reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ /*
+ * From spc4r17 section 5.7.8: registering and moving reservation
+ *
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action is received and the established persistent reservation is a
+ * Write Exclusive - All Registrants type or Exclusive Access -
+ * All Registrants type reservation, then the command shall be completed
+ * with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ " reservation for type: %s\n",
+ core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ goto out;
+ }
+ pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ /*
+ * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+ */
+ type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+ /*
+ * c) Associate the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field with the I_T nexus specified as the
+ * destination of the register and move, where:
+ * A) The I_T nexus is specified by the TransportID and the
+ * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+ * B) Regardless of the TransportID format used, the association for
+ * the initiator port is based on either the initiator port name
+ * (see 3.1.71) on SCSI transport protocols where port names are
+ * required or the initiator port identifier (see 3.1.70) on SCSI
+ * transport protocols where port names are not required;
+ * d) Register the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field;
+ * e) Retain the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field and associated information;
+ *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+ * register an I_T nexus that is already registered with the same
+ * reservation key or a different reservation key.
+ */
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (!(dest_pr_reg)) {
+ ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_node_acl, dest_se_deve, iport_ptr,
+ sa_res_key, 0, aptpl, 2, 1);
+ if (ret != 0) {
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ new_reg = 1;
+ }
+ /*
+ * f) Release the persistent reservation for the persistent reservation
+	 * holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+	 * command with a REGISTER AND MOVE service action was received);
+ */
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ dev->dev_pr_res_holder, 0);
+ /*
+ * g) Move the persistent reservation to the specified I_T nexus using
+ * the same scope and type as the persistent reservation released in
+ * item f); and
+ */
+ dev->dev_pr_res_holder = dest_pr_reg;
+ dest_pr_reg->pr_res_holder = 1;
+ dest_pr_reg->pr_res_type = type;
+	dest_pr_reg->pr_res_scope = scope;
+ prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+ PR_REG_ISID_ID_LEN);
+ /*
+ * Increment PRGeneration for existing registrations..
+ */
+ if (!(new_reg))
+ dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ " created new reservation holder TYPE: %s on object RTPI:"
+ " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+ core_scsi3_pr_dump_type(type), rtpi,
+ dest_pr_reg->pr_res_generation);
+ printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+ " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+ tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+ (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+ dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+ iport_ptr : "");
+ /*
+ * It is now safe to release configfs group dependencies for destination
+ * of Transport ID Initiator Device/Port Identifier
+ */
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+ /*
+ * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+ * nexus on which PERSISTENT RESERVE OUT command was received.
+ */
+ if (unreg) {
+ spin_lock(&pr_tmpl->registration_lock);
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+ spin_unlock(&pr_tmpl->registration_lock);
+ } else
+ core_scsi3_put_pr_reg(pr_reg);
+
+ /*
+ * Clear the APTPL metadata if APTPL has been disabled, otherwise
+ * write out the updated metadata to struct file for this SCSI device.
+ */
+ if (!(aptpl)) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+ printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ " REGISTER_AND_MOVE\n");
+ } else {
+ pr_tmpl->pr_aptpl_active = 1;
+ ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ &dest_pr_reg->pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len);
+ if (!(ret))
+ printk("SPC-3 PR: Set APTPL Bit Activated for"
+ " REGISTER_AND_MOVE\n");
+ }
+
+ core_scsi3_put_pr_reg(dest_pr_reg);
+ return 0;
+out:
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ if (dest_node_acl)
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+}
+
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
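+	/*
+	 * The reservation key fields in the PR OUT parameter list are
+	 * 8-byte big-endian values; assemble the result from the two
+	 * 32-bit big-endian halves read from the passed buffer.
+	 */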
+ __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+ __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+{
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u64 res_key, sa_res_key;
+ int sa, scope, type, aptpl;
+ int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+ /*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+ * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+ */
+ if (!(SE_SESS(cmd)))
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ if (cmd->data_length < 24) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+ " length too small: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+ * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+ */
+ sa = (cdb[1] & 0x1f);
+ scope = (cdb[2] & 0xf0);
+ type = (cdb[2] & 0x0f);
+ /*
+ * From PERSISTENT_RESERVE_OUT parameter list (payload)
+ */
+ res_key = core_scsi3_extract_reservation_key(&buf[0]);
+ sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+ /*
+ * REGISTER_AND_MOVE uses a different SA parameter list containing
+ * SCSI TransportIDs.
+ */
+ if (sa != PRO_REGISTER_AND_MOVE) {
+ spec_i_pt = (buf[20] & 0x08);
+ all_tg_pt = (buf[20] & 0x04);
+ aptpl = (buf[20] & 0x01);
+ } else {
+ aptpl = (buf[17] & 0x01);
+ unreg = (buf[17] & 0x02);
+ }
+ /*
+ * SPEC_I_PT=1 is only valid for Service action: REGISTER
+ */
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ /*
+ * From spc4r17 section 6.14:
+ *
+ * If the SPEC_I_PT bit is set to zero, the service action is not
+ * REGISTER AND MOVE, and the parameter list length is not 24, then
+ * the command shall be terminated with CHECK CONDITION status, with
+ * the sense key set to ILLEGAL REQUEST, and the additional sense
+ * code set to PARAMETER LIST LENGTH ERROR.
+ */
+ if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ (cmd->data_length != 24)) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+ " list length: %u\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ }
+ /*
+	 * core_scsi3_emulate_pro_* function parameters
+ * are defined by spc4r17 Table 174:
+ * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+ */
+ switch (sa) {
+ case PRO_REGISTER:
+ return core_scsi3_emulate_pro_register(cmd,
+ res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+ case PRO_RESERVE:
+ return core_scsi3_emulate_pro_reserve(cmd,
+ type, scope, res_key);
+ case PRO_RELEASE:
+ return core_scsi3_emulate_pro_release(cmd,
+ type, scope, res_key);
+ case PRO_CLEAR:
+ return core_scsi3_emulate_pro_clear(cmd, res_key);
+ case PRO_PREEMPT:
+ return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, 0);
+ case PRO_PREEMPT_AND_ABORT:
+ return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, 1);
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ return core_scsi3_emulate_pro_register(cmd,
+ 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+ case PRO_REGISTER_AND_MOVE:
+ return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+ sa_res_key, aptpl, unreg);
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 add_len = 0, off = 8;
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
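+	/*
+	 * READ_KEYS parameter data: PRGENERATION in bytes 0-3, ADDITIONAL
+	 * LENGTH in bytes 4-7, followed by the list of 8-byte registered
+	 * reservation keys starting at byte 8.
+	 */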
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&T10_RES(su_dev)->registration_lock);
+ list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ pr_reg_list) {
+ /*
+		 * Check for overflow of the 8-byte PRI READ_KEYS payload and
+ * next reservation key list descriptor.
+ */
+ if ((add_len + 8) > (cmd->data_length - 8))
+ break;
+
+ buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+ buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+ add_len += 8;
+ }
+ spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u64 pr_res_key;
+ u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
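+	/*
+	 * READ_RESERVATION parameter data: PRGENERATION in bytes 0-3,
+	 * ADDITIONAL LENGTH in bytes 4-7, RESERVATION KEY in bytes 8-15,
+	 * and SCOPE/TYPE in byte 21 when a reservation is held.
+	 */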
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+ spin_lock(&se_dev->dev_reservation_lock);
+ pr_reg = se_dev->dev_pr_res_holder;
+ if ((pr_reg)) {
+ /*
+ * Set the hardcoded Additional Length
+ */
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ if (cmd->data_length < 22) {
+ spin_unlock(&se_dev->dev_reservation_lock);
+ return 0;
+ }
+ /*
+ * Set the Reservation key.
+ *
+ * From spc4r17, section 5.7.10:
+ * A persistent reservation holder has its reservation key
+ * returned in the parameter data from a PERSISTENT
+ * RESERVE IN command with READ RESERVATION service action as
+ * follows:
+ * a) For a persistent reservation of the type Write Exclusive
+		 *   - All Registrants or Exclusive Access - All Registrants,
+ * the reservation key shall be set to zero; or
+ * b) For all other persistent reservation types, the
+ * reservation key shall be set to the registered
+ * reservation key for the I_T nexus that holds the
+ * persistent reservation.
+ */
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ pr_res_key = 0;
+ else
+ pr_res_key = pr_reg->pr_res_key;
+
+ buf[8] = ((pr_res_key >> 56) & 0xff);
+ buf[9] = ((pr_res_key >> 48) & 0xff);
+ buf[10] = ((pr_res_key >> 40) & 0xff);
+ buf[11] = ((pr_res_key >> 32) & 0xff);
+ buf[12] = ((pr_res_key >> 24) & 0xff);
+ buf[13] = ((pr_res_key >> 16) & 0xff);
+ buf[14] = ((pr_res_key >> 8) & 0xff);
+ buf[15] = (pr_res_key & 0xff);
+ /*
+ * Set the SCOPE and TYPE
+ */
+ buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ }
+ spin_unlock(&se_dev->dev_reservation_lock);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u16 add_len = 8; /* Hardcoded to 8. */
+
+ if (cmd->data_length < 6) {
+ printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ " %u too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+	buf[0] = ((add_len >> 8) & 0xff);
+ buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+ buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ buf[3] |= 0x80;
+ /*
+ * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+ */
+ buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+ * PTPL_A: Persistence across Target Power Loss Active bit
+ */
+ if (pr_tmpl->pr_aptpl_active)
+ buf[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+ */
+ buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+ struct se_device *se_dev = SE_DEV(cmd);
+ struct se_node_acl *se_nacl;
+ struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
+ unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+ u32 off = 8; /* off into first Full Status descriptor */
+ int format_code = 0;
+
+ if (cmd->data_length < 8) {
+ printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+ buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+ buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+ buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+ buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
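+	/*
+	 * Each full status descriptor built below is laid out as: an 8-byte
+	 * RESERVATION KEY, 4 reserved bytes, the ALL_TG_PT/PR_HOLDER flags
+	 * byte, a SCOPE/TYPE byte, 4 reserved bytes, a 2-byte RELATIVE
+	 * TARGET PORT IDENTIFIER, a 4-byte ADDITIONAL DESCRIPTOR LENGTH,
+	 * and finally the $FABRIC_MOD TransportID.
+	 */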
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ se_nacl = pr_reg->pr_reg_nacl;
+ se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+ add_desc_len = 0;
+
+ atomic_inc(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_inc();
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Determine expected length of $FABRIC_MOD specific
+ * TransportID full status descriptor..
+ */
+ exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+ se_tpg, se_nacl, pr_reg, &format_code);
+
+ if ((exp_desc_len + add_len) > cmd->data_length) {
+ printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+ " out of buffer: %d\n", cmd->data_length);
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+ break;
+ }
+ /*
+ * Set RESERVATION KEY
+ */
+ buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+ buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+ buf[off++] = (pr_reg->pr_res_key & 0xff);
+ off += 4; /* Skip Over Reserved area */
+
+ /*
+ * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+ */
+ if (pr_reg->pr_reg_all_tg_pt)
+ buf[off] = 0x02;
+ /*
+ * The struct se_lun pointer will be present for the
+ * reservation holder for PR_HOLDER bit.
+ *
+ * Also, if this registration is the reservation
+ * holder, fill in SCOPE and TYPE in the next byte.
+ */
+ if (pr_reg->pr_res_holder) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ } else
+ off += 2;
+
+ off += 4; /* Skip over reserved area */
+ /*
+ * From spc4r17 6.3.15:
+ *
+		 * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
+ * IDENTIFIER field contains the relative port identifier (see
+ * 3.1.120) of the target port that is part of the I_T nexus
+ * described by this full status descriptor. If the ALL_TG_PT
+ * bit is set to one, the contents of the RELATIVE TARGET PORT
+ * IDENTIFIER field are not defined by this standard.
+ */
+ if (!(pr_reg->pr_reg_all_tg_pt)) {
+ struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+ buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+ buf[off++] = (port->sep_rtpi & 0xff);
+ } else
+ off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */
+
+ /*
+ * Now, have the $FABRIC_MOD fill in the protocol identifier
+ */
+ desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+ se_nacl, pr_reg, &format_code, &buf[off+4]);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec(&pr_reg->pr_res_holders);
+ smp_mb__after_atomic_dec();
+ /*
+ * Set the ADDITIONAL DESCRIPTOR LENGTH
+ */
+ buf[off++] = ((desc_len >> 24) & 0xff);
+ buf[off++] = ((desc_len >> 16) & 0xff);
+ buf[off++] = ((desc_len >> 8) & 0xff);
+ buf[off++] = (desc_len & 0xff);
+ /*
+		 * Size of full descriptor header minus TransportID
+		 * containing $FABRIC_MOD specific initiator device/port
+ * WWN information.
+ *
+ * See spc4r17 Section 6.13.5 Table 169
+ */
+ add_desc_len = (24 + desc_len);
+
+ off += desc_len;
+ add_len += add_desc_len;
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Set ADDITIONAL_LENGTH
+ */
+ buf[4] = ((add_len >> 24) & 0xff);
+ buf[5] = ((add_len >> 16) & 0xff);
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+ return 0;
+}
+
+static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+{
+ switch (cdb[1] & 0x1f) {
+ case PRI_READ_KEYS:
+ return core_scsi3_pri_read_keys(cmd);
+ case PRI_READ_RESERVATION:
+ return core_scsi3_pri_read_reservation(cmd);
+ case PRI_REPORT_CAPABILITIES:
+ return core_scsi3_pri_report_capabilities(cmd);
+ case PRI_READ_FULL_STATUS:
+ return core_scsi3_pri_read_full_status(cmd);
+ default:
+ printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+ " action: 0x%02x\n", cdb[1] & 0x1f);
+ return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ }
+
+}
+
+int core_scsi3_emulate_pr(struct se_cmd *cmd)
+{
+ unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ struct se_device *dev = cmd->se_dev;
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has been reserved by any RESERVE command and is
+ * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+ * PERSISTENT RESERVE OUT commands shall conflict regardless of
+ * initiator or service action and shall terminate with a RESERVATION
+ * CONFLICT status.
+ */
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ }
+
+ return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
+ core_scsi3_emulate_pr_out(cmd, cdb) :
+ core_scsi3_emulate_pr_in(cmd, cdb);
+}
+
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+ return 0;
+}
+
+static int core_pt_seq_non_holder(
+ struct se_cmd *cmd,
+ unsigned char *cdb,
+ u32 pr_reg_type)
+{
+ return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct t10_reservation_template *rest = &su_dev->t10_reservation;
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, use the reservations
+ * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
+ * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but do not handle SCSI reservations themselves.
+ */
+ if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+ rest->res_type = SPC_PASSTHROUGH;
+ rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+ rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+ " emulation\n", TRANSPORT(dev)->name);
+ return 0;
+ }
+ /*
+ * If SPC-3 or above is reported by real or emulated struct se_device,
+ * use emulated Persistent Reservations.
+ */
+ if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+ rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+ rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+ " emulation\n", TRANSPORT(dev)->name);
+ } else {
+ rest->res_type = SPC2_RESERVATIONS;
+ rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+ rest->pr_ops.t10_seq_non_holder =
+ &core_scsi2_reservation_seq_non_holder;
+ printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+ TRANSPORT(dev)->name);
+ }
+
+ return 0;
+}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 000000000000..5603bcfd86d3
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER 0x00
+#define PRO_RESERVE 0x01
+#define PRO_RELEASE 0x02
+#define PRO_CLEAR 0x03
+#define PRO_PREEMPT 0x04
+#define PRO_PREEMPT_AND_ABORT 0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
+#define PRO_REGISTER_AND_MOVE 0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS 0x00
+#define PRI_READ_RESERVATION 0x01
+#define PRI_REPORT_CAPABILITIES 0x02
+#define PRI_READ_FULL_STATUS 0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE 0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE 0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08
+
+#define PR_APTPL_MAX_IPORT_LEN 256
+#define PR_APTPL_MAX_TPORT_LEN 256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+ char *, u32);
+extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+ struct t10_reservation_template *, u64,
+ unsigned char *, unsigned char *, u32,
+ unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ struct se_portal_group *, struct se_lun *,
+ struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
+ struct se_cmd *);
+extern int core_scsi3_emulate_pr(struct se_cmd *);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 000000000000..f2a08477a68c
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
+/*******************************************************************************
+ * Filename: target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/* pscsi_get_sh():
+ *
+ *
+ */
+static struct Scsi_Host *pscsi_get_sh(u32 host_no)
+{
+ struct Scsi_Host *sh = NULL;
+
+ sh = scsi_host_lookup(host_no);
+ if (IS_ERR(sh)) {
+ printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
+ " %u\n", host_no);
+ return NULL;
+ }
+
+ return sh;
+}
+
+/* pscsi_attach_hba():
+ *
+ * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
+ * from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ int hba_depth;
+ struct pscsi_hba_virt *phv;
+
+ phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+ if (!(phv)) {
+ printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+ return -1;
+ }
+ phv->phv_host_id = host_id;
+ phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ hba->hba_ptr = (void *)phv;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
+ " Target Core with TCQ Depth: %d\n", hba->hba_id,
+ atomic_read(&hba->max_queue_depth));
+
+ return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+ if (scsi_host) {
+ scsi_host_put(scsi_host);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ " Generic Target Core\n", hba->hba_id,
+ (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+ "Unknown");
+ } else
+ printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+ " from Generic Target Core\n", hba->hba_id);
+
+ kfree(phv);
+ hba->hba_ptr = NULL;
+}
+
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct Scsi_Host *sh = phv->phv_lld_host;
+ int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+ /*
+ * Release the struct Scsi_Host
+ */
+ if (!(mode_flag)) {
+ if (!(sh))
+ return 0;
+
+ phv->phv_lld_host = NULL;
+ phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+ " %s\n", hba->hba_id, (sh->hostt->name) ?
+ (sh->hostt->name) : "Unknown");
+
+ scsi_host_put(sh);
+ return 0;
+ }
+ /*
+ * Otherwise, locate struct Scsi_Host from the original passed
+ * pSCSI Host ID and enable for phba mode
+ */
+ sh = pscsi_get_sh(phv->phv_host_id);
+ if (!(sh)) {
+ printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+ " phv_host_id: %d\n", phv->phv_host_id);
+ return -1;
+ }
+ /*
+ * Usually the SCSI LLD will use the hostt->can_queue value to define
+ * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
+ * this at all and set sh->can_queue at runtime.
+ */
+ hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
+ sh->hostt->can_queue : sh->can_queue;
+
+ atomic_set(&hba->left_queue_depth, hba_depth);
+ atomic_set(&hba->max_queue_depth, hba_depth);
+
+ phv->phv_lld_host = sh;
+ phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+ hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+ return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+ struct scsi_device *sdev)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+ int ret;
+
+ buf = kzalloc(12, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = MODE_SENSE;
+ cdb[4] = 0x0c; /* 12 bytes */
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+ HZ, 1, NULL);
+ if (ret)
+ goto out_free;
+
+ /*
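+	 * The block length is carried in bytes 9-11 of the returned MODE
+	 * SENSE data, i.e. the three-byte BLOCK LENGTH field of the mode
+	 * parameter block descriptor that follows the 4-byte header.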
+	 * If the reported block length is zero, fall back to a default of 1024.
+ */
+ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+ if (!sdev->sector_size)
+ sdev->sector_size = 1024;
+out_free:
+ kfree(buf);
+}
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+ unsigned char *buf;
+
+ if (sdev->inquiry_len < INQUIRY_LEN)
+ return;
+
+ buf = sdev->inquiry;
+ if (!buf)
+ return;
+ /*
+ * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+ */
+ memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+ memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+ memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+ int ret;
+
+ buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+ if (!buf)
+ return -1;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = INQUIRY;
+ cdb[1] = 0x01; /* Query VPD */
+ cdb[2] = 0x80; /* Unit Serial Number */
+ cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+ cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+ INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+ if (ret)
+ goto out_free;
+
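+	/*
+	 * The Unit Serial Number VPD page returns the PRODUCT SERIAL NUMBER
+	 * as an ASCII string starting at byte 4 of the returned data.
+	 */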
+ snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+ wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+ kfree(buf);
+ return 0;
+
+out_free:
+ kfree(buf);
+ return -1;
+}
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+ struct t10_wwn *wwn)
+{
+ unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+ int ident_len, page_len, off = 4, ret;
+ struct t10_vpd *vpd;
+
+ buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = INQUIRY;
+ cdb[1] = 0x01; /* Query VPD */
+ cdb[2] = 0x83; /* Device Identifier */
+ cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+ cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+ ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+ INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+ NULL, HZ, 1, NULL);
+ if (ret)
+ goto out;
+
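+	/*
+	 * Walk the Device Identification VPD page: the PAGE LENGTH field
+	 * sits in bytes 2-3, and each identification descriptor consists
+	 * of a 4-byte header followed by IDENTIFIER LENGTH bytes.
+	 */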
+ page_len = (buf[2] << 8) | buf[3];
+ while (page_len > 0) {
+ /* Grab a pointer to the Identification descriptor */
+ page_83 = &buf[off];
+ ident_len = page_83[3];
+ if (!ident_len) {
+ printk(KERN_ERR "page_83[3]: identifier"
+ " length zero!\n");
+ break;
+ }
+		printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len);
+
+ vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+ if (!vpd) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct t10_vpd\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&vpd->vpd_list);
+
+ transport_set_vpd_proto_id(vpd, page_83);
+ transport_set_vpd_assoc(vpd, page_83);
+
+ if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ kfree(vpd);
+ continue;
+ }
+ if (transport_set_vpd_ident(vpd, page_83) < 0) {
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ kfree(vpd);
+ continue;
+ }
+
+ list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+ off += (ident_len + 4);
+ page_len -= (ident_len + 4);
+ }
+
+out:
+ kfree(buf);
+}
+
+/* pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ struct pscsi_dev_virt *pdv,
+ struct scsi_device *sd,
+ int dev_flags)
+{
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct request_queue *q;
+ struct queue_limits *limits;
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+ if (!sd->queue_depth) {
+ sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+ printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+ " queue_depth to %d\n", sd->channel, sd->id,
+ sd->lun, sd->queue_depth);
+ }
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+ */
+ q = sd->request_queue;
+ limits = &dev_limits.limits;
+ limits->logical_block_size = sd->sector_size;
+ limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
+ queue_max_hw_sectors(q) : sd->host->max_sectors;
+ limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
+ queue_max_sectors(q) : sd->host->max_sectors;
+ dev_limits.hw_queue_depth = sd->queue_depth;
+ dev_limits.queue_depth = sd->queue_depth;
+ /*
+ * Setup our standard INQUIRY info into se_dev->t10_wwn
+ */
+ pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+ /*
+	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
+ * which has already been referenced with Linux SCSI code with
+ * scsi_device_get() in this file's pscsi_create_virtdevice().
+ *
+ * The passthrough operations called by the transport_add_device_*
+	 * function below will require this pointer to be set for passthrough
+ * ops.
+ *
+ * For the shutdown case in pscsi_free_device(), this struct
+ * scsi_device reference is released with Linux SCSI code
+ * scsi_device_put() and the pdv->pdv_sd cleared.
+ */
+ pdv->pdv_sd = sd;
+
+ dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+ se_dev, dev_flags, (void *)pdv,
+ &dev_limits, NULL, NULL);
+ if (!(dev)) {
+ pdv->pdv_sd = NULL;
+ return NULL;
+ }
+
+ /*
+ * Locate VPD WWN Information used for various purposes within
+ * the Storage Engine.
+ */
+ if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+ /*
+ * If VPD Unit Serial returned GOOD status, try
+ * VPD Device Identification page (0x83).
+ */
+ pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+ }
+
+ /*
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ */
+ if (sd->type == TYPE_TAPE)
+ pscsi_tape_read_blocksize(dev, sd);
+ return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ struct pscsi_dev_virt *pdv;
+
+ pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+ if (!(pdv)) {
+ printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+ return NULL;
+ }
+ pdv->pdv_se_hba = hba;
+
+ printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return (void *)pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ struct block_device *bd;
+ u32 dev_flags = 0;
+
+ if (scsi_device_get(sd)) {
+ printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ spin_unlock_irq(sh->host_lock);
+ return NULL;
+ }
+ spin_unlock_irq(sh->host_lock);
+ /*
+ * Claim exclusive struct block_device access to struct scsi_device
+ * for TYPE_DISK using supplied udev_path
+ */
+ bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+ if (IS_ERR(bd)) {
+ printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+ scsi_device_put(sd);
+ return NULL;
+ }
+ pdv->pdv_bd = bd;
+
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev)) {
+ blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ scsi_device_put(sd);
+ return NULL;
+ }
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ u32 dev_flags = 0;
+
+ if (scsi_device_get(sd)) {
+ printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ spin_unlock_irq(sh->host_lock);
+ return NULL;
+ }
+ spin_unlock_irq(sh->host_lock);
+
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev)) {
+ scsi_device_put(sd);
+ return NULL;
+ }
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+ sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+ struct scsi_device *sd,
+ struct pscsi_dev_virt *pdv,
+ struct se_subsystem_dev *se_dev,
+ struct se_hba *hba)
+{
+ struct se_device *dev;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct Scsi_Host *sh = sd->host;
+ u32 dev_flags = 0;
+
+ spin_unlock_irq(sh->host_lock);
+ dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+ if (!(dev))
+ return NULL;
+
+ printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+ sd->channel, sd->id, sd->lun);
+
+ return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+ struct se_device *dev;
+ struct scsi_device *sd;
+ struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct Scsi_Host *sh = phv->phv_lld_host;
+ int legacy_mode_enable = 0;
+
+ if (!(pdv)) {
+ printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+ " parameter\n");
+ return NULL;
+ }
+ /*
+ * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+ * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+ */
+ if (!(sh)) {
+ if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+ printk(KERN_ERR "pSCSI: Unable to locate struct"
+ " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+ return NULL;
+ }
+ /*
+ * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+ * reference, we enforce that udev_path has been set
+ */
+ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+ printk(KERN_ERR "pSCSI: udev_path attribute has not"
+ " been set before ENABLE=1\n");
+ return NULL;
+ }
+ /*
+ * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+ * use the original TCM hba ID to reference Linux/SCSI Host No
+ * and enable for PHV_LLD_SCSI_HOST_NO mode.
+ */
+ if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+ spin_lock(&hba->device_lock);
+ if (!(list_empty(&hba->hba_dev_list))) {
+ printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+ " with active devices\n");
+ spin_unlock(&hba->device_lock);
+ return NULL;
+ }
+ spin_unlock(&hba->device_lock);
+
+ if (pscsi_pmode_enable_hba(hba, 1) != 1)
+ return NULL;
+
+ legacy_mode_enable = 1;
+ hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+ sh = phv->phv_lld_host;
+ } else {
+ sh = pscsi_get_sh(pdv->pdv_host_id);
+ if (!(sh)) {
+ printk(KERN_ERR "pSCSI: Unable to locate"
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+ return NULL;
+ }
+ }
+ } else {
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
+ printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ " struct Scsi_Host exists\n");
+ return NULL;
+ }
+ }
+
+ spin_lock_irq(sh->host_lock);
+ list_for_each_entry(sd, &sh->__devices, siblings) {
+ if ((pdv->pdv_channel_id != sd->channel) ||
+ (pdv->pdv_target_id != sd->id) ||
+ (pdv->pdv_lun_id != sd->lun))
+ continue;
+ /*
+ * Functions will release the held struct scsi_host->host_lock
+		 * before calling pscsi_add_device_to_list() to register
+ * struct scsi_device with target_core_mod.
+ */
+ switch (sd->type) {
+ case TYPE_DISK:
+ dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+ break;
+ case TYPE_ROM:
+ dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+ break;
+ default:
+ dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+ break;
+ }
+
+ if (!(dev)) {
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ scsi_host_put(sh);
+ else if (legacy_mode_enable) {
+ pscsi_pmode_enable_hba(hba, 0);
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+ }
+ pdv->pdv_sd = NULL;
+ return NULL;
+ }
+ return dev;
+ }
+ spin_unlock_irq(sh->host_lock);
+
+ printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+ pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
+
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ scsi_host_put(sh);
+ else if (legacy_mode_enable) {
+ pscsi_pmode_enable_hba(hba, 0);
+ hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+ }
+
+ return NULL;
+}
+
+/* pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+ struct pscsi_dev_virt *pdv = p;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
+ if (sd) {
+ /*
+ * Release exclusive pSCSI internal struct block_device claim for
+ * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+ */
+ if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+ blkdev_put(pdv->pdv_bd,
+ FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ pdv->pdv_bd = NULL;
+ }
+ /*
+ * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+ * to struct Scsi_Host now.
+ */
+ if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+ (phv->phv_lld_host != NULL))
+ scsi_host_put(phv->phv_lld_host);
+
+ if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+ scsi_device_put(sd);
+
+ pdv->pdv_sd = NULL;
+ }
+
+ kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+ return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/* pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+ int result;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ unsigned char *cdb = &pt->pscsi_cdb[0];
+
+ result = pt->pscsi_result;
+ /*
+ * Hack to make sure that Write-Protect modepage is set if R/O mode is
+ * forced.
+ */
+ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+ (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ if (!TASK_CMD(task)->se_deve)
+ goto after_mode_sense;
+
+ if (TASK_CMD(task)->se_deve->lun_flags &
+ TRANSPORT_LUNFLAGS_READ_ONLY) {
+ unsigned char *buf = (unsigned char *)
+ T_TASK(task->task_se_cmd)->t_task_buf;
+
+ if (cdb[0] == MODE_SENSE_10) {
+ if (!(buf[3] & 0x80))
+ buf[3] |= 0x80;
+ } else {
+ if (!(buf[2] & 0x80))
+ buf[2] |= 0x80;
+ }
+ }
+ }
+after_mode_sense:
+
+ if (sd->type != TYPE_TAPE)
+ goto after_mode_select;
+
+ /*
+ * Hack to correctly obtain the initiator requested blocksize for
+ * TYPE_TAPE. Since this value is dependent upon each tape media,
+ * struct scsi_device->sector_size will not contain the correct value
+ * by default, so we set it here so that
+ * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+ * storage engine.  The block descriptor offsets parsed below are also
+ * sketched as a stand-alone helper after this function.
+ */
+ if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+ (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ unsigned char *buf;
+ struct scatterlist *sg = task->task_sg;
+ u16 bdl;
+ u32 blocksize;
+
+ buf = sg_virt(&sg[0]);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to get buf for scatterlist\n");
+ goto after_mode_select;
+ }
+
+ if (cdb[0] == MODE_SELECT)
+ bdl = (buf[3]);
+ else
+ bdl = (buf[6] << 8) | (buf[7]);
+
+ if (!bdl)
+ goto after_mode_select;
+
+ if (cdb[0] == MODE_SELECT)
+ blocksize = (buf[9] << 16) | (buf[10] << 8) |
+ (buf[11]);
+ else
+ blocksize = (buf[13] << 16) | (buf[14] << 8) |
+ (buf[15]);
+
+ sd->sector_size = blocksize;
+ }
+after_mode_select:
+
+ if (status_byte(result) & CHECK_CONDITION)
+ return 1;
+
+ return 0;
+}
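+
+/*
+ * Illustrative sketch only (hypothetical helper, not referenced by the
+ * code above): the MODE SELECT block descriptor parsing used in
+ * pscsi_transport_complete(), pulled out for clarity.  MODE SELECT(6)
+ * carries a 4-byte parameter header and MODE SELECT(10) an 8-byte one,
+ * each followed by an 8-byte block descriptor whose last three bytes
+ * hold the block length.
+ */
+static inline u32 pscsi_example_mode_select_blocksize(
+        const unsigned char *buf,
+        int ten_byte_cdb)
+{
+        const unsigned char *desc;
+        u16 bdl;
+
+        if (ten_byte_cdb) {
+                bdl = (buf[6] << 8) | buf[7];
+                desc = &buf[8];
+        } else {
+                bdl = buf[3];
+                desc = &buf[4];
+        }
+        if (!bdl)
+                return 0;
+        /* Block length is bytes 5..7 of the block descriptor */
+        return (desc[5] << 16) | (desc[6] << 8) | desc[7];
+}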
+
+static struct se_task *
+pscsi_alloc_task(struct se_cmd *cmd)
+{
+ struct pscsi_plugin_task *pt;
+ unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+
+ pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+ if (!pt) {
+ printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+ return NULL;
+ }
+
+ /*
+ * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
+ * allocate the extended CDB buffer for per struct se_task context
+ * pt->pscsi_cdb now.
+ */
+ if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+
+ pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
+ if (!(pt->pscsi_cdb)) {
+ printk(KERN_ERR "pSCSI: Unable to allocate extended"
+ " pt->pscsi_cdb\n");
+ kfree(pt);
+ return NULL;
+ }
+ } else
+ pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+
+ return &pt->pscsi_task;
+}
+
+static inline void pscsi_blk_init_request(
+ struct se_task *task,
+ struct pscsi_plugin_task *pt,
+ struct request *req,
+ int bidi_read)
+{
+ /*
+ * Defined as "scsi command" in include/linux/blkdev.h.
+ */
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ /*
+ * For the extra BIDI-COMMAND READ struct request we do not
+ * need to setup the remaining structure members
+ */
+ if (bidi_read)
+ return;
+ /*
+ * Setup the done function pointer for struct request,
+ * also set the end_io_data pointer to struct se_task.
+ */
+ req->end_io = pscsi_req_done;
+ req->end_io_data = (void *)task;
+ /*
+ * Load the referenced struct se_task's SCSI CDB into
+ * include/linux/blkdev.h:struct request->cmd
+ */
+ req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ req->cmd = &pt->pscsi_cdb[0];
+ /*
+ * Setup pointer for outgoing sense data.
+ */
+ req->sense = (void *)&pt->pscsi_sense[0];
+ req->sense_len = 0;
+}
+
+/*
+ * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
+ */
+static int pscsi_blk_get_request(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+
+ pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
+ (task->task_data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
+ printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+ PTR_ERR(pt->pscsi_req));
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ /*
+ * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+ * and setup rq callback, CDB and sense.
+ */
+ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+ return 0;
+}
+
+/* pscsi_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int pscsi_do_task(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ /*
+ * Set the struct request->timeout value based on peripheral
+ * device type from SCSI.
+ */
+ if (pdv->pdv_sd->type == TYPE_DISK)
+ pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
+ else
+ pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
+
+ pt->pscsi_req->retries = PS_RETRY;
+ /*
+ * Queue the struct request into the struct scsi_device->request_queue.
+ * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
+ * descriptor
+ */
+ blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
+ (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+ pscsi_req_done);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct se_cmd *cmd = task->task_se_cmd;
+
+ /*
+ * Release the extended CDB allocation from pscsi_alloc_task()
+ * if one exists.
+ */
+ if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+ kfree(pt->pscsi_cdb);
+ /*
+ * We do not release the bio(s) here associated with this task, as
+ * this is handled by bio_put() and pscsi_bi_endio().
+ */
+ kfree(pt);
+}
+
+enum {
+ Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+ Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_scsi_host_id, "scsi_host_id=%d"},
+ {Opt_scsi_channel_id, "scsi_channel_id=%d"},
+ {Opt_scsi_target_id, "scsi_target_id=%d"},
+ {Opt_scsi_lun_id, "scsi_lun_id=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page,
+ ssize_t count)
+{
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_scsi_host_id:
+ if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+ printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+ " scsi_host_id while phv_mode =="
+ " PHV_LLD_SCSI_HOST_NO\n",
+ phv->phv_host_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ match_int(args, &arg);
+ pdv->pdv_host_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+ " %d\n", phv->phv_host_id, pdv->pdv_host_id);
+ pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+ break;
+ case Opt_scsi_channel_id:
+ match_int(args, &arg);
+ pdv->pdv_channel_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+ " ID: %d\n", phv->phv_host_id,
+ pdv->pdv_channel_id);
+ pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+ break;
+ case Opt_scsi_target_id:
+ match_int(args, &arg);
+ pdv->pdv_target_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+ " ID: %d\n", phv->phv_host_id,
+ pdv->pdv_target_id);
+ pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+ break;
+ case Opt_scsi_lun_id:
+ match_int(args, &arg);
+ pdv->pdv_lun_id = arg;
+ printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+ " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+ pdv->pdv_flags |= PDF_HAS_LUN_ID;
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
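+
+/*
+ * Usage sketch (hypothetical values): the control page parsed above is a
+ * comma separated key=value string written via configfs, e.g.
+ *
+ *        "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=0,scsi_lun_id=0"
+ *
+ * scsi_host_id= is only accepted while phv_mode != PHV_LLD_SCSI_HOST_NO;
+ * the channel, target and LUN IDs must all be given before the device can
+ * pass pscsi_check_configfs_dev_params() below.
+ */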
+
+static ssize_t pscsi_check_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev)
+{
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+ if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+ printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+ " scsi_lun_id= parameters\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+ unsigned char host_id[16];
+ ssize_t bl;
+ int i;
+
+ if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+ snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+ else
+ snprintf(host_id, 16, "PHBA Mode");
+
+ bl = sprintf(b, "SCSI Device Bus Location:"
+ " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+ pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+ host_id);
+
+ if (sd) {
+ bl += sprintf(b + bl, " ");
+ bl += sprintf(b + bl, "Vendor: ");
+ for (i = 0; i < 8; i++) {
+ if (ISPRINT(sd->vendor[i])) /* printable character? */
+ bl += sprintf(b + bl, "%c", sd->vendor[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, " Model: ");
+ for (i = 0; i < 16; i++) {
+ if (ISPRINT(sd->model[i])) /* printable character ? */
+ bl += sprintf(b + bl, "%c", sd->model[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, " Rev: ");
+ for (i = 0; i < 4; i++) {
+ if (ISPRINT(sd->rev[i])) /* printable character ? */
+ bl += sprintf(b + bl, "%c", sd->rev[i]);
+ else
+ bl += sprintf(b + bl, " ");
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+ bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+{
+ struct bio *bio;
+ /*
+ * Use bio_kmalloc() following the comment on bio -> struct request
+ * allocation in block/blk-core.c:blk_make_request()
+ */
+ bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ if (!(bio)) {
+ printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+ return NULL;
+ }
+ bio->bi_end_io = pscsi_bi_endio;
+
+ return bio;
+}
+
+#if 0
+#define DEBUG_PSCSI(x...) printk(x)
+#else
+#define DEBUG_PSCSI(x...)
+#endif
+
+static int __pscsi_map_task_SG(
+ struct se_task *task,
+ struct scatterlist *task_sg,
+ u32 task_sg_num,
+ int bidi_read)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct page *page;
+ struct scatterlist *sg;
+ u32 data_len = task->task_size, i, len, bytes, off;
+ int nr_pages = (task->task_size + task_sg[0].offset +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
+ if (!task->task_size)
+ return 0;
+ /*
+ * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
+ * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
+ * struct scatterlist memory. The struct se_task->task_sg[] currently needs
+ * to be attached to struct bios for submission to Linux/SCSI using
+ * struct request to struct scsi_device->request_queue.
+ *
+ * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
+ * is ported to upstream SCSI passthrough functionality that accepts
+ * struct scatterlist->page_link or struct page as a parameter.
+ *
+ * A rough count of the bios built below is sketched after this function.
+ */
+ DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+
+ for_each_sg(task_sg, sg, task_sg_num, i) {
+ page = sg_page(sg);
+ off = sg->offset;
+ len = sg->length;
+
+ DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+ page, len, off);
+
+ while (len > 0 && data_len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+ bytes = min(bytes, data_len);
+
+ if (!(bio)) {
+ nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+ nr_pages -= nr_vecs;
+ /*
+ * Calls bio_kmalloc() and sets bio->bi_end_io()
+ */
+ bio = pscsi_get_bio(pdv, nr_vecs);
+ if (!(bio))
+ goto fail;
+
+ if (rw)
+ bio->bi_rw |= REQ_WRITE;
+
+ DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+ " dir: %s nr_vecs: %d\n", bio,
+ (rw) ? "rw" : "r", nr_vecs);
+ /*
+ * Set *hbio pointer to handle the case:
+ * nr_pages > BIO_MAX_PAGES, where additional
+ * bios need to be added to complete a given
+ * struct se_task
+ */
+ if (!hbio)
+ hbio = tbio = bio;
+ else
+ tbio = tbio->bi_next = bio;
+ }
+
+ DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+ " bio: %p page: %p len: %d off: %d\n", i, bio,
+ page, len, off);
+
+ rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+ bio, page, bytes, off);
+ if (rc != bytes)
+ goto fail;
+
+ DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+ bio->bi_vcnt, nr_vecs);
+
+ if (bio->bi_vcnt > nr_vecs) {
+ DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+ " %d i: %d bio: %p, allocating another"
+ " bio\n", bio->bi_vcnt, i, bio);
+ /*
+ * Clear the pointer so that another bio will
+ * be allocated with pscsi_get_bio() above; the
+ * current bio has already been saved in *tbio
+ * and linked via bio->bi_next.
+ */
+ bio = NULL;
+ }
+
+ page++;
+ len -= bytes;
+ data_len -= bytes;
+ off = 0;
+ }
+ }
+ /*
+ * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
+ * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
+ */
+ if (!(bidi_read)) {
+ /*
+ * Starting with v2.6.31, call blk_make_request() passing in *hbio to
+ * allocate the pSCSI task a struct request.
+ */
+ pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
+ hbio, GFP_KERNEL);
+ if (!(pt->pscsi_req)) {
+ printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+ goto fail;
+ }
+ /*
+ * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+ * and setup rq callback, CDB and sense.
+ */
+ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+
+ return task->task_sg_num;
+ }
+ /*
+ * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
+ * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
+ */
+ pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
+ hbio, GFP_KERNEL);
+ if (!(pt->pscsi_req->next_rq)) {
+ printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+ goto fail;
+ }
+ pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
+
+ return task->task_sg_num;
+fail:
+ while (hbio) {
+ bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_endio(bio, 0);
+ }
+ return ret;
+}
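+
+/*
+ * Illustrative sketch only (hypothetical helper): a rough upper bound on
+ * the number of bios the loop above allocates for one task, assuming each
+ * bio accepts its full nr_vecs pages.  E.g. if BIO_MAX_PAGES is 256, a
+ * 600-page payload is carved into three bios (256 + 256 + 88) chained
+ * through bio->bi_next.
+ */
+static inline int pscsi_example_bio_count(u32 task_size, u32 first_sg_offset)
+{
+        int nr_pages = (task_size + first_sg_offset + PAGE_SIZE - 1)
+                        >> PAGE_SHIFT;
+
+        return (nr_pages + BIO_MAX_PAGES - 1) / BIO_MAX_PAGES;
+}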
+
+static int pscsi_map_task_SG(struct se_task *task)
+{
+ int ret;
+
+ /*
+ * Setup the main struct request for the task->task_sg[] payload
+ */
+
+ ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+ if (ret >= 0 && task->task_sg_bidi) {
+ /*
+ * If present, set up the extra BIDI-COMMAND SCSI READ
+ * struct request and payload.
+ */
+ ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+ task->task_sg_num, 1);
+ }
+
+ if (ret < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ return 0;
+}
+
+/* pscsi_map_task_non_SG():
+ *
+ *
+ */
+static int pscsi_map_task_non_SG(struct se_task *task)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ int ret = 0;
+
+ if (pscsi_blk_get_request(task) < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ if (!task->task_size)
+ return 0;
+
+ ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
+ pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+ task->task_size, GFP_KERNEL);
+ if (ret < 0) {
+ printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ return 0;
+}
+
+static int pscsi_CDB_none(struct se_task *task)
+{
+ return pscsi_blk_get_request(task);
+}
+
+/* pscsi_get_cdb():
+ *
+ *
+ */
+static unsigned char *pscsi_get_cdb(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ return pt->pscsi_cdb;
+}
+
+/* pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ return (unsigned char *)&pt->pscsi_sense[0];
+}
+
+/* pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
+ return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/* pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct scsi_device *sd = pdv->pdv_sd;
+
+ return sd->type;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+ struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+ if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+ return pdv->pdv_bd->bd_part->nr_sects;
+
+ dump_stack();
+ return 0;
+}
+
+/* pscsi_handle_SAM_STATUS_failures():
+ *
+ *
+ */
+static inline void pscsi_process_SAM_status(
+ struct se_task *task,
+ struct pscsi_plugin_task *pt)
+{
+ task->task_scsi_status = status_byte(pt->pscsi_result);
+ if ((task->task_scsi_status)) {
+ task->task_scsi_status <<= 1;
+ printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ pt->pscsi_result);
+ }
+
+ switch (host_byte(pt->pscsi_result)) {
+ case DID_OK:
+ transport_complete_task(task, (!task->task_scsi_status));
+ break;
+ default:
+ printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+ " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+ pt->pscsi_result);
+ task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+ task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TASK_CMD(task)->transport_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ transport_complete_task(task, 0);
+ break;
+ }
+
+ return;
+}
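+
+/*
+ * Illustrative sketch only (hypothetical helper): status_byte() drops the
+ * low bit of the SCSI result word, so shifting it back left by one, as
+ * done above, recovers the SAM status code; a CHECK CONDITION result
+ * comes back as SAM_STAT_CHECK_CONDITION (0x02).
+ */
+static inline int pscsi_example_sam_status(int scsi_result)
+{
+        return status_byte(scsi_result) << 1;
+}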
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+ struct se_task *task = req->end_io_data;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+ pt->pscsi_result = req->errors;
+ pt->pscsi_resid = req->resid_len;
+
+ pscsi_process_SAM_status(task, pt);
+ /*
+ * Release BIDI-READ if present
+ */
+ if (req->next_rq != NULL)
+ __blk_put_request(req->q, req->next_rq);
+
+ __blk_put_request(req->q, req);
+ pt->pscsi_req = NULL;
+}
+
+static struct se_subsystem_api pscsi_template = {
+ .name = "pscsi",
+ .owner = THIS_MODULE,
+ .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
+ .cdb_none = pscsi_CDB_none,
+ .map_task_non_SG = pscsi_map_task_non_SG,
+ .map_task_SG = pscsi_map_task_SG,
+ .attach_hba = pscsi_attach_hba,
+ .detach_hba = pscsi_detach_hba,
+ .pmode_enable_hba = pscsi_pmode_enable_hba,
+ .allocate_virtdevice = pscsi_allocate_virtdevice,
+ .create_virtdevice = pscsi_create_virtdevice,
+ .free_device = pscsi_free_device,
+ .transport_complete = pscsi_transport_complete,
+ .alloc_task = pscsi_alloc_task,
+ .do_task = pscsi_do_task,
+ .free_task = pscsi_free_task,
+ .check_configfs_dev_params = pscsi_check_configfs_dev_params,
+ .set_configfs_dev_params = pscsi_set_configfs_dev_params,
+ .show_configfs_dev_params = pscsi_show_configfs_dev_params,
+ .get_cdb = pscsi_get_cdb,
+ .get_sense_buffer = pscsi_get_sense_buffer,
+ .get_device_rev = pscsi_get_device_rev,
+ .get_device_type = pscsi_get_device_type,
+ .get_blocks = pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+ return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+ transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 000000000000..a4cd5d352c3a
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION "v4.0"
+#define PSCSI_VIRTUAL_HBA_DEPTH 2048
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE 0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH 1
+
+#define PS_RETRY 5
+#define PS_TIMEOUT_DISK (15*HZ)
+#define PS_TIMEOUT_OTHER (500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+ struct se_task pscsi_task;
+ unsigned char *pscsi_cdb;
+ unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
+ unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+ int pscsi_direction;
+ int pscsi_result;
+ u32 pscsi_resid;
+ struct request *pscsi_req;
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID 0x01
+#define PDF_HAS_TARGET_ID 0x02
+#define PDF_HAS_LUN_ID 0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT 0x10
+#define PDF_HAS_VIRT_HOST_ID 0x20
+
+struct pscsi_dev_virt {
+ int pdv_flags;
+ int pdv_host_id;
+ int pdv_channel_id;
+ int pdv_target_id;
+ int pdv_lun_id;
+ struct block_device *pdv_bd;
+ struct scsi_device *pdv_sd;
+ struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+ PHV_VIRUTAL_HOST_ID,
+ PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+ int phv_host_id;
+ phv_modes_t phv_mode;
+ struct Scsi_Host *phv_lld_host;
+} ____cacheline_aligned;
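+
+/*
+ * Illustrative sketch only (hypothetical helper): the PDF_* values above
+ * form a simple bitmask in pdv_flags.  A fully addressed pSCSI device has
+ * at least the channel, target and LUN bits set, which is what
+ * pscsi_check_configfs_dev_params() verifies before ENABLE=1.
+ */
+static inline int pscsi_example_dev_addressed(struct pscsi_dev_virt *pdv)
+{
+        int required = PDF_HAS_CHANNEL_ID | PDF_HAS_TARGET_ID |
+                       PDF_HAS_LUN_ID;
+
+        return (pdv->pdv_flags & required) == required;
+}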
+
+#endif /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 000000000000..979aebf20019
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
+/*******************************************************************************
+ * Filename: target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_dr_template;
+static struct se_subsystem_api rd_mcp_template;
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/* rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct rd_host *rd_host;
+
+ rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+ if (!(rd_host)) {
+ printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+ return -ENOMEM;
+ }
+
+ rd_host->rd_host_id = host_id;
+
+ atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+ atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+ hba->hba_ptr = (void *) rd_host;
+
+ printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+ printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+ " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+ rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+ RD_MAX_SECTORS);
+
+ return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+ struct rd_host *rd_host = hba->hba_ptr;
+
+ printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+ " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+ kfree(rd_host);
+ hba->hba_ptr = NULL;
+}
+
+/* rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 i, j, page_count = 0, sg_per_table;
+ struct rd_dev_sg_table *sg_table;
+ struct page *pg;
+ struct scatterlist *sg;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ sg_table = rd_dev->sg_table_array;
+
+ for (i = 0; i < rd_dev->sg_table_count; i++) {
+ sg = sg_table[i].sg_table;
+ sg_per_table = sg_table[i].rd_sg_count;
+
+ for (j = 0; j < sg_per_table; j++) {
+ pg = sg_page(&sg[j]);
+ if ((pg)) {
+ __free_page(pg);
+ page_count++;
+ }
+ }
+
+ kfree(sg);
+ }
+
+ printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ kfree(sg_table);
+ rd_dev->sg_table_array = NULL;
+ rd_dev->sg_table_count = 0;
+}
+
+
+/* rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ struct rd_dev_sg_table *sg_table;
+ struct page *pg;
+ struct scatterlist *sg;
+
+ if (rd_dev->rd_page_count <= 0) {
+ printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -1;
+ }
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+ sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+ if (!(sg_table)) {
+ printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+ " scatterlist tables\n");
+ return -1;
+ }
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ while (total_sg_needed) {
+ sg_per_table = (total_sg_needed > max_sg_per_table) ?
+ max_sg_per_table : total_sg_needed;
+
+ sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!(sg)) {
+ printk(KERN_ERR "Unable to allocate scatterlist array"
+ " for struct rd_dev\n");
+ return -1;
+ }
+
+ sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+ sg_table[i].sg_table = sg;
+ sg_table[i].rd_sg_count = sg_per_table;
+ sg_table[i].page_start_offset = page_offset;
+ sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+ - 1;
+
+ for (j = 0; j < sg_per_table; j++) {
+ pg = alloc_pages(GFP_KERNEL, 0);
+ if (!(pg)) {
+ printk(KERN_ERR "Unable to allocate scatterlist"
+ " pages for struct rd_dev_sg_table\n");
+ return -1;
+ }
+ sg_assign_page(&sg[j], pg);
+ sg[j].length = PAGE_SIZE;
+ }
+
+ page_offset += sg_per_table;
+ total_sg_needed -= sg_per_table;
+ }
+
+ printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
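+
+/*
+ * Illustrative sketch only (hypothetical helper): the table sizing rule
+ * used above.  Each scatterlist table is capped so that its kzalloc()
+ * stays within RD_MAX_ALLOCATION_SIZE; e.g. if max_sg_per_table works out
+ * to 2048 entries, a 5000 page ramdisk is built as tables of 2048, 2048
+ * and 904 entries (the "+ 1" can leave one spare table when the page
+ * count divides evenly, matching the code above).
+ */
+static inline u32 rd_example_sg_tables_needed(u32 page_count)
+{
+        u32 max_sg_per_table = RD_MAX_ALLOCATION_SIZE /
+                                sizeof(struct scatterlist);
+
+        return (page_count / max_sg_per_table) + 1;
+}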
+
+static void *rd_allocate_virtdevice(
+ struct se_hba *hba,
+ const char *name,
+ int rd_direct)
+{
+ struct rd_dev *rd_dev;
+ struct rd_host *rd_host = hba->hba_ptr;
+
+ rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+ if (!(rd_dev)) {
+ printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+ return NULL;
+ }
+
+ rd_dev->rd_host = rd_host;
+ rd_dev->rd_direct = rd_direct;
+
+ return rd_dev;
+}
+
+static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ return rd_allocate_virtdevice(hba, name, 1);
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+ return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/* rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p,
+ int rd_direct)
+{
+ struct se_device *dev;
+ struct se_dev_limits dev_limits;
+ struct rd_dev *rd_dev = p;
+ struct rd_host *rd_host = hba->hba_ptr;
+ int dev_flags = 0;
+ char prod[16], rev[4];
+
+ memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+ if (rd_build_device_space(rd_dev) < 0)
+ goto fail;
+
+ snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+ snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+ RD_MCP_VERSION);
+
+ dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+ dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+ dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+ dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+ dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+ dev = transport_add_device_to_core_hba(hba,
+ (rd_dev->rd_direct) ? &rd_dr_template :
+ &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+ &dev_limits, prod, rev);
+ if (!(dev))
+ goto fail;
+
+ rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+ rd_dev->rd_queue_depth = dev->queue_depth;
+
+ printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ " %u pages in %u tables, %lu total bytes\n",
+ rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+ "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count,
+ (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+ return dev;
+
+fail:
+ rd_release_device_space(rd_dev);
+ return NULL;
+}
+
+static struct se_device *rd_DIRECT_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ void *p)
+{
+ return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/* rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+ struct rd_dev *rd_dev = p;
+
+ rd_release_device_space(rd_dev);
+ kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+ return container_of(task, struct rd_request, rd_task);
+}
+
+static struct se_task *
+rd_alloc_task(struct se_cmd *cmd)
+{
+ struct rd_request *rd_req;
+
+ rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+ if (!rd_req) {
+ printk(KERN_ERR "Unable to allocate struct rd_request\n");
+ return NULL;
+ }
+ rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+
+ return &rd_req->rd_task;
+}
+
+/* rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+ u32 i;
+ struct rd_dev_sg_table *sg_table;
+
+ for (i = 0; i < rd_dev->sg_table_count; i++) {
+ sg_table = &rd_dev->sg_table_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
+ }
+
+ printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
+}
+
+/* rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(struct rd_request *req)
+{
+ struct se_task *task = &req->rd_task;
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct scatterlist *sg_d, *sg_s;
+ void *dst, *src;
+ u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+ u32 length, page_end = 0, table_sg_end;
+ u32 rd_offset = req->rd_offset;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_d = task->task_sg;
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+ " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+ req->rd_page, req->rd_offset);
+#endif
+ src_offset = rd_offset;
+
+ while (req->rd_size) {
+ if ((sg_d[i].length - dst_offset) <
+ (sg_s[j].length - src_offset)) {
+ length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+ " offset: %u sg_s[%d].length: %u\n", i,
+ &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+ sg_s[j].length);
+ printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+ " src_offset: %u\n", length, dst_offset,
+ src_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ dst = sg_virt(&sg_d[i++]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ src = sg_virt(&sg_s[j]) + src_offset;
+ if (!src)
+ BUG();
+
+ dst_offset = 0;
+ src_offset = length;
+ page_end = 0;
+ } else {
+ length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+ " offset: %u sg_s[%d].length: %u\n", i,
+ &sg_d[i], sg_d[i].length, sg_d[i].offset,
+ j, sg_s[j].length);
+ printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+ " src_offset: %u\n", length, dst_offset,
+ src_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ dst = sg_virt(&sg_d[i]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ if (sg_d[i].length == length) {
+ i++;
+ dst_offset = 0;
+ } else
+ dst_offset = length;
+
+ src = sg_virt(&sg_s[j++]) + src_offset;
+ if (!src)
+ BUG();
+
+ src_offset = 0;
+ page_end = 1;
+ }
+
+ memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ " i: %u, j: %u\n", req->rd_page,
+ (req->rd_size - length), length, i, j);
+#endif
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ return 0;
+
+ if (!page_end)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+ return 0;
+}
+
+/* rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(struct rd_request *req)
+{
+ struct se_task *task = &req->rd_task;
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct scatterlist *sg_d, *sg_s;
+ void *dst, *src;
+ u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+ u32 length, page_end = 0, table_sg_end;
+ u32 rd_offset = req->rd_offset;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+ sg_s = task->task_sg;
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+ " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+ req->rd_page, req->rd_offset);
+#endif
+ dst_offset = rd_offset;
+
+ while (req->rd_size) {
+ if ((sg_s[i].length - src_offset) <
+ (sg_d[j].length - dst_offset)) {
+ length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+ " offset: %d sg_d[%d].length: %u\n", i,
+ &sg_s[i], sg_s[i].length, sg_s[i].offset,
+ j, sg_d[j].length);
+ printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+ " dst_offset: %u\n", length, src_offset,
+ dst_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ src = sg_virt(&sg_s[i++]) + src_offset;
+ if (!src)
+ BUG();
+
+ dst = sg_virt(&sg_d[j]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ src_offset = 0;
+ dst_offset = length;
+ page_end = 0;
+ } else {
+ length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+ " offset: %d sg_d[%d].length: %u\n", i,
+ &sg_s[i], sg_s[i].length, sg_s[i].offset,
+ j, sg_d[j].length);
+ printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+ " dst_offset: %u\n", length, src_offset,
+ dst_offset);
+#endif
+ if (length > req->rd_size)
+ length = req->rd_size;
+
+ src = sg_virt(&sg_s[i]) + src_offset;
+ if (!src)
+ BUG();
+
+ if (sg_s[i].length == length) {
+ i++;
+ src_offset = 0;
+ } else
+ src_offset = length;
+
+ dst = sg_virt(&sg_d[j++]) + dst_offset;
+ if (!dst)
+ BUG();
+
+ dst_offset = 0;
+ page_end = 1;
+ }
+
+ memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ " i: %u, j: %u\n", req->rd_page,
+ (req->rd_size - length), length, i, j);
+#endif
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ return 0;
+
+ if (!page_end)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_MCP
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_d = &table->sg_table[j = 0];
+ }
+
+ return 0;
+}
+
+/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+ struct se_device *dev = task->se_dev;
+ struct rd_request *req = RD_REQ(task);
+ unsigned long long lba;
+ int ret;
+
+ req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+ lba = task->task_lba;
+ req->rd_offset = (do_div(lba,
+ (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+ DEV_ATTRIB(dev)->block_size;
+ req->rd_size = task->task_size;
+
+ if (task->task_data_direction == DMA_FROM_DEVICE)
+ ret = rd_MEMCPY_read(req);
+ else
+ ret = rd_MEMCPY_write(req);
+
+ if (ret != 0)
+ return ret;
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
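+
+/*
+ * Illustrative sketch only (hypothetical helper): the LBA-to-ramdisk-page
+ * math used by rd_MEMCPY_do_task() above.  With a 512 byte block size and
+ * 4k pages there are 8 blocks per page, so LBA 11 maps to page 1 at byte
+ * offset 3 * 512 = 1536.
+ */
+static inline void rd_example_lba_to_page(
+        unsigned long long lba,
+        u32 block_size,
+        u32 *page,
+        u32 *offset)
+{
+        unsigned long long tmp = lba;
+
+        /* PAGE_SIZE is a power of two, so this divide compiles to a shift */
+        *page = (lba * block_size) / PAGE_SIZE;
+        /* do_div() returns the 32-bit remainder of a 64-bit divide */
+        *offset = do_div(tmp, PAGE_SIZE / block_size) * block_size;
+}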
+
+/* rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct rd_request *req = RD_REQ(task);
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct se_mem *se_mem;
+ struct scatterlist *sg_s;
+ u32 j = 0, set_offset = 1;
+ u32 get_next_table = 0, offset_length, table_sg_end;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ table_sg_end = (table->page_end_offset - req->rd_page);
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ (task->task_data_direction == DMA_TO_DEVICE) ?
+ "Write" : "Read",
+ task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+ while (req->rd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+
+ if (set_offset) {
+ offset_length = sg_s[j].length - req->rd_offset;
+ if (offset_length > req->rd_size)
+ offset_length = req->rd_size;
+
+ se_mem->se_page = sg_page(&sg_s[j++]);
+ se_mem->se_off = req->rd_offset;
+ se_mem->se_len = offset_length;
+
+ set_offset = 0;
+ get_next_table = (j > table_sg_end);
+ goto check_eot;
+ }
+
+ offset_length = (req->rd_size < req->rd_offset) ?
+ req->rd_size : req->rd_offset;
+
+ se_mem->se_page = sg_page(&sg_s[j]);
+ se_mem->se_len = offset_length;
+
+ set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+ " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+ req->rd_page, req->rd_size, offset_length, j, se_mem,
+ se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+
+ req->rd_size -= offset_length;
+ if (!(req->rd_size))
+ goto out;
+
+ if (!set_offset && !get_next_table)
+ continue;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+out:
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+ *se_mem_cnt);
+#endif
+ return 0;
+}
+
+/* rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct rd_request *req = RD_REQ(task);
+ struct rd_dev *dev = req->rd_dev;
+ struct rd_dev_sg_table *table;
+ struct se_mem *se_mem;
+ struct scatterlist *sg_s;
+ u32 length, j = 0;
+
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+ (task->task_data_direction == DMA_TO_DEVICE) ?
+ "Write" : "Read",
+ task->task_lba, req->rd_size, req->rd_page);
+#endif
+ while (req->rd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+
+ length = (req->rd_size < sg_s[j].length) ?
+ req->rd_size : sg_s[j].length;
+
+ se_mem->se_page = sg_page(&sg_s[j++]);
+ se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+ " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+ req->rd_size, j, se_mem, se_mem->se_page,
+ se_mem->se_off, se_mem->se_len);
+#endif
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+
+ req->rd_size -= length;
+ if (!(req->rd_size))
+ goto out;
+
+ if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+ printk("page: %u in same page table\n",
+ req->rd_page);
+#endif
+ continue;
+ }
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "getting new page table for page: %u\n",
+ req->rd_page);
+#endif
+ table = rd_get_sg_table(dev, req->rd_page);
+ if (!(table))
+ return -1;
+
+ sg_s = &table->sg_table[j = 0];
+ }
+
+out:
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+ printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+ *se_mem_cnt);
+#endif
+ return 0;
+}
+
+/* rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+static int rd_DIRECT_do_se_mem_map(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset_in)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct rd_request *req = RD_REQ(task);
+ u32 task_offset = *task_offset_in;
+ unsigned long long lba;
+ int ret;
+
+ req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+ PAGE_SIZE);
+ lba = task->task_lba;
+ req->rd_offset = (do_div(lba,
+ (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+ DEV_ATTRIB(task->se_dev)->block_size;
+ req->rd_size = task->task_size;
+
+ if (req->rd_offset)
+ ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+ task_offset_in);
+ else
+ ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+ task_offset_in);
+
+ if (ret < 0)
+ return ret;
+
+ if (CMD_TFO(cmd)->task_sg_chaining == 0)
+ return 0;
+ /*
+ * Currently prevent writers from multiple HW fabrics doing
+ * pci_map_sg() to RD_DR's internal scatterlist memory.
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ printk(KERN_ERR "DMA_TO_DEVICE not supported for"
+ " RAMDISK_DR with task_sg_chaining=1\n");
+ return -1;
+ }
+ /*
+ * Special case: if task_sg_chaining is enabled, then
+ * we setup struct se_task->task_sg[], as it will be used by
+ * transport_do_task_sg_chain() for creating chained SGLs
+ * across multiple struct se_task->task_sg[].
+ */
+ if (!(transport_calc_sg_num(task,
+ list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list),
+ task_offset)))
+ return -1;
+
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+ list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list),
+ out_se_mem, se_mem_cnt, task_offset_in);
+}
+
+/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_DIRECT_do_task(struct se_task *task)
+{
+ /*
+ * At this point the locally allocated RD tables have been mapped
+ * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+ */
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/* rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+ kfree(RD_REQ(task));
+}
+
+enum {
+ Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+ {Opt_rd_pages, "rd_pages=%d"},
+ {Opt_err, NULL}
+};
+
+static ssize_t rd_set_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ const char *page,
+ ssize_t count)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_rd_pages:
+ match_int(args, &arg);
+ rd_dev->rd_page_count = arg;
+ printk(KERN_INFO "RAMDISK: Referencing Page"
+ " Count: %u\n", rd_dev->rd_page_count);
+ rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+ if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+ printk(KERN_INFO "Missing rd_pages= parameter\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+ struct se_hba *hba,
+ struct se_subsystem_dev *se_dev,
+ char *b)
+{
+ struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
+ rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+ "rd_direct" : "rd_mcp");
+ bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
+ " SG_table_count: %u\n", rd_dev->rd_page_count,
+ PAGE_SIZE, rd_dev->sg_table_count);
+ return bl;
+}
+
+/* rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *rd_get_cdb(struct se_task *task)
+{
+ struct rd_request *req = RD_REQ(task);
+
+ return req->rd_scsi_cdb;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+ return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = dev->dev_ptr;
+ unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+ DEV_ATTRIB(dev)->block_size) - 1;
+
+ return blocks_long;
+}
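+
+/*
+ * Illustrative sketch only (hypothetical helper): the capacity
+ * calculation from rd_get_blocks() above with the page count and block
+ * size as plain parameters.  With the default 512 byte block size and 4k
+ * pages, a 4096 page ramdisk holds 32768 blocks, so its highest
+ * addressable LBA is 32767.
+ */
+static inline sector_t rd_example_last_lba(u32 page_count, u32 block_size)
+{
+        return (page_count * PAGE_SIZE / block_size) - 1;
+}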
+
+static struct se_subsystem_api rd_dr_template = {
+ .name = "rd_dr",
+ .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
+ .attach_hba = rd_attach_hba,
+ .detach_hba = rd_detach_hba,
+ .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
+ .create_virtdevice = rd_DIRECT_create_virtdevice,
+ .free_device = rd_free_device,
+ .alloc_task = rd_alloc_task,
+ .do_task = rd_DIRECT_do_task,
+ .free_task = rd_free_task,
+ .check_configfs_dev_params = rd_check_configfs_dev_params,
+ .set_configfs_dev_params = rd_set_configfs_dev_params,
+ .show_configfs_dev_params = rd_show_configfs_dev_params,
+ .get_cdb = rd_get_cdb,
+ .get_device_rev = rd_get_device_rev,
+ .get_device_type = rd_get_device_type,
+ .get_blocks = rd_get_blocks,
+ .do_se_mem_map = rd_DIRECT_do_se_mem_map,
+};
+
+static struct se_subsystem_api rd_mcp_template = {
+ .name = "rd_mcp",
+ .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
+ .attach_hba = rd_attach_hba,
+ .detach_hba = rd_detach_hba,
+ .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
+ .create_virtdevice = rd_MEMCPY_create_virtdevice,
+ .free_device = rd_free_device,
+ .alloc_task = rd_alloc_task,
+ .do_task = rd_MEMCPY_do_task,
+ .free_task = rd_free_task,
+ .check_configfs_dev_params = rd_check_configfs_dev_params,
+ .set_configfs_dev_params = rd_set_configfs_dev_params,
+ .show_configfs_dev_params = rd_show_configfs_dev_params,
+ .get_cdb = rd_get_cdb,
+ .get_device_rev = rd_get_device_rev,
+ .get_device_type = rd_get_device_type,
+ .get_blocks = rd_get_blocks,
+};
+
+int __init rd_module_init(void)
+{
+ int ret;
+
+ ret = transport_subsystem_register(&rd_dr_template);
+ if (ret < 0)
+ return ret;
+
+ ret = transport_subsystem_register(&rd_mcp_template);
+ if (ret < 0) {
+ transport_subsystem_release(&rd_dr_template);
+ return ret;
+ }
+
+ return 0;
+}
+
+void rd_module_exit(void)
+{
+ transport_subsystem_release(&rd_dr_template);
+ transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 000000000000..13badfbaf9c0
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION "v4.0"
+#define RD_DR_VERSION "4.0"
+#define RD_MCP_VERSION "4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE 65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH 256
+#define RD_DEVICE_QUEUE_DEPTH 32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE 512
+#define RD_MAX_SECTORS 1024
+
+extern struct kmem_cache *se_mem_cache;
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB 0x01
+#define RRF_GOT_LBA 0x02
+
+struct rd_request {
+ struct se_task rd_task;
+
+ /* SCSI CDB from iSCSI Command PDU */
+ unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+ /* Offset from start of page */
+ u32 rd_offset;
+ /* Starting page in Ramdisk for request */
+ u32 rd_page;
+ /* Total number of pages needed for request */
+ u32 rd_page_count;
+ /* Remaining request size in bytes */
+ u32 rd_size;
+ /* Ramdisk device */
+ struct rd_dev *rd_dev;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+ u32 page_start_offset;
+ u32 page_end_offset;
+ u32 rd_sg_count;
+ struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT 0x01
+
+struct rd_dev {
+ int rd_direct;
+ u32 rd_flags;
+ /* Unique Ramdisk Device ID in Ramdisk HBA */
+ u32 rd_dev_id;
+ /* Total page count for ramdisk device */
+ u32 rd_page_count;
+ /* Number of SG tables in sg_table_array */
+ u32 sg_table_count;
+ u32 rd_queue_depth;
+ /* Array of struct rd_dev_sg_table containing scatterlists */
+ struct rd_dev_sg_table *sg_table_array;
+ /* Ramdisk HBA the device is connected to */
+ struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+ u32 rd_host_dev_id_count;
+ u32 rd_host_id; /* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 000000000000..dc6fed037ab3
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * Filename: target_core_scdb.c
+ *
+ * This file contains the generic target engine Split CDB related functions.
+ *
+ * Copyright (c) 2004-2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_scdb.h"
+
+/* split_cdb_XX_6():
+ *
+ * 21-bit LBA w/ 8-bit SECTORS
+ */
+void split_cdb_XX_6(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ cdb[1] = (lba >> 16) & 0x1f;
+ cdb[2] = (lba >> 8) & 0xff;
+ cdb[3] = lba & 0xff;
+ cdb[4] = *sectors & 0xff;
+}
+
+/* split_cdb_XX_10():
+ *
+ * 32-bit LBA w/ 16-bit SECTORS
+ */
+void split_cdb_XX_10(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be16(*sectors, &cdb[7]);
+}
+
+/* split_cdb_XX_12():
+ *
+ * 32-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_12(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be32(*sectors, &cdb[6]);
+}
+
+/* split_cdb_XX_16():
+ *
+ * 64-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_16(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be64(lba, &cdb[2]);
+ put_unaligned_be32(*sectors, &cdb[10]);
+}
+
+/*
+ * split_cdb_XX_32():
+ *
+ * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
+ */
+void split_cdb_XX_32(
+ unsigned long long lba,
+ u32 *sectors,
+ unsigned char *cdb)
+{
+ put_unaligned_be64(lba, &cdb[12]);
+ put_unaligned_be32(*sectors, &cdb[28]);
+}
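+
+/*
+ * Illustrative sketch only (hypothetical helper): combining an opcode
+ * with one of the split helpers above to build a complete 10-byte CDB,
+ * here READ(10) for LBA 0x1000 and 8 sectors.  split_cdb_XX_10() fills
+ * bytes 2-5 (LBA) and 7-8 (transfer length); everything else stays zero.
+ */
+static inline void scdb_example_build_read_10(unsigned char *cdb)
+{
+        u32 sectors = 8;
+
+        memset(cdb, 0, 10);
+        cdb[0] = READ_10;
+        split_cdb_XX_10(0x1000, &sectors, cdb);
+}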
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 000000000000..98cd1c01ed83
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
+#ifndef TARGET_CORE_SCDB_H
+#define TARGET_CORE_SCDB_H
+
+extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+
+#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 000000000000..158cecbec718
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,404 @@
+/*******************************************************************************
+ * Filename: target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define DEBUG_LUN_RESET
+#ifdef DEBUG_LUN_RESET
+#define DEBUG_LR(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_LR(x...)
+#endif
+
+struct se_tmr_req *core_tmr_alloc_req(
+ struct se_cmd *se_cmd,
+ void *fabric_tmr_ptr,
+ u8 function)
+{
+ struct se_tmr_req *tmr;
+
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+ if (!(tmr)) {
+ printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tmr->task_cmd = se_cmd;
+ tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+ tmr->function = function;
+ INIT_LIST_HEAD(&tmr->tmr_list);
+
+ return tmr;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
+void core_tmr_release_req(
+ struct se_tmr_req *tmr)
+{
+ struct se_device *dev = tmr->tmr_dev;
+
+ spin_lock(&dev->se_tmr_lock);
+ list_del(&tmr->tmr_list);
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ spin_unlock(&dev->se_tmr_lock);
+}
+
+static void core_tmr_handle_tas_abort(
+ struct se_node_acl *tmr_nacl,
+ struct se_cmd *cmd,
+ int tas,
+ int fe_count)
+{
+ if (!(fe_count)) {
+ transport_cmd_finish_abort(cmd, 1);
+ return;
+ }
+ /*
+ * TASK ABORTED status (TAS) bit support
+ */
+ if (((tmr_nacl != NULL) &&
+ (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+ transport_send_task_abort(cmd);
+
+ transport_cmd_finish_abort(cmd, 0);
+}
+
+int core_tmr_lun_reset(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *prout_cmd)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr, *qr_tmp;
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
+ struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_tmr_req *tmr_p, *tmr_pp;
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int fe_count, state, tas;
+ /*
+ * TASK_ABORTED status bit; this is configurable via ConfigFS
+ * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ tas = DEV_ATTRIB(dev)->emulate_tas;
+ /*
+ * Determine if this se_tmr is coming from a $FABRIC_MOD
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+ tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+ tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+ TPG_TFO(tmr_tpg)->get_fabric_name(),
+ tmr_nacl->initiatorname);
+ }
+ }
+ DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ TRANSPORT(dev)->name, tas);
+ /*
+ * Release all pending and outgoing TMRs aside from the received
+ * LUN_RESET TMR.
+ */
+ spin_lock(&dev->se_tmr_lock);
+ list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+ /*
+ * Allow the received TMR to return with FUNCTION_COMPLETE.
+ */
+ if (tmr && (tmr_p == tmr))
+ continue;
+
+ cmd = tmr_p->task_cmd;
+ if (!(cmd)) {
+ printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+ continue;
+ }
+ /*
+ * If this function was called with a valid pr_res_key
+ * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service action),
+ * skip TMRs that do not match the registration key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ spin_unlock(&dev->se_tmr_lock);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock(&dev->se_tmr_lock);
+ continue;
+ }
+ if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock(&dev->se_tmr_lock);
+ continue;
+ }
+ DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+ " Response: 0x%02x, t_state: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ tmr_p->function, tmr_p->response, cmd->t_state);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_cmd_finish_abort_tmr(cmd);
+ spin_lock(&dev->se_tmr_lock);
+ }
+ spin_unlock(&dev->se_tmr_lock);
+ /*
+ * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+ * This is following sam4r17, section 5.6 Aborting commands, Table 38
+ * for TMR LUN_RESET:
+ *
+ * a) "Yes" indicates that each command that is aborted on an I_T nexus
+ * other than the one that caused the SCSI device condition is
+ * completed with TASK ABORTED status, if the TAS bit is set to one in
+ * the Control mode page (see SPC-4). "No" indicates that no status is
+ * returned for aborted commands.
+ *
+ * d) If the logical unit reset is caused by a particular I_T nexus
+ * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+ * (TASK_ABORTED status) applies.
+ *
+ * Otherwise (e.g., if triggered by a hard reset), "no"
+ * (no TASK_ABORTED SAM status) applies.
+ *
+ * Note that this seems to be independent of TAS (Task Aborted Status)
+ * in the Control Mode Page.
+ */
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+ t_state_list) {
+ if (!(TASK_CMD(task))) {
+ printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ continue;
+ }
+ cmd = TASK_CMD(task);
+
+ if (!T_TASK(cmd)) {
+ printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+ " %p ITT: 0x%08x\n", task, cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ continue;
+ }
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ list_del(&task->t_state_list);
+ atomic_set(&task->task_state_active, 0);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+ " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
+ "def_t_state: %d/%d cdb: 0x%02x\n",
+ (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+ CMD_TFO(cmd)->get_task_tag(cmd), 0,
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ " t_task_cdbs: %d t_task_cdbs_left: %d"
+ " t_task_cdbs_sent: %d -- t_transport_active: %d"
+ " t_transport_stop: %d t_transport_sent: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+ " for dev: %p\n", task, dev);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+ " dev: %p\n", task, dev);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ }
+ __transport_stop_task_timer(task, &flags);
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+ " t_task_cdbs_ex_left: %d\n", task, dev,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+ DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ " task: %p, t_fe_count: %d dev: %p\n", task,
+ fe_count, dev);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ " t_fe_count: %d dev: %p\n", task, fe_count, dev);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Release all commands remaining in the struct se_device cmd queue.
+ *
+ * This follows the same logic as above for the struct se_device
+ * struct se_task state list, where commands are returned with
+ * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+ * reference, otherwise the struct se_cmd is released.
+ */
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
+ cmd = (struct se_cmd *)qr->cmd;
+ if (!(cmd)) {
+ /*
+ * Skip these for non PREEMPT_AND_ABORT usage..
+ */
+ if (preempt_and_abort_list != NULL)
+ continue;
+
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ kfree(qr);
+ continue;
+ }
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if ((preempt_and_abort_list != NULL) &&
+ (core_scsi3_check_cdb_abort_and_preempt(
+ preempt_and_abort_list, cmd) != 0))
+ continue;
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ state = qr->state;
+ kfree(qr);
+
+ DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+ " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+ "Preempt" : "", cmd, state,
+ atomic_read(&T_TASK(cmd)->t_fe_count));
+ /*
+ * Signal that the command has failed via cmd->se_cmd_flags,
+ * and call TFO->new_cmd_failure() to wakeup any fabric
+ * dependent code used to wait for unsolicited data out
+ * allocation to complete. The fabric module is expected
+ * to dump any remaining unsolicited data out for the aborted
+ * command at this point.
+ */
+ transport_new_cmd_failure(cmd);
+
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+ atomic_read(&T_TASK(cmd)->t_fe_count));
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ /*
+ * Clear any legacy SPC-2 reservation when called during
+ * LOGICAL UNIT RESET
+ */
+ if (!(preempt_and_abort_list) &&
+ (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+ spin_lock(&dev->dev_reservation_lock);
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ spin_unlock(&dev->dev_reservation_lock);
+ printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ }
+
+ spin_lock(&dev->stats_lock);
+ dev->num_resets++;
+ spin_unlock(&dev->stats_lock);
+
+ DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ TRANSPORT(dev)->name);
+ return 0;
+}
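
As an illustrative sketch (not part of the patch), a fabric module could drive a LUN_RESET through the two exported helpers above roughly as follows; TMR_LUN_RESET is assumed to be provided by target_core_tmr.h, and se_cmd/dev are assumed to have been set up by the fabric already:

static int example_issue_lun_reset(struct se_cmd *se_cmd, struct se_device *dev)
{
        struct se_tmr_req *tmr;

        /* IS_ERR()/PTR_ERR() assumed available via <linux/err.h> */
        tmr = core_tmr_alloc_req(se_cmd, NULL, TMR_LUN_RESET);
        if (IS_ERR(tmr))
                return PTR_ERR(tmr);
        /* No PREEMPT_AND_ABORT list and no PROUT command to preserve here */
        return core_tmr_lun_reset(dev, tmr, NULL, NULL);
}
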
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 000000000000..c26f67467623
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,839 @@
+/*******************************************************************************
+ * Filename: target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_hba.h"
+
+/* core_clear_initiator_node_from_tpg():
+ *
+ *
+ */
+static void core_clear_initiator_node_from_tpg(
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ int i;
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+ struct se_lun_acl *acl, *acl_tmp;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+
+ if (!deve->se_lun) {
+ printk(KERN_ERR "%s device entries device pointer is"
+ " NULL, but Initiator has access.\n",
+ TPG_TFO(tpg)->get_fabric_name());
+ continue;
+ }
+
+ lun = deve->se_lun;
+ spin_unlock_irq(&nacl->device_list_lock);
+ core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+ spin_lock(&lun->lun_acl_lock);
+ list_for_each_entry_safe(acl, acl_tmp,
+ &lun->lun_acl_list, lacl_list) {
+ if (!(strcmp(acl->initiatorname,
+ nacl->initiatorname)) &&
+ (acl->mapped_lun == deve->mapped_lun))
+ break;
+ }
+
+ if (!acl) {
+ printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+ " mapped_lun: %u\n", nacl->initiatorname,
+ deve->mapped_lun);
+ spin_unlock(&lun->lun_acl_lock);
+ spin_lock_irq(&nacl->device_list_lock);
+ continue;
+ }
+
+ list_del(&acl->lacl_list);
+ spin_unlock(&lun->lun_acl_lock);
+
+ spin_lock_irq(&nacl->device_list_lock);
+ kfree(acl);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/* __core_tpg_get_initiator_node_acl():
+ *
+ * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+ struct se_portal_group *tpg,
+ const char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (!(strcmp(acl->initiatorname, initiatorname)))
+ return acl;
+ }
+
+ return NULL;
+}
+
+/* core_tpg_get_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (!(strcmp(acl->initiatorname, initiatorname)) &&
+ (!(acl->dynamic_node_acl))) {
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return acl;
+ }
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return NULL;
+}
+
+/* core_tpg_add_node_to_devs():
+ *
+ *
+ */
+void core_tpg_add_node_to_devs(
+ struct se_node_acl *acl,
+ struct se_portal_group *tpg)
+{
+ int i = 0;
+ u32 lun_access = 0;
+ struct se_lun *lun;
+ struct se_device *dev;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &tpg->tpg_lun_list[i];
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ continue;
+
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ dev = lun->lun_se_dev;
+ /*
+ * By default in LIO-Target $FABRIC_MOD,
+ * demo_mode_write_protect is ON, or READ_ONLY;
+ */
+ if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ /*
+ * Allow only optical drives to issue R/W in default RO
+ * demo mode.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+ lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ }
+
+ printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ " access for LUN in Demo Mode\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+ "READ-WRITE" : "READ-ONLY");
+
+ core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg, 1);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/* core_set_queue_depth_for_node():
+ *
+ *
+ */
+static int core_set_queue_depth_for_node(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl)
+{
+ if (!acl->queue_depth) {
+ printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+ "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+ acl->initiatorname);
+ acl->queue_depth = 1;
+ }
+
+ return 0;
+}
+
+/* core_create_device_list_for_node():
+ *
+ *
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+ int i;
+
+ nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
+ TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+ if (!(nacl->device_list)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_node_acl->device_list\n");
+ return -1;
+ }
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = &nacl->device_list[i];
+
+ atomic_set(&deve->ua_count, 0);
+ atomic_set(&deve->pr_ref_count, 0);
+ spin_lock_init(&deve->ua_lock);
+ INIT_LIST_HEAD(&deve->alua_port_list);
+ INIT_LIST_HEAD(&deve->ua_list);
+ }
+
+ return 0;
+}
+
+/* core_tpg_check_initiator_node_acl()
+ *
+ *
+ */
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname)
+{
+ struct se_node_acl *acl;
+
+ acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if ((acl))
+ return acl;
+
+ if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+ return NULL;
+
+ acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
+ if (!(acl))
+ return NULL;
+
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ spin_lock_init(&acl->device_list_lock);
+ spin_lock_init(&acl->nacl_sess_lock);
+ atomic_set(&acl->acl_pr_ref_count, 0);
+ acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+ spin_lock_init(&acl->stats_lock);
+ acl->dynamic_node_acl = 1;
+
+ TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+ if (core_create_device_list_for_node(acl) < 0) {
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return NULL;
+ }
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ core_free_device_list_for_node(acl, tpg);
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return NULL;
+ }
+
+ core_tpg_add_node_to_devs(acl, tpg);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ tpg->num_node_acls++;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+ return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+ while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+ cpu_relax();
+}
+
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+ int i, ret;
+ struct se_lun *lun;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &tpg->tpg_lun_list[i];
+
+ if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+ (lun->lun_se_dev == NULL))
+ continue;
+
+ spin_unlock(&tpg->tpg_lun_lock);
+ ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/* core_tpg_add_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+ struct se_portal_group *tpg,
+ struct se_node_acl *se_nacl,
+ const char *initiatorname,
+ u32 queue_depth)
+{
+ struct se_node_acl *acl = NULL;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if ((acl)) {
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+ " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+ spin_unlock_bh(&tpg->acl_node_lock);
+ /*
+ * Release the locally allocated struct se_node_acl
+ * because core_tpg_add_initiator_node_acl() returned
+ * a pointer to an existing demo mode node ACL.
+ */
+ if (se_nacl)
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+ se_nacl);
+ goto done;
+ }
+
+ printk(KERN_ERR "ACL entry for %s Initiator"
+ " Node %s already exists for TPG %u, ignoring"
+ " request.\n", TPG_TFO(tpg)->get_fabric_name(),
+ initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return ERR_PTR(-EEXIST);
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ if (!(se_nacl)) {
+ printk("struct se_node_acl pointer is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * For v4.x logic the se_node_acl_s is hanging off a fabric
+ * dependent structure allocated via
+ * struct target_core_fabric_ops->fabric_make_nodeacl()
+ */
+ acl = se_nacl;
+
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ spin_lock_init(&acl->device_list_lock);
+ spin_lock_init(&acl->nacl_sess_lock);
+ atomic_set(&acl->acl_pr_ref_count, 0);
+ acl->queue_depth = queue_depth;
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+ spin_lock_init(&acl->stats_lock);
+
+ TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+ if (core_create_device_list_for_node(acl) < 0) {
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ core_free_device_list_for_node(acl, tpg);
+ TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ tpg->num_node_acls++;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+done:
+ printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+ return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
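
A hedged sketch of how a fabric module's ->fabric_make_nodeacl() path might use the helper above; the example_make_nodeacl() wrapper and the queue depth of 32 are illustrative assumptions, not taken from this patch:

static struct se_node_acl *example_make_nodeacl(struct se_portal_group *tpg,
                                                struct se_node_acl *se_nacl,
                                                const char *name)
{
        /* se_nacl is embedded in the fabric module's own ACL structure */
        return core_tpg_add_initiator_node_acl(tpg, se_nacl, name, 32);
}
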
+
+/* core_tpg_del_initiator_node_acl():
+ *
+ *
+ */
+int core_tpg_del_initiator_node_acl(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ int force)
+{
+ struct se_session *sess, *sess_tmp;
+ int dynamic_acl = 0;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ dynamic_acl = 1;
+ }
+ list_del(&acl->acl_list);
+ tpg->num_node_acls--;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_bh(&tpg->session_lock);
+ list_for_each_entry_safe(sess, sess_tmp,
+ &tpg->tpg_sess_list, sess_list) {
+ if (sess->se_node_acl != acl)
+ continue;
+ /*
+ * Determine if the session needs to be closed by our context.
+ */
+ if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ continue;
+
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+ * forcefully shutdown the $FABRIC_MOD session/nexus.
+ */
+ TPG_TFO(tpg)->close_session(sess);
+
+ spin_lock_bh(&tpg->session_lock);
+ }
+ spin_unlock_bh(&tpg->session_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(acl);
+ core_clear_initiator_node_from_tpg(acl, tpg);
+ core_free_device_list_for_node(acl, tpg);
+
+ printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+ TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/* core_tpg_set_initiator_node_queue_depth():
+ *
+ *
+ */
+int core_tpg_set_initiator_node_queue_depth(
+ struct se_portal_group *tpg,
+ unsigned char *initiatorname,
+ u32 queue_depth,
+ int force)
+{
+ struct se_session *sess, *init_sess = NULL;
+ struct se_node_acl *acl;
+ int dynamic_acl = 0;
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ if (!(acl)) {
+ printk(KERN_ERR "Access Control List entry for %s Initiator"
+ " Node %s does not exists for TPG %hu, ignoring"
+ " request.\n", TPG_TFO(tpg)->get_fabric_name(),
+ initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -ENODEV;
+ }
+ if (acl->dynamic_node_acl) {
+ acl->dynamic_node_acl = 0;
+ dynamic_acl = 1;
+ }
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ spin_lock_bh(&tpg->session_lock);
+ list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+ if (sess->se_node_acl != acl)
+ continue;
+
+ if (!force) {
+ printk(KERN_ERR "Unable to change queue depth for %s"
+ " Initiator Node: %s while session is"
+ " operational. To forcefully change the queue"
+ " depth and force session reinstatement"
+ " use the \"force=1\" parameter.\n",
+ TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ spin_unlock_bh(&tpg->session_lock);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -EEXIST;
+ }
+ /*
+ * Determine if the session needs to be closed by our context.
+ */
+ if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ continue;
+
+ init_sess = sess;
+ break;
+ }
+
+ /*
+ * User has requested to change the queue depth for an Initiator Node.
+ * Change the value in the Node's struct se_node_acl, and call
+ * core_set_queue_depth_for_node() to add the requested queue depth.
+ *
+ * Finally call TPG_TFO(tpg)->close_session() to force session
+ * reinstatement to occur if there is an active session for the
+ * $FABRIC_MOD Initiator Node in question.
+ */
+ acl->queue_depth = queue_depth;
+
+ if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * Force session reinstatement if
+ * core_set_queue_depth_for_node() failed, because we assume
+ * the $FABRIC_MOD has already set the session reinstatement
+ * bit from TPG_TFO(tpg)->shutdown_session() called above.
+ */
+ if (init_sess)
+ TPG_TFO(tpg)->close_session(init_sess);
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&tpg->session_lock);
+ /*
+ * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+ * forcefully shutdown the $FABRIC_MOD session/nexus.
+ */
+ if (init_sess)
+ TPG_TFO(tpg)->close_session(init_sess);
+
+ printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+ " Node: %s on %s Target Portal Group: %u\n", queue_depth,
+ initiatorname, TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+ spin_lock_bh(&tpg->acl_node_lock);
+ if (dynamic_acl)
+ acl->dynamic_node_acl = 1;
+ spin_unlock_bh(&tpg->acl_node_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+ /* Set in core_dev_setup_virtual_lun0() */
+ struct se_device *dev = se_global->g_lun0_dev;
+ struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+ u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ int ret;
+
+ lun->unpacked_lun = 0;
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_acl_list);
+ INIT_LIST_HEAD(&lun->lun_cmd_list);
+ spin_lock_init(&lun->lun_acl_lock);
+ spin_lock_init(&lun->lun_cmd_lock);
+ spin_lock_init(&lun->lun_sep_lock);
+
+ ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+ struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+ core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+ struct target_core_fabric_ops *tfo,
+ struct se_wwn *se_wwn,
+ struct se_portal_group *se_tpg,
+ void *tpg_fabric_ptr,
+ int se_tpg_type)
+{
+ struct se_lun *lun;
+ u32 i;
+
+ se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
+ TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+ if (!(se_tpg->tpg_lun_list)) {
+ printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+ "tpg_lun_list\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = &se_tpg->tpg_lun_list[i];
+ lun->unpacked_lun = i;
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_acl_list);
+ INIT_LIST_HEAD(&lun->lun_cmd_list);
+ spin_lock_init(&lun->lun_acl_lock);
+ spin_lock_init(&lun->lun_cmd_lock);
+ spin_lock_init(&lun->lun_sep_lock);
+ }
+
+ se_tpg->se_tpg_type = se_tpg_type;
+ se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+ se_tpg->se_tpg_tfo = tfo;
+ se_tpg->se_tpg_wwn = se_wwn;
+ atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+ INIT_LIST_HEAD(&se_tpg->acl_node_list);
+ INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+ INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+ spin_lock_init(&se_tpg->acl_node_lock);
+ spin_lock_init(&se_tpg->session_lock);
+ spin_lock_init(&se_tpg->tpg_lun_lock);
+
+ if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+ if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+ kfree(se_tpg->tpg_lun_list);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_bh(&se_global->se_tpg_lock);
+ list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
+ spin_unlock_bh(&se_global->se_tpg_lock);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+ " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+ (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+ "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+ "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+ struct se_node_acl *nacl, *nacl_tmp;
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+ " for endpoint: %s Portal Tag %u\n",
+ TPG_TFO(se_tpg)->get_fabric_name(),
+ (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? "Normal" : "Discovery",
+ TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
+ TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+ spin_lock_bh(&se_global->se_tpg_lock);
+ list_del(&se_tpg->se_tpg_list);
+ spin_unlock_bh(&se_global->se_tpg_lock);
+
+ while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+ cpu_relax();
+ /*
+ * Release any remaining demo-mode generated se_node_acl that have
+ * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+ * in transport_deregister_session().
+ */
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+ acl_list) {
+ list_del(&nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ }
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+ core_tpg_release_virtual_lun0(se_tpg);
+
+ se_tpg->se_tpg_fabric_ptr = NULL;
+ kfree(se_tpg->tpg_lun_list);
+ return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun)
+{
+ struct se_lun *lun;
+
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(),
+ unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return ERR_PTR(-EOVERFLOW);
+ }
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+ if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+ " on %s Target Portal Group: %u, ignoring request.\n",
+ unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return ERR_PTR(-EINVAL);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+int core_tpg_post_addlun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+ u32 lun_access,
+ void *lun_ptr)
+{
+ if (core_dev_export(lun_ptr, tpg, lun) < 0)
+ return -1;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun->lun_access = lun_access;
+ lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return 0;
+}
+
+static void core_tpg_shutdown_lun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ core_clear_lun_from_tpg(lun, tpg);
+ transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+ struct se_portal_group *tpg,
+ u32 unpacked_lun,
+ int *ret)
+{
+ struct se_lun *lun;
+
+ if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+ printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ "-1: %u for Target Portal Group: %u\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TRANSPORT_MAX_LUNS_PER_TPG-1,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ return ERR_PTR(-EOVERFLOW);
+ }
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun = &tpg->tpg_lun_list[unpacked_lun];
+ if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+ printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ " Target Portal Group: %u, ignoring request.\n",
+ TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ TPG_TFO(tpg)->tpg_get_tag(tpg));
+ spin_unlock(&tpg->tpg_lun_lock);
+ return ERR_PTR(-ENODEV);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return lun;
+}
+
+int core_tpg_post_dellun(
+ struct se_portal_group *tpg,
+ struct se_lun *lun)
+{
+ core_tpg_shutdown_lun(tpg, lun);
+
+ core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+ spin_lock(&tpg->tpg_lun_lock);
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return 0;
+}
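
For reference, a minimal sketch (not part of the patch) of a fabric module setting up and tearing down a normal TPG with the two exported functions above; tf_ops, wwn and fabric_tpg stand in for fabric-private objects:

static int example_make_tpg(struct target_core_fabric_ops *tf_ops,
                            struct se_wwn *wwn,
                            struct se_portal_group *se_tpg,
                            void *fabric_tpg)
{
        /* se_tpg is embedded in the fabric module's own TPG structure */
        return core_tpg_register(tf_ops, wwn, se_tpg, fabric_tpg,
                                 TRANSPORT_TPG_TYPE_NORMAL);
}

static void example_drop_tpg(struct se_portal_group *se_tpg)
{
        core_tpg_deregister(se_tpg);
}
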
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 000000000000..236e22d8cfae
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6164 @@
+/*******************************************************************************
+ * Filename: target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_scdb.h"
+#include "target_core_ua.h"
+
+/* #define DEBUG_CDB_HANDLER */
+#ifdef DEBUG_CDB_HANDLER
+#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CDB_H(x...)
+#endif
+
+/* #define DEBUG_CMD_MAP */
+#ifdef DEBUG_CMD_MAP
+#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CMD_M(x...)
+#endif
+
+/* #define DEBUG_MEM_ALLOC */
+#ifdef DEBUG_MEM_ALLOC
+#define DEBUG_MEM(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM(x...)
+#endif
+
+/* #define DEBUG_MEM2_ALLOC */
+#ifdef DEBUG_MEM2_ALLOC
+#define DEBUG_MEM2(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM2(x...)
+#endif
+
+/* #define DEBUG_SG_CALC */
+#ifdef DEBUG_SG_CALC
+#define DEBUG_SC(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SC(x...)
+#endif
+
+/* #define DEBUG_SE_OBJ */
+#ifdef DEBUG_SE_OBJ
+#define DEBUG_SO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SO(x...)
+#endif
+
+/* #define DEBUG_CMD_VOL */
+#ifdef DEBUG_CMD_VOL
+#define DEBUG_VOL(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_VOL(x...)
+#endif
+
+/* #define DEBUG_CMD_STOP */
+#ifdef DEBUG_CMD_STOP
+#define DEBUG_CS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CS(x...)
+#endif
+
+/* #define DEBUG_PASSTHROUGH */
+#ifdef DEBUG_PASSTHROUGH
+#define DEBUG_PT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_PT(x...)
+#endif
+
+/* #define DEBUG_TASK_STOP */
+#ifdef DEBUG_TASK_STOP
+#define DEBUG_TS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TS(x...)
+#endif
+
+/* #define DEBUG_TRANSPORT_STOP */
+#ifdef DEBUG_TRANSPORT_STOP
+#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TRANSPORT_S(x...)
+#endif
+
+/* #define DEBUG_TASK_FAILURE */
+#ifdef DEBUG_TASK_FAILURE
+#define DEBUG_TF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TF(x...)
+#endif
+
+/* #define DEBUG_DEV_OFFLINE */
+#ifdef DEBUG_DEV_OFFLINE
+#define DEBUG_DO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_DO(x...)
+#endif
+
+/* #define DEBUG_TASK_STATE */
+#ifdef DEBUG_TASK_STATE
+#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TSTATE(x...)
+#endif
+
+/* #define DEBUG_STATUS_THR */
+#ifdef DEBUG_STATUS_THR
+#define DEBUG_ST(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_ST(x...)
+#endif
+
+/* #define DEBUG_TASK_TIMEOUT */
+#ifdef DEBUG_TASK_TIMEOUT
+#define DEBUG_TT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TT(x...)
+#endif
+
+/* #define DEBUG_GENERIC_REQUEST_FAILURE */
+#ifdef DEBUG_GENERIC_REQUEST_FAILURE
+#define DEBUG_GRF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_GRF(x...)
+#endif
+
+/* #define DEBUG_SAM_TASK_ATTRS */
+#ifdef DEBUG_SAM_TASK_ATTRS
+#define DEBUG_STA(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_STA(x...)
+#endif
+
+struct se_global *se_global;
+
+static struct kmem_cache *se_cmd_cache;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_tmr_req_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *se_mem_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+/* Used for transport_dev_get_map_*() */
+typedef int (*map_func_t)(struct se_task *, u32);
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *);
+static int __transport_execute_tasks(struct se_device *dev);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_direct_request_timeout(struct se_cmd *cmd);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+ unsigned long long starting_lba, u32 sectors,
+ enum dma_data_direction data_direction,
+ struct list_head *mem_list, int set_counts);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
+ u32 dma_size);
+static int transport_generic_remove(struct se_cmd *cmd,
+ int release_to_pool, int session_reinstatement);
+static int transport_get_sectors(struct se_cmd *cmd);
+static struct list_head *transport_init_se_mem_list(void);
+static int transport_map_sg_to_mem(struct se_cmd *cmd,
+ struct list_head *se_mem_list, void *in_mem,
+ u32 *se_mem_cnt);
+static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
+ unsigned char *dst, struct list_head *se_mem_list);
+static void transport_release_fe_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+ struct se_queue_obj *qobj);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_stop_all_task_timers(struct se_cmd *cmd);
+
+int transport_emulate_control_cdb(struct se_task *task);
+
+int init_se_global(void)
+{
+ struct se_global *global;
+
+ global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
+ if (!(global)) {
+ printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
+ return -1;
+ }
+
+ INIT_LIST_HEAD(&global->g_lu_gps_list);
+ INIT_LIST_HEAD(&global->g_se_tpg_list);
+ INIT_LIST_HEAD(&global->g_hba_list);
+ INIT_LIST_HEAD(&global->g_se_dev_list);
+ spin_lock_init(&global->g_device_lock);
+ spin_lock_init(&global->hba_lock);
+ spin_lock_init(&global->se_tpg_lock);
+ spin_lock_init(&global->lu_gps_lock);
+ spin_lock_init(&global->plugin_class_lock);
+
+ se_cmd_cache = kmem_cache_create("se_cmd_cache",
+ sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
+ if (!(se_cmd_cache)) {
+ printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+ goto out;
+ }
+ se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
+ sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
+ 0, NULL);
+ if (!(se_tmr_req_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+ " failed\n");
+ goto out;
+ }
+ se_sess_cache = kmem_cache_create("se_sess_cache",
+ sizeof(struct se_session), __alignof__(struct se_session),
+ 0, NULL);
+ if (!(se_sess_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_session"
+ " failed\n");
+ goto out;
+ }
+ se_ua_cache = kmem_cache_create("se_ua_cache",
+ sizeof(struct se_ua), __alignof__(struct se_ua),
+ 0, NULL);
+ if (!(se_ua_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+ goto out;
+ }
+ se_mem_cache = kmem_cache_create("se_mem_cache",
+ sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
+ if (!(se_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+ goto out;
+ }
+ t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+ sizeof(struct t10_pr_registration),
+ __alignof__(struct t10_pr_registration), 0, NULL);
+ if (!(t10_pr_reg_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+ " failed\n");
+ goto out;
+ }
+ t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+ sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+ 0, NULL);
+ if (!(t10_alua_lu_gp_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+ " failed\n");
+ goto out;
+ }
+ t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+ sizeof(struct t10_alua_lu_gp_member),
+ __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+ if (!(t10_alua_lu_gp_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+ "cache failed\n");
+ goto out;
+ }
+ t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+ sizeof(struct t10_alua_tg_pt_gp),
+ __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+ if (!(t10_alua_tg_pt_gp_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ "cache failed\n");
+ goto out;
+ }
+ t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+ "t10_alua_tg_pt_gp_mem_cache",
+ sizeof(struct t10_alua_tg_pt_gp_member),
+ __alignof__(struct t10_alua_tg_pt_gp_member),
+ 0, NULL);
+ if (!(t10_alua_tg_pt_gp_mem_cache)) {
+ printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ "mem_t failed\n");
+ goto out;
+ }
+
+ se_global = global;
+
+ return 0;
+out:
+ if (se_cmd_cache)
+ kmem_cache_destroy(se_cmd_cache);
+ if (se_tmr_req_cache)
+ kmem_cache_destroy(se_tmr_req_cache);
+ if (se_sess_cache)
+ kmem_cache_destroy(se_sess_cache);
+ if (se_ua_cache)
+ kmem_cache_destroy(se_ua_cache);
+ if (se_mem_cache)
+ kmem_cache_destroy(se_mem_cache);
+ if (t10_pr_reg_cache)
+ kmem_cache_destroy(t10_pr_reg_cache);
+ if (t10_alua_lu_gp_cache)
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+ if (t10_alua_lu_gp_mem_cache)
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+ if (t10_alua_tg_pt_gp_cache)
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+ if (t10_alua_tg_pt_gp_mem_cache)
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kfree(global);
+ return -1;
+}
+
+void release_se_global(void)
+{
+ struct se_global *global;
+
+ global = se_global;
+ if (!(global))
+ return;
+
+ kmem_cache_destroy(se_cmd_cache);
+ kmem_cache_destroy(se_tmr_req_cache);
+ kmem_cache_destroy(se_sess_cache);
+ kmem_cache_destroy(se_ua_cache);
+ kmem_cache_destroy(se_mem_cache);
+ kmem_cache_destroy(t10_pr_reg_cache);
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kfree(global);
+
+ se_global = NULL;
+}
+
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+ memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+ spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+ u32 new_index;
+
+ if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+ printk(KERN_ERR "Invalid index type %d\n", type);
+ return -EINVAL;
+ }
+
+ spin_lock(&scsi_index_table.lock);
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ if (new_index == 0)
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ spin_unlock(&scsi_index_table.lock);
+
+ return new_index;
+}
+
+void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+ atomic_set(&qobj->queue_cnt, 0);
+ INIT_LIST_HEAD(&qobj->qobj_list);
+ init_waitqueue_head(&qobj->thread_wq);
+ spin_lock_init(&qobj->cmd_queue_lock);
+}
+EXPORT_SYMBOL(transport_init_queue_obj);
+
+static int transport_subsystem_reqmods(void)
+{
+ int ret;
+
+ ret = request_module("target_core_iblock");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_iblock\n");
+
+ ret = request_module("target_core_file");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_file\n");
+
+ ret = request_module("target_core_pscsi");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_pscsi\n");
+
+ ret = request_module("target_core_stgt");
+ if (ret != 0)
+ printk(KERN_ERR "Unable to load target_core_stgt\n");
+
+ return 0;
+}
+
+int transport_subsystem_check_init(void)
+{
+ if (se_global->g_sub_api_initialized)
+ return 0;
+ /*
+ * Request the loading of known TCM subsystem plugins..
+ */
+ if (transport_subsystem_reqmods() < 0)
+ return -1;
+
+ se_global->g_sub_api_initialized = 1;
+ return 0;
+}
+
+struct se_session *transport_init_session(void)
+{
+ struct se_session *se_sess;
+
+ se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+ if (!(se_sess)) {
+ printk(KERN_ERR "Unable to allocate struct se_session from"
+ " se_sess_cache\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+
+ return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
+ */
+void __transport_register_session(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct se_session *se_sess,
+ void *fabric_sess_ptr)
+{
+ unsigned char buf[PR_REG_ISID_LEN];
+
+ se_sess->se_tpg = se_tpg;
+ se_sess->fabric_sess_ptr = fabric_sess_ptr;
+ /*
+ * Used by struct se_node_acl's under ConfigFS to locate the active struct se_session.
+ *
+ * Only set for struct se_session's that will actually be moving I/O.
+ * e.g., *NOT* discovery sessions.
+ */
+ if (se_nacl) {
+ /*
+ * If the fabric module supports an ISID based TransportID,
+ * save this value in binary from the fabric I_T Nexus now.
+ */
+ if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ memset(&buf[0], 0, PR_REG_ISID_LEN);
+ TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+ &buf[0], PR_REG_ISID_LEN);
+ se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+ }
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ /*
+ * The se_nacl->nacl_sess pointer will be set to the
+ * last active I_T Nexus for each struct se_node_acl.
+ */
+ se_nacl->nacl_sess = se_sess;
+
+ list_add_tail(&se_sess->sess_acl_list,
+ &se_nacl->acl_sess_list);
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ }
+ list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+ TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct se_session *se_sess,
+ void *fabric_sess_ptr)
+{
+ spin_lock_bh(&se_tpg->session_lock);
+ __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+ spin_unlock_bh(&se_tpg->session_lock);
+}
+EXPORT_SYMBOL(transport_register_session);
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+ struct se_node_acl *se_nacl;
+
+ /*
+ * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+ */
+ se_nacl = se_sess->se_node_acl;
+ if ((se_nacl)) {
+ spin_lock_irq(&se_nacl->nacl_sess_lock);
+ list_del(&se_sess->sess_acl_list);
+ /*
+ * If the session list is empty, then clear the pointer.
+ * Otherwise, set the struct se_session pointer from the tail
+ * element of the per struct se_node_acl active session list.
+ */
+ if (list_empty(&se_nacl->acl_sess_list))
+ se_nacl->nacl_sess = NULL;
+ else {
+ se_nacl->nacl_sess = container_of(
+ se_nacl->acl_sess_list.prev,
+ struct se_session, sess_acl_list);
+ }
+ spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ }
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+ kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+ struct se_portal_group *se_tpg = se_sess->se_tpg;
+ struct se_node_acl *se_nacl;
+
+ if (!(se_tpg)) {
+ transport_free_session(se_sess);
+ return;
+ }
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_del(&se_sess->sess_list);
+ se_sess->se_tpg = NULL;
+ se_sess->fabric_sess_ptr = NULL;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * Determine if we need to do extra work for this initiator node's
+ * struct se_node_acl if it had been previously dynamically generated.
+ */
+ se_nacl = se_sess->se_node_acl;
+ if ((se_nacl)) {
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ if (se_nacl->dynamic_node_acl) {
+ if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
+ se_tpg))) {
+ list_del(&se_nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(se_nacl);
+ core_free_device_list_for_node(se_nacl, se_tpg);
+ TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+ se_nacl);
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ }
+ }
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+ }
+
+ transport_free_session(se_sess);
+
+ printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ TPG_TFO(se_tpg)->get_fabric_name());
+}
+EXPORT_SYMBOL(transport_deregister_session);
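
A sketch of the session life cycle these exports imply, under the assumption of a hypothetical fabric login path that supplies tpg, initiator_name and fabric_sess; this is illustrative only and not taken from the patch:

static struct se_session *example_login(struct se_portal_group *tpg,
                                        unsigned char *initiator_name,
                                        void *fabric_sess)
{
        struct se_session *se_sess;
        struct se_node_acl *se_nacl;

        se_sess = transport_init_session();
        if (IS_ERR(se_sess))
                return NULL;

        se_nacl = core_tpg_check_initiator_node_acl(tpg, initiator_name);
        if (!se_nacl) {
                /* No explicit ACL and demo mode disabled: reject the login */
                transport_free_session(se_sess);
                return NULL;
        }
        se_sess->se_node_acl = se_nacl;

        transport_register_session(tpg, se_nacl, se_sess, fabric_sess);
        return se_sess;
        /* transport_deregister_session() undoes this at logout time */
}
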
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+ struct se_device *dev;
+ struct se_task *task;
+ unsigned long flags;
+
+ if (!T_TASK(cmd))
+ return;
+
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ dev = task->se_dev;
+ if (!(dev))
+ continue;
+
+ if (atomic_read(&task->task_active))
+ continue;
+
+ if (!(atomic_read(&task->task_state_active)))
+ continue;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_del(&task->t_state_list);
+ DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ atomic_set(&task->task_state_active, 0);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+ }
+}
+
+/* transport_cmd_check_stop():
+ *
+ * 'transport_off = 1' determines if t_transport_active should be cleared.
+ * 'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ * A non-zero u8 t_state sets cmd->t_state.
+ * Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+ struct se_cmd *cmd,
+ int transport_off,
+ u8 t_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * Determine if IOCTL context caller is requesting the stopping of this
+ * command for LUN shutdown purposes.
+ */
+ if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+ DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+ " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ cmd->deferred_t_state = cmd->t_state;
+ cmd->t_state = TRANSPORT_DEFERRED_CMD;
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ if (transport_off == 2)
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ return 1;
+ }
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ */
+ if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+ DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+ " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ cmd->deferred_t_state = cmd->t_state;
+ cmd->t_state = TRANSPORT_DEFERRED_CMD;
+ if (transport_off == 2)
+ transport_all_task_dev_remove_state(cmd);
+
+ /*
+ * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+ * to FE.
+ */
+ if (transport_off == 2)
+ cmd->se_lun = NULL;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&T_TASK(cmd)->t_transport_stop_comp);
+ return 1;
+ }
+ if (transport_off) {
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ if (transport_off == 2) {
+ transport_all_task_dev_remove_state(cmd);
+ /*
+ * Clear struct se_cmd->se_lun before the transport_off == 2
+ * handoff to fabric module.
+ */
+ cmd->se_lun = NULL;
+ /*
+ * Some fabric modules like tcm_loop can release their
+ * internally allocated I/O reference and struct se_cmd now.
+ */
+ if (CMD_TFO(cmd)->check_stop_free != NULL) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ CMD_TFO(cmd)->check_stop_free(cmd);
+ return 1;
+ }
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+ } else if (t_state)
+ cmd->t_state = t_state;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+ return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+ struct se_lun *lun = SE_LUN(cmd);
+ unsigned long flags;
+
+ if (!lun)
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto check_lun;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_free_dev_tasks(cmd);
+
+check_lun:
+ spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
+ list_del(&cmd->se_lun_list);
+ atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+#if 0
+ printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+ CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+#endif
+ }
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+ if (remove)
+ transport_generic_remove(cmd, 0, 0);
+}
+
+void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
+{
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+
+ transport_generic_remove(cmd, 0, 0);
+}
+
+static int transport_add_cmd_to_queue(
+ struct se_cmd *cmd,
+ int t_state)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_queue_req *qr;
+ unsigned long flags;
+
+ qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
+ if (!(qr)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " struct se_queue_req\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+
+ qr->cmd = (void *)cmd;
+ qr->state = t_state;
+
+ if (t_state) {
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ cmd->t_state = t_state;
+ atomic_set(&T_TASK(cmd)->t_transport_active, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ }
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ list_add_tail(&qr->qr_list, &qobj->qobj_list);
+ atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ atomic_inc(&qobj->queue_cnt);
+ wake_up_interruptible(&qobj->thread_wq);
+ return 0;
+}
+
+/*
+ * Called with struct se_queue_obj->cmd_queue_lock held.
+ */
+static struct se_queue_req *
+__transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr = NULL;
+
+ if (list_empty(&qobj->qobj_list))
+ return NULL;
+
+ list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+ break;
+
+ if (qr->cmd) {
+ cmd = (struct se_cmd *)qr->cmd;
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ }
+ list_del(&qr->qr_list);
+ atomic_dec(&qobj->queue_cnt);
+
+ return qr;
+}
+
+static struct se_queue_req *
+transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ if (list_empty(&qobj->qobj_list)) {
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ return NULL;
+ }
+
+ list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+ break;
+
+ if (qr->cmd) {
+ cmd = (struct se_cmd *)qr->cmd;
+ atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ }
+ list_del(&qr->qr_list);
+ atomic_dec(&qobj->queue_cnt);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ return qr;
+}
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+ struct se_queue_obj *qobj)
+{
+ struct se_cmd *q_cmd;
+ struct se_queue_req *qr = NULL, *qr_p = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
+ q_cmd = (struct se_cmd *)qr->cmd;
+ if (q_cmd != cmd)
+ continue;
+
+ atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&qr->qr_list);
+ kfree(qr);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
+ printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+ }
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+ struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+ struct se_task, t_list);
+
+ if (good) {
+ cmd->scsi_status = SAM_STAT_GOOD;
+ task->task_scsi_status = GOOD;
+ } else {
+ task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+ task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ TASK_CMD(task)->transport_error_status =
+ PYX_TRANSPORT_ILLEGAL_REQUEST;
+ }
+
+ transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
+
+/* transport_complete_task():
+ *
+ * Called from interrupt and non-interrupt context depending
+ * on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+ struct se_device *dev = task->se_dev;
+ int t_state;
+ unsigned long flags;
+#if 0
+ printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+ T_TASK(cmd)->t_task_cdb[0], dev);
+#endif
+ if (dev) {
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ atomic_inc(&dev->depth_left);
+ atomic_inc(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ }
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&task->task_active, 0);
+
+ /*
+ * See if any sense data exists, if so set the TASK_SENSE flag.
+ * Also check for any other post completion work that needs to be
+ * done by the plugins.
+ */
+ if (dev && dev->transport->transport_complete) {
+ if (dev->transport->transport_complete(task) != 0) {
+ cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+ task->task_sense = 1;
+ success = 1;
+ }
+ }
+
+ /*
+ * See if we are waiting for outstanding struct se_task
+ * to complete for an exception condition
+ */
+ if (atomic_read(&task->task_stop)) {
+ /*
+ * Decrement T_TASK(cmd)->t_se_count if this task had
+ * previously thrown its timeout exception handler.
+ */
+ if (atomic_read(&task->task_timeout)) {
+ atomic_dec(&T_TASK(cmd)->t_se_count);
+ atomic_set(&task->task_timeout, 0);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ complete(&task->task_stop_comp);
+ return;
+ }
+ /*
+ * If the task's timeout handler has fired, use the t_task_cdbs_timeout_left
+ * counter to determine when the struct se_cmd is ready to be queued to
+ * the processing thread.
+ */
+ if (atomic_read(&task->task_timeout)) {
+ if (!(atomic_dec_and_test(
+ &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return;
+ }
+ t_state = TRANSPORT_COMPLETE_TIMEOUT;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, t_state);
+ return;
+ }
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+
+ /*
+ * Decrement the outstanding t_task_cdbs_left count. The last
+ * struct se_task from struct se_cmd will complete itself into the
+ * device queue depending upon int success.
+ */
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ if (!success)
+ T_TASK(cmd)->t_tasks_failed = 1;
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ if (!success || T_TASK(cmd)->t_tasks_failed) {
+ t_state = TRANSPORT_COMPLETE_FAILURE;
+ if (!task->task_error_status) {
+ task->task_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->transport_error_status =
+ PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ } else {
+ atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+ t_state = TRANSPORT_COMPLETE_OK;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, t_state);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * of the struct se_device.
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+ struct se_task *task,
+ struct se_task *task_prev,
+ struct se_device *dev)
+{
+ /*
+ * No SAM Task attribute emulation enabled, add to tail of
+ * execution queue
+ */
+ if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+ list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+ return 0;
+ }
+ /*
+ * HEAD_OF_QUEUE attribute for received CDB, which means
+ * the first task that is associated with a struct se_cmd goes to
+ * head of the struct se_device->execute_task_list, and task_prev
+ * after that for each subsequent task
+ */
+ if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ list_add(&task->t_execute_list,
+ (task_prev != NULL) ?
+ &task_prev->t_execute_list :
+ &dev->execute_task_list);
+
+ DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+ " in execution queue\n",
+ T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+ return 1;
+ }
+ /*
+ * For ORDERED, SIMPLE or UNTAGGED attribute tasks, once they have been
+ * transitioned from Dormant -> Active state, they are added to the end
+ * of the struct se_device->execute_task_list.
+ */
+ list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+ return 0;
+}
+
+/* __transport_add_task_to_execute_queue():
+ *
+ * Called with se_dev_t->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+ struct se_task *task,
+ struct se_task *task_prev,
+ struct se_device *dev)
+{
+ int head_of_queue;
+
+ head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+ atomic_inc(&dev->execute_tasks);
+
+ if (atomic_read(&task->task_state_active))
+ return;
+ /*
+ * Determine if this task needs to go to HEAD_OF_QUEUE for the
+ * state list as well. Running with SAM Task Attribute emulation
+ * will always return head_of_queue == 0 here
+ */
+ if (head_of_queue)
+ list_add(&task->t_state_list, (task_prev) ?
+ &task_prev->t_state_list :
+ &dev->state_task_list);
+ else
+ list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+ atomic_set(&task->task_state_active, 1);
+
+ DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+ task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+ struct se_device *dev;
+ struct se_task *task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ dev = task->se_dev;
+
+ if (atomic_read(&task->task_state_active))
+ continue;
+
+ spin_lock(&dev->execute_task_lock);
+ list_add_tail(&task->t_state_list, &dev->state_task_list);
+ atomic_set(&task->task_state_active, 1);
+
+ DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ CMD_TFO(task->task_se_cmd)->get_task_tag(
+ task->task_se_cmd), task, dev);
+
+ spin_unlock(&dev->execute_task_lock);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_task *task, *task_prev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_execute_queue))
+ continue;
+ /*
+ * __transport_add_task_to_execute_queue() handles the
+ * SAM Task Attribute emulation if enabled
+ */
+ __transport_add_task_to_execute_queue(task, task_prev, dev);
+ atomic_set(&task->task_execute_queue, 1);
+ task_prev = task;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ return;
+}
+
+/* transport_get_task_from_execute_queue():
+ *
+ * Called with dev->execute_task_lock held.
+ */
+static struct se_task *
+transport_get_task_from_execute_queue(struct se_device *dev)
+{
+ struct se_task *task;
+
+ if (list_empty(&dev->execute_task_list))
+ return NULL;
+
+ list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
+ break;
+
+ list_del(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+
+ return task;
+}
+
+/* transport_remove_task_from_execute_queue():
+ *
+ *
+ */
+static void transport_remove_task_from_execute_queue(
+ struct se_task *task,
+ struct se_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_del(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+ switch (cmd->data_direction) {
+ case DMA_NONE:
+ return "NONE";
+ case DMA_FROM_DEVICE:
+ return "READ";
+ case DMA_TO_DEVICE:
+ return "WRITE";
+ case DMA_BIDIRECTIONAL:
+ return "BIDI";
+ default:
+ break;
+ }
+
+ return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+ struct se_device *dev,
+ char *b,
+ int *bl)
+{
+ *bl += sprintf(b + *bl, "Status: ");
+ switch (dev->dev_status) {
+ case TRANSPORT_DEVICE_ACTIVATED:
+ *bl += sprintf(b + *bl, "ACTIVATED");
+ break;
+ case TRANSPORT_DEVICE_DEACTIVATED:
+ *bl += sprintf(b + *bl, "DEACTIVATED");
+ break;
+ case TRANSPORT_DEVICE_SHUTDOWN:
+ *bl += sprintf(b + *bl, "SHUTDOWN");
+ break;
+ case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+ case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+ *bl += sprintf(b + *bl, "OFFLINE");
+ break;
+ default:
+ *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+ break;
+ }
+
+ *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
+ atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+ dev->queue_depth);
+ *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
+ DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+ *bl += sprintf(b + *bl, " ");
+}
+
+/* transport_release_all_cmds():
+ *
+ *
+ */
+static void transport_release_all_cmds(struct se_device *dev)
+{
+ struct se_cmd *cmd = NULL;
+ struct se_queue_req *qr = NULL, *qr_p = NULL;
+ int bug_out = 0, t_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+ qr_list) {
+
+ cmd = (struct se_cmd *)qr->cmd;
+ t_state = qr->state;
+ list_del(&qr->qr_list);
+ kfree(qr);
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+ flags);
+
+ printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+ " t_state: %u directly\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+
+ transport_release_fe_cmd(cmd);
+ bug_out = 1;
+
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+#if 0
+ if (bug_out)
+ BUG();
+#endif
+}
+
+void transport_dump_vpd_proto_id(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+ switch (vpd->protocol_identifier) {
+ case 0x00:
+ sprintf(buf+len, "Fibre Channel\n");
+ break;
+ case 0x10:
+ sprintf(buf+len, "Parallel SCSI\n");
+ break;
+ case 0x20:
+ sprintf(buf+len, "SSA\n");
+ break;
+ case 0x30:
+ sprintf(buf+len, "IEEE 1394\n");
+ break;
+ case 0x40:
+ sprintf(buf+len, "SCSI Remote Direct Memory Access"
+ " Protocol\n");
+ break;
+ case 0x50:
+ sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+ break;
+ case 0x60:
+ sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+ break;
+ case 0x70:
+ sprintf(buf+len, "Automation/Drive Interface Transport"
+ " Protocol\n");
+ break;
+ case 0x80:
+ sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+ break;
+ default:
+ sprintf(buf+len, "Unknown 0x%02x\n",
+ vpd->protocol_identifier);
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk(KERN_INFO "%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * Check if the Protocol Identifier Valid (PIV) bit is set..
+ *
+ * from spc3r23.pdf section 7.5.1
+ */
+ if (page_83[1] & 0x80) {
+ vpd->protocol_identifier = (page_83[0] & 0xf0);
+ vpd->protocol_identifier_set = 1;
+ transport_dump_vpd_proto_id(vpd, NULL, 0);
+ }
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
+int transport_dump_vpd_assoc(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0, len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+ switch (vpd->association) {
+ case 0x00:
+ sprintf(buf+len, "addressed logical unit\n");
+ break;
+ case 0x10:
+ sprintf(buf+len, "target port\n");
+ break;
+ case 0x20:
+ sprintf(buf+len, "SCSI target device\n");
+ break;
+ default:
+ sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * The VPD identification association..
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 297
+ */
+ vpd->association = (page_83[1] & 0x30);
+ return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0, len;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+ len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+ switch (vpd->device_identifier_type) {
+ case 0x00:
+ sprintf(buf+len, "Vendor specific\n");
+ break;
+ case 0x01:
+ sprintf(buf+len, "T10 Vendor ID based\n");
+ break;
+ case 0x02:
+ sprintf(buf+len, "EUI-64 based\n");
+ break;
+ case 0x03:
+ sprintf(buf+len, "NAA\n");
+ break;
+ case 0x04:
+ sprintf(buf+len, "Relative target port identifier\n");
+ break;
+ case 0x08:
+ sprintf(buf+len, "SCSI name string\n");
+ break;
+ default:
+ sprintf(buf+len, "Unsupported: 0x%02x\n",
+ vpd->device_identifier_type);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ /*
+ * The VPD identifier type..
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 298
+ */
+ vpd->device_identifier_type = (page_83[1] & 0x0f);
+ return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+ struct t10_vpd *vpd,
+ unsigned char *p_buf,
+ int p_buf_len)
+{
+ unsigned char buf[VPD_TMP_BUF_SIZE];
+ int ret = 0;
+
+ memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+ switch (vpd->device_identifier_code_set) {
+ case 0x01: /* Binary */
+ sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ case 0x02: /* ASCII */
+ sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ case 0x03: /* UTF-8 */
+ sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+ &vpd->device_identifier[0]);
+ break;
+ default:
+ sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+ " 0x%02x", vpd->device_identifier_code_set);
+ ret = -1;
+ break;
+ }
+
+ if (p_buf)
+ strncpy(p_buf, buf, p_buf_len);
+ else
+ printk("%s", buf);
+
+ return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+ static const char hex_str[] = "0123456789abcdef";
+ int j = 0, i = 4; /* offset to start of the identifier */
+
+ /*
+ * The VPD Code Set (encoding)
+ *
+ * from spc3r23.pdf Section 7.6.3.1 Table 296
+ */
+ vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+ switch (vpd->device_identifier_code_set) {
+ case 0x01: /* Binary */
+ vpd->device_identifier[j++] =
+ hex_str[vpd->device_identifier_type];
+ while (i < (4 + page_83[3])) {
+ vpd->device_identifier[j++] =
+ hex_str[(page_83[i] & 0xf0) >> 4];
+ vpd->device_identifier[j++] =
+ hex_str[page_83[i] & 0x0f];
+ i++;
+ }
+ break;
+ case 0x02: /* ASCII */
+ case 0x03: /* UTF-8 */
+ while (i < (4 + page_83[3]))
+ vpd->device_identifier[j++] = page_83[i++];
+ break;
+ default:
+ break;
+ }
+
+ return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
+
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+ /*
+ * If this device is from Target_Core_Mod/pSCSI, disable the
+ * SAM Task Attribute emulation.
+ *
+ * This is currently not available in upstream Linux/SCSI Target
+ * mode code, and is assumed to be disabled while using TCM/pSCSI.
+ */
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+ return;
+ }
+
+ dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+ DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+ " device\n", TRANSPORT(dev)->name,
+ TRANSPORT(dev)->get_device_rev(dev));
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = DEV_T10_WWN(dev);
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ printk(" Vendor: ");
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ printk("%c", wwn->vendor[i]);
+ else
+ printk(" ");
+
+ printk(" Model: ");
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ printk("%c", wwn->model[i]);
+ else
+ printk(" ");
+
+ printk(" Revision: ");
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ printk("%c", wwn->revision[i]);
+ else
+ printk(" ");
+
+ printk("\n");
+
+ device_type = TRANSPORT(dev)->get_device_type(dev);
+ printk(" Type: %s ", scsi_device_type(device_type));
+ printk(" ANSI SCSI revision: %02x\n",
+ TRANSPORT(dev)->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+ struct se_hba *hba,
+ struct se_subsystem_api *transport,
+ struct se_subsystem_dev *se_dev,
+ u32 device_flags,
+ void *transport_dev,
+ struct se_dev_limits *dev_limits,
+ const char *inquiry_prod,
+ const char *inquiry_rev)
+{
+ int ret = 0, force_pt;
+ struct se_device *dev;
+
+ dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+ if (!(dev)) {
+ printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+ return NULL;
+ }
+ dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
+ if (!(dev->dev_queue_obj)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " dev->dev_queue_obj\n");
+ kfree(dev);
+ return NULL;
+ }
+ transport_init_queue_obj(dev->dev_queue_obj);
+
+ dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
+ GFP_KERNEL);
+ if (!(dev->dev_status_queue_obj)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " dev->dev_status_queue_obj\n");
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+ return NULL;
+ }
+ transport_init_queue_obj(dev->dev_status_queue_obj);
+
+ dev->dev_flags = device_flags;
+ dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+ dev->dev_ptr = (void *) transport_dev;
+ dev->se_hba = hba;
+ dev->se_sub_dev = se_dev;
+ dev->transport = transport;
+ atomic_set(&dev->active_cmds, 0);
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->execute_task_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->ordered_cmd_list);
+ INIT_LIST_HEAD(&dev->state_task_list);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->ordered_cmd_lock);
+ spin_lock_init(&dev->state_task_lock);
+ spin_lock_init(&dev->dev_alua_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->dev_status_lock);
+ spin_lock_init(&dev->dev_status_thr_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+
+ dev->queue_depth = dev_limits->queue_depth;
+ atomic_set(&dev->depth_left, dev->queue_depth);
+ atomic_set(&dev->dev_ordered_id, 0);
+
+ se_dev_set_default_attribs(dev, dev_limits);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+ spin_lock_init(&dev->stats_lock);
+
+ spin_lock(&hba->device_lock);
+ list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+ /*
+ * Setup the SAM Task Attribute emulation for struct se_device
+ */
+ core_setup_task_attr_emulation(dev);
+ /*
+ * Force PR and ALUA passthrough emulation with internal object use.
+ */
+ force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+ /*
+ * Setup the Reservations infrastructure for struct se_device
+ */
+ core_setup_reservations(dev, force_pt);
+ /*
+ * Setup the Asymmetric Logical Unit Assignment for struct se_device
+ */
+ if (core_setup_alua(dev, force_pt) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->process_thread = kthread_run(transport_processing_thread, dev,
+ "LIO_%s", TRANSPORT(dev)->name);
+ if (IS_ERR(dev->process_thread)) {
+ printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+ TRANSPORT(dev)->name);
+ dev->process_thread = NULL;
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ * This is required so that transport_get_inquiry() copies these
+ * originals once back into DEV_T10_WWN(dev) for the virtual device
+ * setup.
+ */
+ if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (!(inquiry_prod) || !(inquiry_rev)) {
+ printk(KERN_ERR "All non TCM/pSCSI plugins require"
+ " INQUIRY consts\n");
+ ret = -1;
+ goto out;
+ }
+
+ strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
+ strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
+ strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+ }
+ scsi_dump_inquiry(dev);
+
+out:
+ if (!ret)
+ return dev;
+ if (dev->process_thread)
+ kthread_stop(dev->process_thread);
+
+ spin_lock(&hba->device_lock);
+ list_del(&dev->dev_list);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+
+ se_release_vpd_for_dev(dev);
+
+ kfree(dev->dev_status_queue_obj);
+ kfree(dev->dev_queue_obj);
+ kfree(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/* transport_generic_prepare_cdb():
+ *
+ * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ * The point of this is that, since we are mapping iSCSI LUNs to
+ * SCSI Target IDs, a non-zero LUN in the CDB will throw the
+ * devices and HBAs for a loop.
+ */
+static inline void transport_generic_prepare_cdb(
+ unsigned char *cdb)
+{
+ switch (cdb[0]) {
+ case READ_10: /* SBC - RDProtect */
+ case READ_12: /* SBC - RDProtect */
+ case READ_16: /* SBC - RDProtect */
+ case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+ case VERIFY: /* SBC - VRProtect */
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+ break;
+ }
+}
+
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+ enum dma_data_direction data_direction)
+{
+ struct se_task *task;
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned long flags;
+
+ task = dev->transport->alloc_task(cmd);
+ if (!task) {
+ printk(KERN_ERR "Unable to allocate struct se_task\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&task->t_list);
+ INIT_LIST_HEAD(&task->t_execute_list);
+ INIT_LIST_HEAD(&task->t_state_list);
+ init_completion(&task->task_stop_comp);
+ task->task_no = T_TASK(cmd)->t_tasks_no++;
+ task->task_se_cmd = cmd;
+ task->se_dev = dev;
+ task->task_data_direction = data_direction;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+void transport_device_setup_cmd(struct se_cmd *cmd)
+{
+ cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+}
+EXPORT_SYMBOL(transport_device_setup_cmd);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+ struct se_cmd *cmd,
+ struct target_core_fabric_ops *tfo,
+ struct se_session *se_sess,
+ u32 data_length,
+ int data_direction,
+ int task_attr,
+ unsigned char *sense_buffer)
+{
+ INIT_LIST_HEAD(&cmd->se_lun_list);
+ INIT_LIST_HEAD(&cmd->se_delayed_list);
+ INIT_LIST_HEAD(&cmd->se_ordered_list);
+ /*
+ * Setup t_task pointer to t_task_backstore
+ */
+ cmd->t_task = &cmd->t_task_backstore;
+
+ INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
+ init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+ init_completion(&T_TASK(cmd)->t_transport_stop_comp);
+ spin_lock_init(&T_TASK(cmd)->t_state_lock);
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+
+ cmd->se_tfo = tfo;
+ cmd->se_sess = se_sess;
+ cmd->data_length = data_length;
+ cmd->data_direction = data_direction;
+ cmd->sam_task_attr = task_attr;
+ cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+ /*
+ * Check if SAM Task Attribute emulation is enabled for this
+ * struct se_device storage object
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ return 0;
+
+ if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+ DEBUG_STA("SAM Task Attribute ACA"
+ " emulation is not supported\n");
+ return -1;
+ }
+ /*
+ * Used to determine when ORDERED commands should go from
+ * Dormant to Active status.
+ */
+ cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+ smp_mb__after_atomic_inc();
+ DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ cmd->se_ordered_id, cmd->sam_task_attr,
+ TRANSPORT(cmd->se_dev)->name);
+ return 0;
+}
+
+void transport_free_se_cmd(
+ struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_tmr_req)
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ /*
+ * Check and free any extended CDB buffer that was allocated
+ */
+ if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
+ kfree(T_TASK(se_cmd)->t_task_cdb);
+}
+EXPORT_SYMBOL(transport_free_se_cmd);
+
+static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
+
+/* transport_generic_allocate_tasks():
+ *
+ * Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ int ret;
+
+ transport_generic_prepare_cdb(cdb);
+
+ /*
+ * This is needed for early exceptions.
+ */
+ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+
+ transport_device_setup_cmd(cmd);
+ /*
+ * Ensure that the received CDB is less than the max (252 + 8) bytes
+ * for VARIABLE_LENGTH_CMD
+ */
+ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+ printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+ " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+ scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ return -1;
+ }
+ /*
+ * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+ * allocate the additional extended CDB buffer now. Otherwise
+ * setup the pointer from __t_task_cdb to t_task_cdb.
+ */
+ if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
+ T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+ GFP_KERNEL);
+ if (!(T_TASK(cmd)->t_task_cdb)) {
+ printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
+ " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+ scsi_command_size(cdb),
+ (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
+ return -1;
+ }
+ } else
+ T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+ /*
+ * Copy the original CDB into T_TASK(cmd).
+ */
+ memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+ /*
+ * Setup the received CDB based on SCSI defined opcodes and
+ * perform unit attention, persistent reservations and ALUA
+ * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+ * pointer is expected to be setup before we reach this point.
+ */
+ ret = transport_generic_cmd_sequencer(cmd, cdb);
+ if (ret < 0)
+ return ret;
+ /*
+ * Check for SAM Task Attribute Emulation
+ */
+ if (transport_check_alloc_task_attr(cmd) < 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -2;
+ }
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (cmd->se_lun->lun_sep)
+ cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
+
+/*
+ * Used by fabric module frontends not defining a TFO->new_cmd_map()
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
+ */
+int transport_generic_handle_cdb(
+ struct se_cmd *cmd)
+{
+ if (!SE_LUN(cmd)) {
+ dump_stack();
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb);
+
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+ struct se_cmd *cmd)
+{
+ if (!SE_LUN(cmd)) {
+ dump_stack();
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/* transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+ struct se_cmd *cmd)
+{
+ /*
+ * For the software fabric case, then we assume the nexus is being
+ * failed/shutdown when signals are pending from the kthread context
+ * caller, so we return a failure. For the HW target mode case running
+ * in interrupt code, the signal_pending() check is skipped.
+ */
+ if (!in_interrupt() && signal_pending(current))
+ return -1;
+ /*
+ * If the received CDB has already been ABORTED by the generic
+ * target engine, we now call transport_check_aborted_status()
+ * to queue any delayed TASK_ABORTED status for the received CDB to the
+ * fabric module as we are expecting no further incoming DATA OUT
+ * sequences at this point.
+ */
+ if (transport_check_aborted_status(cmd, 1) != 0)
+ return 0;
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/* transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
+{
+ /*
+ * This is needed for early exceptions.
+ */
+ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+ transport_device_setup_cmd(cmd);
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ /*
+ * No tasks remain in the execution queue
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+ DEBUG_TS("task_no[%d] - Processing task %p\n",
+ task->task_no, task);
+ /*
+ * If the struct se_task has not been sent and is not active,
+ * remove the struct se_task from the execution queue.
+ */
+ if (!atomic_read(&task->task_sent) &&
+ !atomic_read(&task->task_active)) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ transport_remove_task_from_execute_queue(task,
+ task->se_dev);
+
+ DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+ task->task_no);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ continue;
+ }
+
+ /*
+ * If the struct se_task is active, sleep until it is returned
+ * from the plugin.
+ */
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+
+ DEBUG_TS("task_no[%d] - Waiting to complete\n",
+ task->task_no);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_TS("task_no[%d] - Stopped successfully\n",
+ task->task_no);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ } else {
+ DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+ ret++;
+ }
+
+ __transport_stop_task_timer(task, &flags);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return ret;
+}
+
+static void transport_failure_reset_queue_depth(struct se_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ atomic_inc(&dev->depth_left);
+ atomic_inc(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+static void transport_generic_request_failure(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int complete,
+ int sc)
+{
+ DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+ T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+ " %d/%d transport_error_status: %d\n",
+ CMD_TFO(cmd)->get_cmd_state(cmd),
+ cmd->t_state, cmd->deferred_t_state,
+ cmd->transport_error_status);
+ DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+ " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+ " t_transport_active: %d t_transport_stop: %d"
+ " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ transport_stop_all_task_timers(cmd);
+
+ if (dev)
+ transport_failure_reset_queue_depth(dev);
+ /*
+ * For SAM Task Attribute emulation for failed struct se_cmd
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+
+ if (complete) {
+ transport_direct_request_timeout(cmd);
+ cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+
+ switch (cmd->transport_error_status) {
+ case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
+ cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+ break;
+ case PYX_TRANSPORT_INVALID_CDB_FIELD:
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ break;
+ case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ break;
+ case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
+ if (!sc)
+ transport_new_cmd_failure(cmd);
+ /*
+ * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
+ * we force this session to fall back to session
+ * recovery.
+ */
+ CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
+ CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+
+ goto check_stop;
+ case PYX_TRANSPORT_LU_COMM_FAILURE:
+ case PYX_TRANSPORT_ILLEGAL_REQUEST:
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ break;
+ case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ break;
+ case PYX_TRANSPORT_WRITE_PROTECTED:
+ cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ break;
+ case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ /*
+ * No SENSE Data payload for this case, set SCSI Status
+ * and queue the response to $FABRIC_MOD.
+ *
+ * Uses linux/include/scsi/scsi.h SAM status codes defs
+ */
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ /*
+ * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+ * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+ * CONFLICT STATUS.
+ *
+ * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+ */
+ if (SE_SESS(cmd) &&
+ DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+ CMD_TFO(cmd)->queue_status(cmd);
+ goto check_stop;
+ case PYX_TRANSPORT_USE_SENSE_REASON:
+ /*
+ * struct se_cmd->scsi_sense_reason already set
+ */
+ break;
+ default:
+ printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->transport_error_status);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ }
+
+ if (!sc)
+ transport_new_cmd_failure(cmd);
+ else
+ transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+check_stop:
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_direct_request_timeout(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
+ &T_TASK(cmd)->t_se_count);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_generic_request_timeout(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ /*
+ * Reset T_TASK(cmd)->t_se_count to allow the final call to
+ * transport_generic_remove() to free memory resources.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
+ int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+
+ atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_generic_remove(cmd, 0, 0);
+}
+
+static int
+transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
+{
+ unsigned char *buf;
+
+ buf = kzalloc(data_length, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate memory for buffer\n");
+ return -1;
+ }
+
+ T_TASK(cmd)->t_tasks_se_num = 0;
+ T_TASK(cmd)->t_task_buf = buf;
+
+ return 0;
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+ return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+ return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+ __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+ spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+}
+
+/*
+ * Called from interrupt context.
+ */
+static void transport_task_timeout_handler(unsigned long data)
+{
+ struct se_task *task = (struct se_task *)data;
+ struct se_cmd *cmd = TASK_CMD(task);
+ unsigned long flags;
+
+ DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (task->task_flags & TF_STOP) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ task->task_flags &= ~TF_RUNNING;
+
+ /*
+ * Determine if transport_complete_task() has already been called.
+ */
+ if (!(atomic_read(&task->task_active))) {
+ DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+ " == 0\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+
+ atomic_inc(&T_TASK(cmd)->t_se_count);
+ atomic_inc(&T_TASK(cmd)->t_transport_timeout);
+ T_TASK(cmd)->t_tasks_failed = 1;
+
+ atomic_set(&task->task_timeout, 1);
+ task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
+ task->task_scsi_status = 1;
+
+ if (atomic_read(&task->task_stop)) {
+ DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+ " == 1\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ complete(&task->task_stop_comp);
+ return;
+ }
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+ " t_task_cdbs_left\n", task, cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return;
+ }
+ DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+ task, cmd);
+
+ cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
+}
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_start_task_timer(struct se_task *task)
+{
+ struct se_device *dev = task->se_dev;
+ int timeout;
+
+ if (task->task_flags & TF_RUNNING)
+ return;
+ /*
+ * If the task_timeout is disabled, exit now.
+ */
+ timeout = DEV_ATTRIB(dev)->task_timeout;
+ if (!(timeout))
+ return;
+
+ init_timer(&task->task_timer);
+ task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
+ task->task_timer.data = (unsigned long) task;
+ task->task_timer.function = transport_task_timeout_handler;
+
+ task->task_flags |= TF_RUNNING;
+ add_timer(&task->task_timer);
+#if 0
+ printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+ " %d\n", task->task_se_cmd, task, timeout);
+#endif
+}
+
+/*
+ * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ */
+void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
+{
+ struct se_cmd *cmd = TASK_CMD(task);
+
+ if (!(task->task_flags & TF_RUNNING))
+ return;
+
+ task->task_flags |= TF_STOP;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+
+ del_timer_sync(&task->task_timer);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+ task->task_flags &= ~TF_RUNNING;
+ task->task_flags &= ~TF_STOP;
+}
+
+static void transport_stop_all_task_timers(struct se_cmd *cmd)
+{
+ struct se_task *task = NULL, *task_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list)
+ __transport_stop_task_timer(task, &flags);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline int transport_tcq_window_closed(struct se_device *dev)
+{
+ if (dev->dev_tcq_window_closed++ <
+ PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
+ msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
+ } else
+ msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
+
+ wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ return 0;
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+ if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ return 1;
+ /*
+ * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+ * to allow the passed struct se_cmd's list of tasks to be added to
+ * the front of the list.
+ */
+ if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+ smp_mb__after_atomic_inc();
+ DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+ " 0x%02x, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->se_ordered_id);
+ return 1;
+ } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+ list_add_tail(&cmd->se_ordered_list,
+ &SE_DEV(cmd)->ordered_cmd_list);
+ spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+
+ atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+ smp_mb__after_atomic_inc();
+
+ DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+ " list, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ cmd->se_ordered_id);
+ /*
+ * Add ORDERED command to tail of execution queue if
+ * no other older commands exist that need to be
+ * completed first.
+ */
+ if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+ return 1;
+ } else {
+ /*
+ * For SIMPLE and UNTAGGED Task Attribute commands
+ */
+ atomic_inc(&SE_DEV(cmd)->simple_cmds);
+ smp_mb__after_atomic_inc();
+ }
+ /*
+ * Otherwise, if one or more outstanding ORDERED task attributes exist,
+ * add the dormant task(s) built for the passed struct se_cmd to the
+ * execution queue and become in Active state for this struct se_device.
+ */
+ if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+ /*
+ * Otherwise, add cmd w/ tasks to delayed cmd queue that
+ * will be drained upon completion of the HEAD_OF_QUEUE task.
+ */
+ spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+ cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+ list_add_tail(&cmd->se_delayed_list,
+ &SE_DEV(cmd)->delayed_cmd_list);
+ spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+
+ DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
+ /*
+ * Return zero to let transport_execute_tasks() know
+ * not to add the delayed tasks to the execution list.
+ */
+ return 0;
+ }
+ /*
+ * Otherwise, no ORDERED task attributes exist..
+ */
+ return 1;
+}
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+ int add_tasks;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
+ if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+ cmd->transport_error_status =
+ PYX_TRANSPORT_LU_COMM_FAILURE;
+ transport_generic_request_failure(cmd, NULL, 0, 1);
+ return 0;
+ }
+ }
+ /*
+ * Call transport_cmd_check_stop() to see if a fabric exception
+ * has occurred that prevents execution.
+ */
+ if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+ /*
+ * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+ * attribute for the tasks of the received struct se_cmd CDB
+ */
+ add_tasks = transport_execute_task_attr(cmd);
+ if (add_tasks == 0)
+ goto execute_tasks;
+ /*
+ * This calls transport_add_tasks_from_cmd() to handle
+ * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
+ * (if enabled) in __transport_add_task_to_execute_queue() and
+ * transport_add_task_check_sam_attr().
+ */
+ transport_add_tasks_from_cmd(cmd);
+ }
+ /*
+ * Kick the execution queue for the cmd associated struct se_device
+ * storage object.
+ */
+execute_tasks:
+ __transport_execute_tasks(SE_DEV(cmd));
+ return 0;
+}
+
+/*
+ * Called to check the struct se_device TCQ depth window, and once open pull
+ * a struct se_task from struct se_device->execute_task_list and dispatch it
+ * to the backend subsystem plugin.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev)
+{
+ int error;
+ struct se_cmd *cmd = NULL;
+ struct se_task *task;
+ unsigned long flags;
+
+ /*
+ * Check if there is enough room in the device and HBA queue to send
+ * struct se_transport_task's to the selected transport.
+ */
+check_depth:
+ spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (!(atomic_read(&dev->depth_left)) ||
+ !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ return transport_tcq_window_closed(dev);
+ }
+ dev->dev_tcq_window_closed = 0;
+
+ spin_lock(&dev->execute_task_lock);
+ task = transport_get_task_from_execute_queue(dev);
+ spin_unlock(&dev->execute_task_lock);
+
+ if (!task) {
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ return 0;
+ }
+
+ atomic_dec(&dev->depth_left);
+ atomic_dec(&SE_HBA(dev)->left_queue_depth);
+ spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+
+ cmd = TASK_CMD(task);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&task->task_active, 1);
+ atomic_set(&task->task_sent, 1);
+ atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+
+ if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
+ T_TASK(cmd)->t_task_cdbs)
+ atomic_set(&cmd->transport_sent, 1);
+
+ transport_start_task_timer(task);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * The struct se_cmd->transport_emulate_cdb() function pointer is used
+ * to grab REPORT_LUNS CDBs before they hit the
+ * struct se_subsystem_api->do_task() caller below.
+ */
+ if (cmd->transport_emulate_cdb) {
+ error = cmd->transport_emulate_cdb(cmd);
+ if (error != 0) {
+ cmd->transport_error_status = error;
+ atomic_set(&task->task_active, 0);
+ atomic_set(&cmd->transport_sent, 0);
+ transport_stop_tasks_for_cmd(cmd);
+ transport_generic_request_failure(cmd, dev, 0, 1);
+ goto check_depth;
+ }
+ /*
+ * Handle the successful completion for transport_emulate_cdb()
+ * for synchronous operation. When SCF_EMULATE_CDB_ASYNC is set,
+ * the caller is expected to complete the task with the
+ * proper status.
+ */
+ if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+ cmd->scsi_status = SAM_STAT_GOOD;
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
+ } else {
+ /*
+ * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
+ * RAMDISK we use the internal transport_emulate_control_cdb() logic
+ * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
+ * LUN emulation code.
+ *
+ * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
+ * call ->do_task() directly and let the underlying TCM subsystem plugin
+ * code handle the CDB emulation.
+ */
+ if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ error = transport_emulate_control_cdb(task);
+ else
+ error = TRANSPORT(dev)->do_task(task);
+
+ if (error != 0) {
+ cmd->transport_error_status = error;
+ atomic_set(&task->task_active, 0);
+ atomic_set(&cmd->transport_sent, 0);
+ transport_stop_tasks_for_cmd(cmd);
+ transport_generic_request_failure(cmd, dev, 0, 1);
+ }
+ }
+
+ goto check_depth;
+
+ return 0;
+}
+
+void transport_new_cmd_failure(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Any unsolicited data will get dumped for failed command inside of
+ * the fabric plugin
+ */
+ spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+
+ CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+}
+
+static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
+
+static inline u32 transport_get_sectors_6(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 8-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * Use 24-bit allocation length for TYPE_TAPE.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+ /*
+ * Everything else assumes the TYPE_DISK sector CDB location.
+ * Use 8-bit sector value.
+ */
+type_disk:
+ return (u32)cdb[4];
+}
+
+static inline u32 transport_get_sectors_10(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 16-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * The _10 CDB variants are not defined in SSC; reject the command.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -1;
+ return 0;
+ }
+
+ /*
+ * Everything else assumes the TYPE_DISK sector CDB location.
+ * Use 16-bit sector value.
+ */
+type_disk:
+ return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * The _12 CDB variants are not defined in SSC; reject the command.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -1;
+ return 0;
+ }
+
+ /*
+ * Everything else assumes the TYPE_DISK sector CDB location.
+ * Use 32-bit sector value.
+ */
+type_disk:
+ return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ if (!dev)
+ goto type_disk;
+
+ /*
+ * Use 24-bit allocation length for TYPE_TAPE.
+ */
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+ return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+ (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+ unsigned char *cdb,
+ struct se_cmd *cmd,
+ int *ret)
+{
+ /*
+ * Assume TYPE_DISK for non struct se_device objects.
+ * Use 32-bit sector value.
+ */
+ return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+ (cdb[30] << 8) + cdb[31];
+
+}
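+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch.
+ * The transport_get_sectors_*() helpers above pull the transfer length out
+ * of the fixed CDB offsets defined by SBC/SSC; for example, a READ_10 CDB
+ * carries a 16-bit sector count in bytes 7-8 (hypothetical values below):
+ */
+#if 0
+static u32 example_read10_sector_count(void)
+{
+	/* READ_10 for 256 (0x0100) sectors starting at LBA 0x1000 */
+	unsigned char cdb[10] = { READ_10, 0x00, 0x00, 0x00, 0x10, 0x00,
+				  0x00, 0x01, 0x00, 0x00 };
+
+	return (u32)(cdb[7] << 8) + cdb[8];	/* == 256 */
+}
+#endif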
+
+static inline u32 transport_get_size(
+ u32 sectors,
+ unsigned char *cdb,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ if (cdb[1] & 1) { /* sectors */
+ return DEV_ATTRIB(dev)->block_size * sectors;
+ } else /* bytes */
+ return sectors;
+ }
+#if 0
+ printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+ " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
+ DEV_ATTRIB(dev)->block_size * sectors,
+ TRANSPORT(dev)->name);
+#endif
+ return DEV_ATTRIB(dev)->block_size * sectors;
+}
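+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch.
+ * transport_get_size() scales the CDB sector count by the backing device
+ * block size, except for TYPE_TAPE in byte mode. A worked example for a
+ * hypothetical 512-byte block device:
+ */
+#if 0
+static u32 example_expected_transfer_size(void)
+{
+	u32 sectors = 8;
+	u32 block_size = 512;	/* DEV_ATTRIB(dev)->block_size */
+
+	return block_size * sectors;	/* == 4096 bytes of data expected */
+}
+#endif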
+
+unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
+{
+ unsigned char result = 0;
+ /*
+ * MSB
+ */
+ if ((val[0] >= 'a') && (val[0] <= 'f'))
+ result = ((val[0] - 'a' + 10) & 0xf) << 4;
+ else
+ if ((val[0] >= 'A') && (val[0] <= 'F'))
+ result = ((val[0] - 'A' + 10) & 0xf) << 4;
+ else /* digit */
+ result = ((val[0] - '0') & 0xf) << 4;
+ /*
+ * LSB
+ */
+ if ((val[1] >= 'a') && (val[1] <= 'f'))
+ result |= ((val[1] - 'a' + 10) & 0xf);
+ else
+ if ((val[1] >= 'A') && (val[1] <= 'F'))
+ result |= ((val[1] - 'A' + 10) & 0xf);
+ else /* digit */
+ result |= ((val[1] - '0') & 0xf);
+
+ return result;
+}
+EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
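+
+/*
+ * Editor's note: illustrative usage sketch only, not part of the original
+ * patch. transport_asciihex_to_binaryhex() folds two ASCII hex characters
+ * into one byte, e.g. while parsing a WWN string:
+ */
+#if 0
+static unsigned char example_asciihex_usage(void)
+{
+	unsigned char pair[2] = { '4', 'f' };
+
+	return transport_asciihex_to_binaryhex(pair);	/* == 0x4f */
+}
+#endif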
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+ unsigned char *buf, *addr;
+ struct se_mem *se_mem;
+ unsigned int offset;
+ int i;
+ /*
+ * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+ *
+ * 1) read the specified logical block(s);
+ * 2) transfer logical blocks from the data-out buffer;
+ * 3) XOR the logical blocks transferred from the data-out buffer with
+ * the logical blocks read, storing the resulting XOR data in a buffer;
+ * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+ * blocks transferred from the data-out buffer; and
+ * 5) transfer the resulting XOR data to the data-in buffer.
+ */
+ buf = kmalloc(cmd->data_length, GFP_KERNEL);
+ if (!(buf)) {
+ printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+ return;
+ }
+ /*
+ * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+ * into the locally allocated *buf
+ */
+ transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+ /*
+ * Now perform the XOR against the BIDI read memory located at
+ * T_TASK(cmd)->t_mem_bidi_list
+ */
+
+ offset = 0;
+ list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+ addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
+ if (!(addr))
+ goto out;
+
+ for (i = 0; i < se_mem->se_len; i++)
+ *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+
+ offset += se_mem->se_len;
+ kunmap_atomic(addr, KM_USER0);
+ }
+out:
+ kfree(buf);
+}
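+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch.
+ * The heart of the XDWRITEREAD emulation above is step 3: XOR the data-out
+ * payload against the blocks read from media. Reduced to two flat buffers:
+ */
+#if 0
+static void example_xor_merge(unsigned char *read_buf,
+			      const unsigned char *write_buf, u32 len)
+{
+	u32 i;
+
+	/* read_buf ends up holding the XOR result for the data-in buffer */
+	for (i = 0; i < len; i++)
+		read_buf[i] ^= write_buf[i];
+}
+#endif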
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+ unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+ struct se_device *dev;
+ struct se_task *task = NULL, *task_tmp;
+ unsigned long flags;
+ u32 offset = 0;
+
+ if (!SE_LUN(cmd)) {
+ printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+ return -1;
+ }
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return 0;
+ }
+
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+
+ if (!task->task_sense)
+ continue;
+
+ dev = task->se_dev;
+ if (!(dev))
+ continue;
+
+ if (!TRANSPORT(dev)->get_sense_buffer) {
+ printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+ " is NULL\n");
+ continue;
+ }
+
+ sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+ if (!(sense_buffer)) {
+ printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+ " sense buffer for task with sense\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+ continue;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ TRANSPORT_SENSE_BUFFER);
+
+ memcpy((void *)&buffer[offset], (void *)sense_buffer,
+ TRANSPORT_SENSE_BUFFER);
+ cmd->scsi_status = task->task_scsi_status;
+ /* Automatically padded */
+ cmd->scsi_sense_length =
+ (TRANSPORT_SENSE_BUFFER + offset);
+
+ printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+ " and sense\n",
+ dev->se_hba->hba_id, TRANSPORT(dev)->name,
+ cmd->scsi_status);
+ return 0;
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return -1;
+}
+
+static int transport_allocate_resources(struct se_cmd *cmd)
+{
+ u32 length = cmd->data_length;
+
+ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
+ return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+ else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
+ return transport_generic_allocate_buf(cmd, length);
+ else
+ return 0;
+}
+
+static int
+transport_handle_reservation_conflict(struct se_cmd *cmd)
+{
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ /*
+ * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+ * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+ * CONFLICT STATUS.
+ *
+ * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+ */
+ if (SE_SESS(cmd) &&
+ DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+ return -2;
+}
+
+/* transport_generic_cmd_sequencer():
+ *
+ * Generic Command Sequencer that should work for most DAS transport
+ * drivers.
+ *
+ * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ * RX Thread.
+ *
+ * FIXME: Need to support other SCSI OPCODES here as well.
+ */
+static int transport_generic_cmd_sequencer(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ int ret = 0, sector_ret = 0, passthrough;
+ u32 sectors = 0, size = 0, pr_reg_type = 0;
+ u16 service_action;
+ u8 alua_ascq = 0;
+ /*
+ * Check for an existing UNIT ATTENTION condition
+ */
+ if (core_scsi3_ua_check(cmd, cdb) < 0) {
+ cmd->transport_wait_for_tasks =
+ &transport_nop_wait_for_tasks;
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+ return -2;
+ }
+ /*
+ * Check status of Asymmetric Logical Unit Assignment port
+ */
+ ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+ if (ret != 0) {
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * the ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state.
+ */
+ if (ret > 0) {
+#if 0
+ printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+ " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+ CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+#endif
+ transport_set_sense_codes(cmd, 0x04, alua_ascq);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+ return -2;
+ }
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Check status for SPC-3 Persistent Reservations
+ */
+ if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
+ if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+ cmd, cdb, pr_reg_type) != 0)
+ return transport_handle_reservation_conflict(cmd);
+ /*
+ * This means the CDB is allowed for the SCSI Initiator port
+ * when said port is *NOT* holding the legacy SPC-2 or
+ * SPC-3 Persistent Reservation.
+ */
+ }
+
+ switch (cdb[0]) {
+ case READ_6:
+ sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_6;
+ T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_10:
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_12:
+ sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_12;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case READ_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_16;
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_6:
+ sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_6;
+ T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_10:
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_12:
+ sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_12;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case WRITE_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_16;
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ break;
+ case XDWRITEREAD_10:
+ if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ !(T_TASK(cmd)->t_tasks_bidi))
+ goto out_invalid_cdb_field;
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->transport_split_cdb = &split_cdb_XX_10;
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+ /*
+ * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+ */
+ cmd->transport_complete_callback = &transport_xor_callback;
+ T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ break;
+ case VARIABLE_LENGTH_CMD:
+ service_action = get_unaligned_be16(&cdb[8]);
+ /*
+ * Determine if this is a TCM/pSCSI device and we should disable
+ * internal emulation for this CDB.
+ */
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+
+ switch (service_action) {
+ case XDWRITEREAD_32:
+ sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ /*
+ * Use WRITE_32 and READ_32 opcodes for the emulated
+ * XDWRITE_READ_32 logic.
+ */
+ cmd->transport_split_cdb = &split_cdb_XX_32;
+ T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+
+ /*
+ * Setup BIDI XOR callback to be run during
+ * transport_generic_complete_ok()
+ */
+ cmd->transport_complete_callback = &transport_xor_callback;
+ T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+ break;
+ case WRITE_SAME_32:
+ sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+ /*
+ * Skip the remaining assignments for TCM/PSCSI passthrough
+ */
+ if (passthrough)
+ break;
+
+ if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
+ printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(cdb[10] & 0x08)) {
+ printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+ " supported for Block Discard Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ break;
+ default:
+ printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+ " 0x%04x not supported\n", service_action);
+ goto out_unsupported_cdb;
+ }
+ break;
+ case 0xa3:
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ /* MAINTENANCE_IN from SCC-2 */
+ /*
+ * Check for emulated MI_REPORT_TARGET_PGS.
+ */
+ if (cdb[1] == MI_REPORT_TARGET_PGS) {
+ cmd->transport_emulate_cdb =
+ (T10_ALUA(su_dev)->alua_type ==
+ SPC3_ALUA_EMULATED) ?
+ &core_emulate_report_target_port_groups :
+ NULL;
+ }
+ size = (cdb[6] << 24) | (cdb[7] << 16) |
+ (cdb[8] << 8) | cdb[9];
+ } else {
+ /* GPCMD_SEND_KEY from multi media commands */
+ size = (cdb[8] << 8) + cdb[9];
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case MODE_SELECT:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case MODE_SELECT_10:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case MODE_SENSE:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case MODE_SENSE_10:
+ case GPCMD_READ_BUFFER_CAPACITY:
+ case GPCMD_SEND_OPC:
+ case LOG_SELECT:
+ case LOG_SENSE:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_BLOCK_LIMITS:
+ size = READ_BLOCK_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case GPCMD_GET_CONFIGURATION:
+ case GPCMD_READ_FORMAT_CAPACITIES:
+ case GPCMD_READ_DISC_INFO:
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type ==
+ SPC3_PERSISTENT_RESERVATIONS) ?
+ &core_scsi3_emulate_pr : NULL;
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ case GPCMD_READ_DVD_STRUCTURE:
+ size = (cdb[8] << 8) + cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case READ_POSITION:
+ size = READ_POSITION_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case 0xa4:
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ /* MAINTENANCE_OUT from SCC-2
+ *
+ * Check for emulated MO_SET_TARGET_PGS.
+ */
+ if (cdb[1] == MO_SET_TARGET_PGS) {
+ cmd->transport_emulate_cdb =
+ (T10_ALUA(su_dev)->alua_type ==
+ SPC3_ALUA_EMULATED) ?
+ &core_emulate_set_target_port_groups :
+ NULL;
+ }
+
+ size = (cdb[6] << 24) | (cdb[7] << 16) |
+ (cdb[8] << 8) | cdb[9];
+ } else {
+ /* GPCMD_REPORT_KEY from multi media commands */
+ size = (cdb[8] << 8) + cdb[9];
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case INQUIRY:
+ size = (cdb[3] << 8) + cdb[4];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+ * See spc4r17 section 5.3
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_BUFFER:
+ size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_CAPACITY:
+ size = READ_CAP_LEN;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_MEDIA_SERIAL_NUMBER:
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case SERVICE_ACTION_IN:
+ case ACCESS_CONTROL_IN:
+ case ACCESS_CONTROL_OUT:
+ case EXTENDED_COPY:
+ case READ_ATTRIBUTE:
+ case RECEIVE_COPY_RESULTS:
+ case WRITE_ATTRIBUTE:
+ size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ size = (cdb[3] << 8) | cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+ case GPCMD_READ_CD:
+ sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ size = (2336 * sectors);
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+#endif
+ case READ_TOC:
+ size = cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case REQUEST_SENSE:
+ size = cdb[4];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case READ_ELEMENT_STATUS:
+ size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case WRITE_BUFFER:
+ size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /*
+ * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RESERVE_10)
+ size = (cdb[7] << 8) | cdb[8];
+ else
+ size = cmd->data_length;
+
+ /*
+ * Set up the legacy emulated handler for SPC-2 and
+ * >= SPC-3 compatible reservation handling (CRH=1).
+ * Otherwise, we assume the underlying SCSI logic is
+ * running in SPC_PASSTHROUGH and wants reservation
+ * emulation disabled.
+ */
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type !=
+ SPC_PASSTHROUGH) ?
+ &core_scsi2_emulate_crh : NULL;
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ /*
+ * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RELEASE_10)
+ size = (cdb[7] << 8) | cdb[8];
+ else
+ size = cmd->data_length;
+
+ cmd->transport_emulate_cdb =
+ (T10_RES(su_dev)->res_type !=
+ SPC_PASSTHROUGH) ?
+ &core_scsi2_emulate_crh : NULL;
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case SYNCHRONIZE_CACHE:
+ case 0x91: /* SYNCHRONIZE_CACHE_16: */
+ /*
+ * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+ */
+ if (cdb[0] == SYNCHRONIZE_CACHE) {
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ }
+ if (sector_ret)
+ goto out_unsupported_cdb;
+
+ size = transport_get_size(sectors, cdb, cmd);
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+ /*
+ * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
+ */
+ if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ break;
+ /*
+ * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
+ * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
+ */
+ cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+ /*
+ * Check that LBA + range does not extend past the end of the
+ * device.
+ */
+ if (transport_get_sectors(cmd) < 0)
+ goto out_invalid_cdb_field;
+ break;
+ case UNMAP:
+ size = get_unaligned_be16(&cdb[7]);
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Determine if the received UNMAP is used for direct passthrough
+ * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
+ * signaling the use of internal transport_generic_unmap() emulation
+ * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
+ * subsystem plugin backstores.
+ */
+ if (!(passthrough))
+ cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
+
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ case WRITE_SAME_16:
+ sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+ size = transport_get_size(sectors, cdb, cmd);
+ T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
+ passthrough = (TRANSPORT(dev)->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+ /*
+ * Determine if the received WRITE_SAME_16 is used for direct
+ * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
+ * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
+ * emulation for Linux/BLOCK discard with TCM/IBLOCK and
+ * TCM/FILEIO subsystem plugin backstores.
+ */
+ if (!(passthrough)) {
+ if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
+ printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(cdb[1] & 0x08)) {
+ printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+ " supported for Block Discard Emulation\n");
+ goto out_invalid_cdb_field;
+ }
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ case GPCMD_CLOSE_TRACK:
+ case ERASE:
+ case INITIALIZE_ELEMENT_STATUS:
+ case GPCMD_LOAD_UNLOAD:
+ case REZERO_UNIT:
+ case SEEK_10:
+ case GPCMD_SET_SPEED:
+ case SPACE:
+ case START_STOP:
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case WRITE_FILEMARKS:
+ case MOVE_MEDIUM:
+ cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+ break;
+ case REPORT_LUNS:
+ cmd->transport_emulate_cdb =
+ &transport_core_report_lun_response;
+ size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+ * See spc4r17 section 5.3
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = TASK_ATTR_HOQ;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ break;
+ default:
+ printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ " 0x%02x, sending CHECK_CONDITION.\n",
+ CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+ cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+ goto out_unsupported_cdb;
+ }
+
+ if (size != cmd->data_length) {
+ printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+ cmd->data_length, size, cdb[0]);
+
+ cmd->cmd_spdtl = size;
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ printk(KERN_ERR "Rejecting underflow/overflow"
+ " WRITE data\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+ * type SCF_SCSI_DATA_SG_IO_CDB.
+ */
+ if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
+ printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+ " CDB on non 512-byte sector setup subsystem"
+ " plugin: %s\n", TRANSPORT(dev)->name);
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ }
+ cmd->data_length = size;
+ }
+
+ transport_set_supported_SAM_opcode(cmd);
+ return ret;
+
+out_unsupported_cdb:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -2;
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -2;
+}
+
+static inline void transport_release_tasks(struct se_cmd *);
+
+/*
+ * This function will copy a contiguous *src buffer into a destination
+ * struct scatterlist array.
+ */
+static void transport_memcpy_write_contig(
+ struct se_cmd *cmd,
+ struct scatterlist *sg_d,
+ unsigned char *src)
+{
+ u32 i = 0, length = 0, total_length = cmd->data_length;
+ void *dst;
+
+ while (total_length) {
+ length = sg_d[i].length;
+
+ if (length > total_length)
+ length = total_length;
+
+ dst = sg_virt(&sg_d[i]);
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ src += length;
+ i++;
+ }
+}
+
+/*
+ * This function will copy a struct scatterlist array *sg_s into a destination
+ * contiguous *dst buffer.
+ */
+static void transport_memcpy_read_contig(
+ struct se_cmd *cmd,
+ unsigned char *dst,
+ struct scatterlist *sg_s)
+{
+ u32 i = 0, length = 0, total_length = cmd->data_length;
+ void *src;
+
+ while (total_length) {
+ length = sg_s[i].length;
+
+ if (length > total_length)
+ length = total_length;
+
+ src = sg_virt(&sg_s[i]);
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ dst += length;
+ i++;
+ }
+}
+
+static void transport_memcpy_se_mem_read_contig(
+ struct se_cmd *cmd,
+ unsigned char *dst,
+ struct list_head *se_mem_list)
+{
+ struct se_mem *se_mem;
+ void *src;
+ u32 length = 0, total_length = cmd->data_length;
+
+ list_for_each_entry(se_mem, se_mem_list, se_list) {
+ length = se_mem->se_len;
+
+ if (length > total_length)
+ length = total_length;
+
+ src = page_address(se_mem->se_page) + se_mem->se_off;
+
+ memcpy(dst, src, length);
+
+ if (!(total_length -= length))
+ return;
+
+ dst += length;
+ }
+}
+
+/*
+ * Called from transport_generic_complete_ok() and
+ * transport_generic_request_failure() to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_cmd *cmd_p, *cmd_tmp;
+ int new_active_tasks = 0;
+
+ if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+ atomic_dec(&dev->simple_cmds);
+ smp_mb__after_atomic_dec();
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+ " SIMPLE: %u\n", dev->dev_cur_ordered_id,
+ cmd->se_ordered_id);
+ } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+ atomic_dec(&dev->dev_hoq_count);
+ smp_mb__after_atomic_dec();
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+ " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+ cmd->se_ordered_id);
+ } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+ spin_lock(&dev->ordered_cmd_lock);
+ list_del(&cmd->se_ordered_list);
+ atomic_dec(&dev->dev_ordered_sync);
+ smp_mb__after_atomic_dec();
+ spin_unlock(&dev->ordered_cmd_lock);
+
+ dev->dev_cur_ordered_id++;
+ DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+ " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+ }
+ /*
+ * Process all commands up to the last received
+ * ORDERED task attribute which requires another blocking
+ * boundary
+ */
+ spin_lock(&dev->delayed_cmd_lock);
+ list_for_each_entry_safe(cmd_p, cmd_tmp,
+ &dev->delayed_cmd_list, se_delayed_list) {
+
+ list_del(&cmd_p->se_delayed_list);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ DEBUG_STA("Calling add_tasks() for"
+ " cmd_p: 0x%02x Task Attr: 0x%02x"
+ " Dormant -> Active, se_ordered_id: %u\n",
+ T_TASK(cmd_p)->t_task_cdb[0],
+ cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+ transport_add_tasks_from_cmd(cmd_p);
+ new_active_tasks++;
+
+ spin_lock(&dev->delayed_cmd_lock);
+ if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+ break;
+ }
+ spin_unlock(&dev->delayed_cmd_lock);
+ /*
+ * If new tasks have become active, wake up the transport thread
+ * to do the processing of the Active tasks.
+ */
+ if (new_active_tasks != 0)
+ wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+}
+
+static void transport_generic_complete_ok(struct se_cmd *cmd)
+{
+ int reason = 0;
+ /*
+ * Check if we need to move delayed/dormant tasks from cmds on the
+ * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+ * Attribute.
+ */
+ if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+ /*
+ * Check if we need to retrieve a sense buffer from
+ * the struct se_cmd in question.
+ */
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ if (transport_get_sense_data(cmd) < 0)
+ reason = TCM_NON_EXISTENT_LUN;
+
+ /*
+ * Only set when a struct se_task->task_scsi_status returned
+ * a non-GOOD status.
+ */
+ if (cmd->scsi_status) {
+ transport_send_check_condition_and_sense(
+ cmd, reason, 1);
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ }
+ /*
+ * Check for a callback, used by, amongst other things,
+ * XDWRITE_READ_10 emulation.
+ */
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd);
+
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * If enabled by the TCM fabric module's pre-registered SGL
+ * memory, perform the memcpy() from the TCM internal
+ * contiguous buffer back to the original SGL.
+ */
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+ transport_memcpy_write_contig(cmd,
+ T_TASK(cmd)->t_task_pt_sgl,
+ T_TASK(cmd)->t_task_buf);
+
+ CMD_TFO(cmd)->queue_data_in(cmd);
+ break;
+ case DMA_TO_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * Check if we need to send READ payload for BIDI-COMMAND
+ */
+ if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+ if (SE_LUN(cmd)->lun_sep) {
+ SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ cmd->data_length;
+ }
+ spin_unlock(&cmd->se_lun->lun_sep_lock);
+ CMD_TFO(cmd)->queue_data_in(cmd);
+ break;
+ }
+ /* Fall through for DMA_TO_DEVICE */
+ case DMA_NONE:
+ CMD_TFO(cmd)->queue_status(cmd);
+ break;
+ default:
+ break;
+ }
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ list_for_each_entry_safe(task, task_tmp,
+ &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_active))
+ continue;
+
+ kfree(task->task_sg_bidi);
+ kfree(task->task_sg);
+
+ list_del(&task->t_list);
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (task->se_dev)
+ TRANSPORT(task->se_dev)->free_task(task);
+ else
+ printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+ task->task_no);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+ struct se_mem *se_mem, *se_mem_tmp;
+ int free_page = 1;
+
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+ free_page = 0;
+ if (cmd->se_dev->transport->do_se_mem_map)
+ free_page = 0;
+
+ if (T_TASK(cmd)->t_task_buf) {
+ kfree(T_TASK(cmd)->t_task_buf);
+ T_TASK(cmd)->t_task_buf = NULL;
+ return;
+ }
+
+ /*
+ * Caller will handle releasing of struct se_mem.
+ */
+ if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
+ return;
+
+ if (!(T_TASK(cmd)->t_tasks_se_num))
+ return;
+
+ list_for_each_entry_safe(se_mem, se_mem_tmp,
+ T_TASK(cmd)->t_mem_list, se_list) {
+ /*
+ * We only call __free_page(struct se_mem->se_page) when
+ * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+ */
+ if (free_page)
+ __free_page(se_mem->se_page);
+
+ list_del(&se_mem->se_list);
+ kmem_cache_free(se_mem_cache, se_mem);
+ }
+
+ if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
+ list_for_each_entry_safe(se_mem, se_mem_tmp,
+ T_TASK(cmd)->t_mem_bidi_list, se_list) {
+ /*
+ * We only call __free_page(struct se_mem->se_page) when
+ * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+ */
+ if (free_page)
+ __free_page(se_mem->se_page);
+
+ list_del(&se_mem->se_list);
+ kmem_cache_free(se_mem_cache, se_mem);
+ }
+ }
+
+ kfree(T_TASK(cmd)->t_mem_bidi_list);
+ T_TASK(cmd)->t_mem_bidi_list = NULL;
+ kfree(T_TASK(cmd)->t_mem_list);
+ T_TASK(cmd)->t_mem_list = NULL;
+ T_TASK(cmd)->t_tasks_se_num = 0;
+}
+
+static inline void transport_release_tasks(struct se_cmd *cmd)
+{
+ transport_free_dev_tasks(cmd);
+}
+
+static inline int transport_dec_and_check(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return 1;
+ }
+ }
+
+ if (atomic_read(&T_TASK(cmd)->t_se_count)) {
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ return 1;
+ }
+ }
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ return 0;
+}
+
+static void transport_release_fe_cmd(struct se_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (transport_dec_and_check(cmd))
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto free_pages;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_release_tasks(cmd);
+free_pages:
+ transport_free_pages(cmd);
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_direct(cmd);
+}
+
+static int transport_generic_remove(
+ struct se_cmd *cmd,
+ int release_to_pool,
+ int session_reinstatement)
+{
+ unsigned long flags;
+
+ if (!(T_TASK(cmd)))
+ goto release_cmd;
+
+ if (transport_dec_and_check(cmd)) {
+ if (session_reinstatement) {
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ flags);
+ }
+ return 1;
+ }
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ goto free_pages;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_release_tasks(cmd);
+free_pages:
+ transport_free_pages(cmd);
+
+release_cmd:
+ if (release_to_pool) {
+ transport_release_cmd_to_pool(cmd);
+ } else {
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_direct(cmd);
+ }
+
+ return 0;
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * @cmd: Associated se_cmd descriptor
+ * @mem: SGL style memory for TCM WRITE / READ
+ * @sg_mem_num: Number of SGL elements
+ * @mem_bidi_in: SGL style memory for TCM BIDI READ
+ * @sg_mem_bidi_num: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+ struct se_cmd *cmd,
+ struct scatterlist *mem,
+ u32 sg_mem_num,
+ struct scatterlist *mem_bidi_in,
+ u32 sg_mem_bidi_num)
+{
+ u32 se_mem_cnt_out = 0;
+ int ret;
+
+ if (!(mem) || !(sg_mem_num))
+ return 0;
+ /*
+ * Passed *mem will contain a list_head containing preformatted
+ * struct se_mem elements...
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
+ if ((mem_bidi_in) || (sg_mem_bidi_num)) {
+ printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
+ " with BIDI-COMMAND\n");
+ return -ENOSYS;
+ }
+
+ T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
+ T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
+ cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
+ return 0;
+ }
+ /*
+ * Otherwise, assume the caller is passing a struct scatterlist
+ * array from include/linux/scatterlist.h
+ */
+ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * For CDBs using TCM struct se_mem linked-list scatterlist memory
+ * processed into a TCM struct se_subsystem_dev, we do the mapping
+ * from the passed physical memory to struct se_mem->se_page here.
+ */
+ T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_list))
+ return -ENOMEM;
+
+ ret = transport_map_sg_to_mem(cmd,
+ T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
+ if (ret < 0)
+ return -ENOMEM;
+
+ T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
+ /*
+ * Setup BIDI READ list of struct se_mem elements
+ */
+ if ((mem_bidi_in) && (sg_mem_bidi_num)) {
+ T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+ se_mem_cnt_out = 0;
+
+ ret = transport_map_sg_to_mem(cmd,
+ T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
+ &se_mem_cnt_out);
+ if (ret < 0) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+
+ T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+ }
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+ if (mem_bidi_in || sg_mem_bidi_num) {
+ printk(KERN_ERR "BIDI-Commands not supported using "
+ "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
+ return -ENOSYS;
+ }
+ /*
+ * For incoming CDBs using a contiguous buffer internally with TCM,
+ * save the passed struct scatterlist memory. After TCM storage object
+ * processing has completed for this struct se_cmd, TCM core will call
+ * transport_memcpy_[write,read]_contig() as necessary from
+ * transport_generic_complete_ok() and transport_write_pending() in order
+ * to copy the TCM buffer to/from the original passed *mem in SGL ->
+ * struct scatterlist format.
+ */
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
+ T_TASK(cmd)->t_task_pt_sgl = mem;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
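+
+/*
+ * Editor's note: illustrative usage sketch only, not part of the original
+ * patch. A hypothetical fabric module that has received a WRITE payload as
+ * a struct scatterlist array could hand it to TCM like this (no BIDI READ
+ * payload in this example):
+ */
+#if 0
+static int example_fabric_map_sgl(struct se_cmd *se_cmd,
+				  struct scatterlist *sgl, u32 sgl_count)
+{
+	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+						NULL, 0);
+}
+#endif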
+
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_get_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+
+ T_TASK(cmd)->t_tasks_sectors =
+ (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+ if (!(T_TASK(cmd)->t_tasks_sectors))
+ T_TASK(cmd)->t_tasks_sectors = 1;
+
+ if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
+ return 0;
+
+ if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
+ transport_dev_end_lba(dev)) {
+ printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+ " transport_dev_end_lba(): %llu\n",
+ T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+ transport_dev_end_lba(dev));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+ return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+ }
+
+ return 0;
+}
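+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch.
+ * The range check above rejects commands whose starting LBA plus sector
+ * count runs past the end of the device, e.g. LBA 1000 + 100 sectors on a
+ * device whose end LBA is 1024:
+ */
+#if 0
+static int example_range_check(unsigned long long lba, u32 sectors,
+			       unsigned long long end_lba)
+{
+	return ((lba + sectors) > end_lba) ? -1 : 0;	/* -1 == reject */
+}
+#endif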
+
+static int transport_new_cmd_obj(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ u32 task_cdbs = 0, rc;
+
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ task_cdbs++;
+ T_TASK(cmd)->t_task_cdbs++;
+ } else {
+ int set_counts = 1;
+
+ /*
+ * Setup any BIDI READ tasks and memory from
+ * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
+ * are queued first for the non pSCSI passthrough case.
+ */
+ if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+ (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ rc = transport_generic_get_cdb_count(cmd,
+ T_TASK(cmd)->t_task_lba,
+ T_TASK(cmd)->t_tasks_sectors,
+ DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
+ set_counts);
+ if (!(rc)) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ set_counts = 0;
+ }
+ /*
+ * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
+ * Note for BIDI transfers this will contain the WRITE payload
+ */
+ task_cdbs = transport_generic_get_cdb_count(cmd,
+ T_TASK(cmd)->t_task_lba,
+ T_TASK(cmd)->t_tasks_sectors,
+ cmd->data_direction, T_TASK(cmd)->t_mem_list,
+ set_counts);
+ if (!(task_cdbs)) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ T_TASK(cmd)->t_task_cdbs += task_cdbs;
+
+#if 0
+ printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
+ " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
+ T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+ T_TASK(cmd)->t_task_cdbs);
+#endif
+ }
+
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
+ atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
+ return 0;
+}
+
+static struct list_head *transport_init_se_mem_list(void)
+{
+ struct list_head *se_mem_list;
+
+ se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!(se_mem_list)) {
+ printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(se_mem_list);
+
+ return se_mem_list;
+}
+
+static int
+transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+{
+ unsigned char *buf;
+ struct se_mem *se_mem;
+
+ T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_list))
+ return -ENOMEM;
+
+ /*
+ * If the device uses memory mapping this is enough.
+ */
+ if (cmd->se_dev->transport->do_se_mem_map)
+ return 0;
+
+ /*
+ * Setup BIDI-COMMAND READ list of struct se_mem elements
+ */
+ if (T_TASK(cmd)->t_tasks_bidi) {
+ T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+ if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+ kfree(T_TASK(cmd)->t_mem_list);
+ return -ENOMEM;
+ }
+ }
+
+ while (length) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+ se_mem->se_len = (length > dma_size) ? dma_size : length;
+
+/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
+ se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+ if (!(se_mem->se_page)) {
+ printk(KERN_ERR "alloc_pages() failed\n");
+ goto out;
+ }
+
+ buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
+ if (!(buf)) {
+ printk(KERN_ERR "kmap_atomic() failed\n");
+ goto out;
+ }
+ memset(buf, 0, se_mem->se_len);
+ kunmap_atomic(buf, KM_IRQ0);
+
+ list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
+ T_TASK(cmd)->t_tasks_se_num++;
+
+ DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
+ " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
+ se_mem->se_off);
+
+ length -= se_mem->se_len;
+ }
+
+ DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
+ T_TASK(cmd)->t_tasks_se_num);
+
+ return 0;
+out:
+ return -1;
+}
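+
+/*
+ * Editor's note: illustrative sketch only, not part of the original patch.
+ * The allocation loop above carves cmd->data_length into dma_size-sized
+ * struct se_mem chunks; e.g. a 10240-byte transfer with a 4096-byte
+ * dma_size yields three elements (4096 + 4096 + 2048):
+ */
+#if 0
+static u32 example_se_mem_element_count(u32 length, u32 dma_size)
+{
+	return (length + dma_size - 1) / dma_size;	/* i.e. DIV_ROUND_UP */
+}
+#endif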
+
+extern u32 transport_calc_sg_num(
+ struct se_task *task,
+ struct se_mem *in_se_mem,
+ u32 task_offset)
+{
+ struct se_cmd *se_cmd = task->task_se_cmd;
+ struct se_device *se_dev = SE_DEV(se_cmd);
+ struct se_mem *se_mem = in_se_mem;
+ struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
+ u32 sg_length, task_size = task->task_size, task_sg_num_padded;
+
+ while (task_size != 0) {
+ DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
+ " se_mem->se_off(%u) task_offset(%u)\n",
+ se_mem->se_page, se_mem->se_len,
+ se_mem->se_off, task_offset);
+
+ if (task_offset == 0) {
+ if (task_size >= se_mem->se_len) {
+ sg_length = se_mem->se_len;
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list)))
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ } else {
+ sg_length = task_size;
+ task_size -= sg_length;
+ goto next;
+ }
+
+ DEBUG_SC("sg_length(%u) task_size(%u)\n",
+ sg_length, task_size);
+ } else {
+ if ((se_mem->se_len - task_offset) > task_size) {
+ sg_length = task_size;
+ task_size -= sg_length;
+ goto next;
+ } else {
+ sg_length = (se_mem->se_len - task_offset);
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list)))
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ }
+
+ DEBUG_SC("sg_length(%u) task_size(%u)\n",
+ sg_length, task_size);
+
+ task_offset = 0;
+ }
+ task_size -= sg_length;
+next:
+ DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
+ task->task_no, task_size);
+
+ task->task_sg_num++;
+ }
+ /*
+ * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.  If so,
+ * then allocate an extra padding SG entry for linking and
+ * marking the end of the chained SGL.
+ */
+ if (tfo->task_sg_chaining) {
+ task_sg_num_padded = (task->task_sg_num + 1);
+ task->task_padded_sg = 1;
+ } else
+ task_sg_num_padded = task->task_sg_num;
+
+ task->task_sg = kzalloc(task_sg_num_padded *
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!(task->task_sg)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " task->task_sg\n");
+ return 0;
+ }
+ sg_init_table(&task->task_sg[0], task_sg_num_padded);
+ /*
+ * Setup task->task_sg_bidi for SCSI READ payload for
+ * TCM/pSCSI passthrough if present for BIDI-COMMAND
+ */
+ if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
+ (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ task->task_sg_bidi = kzalloc(task_sg_num_padded *
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!(task->task_sg_bidi)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " task->task_sg_bidi\n");
+ return 0;
+ }
+ sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
+ }
+ /*
+ * For the chaining case, setup the proper end of SGL for the
+ * initial submission struct task into struct se_subsystem_api.
+ * This will be cleared later by transport_do_task_sg_chain()
+ */
+ if (task->task_padded_sg) {
+ sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
+ /*
+ * Only mark the end of the bi-directional scatterlist if it
+ * was actually created, i.e. only for a bi-directional
+ * (RD + WR) request.
+ */
+ if (task->task_sg_bidi)
+ sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
+ }
+
+ DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
+ " task_sg_num_padded(%u)\n", task->task_sg_num,
+ task_sg_num_padded);
+
+ return task->task_sg_num;
+}
+
+static inline int transport_set_tasks_sectors_disk(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ if ((lba + sectors) > transport_dev_end_lba(dev)) {
+ task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+ if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ }
+ } else {
+ if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ } else
+ task->task_sectors = sectors;
+ }
+
+ return 0;
+}
+
+static inline int transport_set_tasks_sectors_non_disk(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+ task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+ *max_sectors_set = 1;
+ } else
+ task->task_sectors = sectors;
+
+ return 0;
+}
+
+static inline int transport_set_tasks_sectors(
+ struct se_task *task,
+ struct se_device *dev,
+ unsigned long long lba,
+ u32 sectors,
+ int *max_sectors_set)
+{
+ return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
+ transport_set_tasks_sectors_disk(task, dev, lba, sectors,
+ max_sectors_set) :
+ transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
+ max_sectors_set);
+}
+
+static int transport_map_sg_to_mem(
+ struct se_cmd *cmd,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ u32 *se_mem_cnt)
+{
+ struct se_mem *se_mem;
+ struct scatterlist *sg;
+ u32 sg_count = 1, cmd_size = cmd->data_length;
+
+ if (!in_mem) {
+ printk(KERN_ERR "No source scatterlist\n");
+ return -1;
+ }
+ sg = (struct scatterlist *)in_mem;
+
+ while (cmd_size) {
+ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+ if (!(se_mem)) {
+ printk(KERN_ERR "Unable to allocate struct se_mem\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&se_mem->se_list);
+ DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
+ " sg_page: %p offset: %d length: %d\n", cmd_size,
+ sg_page(sg), sg->offset, sg->length);
+
+ se_mem->se_page = sg_page(sg);
+ se_mem->se_off = sg->offset;
+
+ if (cmd_size > sg->length) {
+ se_mem->se_len = sg->length;
+ sg = sg_next(sg);
+ sg_count++;
+ } else
+ se_mem->se_len = cmd_size;
+
+ cmd_size -= se_mem->se_len;
+
+ DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
+ *se_mem_cnt, cmd_size);
+ DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
+ se_mem->se_page, se_mem->se_off, se_mem->se_len);
+
+ list_add_tail(&se_mem->se_list, se_mem_list);
+ (*se_mem_cnt)++;
+ }
+
+ DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
+ " struct se_mem\n", sg_count, *se_mem_cnt);
+
+ if (sg_count != *se_mem_cnt)
+ BUG();
+
+ return 0;
+}
+
+/* transport_map_mem_to_sg():
+ *
+ *
+ */
+int transport_map_mem_to_sg(
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset)
+{
+ struct se_cmd *se_cmd = task->task_se_cmd;
+ struct se_mem *se_mem = in_se_mem;
+ struct scatterlist *sg = (struct scatterlist *)in_mem;
+ u32 task_size = task->task_size, sg_no = 0;
+
+ if (!sg) {
+ printk(KERN_ERR "Unable to locate valid struct"
+ " scatterlist pointer\n");
+ return -1;
+ }
+
+ while (task_size != 0) {
+ /*
+ * Setup the contigious array of scatterlists for
+ * this struct se_task.
+ */
+ sg_assign_page(sg, se_mem->se_page);
+
+ if (*task_offset == 0) {
+ sg->offset = se_mem->se_off;
+
+ if (task_size >= se_mem->se_len) {
+ sg->length = se_mem->se_len;
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list))) {
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ (*se_mem_cnt)++;
+ }
+ } else {
+ sg->length = task_size;
+ /*
+ * Determine if we need to calculate an offset
+ * into the struct se_mem on the next go around.
+ */
+ task_size -= sg->length;
+ if (!(task_size))
+ *task_offset = sg->length;
+
+ goto next;
+ }
+
+ } else {
+ sg->offset = (*task_offset + se_mem->se_off);
+
+ if ((se_mem->se_len - *task_offset) > task_size) {
+ sg->length = task_size;
+ /*
+ * Determine if we need to calculate an offset
+ * into the struct se_mem on the next go around.
+ */
+ task_size -= sg->length;
+ if (!(task_size))
+ *task_offset += sg->length;
+
+ goto next;
+ } else {
+ sg->length = (se_mem->se_len - *task_offset);
+
+ if (!(list_is_last(&se_mem->se_list,
+ T_TASK(se_cmd)->t_mem_list))) {
+ se_mem = list_entry(se_mem->se_list.next,
+ struct se_mem, se_list);
+ (*se_mem_cnt)++;
+ }
+ }
+
+ *task_offset = 0;
+ }
+ task_size -= sg->length;
+next:
+ DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
+ " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
+ sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
+
+ sg_no++;
+ if (!(task_size))
+ break;
+
+ sg = sg_next(sg);
+
+ if (task_size > se_cmd->data_length)
+ BUG();
+ }
+ *out_se_mem = se_mem;
+
+ DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
+ " SGs\n", task->task_no, *se_mem_cnt, sg_no);
+
+ return 0;
+}
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+ struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
+ struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
+ struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+ struct se_task *task;
+ struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
+ u32 task_sg_num = 0, sg_count = 0;
+ int i;
+
+ if (tfo->task_sg_chaining == 0) {
+ printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
+ " %s\n", tfo->get_fabric_name());
+ dump_stack();
+ return;
+ }
+ /*
+ * Walk the struct se_task list and setup scatterlist chains
+ * for each contiguously allocated struct se_task->task_sg[].
+ */
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (!(task->task_sg) || !(task->task_padded_sg))
+ continue;
+
+ if (sg_head && sg_link) {
+ sg_head_cur = &task->task_sg[0];
+ sg_link_cur = &task->task_sg[task->task_sg_num];
+ /*
+ * Either add chain or mark end of scatterlist
+ */
+ if (!(list_is_last(&task->t_list,
+ &T_TASK(cmd)->t_task_list))) {
+ /*
+ * Clear existing SGL termination bit set in
+ * transport_calc_sg_num(), see sg_mark_end()
+ */
+ sg_end_cur = &task->task_sg[task->task_sg_num - 1];
+ sg_end_cur->page_link &= ~0x02;
+
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+ sg_count += (task->task_sg_num + 1);
+ } else
+ sg_count += task->task_sg_num;
+
+ sg_head = sg_head_cur;
+ sg_link = sg_link_cur;
+ task_sg_num = task->task_sg_num;
+ continue;
+ }
+ sg_head = sg_first = &task->task_sg[0];
+ sg_link = &task->task_sg[task->task_sg_num];
+ task_sg_num = task->task_sg_num;
+ /*
+ * Check for single task..
+ */
+ if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
+ /*
+ * Clear existing SGL termination bit set in
+ * transport_calc_sg_num(), see sg_mark_end()
+ */
+ sg_end = &task->task_sg[task->task_sg_num - 1];
+ sg_end->page_link &= ~0x02;
+ sg_count += (task->task_sg_num + 1);
+ } else
+ sg_count += task->task_sg_num;
+ }
+ /*
+ * Setup the starting pointer and total t_tasks_sg_linked_no including
+ * padding SGs for linking and to mark the end.
+ */
+ T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+ T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+ DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+ T_TASK(cmd)->t_tasks_sg_chained_no);
+
+ for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+ T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+ DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+ sg, sg_page(sg), sg->length, sg->offset);
+ if (sg_is_chain(sg))
+ DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ if (sg_is_last(sg))
+ DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ }
+
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
+
+static int transport_do_se_mem_map(
+ struct se_device *dev,
+ struct se_task *task,
+ struct list_head *se_mem_list,
+ void *in_mem,
+ struct se_mem *in_se_mem,
+ struct se_mem **out_se_mem,
+ u32 *se_mem_cnt,
+ u32 *task_offset_in)
+{
+ u32 task_offset = *task_offset_in;
+ int ret = 0;
+ /*
+ * se_subsystem_api_t->do_se_mem_map is used when internal allocation
+ * has been done by the transport plugin.
+ */
+ if (TRANSPORT(dev)->do_se_mem_map) {
+ ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
+ in_mem, in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+ if (ret == 0)
+ T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+
+ return ret;
+ }
+
+ BUG_ON(list_empty(se_mem_list));
+ /*
+	 * This is the normal path for all non-BIDI and BIDI-COMMAND
+	 * WRITE payloads.  If we need to do BIDI READ passthrough for
+ * TCM/pSCSI the first call to transport_do_se_mem_map ->
+ * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
+ * allocation for task->task_sg_bidi, and the subsequent call to
+ * transport_do_se_mem_map() from transport_generic_get_cdb_count()
+ */
+ if (!(task->task_sg_bidi)) {
+ /*
+ * Assume default that transport plugin speaks preallocated
+ * scatterlists.
+ */
+ if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
+ return -1;
+ /*
+ * struct se_task->task_sg now contains the struct scatterlist array.
+ */
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+ in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+ }
+ /*
+ * Handle the se_mem_list -> struct task->task_sg_bidi
+ * memory map for the extra BIDI READ payload
+ */
+ return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
+ in_se_mem, out_se_mem, se_mem_cnt,
+ task_offset_in);
+}
+
+static u32 transport_generic_get_cdb_count(
+ struct se_cmd *cmd,
+ unsigned long long lba,
+ u32 sectors,
+ enum dma_data_direction data_direction,
+ struct list_head *mem_list,
+ int set_counts)
+{
+ unsigned char *cdb = NULL;
+ struct se_task *task;
+ struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+ struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+ struct se_device *dev = SE_DEV(cmd);
+ int max_sectors_set = 0, ret;
+ u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+
+ if (!mem_list) {
+ printk(KERN_ERR "mem_list is NULL in transport_generic_get"
+ "_cdb_count()\n");
+ return 0;
+ }
+ /*
+	 * Using RAMDISK_DR backstores is the only case where mem_list
+	 * will ever be empty at this point.
+ */
+ if (!(list_empty(mem_list)))
+ se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+ /*
+ * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
+ * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
+ */
+ if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+ !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
+ (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
+ se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
+ struct se_mem, se_list);
+
+ while (sectors) {
+ DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
+ transport_dev_end_lba(dev));
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!(task))
+ goto out;
+
+ transport_set_tasks_sectors(task, dev, lba, sectors,
+ &max_sectors_set);
+
+ task->task_lba = lba;
+ lba += task->task_sectors;
+ sectors -= task->task_sectors;
+ task->task_size = (task->task_sectors *
+ DEV_ATTRIB(dev)->block_size);
+
+ cdb = TRANSPORT(dev)->get_cdb(task);
+ if ((cdb)) {
+ memcpy(cdb, T_TASK(cmd)->t_task_cdb,
+ scsi_command_size(T_TASK(cmd)->t_task_cdb));
+ cmd->transport_split_cdb(task->task_lba,
+ &task->task_sectors, cdb);
+ }
+
+ /*
+ * Perform the SE OBJ plugin and/or Transport plugin specific
+		 * mapping for T_TASK(cmd)->t_mem_list, and set up
+		 * task->task_sg and, if necessary, task->task_sg_bidi.
+ */
+ ret = transport_do_se_mem_map(dev, task, mem_list,
+ NULL, se_mem, &se_mem_lout, &se_mem_cnt,
+ &task_offset_in);
+ if (ret < 0)
+ goto out;
+
+ se_mem = se_mem_lout;
+ /*
+ * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
+ * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
+ *
+ * Note that the first call to transport_do_se_mem_map() above will
+ * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
+		 * -> transport_calc_sg_num(), and the second call here performs
+		 * the BIDI READ mapping for TCM/pSCSI passthrough.
+ */
+ if (task->task_sg_bidi != NULL) {
+ ret = transport_do_se_mem_map(dev, task,
+ T_TASK(cmd)->t_mem_bidi_list, NULL,
+ se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
+ &task_offset_in);
+ if (ret < 0)
+ goto out;
+
+ se_mem_bidi = se_mem_bidi_lout;
+ }
+ task_cdbs++;
+
+ DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
+ task_cdbs, task->task_sg_num);
+
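+		/*
+		 * If this task's sector count was capped by the device
+		 * max_sectors limit, clear the flag and keep generating
+		 * tasks for the remaining sectors.
+		 */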
+ if (max_sectors_set) {
+ max_sectors_set = 0;
+ continue;
+ }
+
+ if (!sectors)
+ break;
+ }
+
+ if (set_counts) {
+ atomic_inc(&T_TASK(cmd)->t_fe_count);
+ atomic_inc(&T_TASK(cmd)->t_se_count);
+ }
+
+ DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
+ CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
+ ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+
+ return task_cdbs;
+out:
+ return 0;
+}
+
+static int
+transport_map_control_cmd_to_task(struct se_cmd *cmd)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ unsigned char *cdb;
+ struct se_task *task;
+ int ret;
+
+ task = transport_generic_get_task(cmd, cmd->data_direction);
+ if (!task)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ cdb = TRANSPORT(dev)->get_cdb(task);
+ if (cdb)
+ memcpy(cdb, cmd->t_task->t_task_cdb,
+ scsi_command_size(cmd->t_task->t_task_cdb));
+
+ task->task_size = cmd->data_length;
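+	/*
+	 * Control CDBs using SG_IO map to a single SG entry; all other
+	 * control payloads use none.
+	 */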
+ task->task_sg_num =
+ (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+
+ atomic_inc(&cmd->t_task->t_fe_count);
+ atomic_inc(&cmd->t_task->t_se_count);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
+ struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+ u32 se_mem_cnt = 0, task_offset = 0;
+
+ if (!list_empty(T_TASK(cmd)->t_mem_list))
+ se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list);
+
+ ret = transport_do_se_mem_map(dev, task,
+ cmd->t_task->t_mem_list, NULL, se_mem,
+ &se_mem_lout, &se_mem_cnt, &task_offset);
+ if (ret < 0)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ if (dev->transport->map_task_SG)
+ return dev->transport->map_task_SG(task);
+ return 0;
+ } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+ if (dev->transport->map_task_non_SG)
+ return dev->transport->map_task_non_SG(task);
+ return 0;
+ } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ if (dev->transport->cdb_none)
+ return dev->transport->cdb_none(task);
+ return 0;
+ } else {
+ BUG();
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ }
+}
+
+/* transport_generic_new_cmd(): Called from transport_processing_thread()
+ *
+ * Allocate storage transport resources from a set of values predefined
+ * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
+ * Any non-zero return here is treated as an "out of resources" exception.
+ */
+ /*
+ * Generate struct se_task(s) and/or their payloads for this CDB.
+ */
+static int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+ struct se_portal_group *se_tpg;
+ struct se_task *task;
+ struct se_device *dev = SE_DEV(cmd);
+ int ret = 0;
+
+ /*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
+	 * to set up beforehand the linked list of physical memory at
+	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page.
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+ ret = transport_allocate_resources(cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = transport_get_sectors(cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = transport_new_cmd_obj(cmd);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Determine if the calling TCM fabric module is talking to
+ * Linux/NET via kernel sockets and needs to allocate a
+ * struct iovec array to complete the struct se_cmd
+ */
+ se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
+ if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
+ ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
+ if (ret < 0)
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ }
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ if (atomic_read(&task->task_sent))
+ continue;
+ if (!dev->transport->map_task_SG)
+ continue;
+
+ ret = dev->transport->map_task_SG(task);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ ret = transport_map_control_cmd_to_task(cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+ * This WRITE struct se_cmd (and all of its associated struct se_task's)
+ * will be added to the struct se_device execution queue after its WRITE
+ * data has arrived. (ie: It gets handled by the transport processing
+ * thread a second time)
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ transport_add_tasks_to_state_queue(cmd);
+ return transport_generic_write_pending(cmd);
+ }
+ /*
+ * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+ * to the execution queue.
+ */
+ transport_execute_tasks(cmd);
+ return 0;
+}
+
+/* transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+#if 0
+ /*
+ * Copy SCSI Presented DTL sector(s) from received buffers allocated to
+ * original EDTL
+ */
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (!T_TASK(cmd)->t_tasks_se_num) {
+ unsigned char *dst, *buf =
+ (unsigned char *)T_TASK(cmd)->t_task_buf;
+
+			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
+ if (!(dst)) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ " WRITE underflow\n");
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ return;
+ }
+ memcpy(dst, buf, cmd->cmd_spdtl);
+
+ kfree(T_TASK(cmd)->t_task_buf);
+ T_TASK(cmd)->t_task_buf = dst;
+ } else {
+ struct scatterlist *sg =
+				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
+ struct scatterlist *orig_sg;
+
+			orig_sg = kzalloc(sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num,
+					GFP_KERNEL);
+ if (!(orig_sg)) {
+ printk(KERN_ERR "Unable to allocate memory"
+ " for WRITE underflow\n");
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ return;
+ }
+
+ memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
+ sizeof(struct scatterlist) *
+ T_TASK(cmd)->t_tasks_se_num);
+
+ cmd->data_length = cmd->cmd_spdtl;
+ /*
+ * FIXME, clear out original struct se_task and state
+ * information.
+ */
+ if (transport_generic_new_cmd(cmd) < 0) {
+ transport_generic_request_failure(cmd, NULL,
+ PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+ kfree(orig_sg);
+ return;
+ }
+
+ transport_memcpy_write_sg(cmd, orig_sg);
+ }
+ }
+#endif
+ transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+/* transport_generic_write_pending():
+ *
+ *
+ */
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * For the TCM control CDBs using a contiguous buffer, do the memcpy
+ * from the passed Linux/SCSI struct scatterlist located at
+	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
+ * T_TASK(se_cmd)->t_task_buf.
+ */
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+ transport_memcpy_read_contig(cmd,
+ T_TASK(cmd)->t_task_buf,
+ T_TASK(cmd)->t_task_pt_sgl);
+ /*
+ * Clear the se_cmd for WRITE_PENDING status in order to set
+ * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+ * can be called from HW target mode interrupt code. This is safe
+ * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+ * because the se_cmd->se_lun pointer is not being cleared.
+ */
+ transport_cmd_check_stop(cmd, 1, 0);
+
+ /*
+ * Call the fabric write_pending function here to let the
+ * frontend know that WRITE buffers are ready.
+ */
+ ret = CMD_TFO(cmd)->write_pending(cmd);
+ if (ret < 0)
+ return ret;
+
+ return PYX_TRANSPORT_WRITE_PENDING;
+}
+
+/* transport_release_cmd_to_pool():
+ *
+ *
+ */
+void transport_release_cmd_to_pool(struct se_cmd *cmd)
+{
+ BUG_ON(!T_TASK(cmd));
+ BUG_ON(!CMD_TFO(cmd));
+
+ transport_free_se_cmd(cmd);
+ CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+}
+EXPORT_SYMBOL(transport_release_cmd_to_pool);
+
+/* transport_generic_free_cmd():
+ *
+ * Called from processing frontend to release storage engine resources
+ */
+void transport_generic_free_cmd(
+ struct se_cmd *cmd,
+ int wait_for_tasks,
+ int release_to_pool,
+ int session_reinstatement)
+{
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
+ transport_release_cmd_to_pool(cmd);
+ else {
+ core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+ if (SE_LUN(cmd)) {
+#if 0
+ printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+ " SE_LUN(cmd)\n", cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ transport_lun_remove_cmd(cmd);
+ }
+
+ if (wait_for_tasks && cmd->transport_wait_for_tasks)
+ cmd->transport_wait_for_tasks(cmd, 0, 0);
+
+ transport_generic_remove(cmd, release_to_pool,
+ session_reinstatement);
+ }
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+static void transport_nop_wait_for_tasks(
+ struct se_cmd *cmd,
+ int remove_cmd,
+ int session_reinstatement)
+{
+ return;
+}
+
+/* transport_lun_wait_for_tasks():
+ *
+ * Called from ConfigFS context to stop the passed struct se_cmd to allow
+ * a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+ unsigned long flags;
+ int ret;
+ /*
+ * If the frontend has already requested this struct se_cmd to
+ * be stopped, we can safely ignore this struct se_cmd.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+ " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ transport_cmd_check_stop(cmd, 1, 0);
+ return -1;
+ }
+ atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
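+	/*
+	 * Wake the device processing thread so any queued work for this
+	 * command can run to completion.
+	 */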
+ wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+ ret = transport_stop_tasks_for_cmd(cmd);
+
+ DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
+ " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+ if (!ret) {
+ DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+ DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ }
+ transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+ return 0;
+}
+
+/* #define DEBUG_CLEAR_LUN */
+#ifdef DEBUG_CLEAR_LUN
+#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CLEAR_L(x...)
+#endif
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+ struct se_cmd *cmd = NULL;
+ unsigned long lun_flags, cmd_flags;
+ /*
+ * Do exception processing and return CHECK_CONDITION status to the
+ * Initiator Port.
+ */
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ while (!list_empty_careful(&lun->lun_cmd_list)) {
+ cmd = list_entry(lun->lun_cmd_list.next,
+ struct se_cmd, se_lun_list);
+ list_del(&cmd->se_lun_list);
+
+ if (!(T_TASK(cmd))) {
+ printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
+ "[i,t]_state: %u/%u\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ BUG();
+ }
+ atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ /*
+ * This will notify iscsi_target_transport.c:
+ * transport_cmd_check_stop() that a LUN shutdown is in
+ * progress for the iscsi_cmd_t.
+ */
+ spin_lock(&T_TASK(cmd)->t_state_lock);
+ DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+ "_lun_stop for ITT: 0x%08x\n",
+ SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
+ spin_unlock(&T_TASK(cmd)->t_state_lock);
+
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+ if (!(SE_LUN(cmd))) {
+ printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ BUG();
+ }
+ /*
+ * If the Storage engine still owns the iscsi_cmd_t, determine
+ * and/or stop its context.
+ */
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+ "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ continue;
+ }
+
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+ "_wait_for_tasks(): SUCCESS\n",
+ SE_LUN(cmd)->unpacked_lun,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ goto check_cond;
+ }
+ atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+
+ transport_free_dev_tasks(cmd);
+ /*
+ * The Storage engine stopped this struct se_cmd before it was
+		 * sent to the fabric frontend for delivery back to the
+		 * Initiator Node.  Return this SCSI CDB back with a
+		 * CHECK_CONDITION status.
+ */
+check_cond:
+ transport_send_check_condition_and_sense(cmd,
+ TCM_NON_EXISTENT_LUN, 0);
+ /*
+ * If the fabric frontend is waiting for this iscsi_cmd_t to
+ * be released, notify the waiting thread now that LU has
+ * finished accessing it.
+ */
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
+ DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+ " struct se_cmd: %p ITT: 0x%08x\n",
+ lun->unpacked_lun,
+ cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ cmd_flags);
+ transport_cmd_check_stop(cmd, 1, 0);
+ complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ continue;
+ }
+ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+ lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+ }
+ spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+ struct se_lun *lun = (struct se_lun *)p;
+
+ __transport_clear_lun_from_sessions(lun);
+ complete(&lun->lun_shutdown_comp);
+
+ return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+ struct task_struct *kt;
+
+ kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+ "tcm_cl_%u", lun->unpacked_lun);
+ if (IS_ERR(kt)) {
+ printk(KERN_ERR "Unable to start clear_lun thread\n");
+ return -1;
+ }
+ wait_for_completion(&lun->lun_shutdown_comp);
+
+ return 0;
+}
+
+/* transport_generic_wait_for_tasks():
+ *
+ * Called from frontend or passthrough context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+static void transport_generic_wait_for_tasks(
+ struct se_cmd *cmd,
+ int remove_cmd,
+ int session_reinstatement)
+{
+ unsigned long flags;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
+ return;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ /*
+ * If we are already stopped due to an external event (ie: LUN shutdown)
+ * sleep until the connection can have the passed struct se_cmd back.
+	 * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
+ * transport_clear_lun_from_sessions() once the ConfigFS context caller
+ * has completed its operation on the struct se_cmd.
+ */
+ if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
+ "_stop_comp); for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ /*
+ * There is a special case for WRITES where a FE exception +
+ * LUN shutdown means ConfigFS context is still sleeping on
+ * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+ * We go ahead and up transport_lun_stop_comp just to be sure
+ * here.
+ */
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+ transport_all_task_dev_remove_state(cmd);
+ /*
+		 * At this point, the frontend that originated this struct se_cmd
+		 * owns the structure again, and it can be released through the
+		 * normal means below.
+ */
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
+ "stop_comp); for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+
+ atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ }
+ if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+ goto remove;
+
+ atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+
+ DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+ " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
+ " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state);
+
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+ wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
+ "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
+ CMD_TFO(cmd)->get_task_tag(cmd));
+remove:
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (!remove_cmd)
+ return;
+
+ transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+}
+
+static int transport_get_sense_codes(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ *asc = cmd->scsi_asc;
+ *ascq = cmd->scsi_ascq;
+
+ return 0;
+}
+
+static int transport_set_sense_codes(
+ struct se_cmd *cmd,
+ u8 asc,
+ u8 ascq)
+{
+ cmd->scsi_asc = asc;
+ cmd->scsi_ascq = ascq;
+
+ return 0;
+}
+
+int transport_send_check_condition_and_sense(
+ struct se_cmd *cmd,
+ u8 reason,
+ int from_transport)
+{
+ unsigned char *buffer = cmd->sense_buffer;
+ unsigned long flags;
+ int offset;
+ u8 asc = 0, ascq = 0;
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ return 0;
+ }
+ cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+ if (!reason && from_transport)
+ goto after_reason;
+
+ if (!from_transport)
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ /*
+ * Data Segment and SenseLength of the fabric response PDU.
+ *
+ * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+ * from include/scsi/scsi_cmnd.h
+ */
+ offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ TRANSPORT_SENSE_BUFFER);
+ /*
+ * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
+ * SENSE KEY values from include/scsi/scsi.h
+ */
+ switch (reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_SECTOR_COUNT_TOO_MANY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID COMMAND OPERATION CODE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+ break;
+ case TCM_UNKNOWN_MODE_PAGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID FIELD IN CDB */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ break;
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* BUS DEVICE RESET FUNCTION OCCURRED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+ break;
+ case TCM_INCORRECT_AMOUNT_OF_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ /* NOT ENOUGH UNSOLICITED DATA */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+ break;
+ case TCM_INVALID_CDB_FIELD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN CDB */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ break;
+ case TCM_INVALID_PARAMETER_LIST:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN PARAMETER LIST */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+ break;
+ case TCM_UNEXPECTED_UNSOLICITED_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+ /* UNEXPECTED_UNSOLICITED_DATA */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+ break;
+ case TCM_SERVICE_CRC_ERROR:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* PROTOCOL SERVICE CRC ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+ /* N/A */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+ break;
+ case TCM_SNACK_REJECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* READ ERROR */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+ /* FAILED RETRANSMISSION REQUEST */
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+ break;
+ case TCM_WRITE_PROTECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* DATA PROTECT */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ /* WRITE PROTECTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* UNIT ATTENTION */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+ buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ break;
+ case TCM_CHECK_CONDITION_NOT_READY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* Not Ready */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ transport_get_sense_codes(cmd, &asc, &ascq);
+ buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+ buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+ break;
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ default:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT COMMUNICATION FAILURE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+ break;
+ }
+ /*
+ * This code uses linux/include/scsi/scsi.h SAM status codes!
+ */
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Automatically padded, this value is encoded in the fabric's
+ * data_length response PDU containing the SCSI defined sense data.
+ */
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+ CMD_TFO(cmd)->queue_status(cmd);
+ return 0;
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+ int ret = 0;
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
+ if (!(send_status) ||
+ (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
+#if 0
+ printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+ " status for CDB: 0x%02x ITT: 0x%08x\n",
+ T_TASK(cmd)->t_task_cdb[0],
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ CMD_TFO(cmd)->queue_status(cmd);
+ ret = 1;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+ /*
+ * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+ * response. This response with TASK_ABORTED status will be
+ * queued back to fabric module by transport_check_aborted_status().
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+ atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+ smp_mb__after_atomic_inc();
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ transport_new_cmd_failure(cmd);
+ return;
+ }
+ }
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+ printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
+ CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+ CMD_TFO(cmd)->queue_status(cmd);
+}
+
+/* transport_generic_do_tmr():
+ *
+ *
+ */
+int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+ struct se_cmd *ref_cmd;
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_tmr_req *tmr = cmd->se_tmr_req;
+ int ret;
+
+ switch (tmr->function) {
+ case ABORT_TASK:
+ ref_cmd = tmr->ref_cmd;
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ case ABORT_TASK_SET:
+ case CLEAR_ACA:
+ case CLEAR_TASK_SET:
+ tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ break;
+ case LUN_RESET:
+ ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+ tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+ TMR_FUNCTION_REJECTED;
+ break;
+#if 0
+ case TARGET_WARM_RESET:
+ transport_generic_host_reset(dev->se_hba);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ case TARGET_COLD_RESET:
+ transport_generic_host_reset(dev->se_hba);
+ transport_generic_cold_reset(dev->se_hba);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+#endif
+ default:
+		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
+ tmr->function);
+ tmr->response = TMR_FUNCTION_REJECTED;
+ break;
+ }
+
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ CMD_TFO(cmd)->queue_tm_rsp(cmd);
+
+ transport_cmd_check_stop(cmd, 2, 0);
+ return 0;
+}
+
+/*
+ * Called with spin_lock_irq(&dev->execute_task_lock); held
+ *
+ */
+static struct se_task *
+transport_get_task_from_state_list(struct se_device *dev)
+{
+ struct se_task *task;
+
+ if (list_empty(&dev->state_task_list))
+ return NULL;
+
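+	/*
+	 * Grab the first struct se_task from the head of the state list.
+	 */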
+ list_for_each_entry(task, &dev->state_task_list, t_state_list)
+ break;
+
+ list_del(&task->t_state_list);
+ atomic_set(&task->task_state_active, 0);
+
+ return task;
+}
+
+static void transport_processing_shutdown(struct se_device *dev)
+{
+ struct se_cmd *cmd;
+ struct se_queue_req *qr;
+ struct se_task *task;
+ u8 state;
+ unsigned long flags;
+ /*
+ * Empty the struct se_device's struct se_task state list.
+ */
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ while ((task = transport_get_task_from_state_list(dev))) {
+ if (!(TASK_CMD(task))) {
+ printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ continue;
+ }
+ cmd = TASK_CMD(task);
+
+ if (!T_TASK(cmd)) {
+ printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+ " %p ITT: 0x%08x\n", task, cmd,
+ CMD_TFO(cmd)->get_task_tag(cmd));
+ continue;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
+ " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+ " %d/%d cdb: 0x%02x\n", cmd, task,
+ CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
+ CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+ cmd->t_state, cmd->deferred_t_state,
+ T_TASK(cmd)->t_task_cdb[0]);
+ DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+ " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
+ " t_transport_stop: %d t_transport_sent: %d\n",
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ T_TASK(cmd)->t_task_cdbs,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+ atomic_read(&T_TASK(cmd)->t_transport_active),
+ atomic_read(&T_TASK(cmd)->t_transport_stop),
+ atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+ if (atomic_read(&task->task_active)) {
+ atomic_set(&task->task_stop, 1);
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+ " %p\n", task, dev);
+ wait_for_completion(&task->task_stop_comp);
+ DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+ task, dev);
+
+ spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+ atomic_set(&task->task_active, 0);
+ atomic_set(&task->task_stop, 0);
+ }
+ __transport_stop_task_timer(task, &flags);
+
+ if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ DEBUG_DO("Skipping task: %p, dev: %p for"
+ " t_task_cdbs_ex_left: %d\n", task, dev,
+ atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+
+ if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+ DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+ " %p\n", task, dev);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ transport_send_check_condition_and_sense(
+ cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
+ 0);
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ continue;
+ }
+ DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+ task, dev);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+ transport_send_check_condition_and_sense(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ spin_unlock_irqrestore(
+ &T_TASK(cmd)->t_state_lock, flags);
+
+ transport_remove_cmd_from_queue(cmd,
+ SE_DEV(cmd)->dev_queue_obj);
+ transport_lun_remove_cmd(cmd);
+
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Empty the struct se_device's struct se_cmd list.
+ */
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
+ spin_unlock_irqrestore(
+ &dev->dev_queue_obj->cmd_queue_lock, flags);
+ cmd = (struct se_cmd *)qr->cmd;
+ state = qr->state;
+ kfree(qr);
+
+ DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+ cmd, state);
+
+ if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ transport_send_check_condition_and_sense(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop(cmd, 1, 0);
+ } else {
+ transport_lun_remove_cmd(cmd);
+ if (transport_cmd_check_stop(cmd, 1, 0))
+ transport_generic_remove(cmd, 0, 0);
+ }
+ spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+}
+
+/* transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+ int ret, t_state;
+ struct se_cmd *cmd;
+ struct se_device *dev = (struct se_device *) param;
+ struct se_queue_req *qr;
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
+ atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+ kthread_should_stop());
+ if (ret < 0)
+ goto out;
+
+ spin_lock_irq(&dev->dev_status_lock);
+ if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
+ spin_unlock_irq(&dev->dev_status_lock);
+ transport_processing_shutdown(dev);
+ continue;
+ }
+ spin_unlock_irq(&dev->dev_status_lock);
+
+get_cmd:
+ __transport_execute_tasks(dev);
+
+ qr = transport_get_qr_from_queue(dev->dev_queue_obj);
+ if (!(qr))
+ continue;
+
+ cmd = (struct se_cmd *)qr->cmd;
+ t_state = qr->state;
+ kfree(qr);
+
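+		/*
+		 * Dispatch the dequeued struct se_cmd based on its queued
+		 * t_state.
+		 */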
+ switch (t_state) {
+ case TRANSPORT_NEW_CMD_MAP:
+ if (!(CMD_TFO(cmd)->new_cmd_map)) {
+ printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+ " NULL for TRANSPORT_NEW_CMD_MAP\n");
+ BUG();
+ }
+ ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+ if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL,
+ 0, (cmd->data_direction !=
+ DMA_TO_DEVICE));
+ break;
+ }
+ /* Fall through */
+ case TRANSPORT_NEW_CMD:
+ ret = transport_generic_new_cmd(cmd);
+ if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL,
+ 0, (cmd->data_direction !=
+ DMA_TO_DEVICE));
+ }
+ break;
+ case TRANSPORT_PROCESS_WRITE:
+ transport_generic_process_write(cmd);
+ break;
+ case TRANSPORT_COMPLETE_OK:
+ transport_stop_all_task_timers(cmd);
+ transport_generic_complete_ok(cmd);
+ break;
+ case TRANSPORT_REMOVE:
+ transport_generic_remove(cmd, 1, 0);
+ break;
+ case TRANSPORT_PROCESS_TMR:
+ transport_generic_do_tmr(cmd);
+ break;
+ case TRANSPORT_COMPLETE_FAILURE:
+ transport_generic_request_failure(cmd, NULL, 1, 1);
+ break;
+ case TRANSPORT_COMPLETE_TIMEOUT:
+ transport_stop_all_task_timers(cmd);
+ transport_generic_request_timeout(cmd);
+ break;
+ default:
+ printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+ " %d for ITT: 0x%08x i_state: %d on SE LUN:"
+ " %u\n", t_state, cmd->deferred_t_state,
+ CMD_TFO(cmd)->get_task_tag(cmd),
+ CMD_TFO(cmd)->get_cmd_state(cmd),
+ SE_LUN(cmd)->unpacked_lun);
+ BUG();
+ }
+
+ goto get_cmd;
+ }
+
+out:
+ transport_release_all_cmds(dev);
+ dev->process_thread = NULL;
+ return 0;
+}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 000000000000..a2ef346087e8
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+
+ if (!(sess))
+ return 0;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return 0;
+
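+	/*
+	 * Nothing to do if no Unit Attention conditions are pending for
+	 * this mapped LUN.
+	 */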
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count)))
+ return 0;
+ /*
+ * From sam4r14, section 5.14 Unit attention condition:
+ *
+ * a) if an INQUIRY command enters the enabled command state, the
+ * device server shall process the INQUIRY command and shall neither
+ * report nor clear any unit attention condition;
+ * b) if a REPORT LUNS command enters the enabled command state, the
+ * device server shall process the REPORT LUNS command and shall not
+ * report any unit attention condition;
+ * e) if a REQUEST SENSE command enters the enabled command state while
+ * a unit attention condition exists for the SCSI initiator port
+ * associated with the I_T nexus on which the REQUEST SENSE command
+ * was received, then the device server shall process the command
+ * and either:
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ return 0;
+ default:
+ return -1;
+ }
+
+ return -1;
+}
+
+int core_scsi3_ua_allocate(
+ struct se_node_acl *nacl,
+ u32 unpacked_lun,
+ u8 asc,
+ u8 ascq)
+{
+ struct se_dev_entry *deve;
+ struct se_ua *ua, *ua_p, *ua_tmp;
+ /*
+ * PASSTHROUGH OPS
+ */
+ if (!(nacl))
+ return -1;
+
+ ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+ if (!(ua)) {
+ printk(KERN_ERR "Unable to allocate struct se_ua\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&ua->ua_dev_list);
+ INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+ ua->ua_nacl = nacl;
+ ua->ua_asc = asc;
+ ua->ua_ascq = ascq;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[unpacked_lun];
+
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+ /*
+ * Do not report the same UNIT ATTENTION twice..
+ */
+ if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+ kmem_cache_free(se_ua_cache, ua);
+ return 0;
+ }
+ /*
+ * Attach the highest priority Unit Attention to
+ * the head of the list following sam4r14,
+ * Section 5.14 Unit Attention Condition:
+ *
+ * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+ * POWER ON OCCURRED or
+ * DEVICE INTERNAL RESET
+ * SCSI BUS RESET OCCURRED or
+ * MICROCODE HAS BEEN CHANGED or
+ * protocol specific
+ * BUS DEVICE RESET FUNCTION OCCURRED
+ * I_T NEXUS LOSS OCCURRED
+ * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+ * all others Lowest
+ *
+ * Each of the ASCQ codes listed above are defined in
+ * the 29h ASC family, see spc4r17 Table D.1
+ */
+ if (ua_p->ua_asc == 0x29) {
+ if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+ list_add(&ua->ua_nacl_list,
+ &deve->ua_list);
+ else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ } else if (ua_p->ua_asc == 0x2a) {
+ /*
+ * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+ */
+			if ((asc == 0x29) || (ascq > ua_p->ua_ascq))
+ list_add(&ua->ua_nacl_list,
+ &deve->ua_list);
+ else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ } else
+ list_add_tail(&ua->ua_nacl_list,
+ &deve->ua_list);
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ atomic_inc(&deve->ua_count);
+ smp_mb__after_atomic_inc();
+ return 0;
+ }
+ list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+ " 0x%02x, ASCQ: 0x%02x\n",
+ TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+ asc, ascq);
+
+ atomic_inc(&deve->ua_count);
+ smp_mb__after_atomic_inc();
+ return 0;
+}
+
+void core_scsi3_ua_release_all(
+ struct se_dev_entry *deve)
+{
+ struct se_ua *ua, *ua_p;
+
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ struct se_device *dev = SE_DEV(cmd);
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+ struct se_ua *ua = NULL, *ua_p;
+ int head = 1;
+
+ if (!(sess))
+ return;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count))) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return;
+ }
+ /*
+ * The highest priority Unit Attentions are placed at the head of the
+ * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+ * sense data for the received CDB.
+ */
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ /*
+ * For ua_intlck_ctrl code not equal to 00b, only report the
+ * highest priority UNIT_ATTENTION and ASC/ASCQ without
+ * clearing it.
+ */
+ if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ break;
+ }
+ /*
+ * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition. Return the ASC/ASCQ of the highest priority UA
+ * (head of the list) in the outgoing CHECK_CONDITION + sense.
+ */
+ if (head) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ head = 0;
+ }
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+ " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+ " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+ TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
+ cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+ struct se_cmd *cmd,
+ u8 *asc,
+ u8 *ascq)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+ struct se_ua *ua = NULL, *ua_p;
+ int head = 1;
+
+ if (!(sess))
+ return -1;
+
+ nacl = sess->se_node_acl;
+ if (!(nacl))
+ return -1;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ deve = &nacl->device_list[cmd->orig_fe_lun];
+ if (!(atomic_read(&deve->ua_count))) {
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -1;
+ }
+ /*
+ * The highest priority Unit Attentions are placed at the head of the
+ * struct se_dev_entry->ua_list. The First (and hence highest priority)
+ * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+ * matching struct se_lun.
+ *
+ * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+ * struct se_lun.
+ */
+ spin_lock(&deve->ua_lock);
+ list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+ if (head) {
+ *asc = ua->ua_asc;
+ *ascq = ua->ua_ascq;
+ head = 0;
+ }
+ list_del(&ua->ua_nacl_list);
+ kmem_cache_free(se_ua_cache, ua);
+
+ atomic_dec(&deve->ua_count);
+ smp_mb__after_atomic_dec();
+ }
+ spin_unlock(&deve->ua_lock);
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+ " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+ " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ cmd->orig_fe_lun, *asc, *ascq);
+
+ return (head) ? -1 : 0;
+}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 000000000000..6e6b03460a1a
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
+#define ASCQ_29H_POWER_ON_OCCURRED 0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED 0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+ u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 0d236f4bb8c2..b00101972f20 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -284,12 +284,11 @@ static int samplerate = 100;
module_param(ixjdebug, int, 0);
-static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
{ PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
-
MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
/************************************************************************
@@ -6581,7 +6580,8 @@ static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long ar
case IXJCTL_SET_FILTER:
if (copy_from_user(&jf, argp, sizeof(jf)))
retval = -EFAULT;
- retval = ixj_init_filter(j, &jf);
+ else
+ retval = ixj_init_filter(j, &jf);
break;
case IXJCTL_SET_FILTER_RAW:
if (copy_from_user(&jfr, argp, sizeof(jfr)))
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 13c72c629329..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,6 +32,8 @@
#include <linux/thermal.h>
#include <linux/spinlock.h>
#include <linux/reboot.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -58,6 +60,8 @@ static LIST_HEAD(thermal_tz_list);
static LIST_HEAD(thermal_cdev_list);
static DEFINE_MUTEX(thermal_list_lock);
+static unsigned int thermal_event_seqnum;
+
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@@ -823,11 +827,8 @@ static struct class thermal_class = {
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
*/
-struct thermal_cooling_device *thermal_cooling_device_register(char *type,
- void *devdata,
- struct
- thermal_cooling_device_ops
- *ops)
+struct thermal_cooling_device *thermal_cooling_device_register(
+ char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos;
@@ -1048,13 +1049,9 @@ EXPORT_SYMBOL(thermal_zone_device_update);
* section 11.1.5.1 of the ACPI specification 3.0.
*/
struct thermal_zone_device *thermal_zone_device_register(char *type,
- int trips,
- void *devdata, struct
- thermal_zone_device_ops
- *ops, int tc1, int
- tc2,
- int passive_delay,
- int polling_delay)
+ int trips, void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ int tc1, int tc2, int passive_delay, int polling_delay)
{
struct thermal_zone_device *tz;
struct thermal_cooling_device *pos;
@@ -1214,6 +1211,103 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = THERMAL_GENL_FAMILY_NAME,
+ .version = THERMAL_GENL_VERSION,
+ .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+ .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
+int generate_netlink_event(u32 orig, enum events event)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ struct thermal_genl_event *thermal_event;
+ void *msg_header;
+ int size;
+ int result;
+
+ /* allocate memory */
+ size = nla_total_size(sizeof(struct thermal_genl_event)) + \
+ nla_total_size(0);
+
+ skb = genlmsg_new(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
+ &thermal_event_genl_family, 0,
+ THERMAL_GENL_CMD_EVENT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ /* fill the data */
+ attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
+ sizeof(struct thermal_genl_event));
+
+ if (!attr) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ thermal_event = nla_data(attr);
+ if (!thermal_event) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ memset(thermal_event, 0, sizeof(struct thermal_genl_event));
+
+ thermal_event->orig = orig;
+ thermal_event->event = event;
+
+ /* send multicast genetlink message */
+ result = genlmsg_end(skb, msg_header);
+ if (result < 0) {
+ nlmsg_free(skb);
+ return result;
+ }
+
+ result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
+ if (result)
+		printk(KERN_INFO "failed to send netlink event: %d\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(generate_netlink_event);
+
+static int genetlink_init(void)
+{
+ int result;
+
+ result = genl_register_family(&thermal_event_genl_family);
+ if (result)
+ return result;
+
+ result = genl_register_mc_group(&thermal_event_genl_family,
+ &thermal_event_mcgrp);
+ if (result)
+ genl_unregister_family(&thermal_event_genl_family);
+ return result;
+}
+
+static void genetlink_exit(void)
+{
+ genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
static int __init thermal_init(void)
{
int result = 0;
@@ -1225,6 +1319,7 @@ static int __init thermal_init(void)
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
}
+ result = genetlink_init();
return result;
}
@@ -1235,7 +1330,8 @@ static void __exit thermal_exit(void)
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
+ genetlink_exit();
}
-subsys_initcall(thermal_init);
+fs_initcall(thermal_init);
module_exit(thermal_exit);
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c43ef48b1a0f..396277216e4f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_R3964) += n_r3964.o
obj-y += vt/
+obj-$(CONFIG_HVC_DRIVER) += hvc/
+obj-y += serial/
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 000000000000..d79e7e9bf9d2
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
+obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_TILE) += hvc_tile.o
+obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
+obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
+obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
+obj-$(CONFIG_HVC_XEN) += hvc_xen.o
+obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
+obj-$(CONFIG_HVCS) += hvcs.o
diff --git a/drivers/char/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
index 5fe4631e2a61..5fe4631e2a61 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/tty/hvc/hvc_beat.c
diff --git a/drivers/char/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e9cba13ee800..e9cba13ee800 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
diff --git a/drivers/char/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 54381eba4e4a..54381eba4e4a 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
diff --git a/drivers/char/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 6470f63deb4b..6470f63deb4b 100644
--- a/drivers/char/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
diff --git a/drivers/char/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index 2623e177e8d6..2623e177e8d6 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
diff --git a/drivers/char/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
index 21c54955084e..21c54955084e 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/tty/hvc/hvc_iseries.c
diff --git a/drivers/char/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index c3425bb3a1f6..c3425bb3a1f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
diff --git a/drivers/char/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 61c4a61558d9..61c4a61558d9 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
diff --git a/drivers/char/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 7a84a0595477..7a84a0595477 100644
--- a/drivers/char/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
diff --git a/drivers/char/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index b0957e61a7be..b0957e61a7be 100644
--- a/drivers/char/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
diff --git a/drivers/char/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 27370e99c66f..5e2f52b33327 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -39,7 +39,7 @@
#include "hvc_console.h"
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
static struct vio_device_id hvc_driver_table[] __devinitdata = {
{"serial", "hvterm1"},
diff --git a/drivers/char/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..3740e327f180 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
diff --git a/drivers/char/hvcs.c b/drivers/tty/hvc/hvcs.c
index bedc6c1b6fa5..bedc6c1b6fa5 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
diff --git a/drivers/char/hvsi.c b/drivers/tty/hvc/hvsi.c
index 67a75a502c01..67a75a502c01 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412a04e8..aa2e5d3eb01a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
gsm->initiator = c->initiator;
gsm->mru = c->mru;
+ gsm->mtu = c->mtu;
gsm->encoding = c->encapsulation;
gsm->adaption = c->adaption;
gsm->n2 = c->n2;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d32281032c..52fc0c9a6364 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -581,8 +581,9 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
__u8 __user *buf, size_t nr)
{
struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
- int ret;
+ int ret = 0;
struct n_hdlc_buf *rbuf;
+ DECLARE_WAITQUEUE(wait, current);
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
@@ -598,57 +599,55 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
return -EFAULT;
}
- tty_lock();
+ add_wait_queue(&tty->read_wait, &wait);
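+ /*
+ * Wait (interruptibly) for a received frame, a hangup, or the
+ * other side closing; O_NONBLOCK callers return -EAGAIN instead.
+ */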
for (;;) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- tty_unlock();
- return -EIO;
+ ret = -EIO;
+ break;
}
+ if (tty_hung_up_p(file))
+ break;
- n_hdlc = tty2n_hdlc (tty);
- if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
- tty != n_hdlc->tty) {
- tty_unlock();
- return 0;
- }
+ set_current_state(TASK_INTERRUPTIBLE);
rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
- if (rbuf)
+ if (rbuf) {
+ if (rbuf->count > nr) {
+ /* too large for caller's buffer */
+ ret = -EOVERFLOW;
+ } else {
+ if (copy_to_user(buf, rbuf->buf, rbuf->count))
+ ret = -EFAULT;
+ else
+ ret = rbuf->count;
+ }
+
+ if (n_hdlc->rx_free_buf_list.count >
+ DEFAULT_RX_BUF_COUNT)
+ kfree(rbuf);
+ else
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
break;
+ }
/* no data */
if (file->f_flags & O_NONBLOCK) {
- tty_unlock();
- return -EAGAIN;
+ ret = -EAGAIN;
+ break;
}
-
- interruptible_sleep_on (&tty->read_wait);
+
+ schedule();
+
if (signal_pending(current)) {
- tty_unlock();
- return -EINTR;
+ ret = -EINTR;
+ break;
}
}
-
- if (rbuf->count > nr)
- /* frame too large for caller's buffer (discard frame) */
- ret = -EOVERFLOW;
- else {
- /* Copy the data to the caller's buffer */
- if (copy_to_user(buf, rbuf->buf, rbuf->count))
- ret = -EFAULT;
- else
- ret = rbuf->count;
- }
-
- /* return HDLC buffer to free list unless the free list */
- /* count has exceeded the default value, in which case the */
- /* buffer is freed back to the OS to conserve memory */
- if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
- kfree(rbuf);
- else
- n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
- tty_unlock();
+
+ remove_wait_queue(&tty->read_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
return ret;
} /* end of n_hdlc_tty_read() */
@@ -691,14 +690,15 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
count = maxframe;
}
- tty_lock();
-
add_wait_queue(&tty->write_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
+
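+ /* Sleep until a free transmit buffer becomes available */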
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
- /* Allocate transmit buffer */
- /* sleep until transmit buffer available */
- while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
+ if (tbuf)
+ break;
+
if (file->f_flags & O_NONBLOCK) {
error = -EAGAIN;
break;
@@ -719,7 +719,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
}
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (!error) {
@@ -731,7 +731,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
n_hdlc_send_frames(n_hdlc,tty);
}
- tty_unlock();
+
return error;
} /* end of n_hdlc_tty_write() */
diff --git a/drivers/serial/21285.c b/drivers/tty/serial/21285.c
index d89aa38c5cf0..d89aa38c5cf0 100644
--- a/drivers/serial/21285.c
+++ b/drivers/tty/serial/21285.c
diff --git a/drivers/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..de0160e3f8c4 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status)
static void receive_chars(struct m68k_serial *info, unsigned short rx)
{
- struct tty_struct *tty = info->port.tty;
+ struct tty_struct *tty = info->tty;
m68328_uart *uart = &uart_addr[info->line];
unsigned char ch, flag;
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info)
goto clear_and_return;
}
- if((info->xmit_cnt <= 0) || info->port.tty->stopped) {
+ if((info->xmit_cnt <= 0) || info->tty->stopped) {
/* That's peculiar... TX ints off */
uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
goto clear_and_return;
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work)
struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
struct tty_struct *tty;
- tty = info->port.tty;
+ tty = info->tty;
if (!tty)
return;
#if 0
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work)
struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup);
struct tty_struct *tty;
- tty = info->port.tty;
+ tty = info->tty;
if (!tty)
return;
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info)
uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
#endif
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
/*
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info)
info->xmit_buf = 0;
}
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~S_INITIALIZED;
local_irq_restore(flags);
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info)
unsigned cflag;
int i;
- if (!info->port.tty || !info->port.tty->termios)
+ if (!info->tty || !info->tty->termios)
return;
- cflag = info->port.tty->termios->c_cflag;
+ cflag = info->tty->termios->c_cflag;
if (!(port = info->port))
return;
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
static int rs_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
- int error;
struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
int retval;
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
tty_ldisc_flush(tty);
tty->closing = 0;
info->event = 0;
- info->port.tty = NULL;
+ info->tty = NULL;
#warning "This is not and has never been valid so fix it"
#if 0
if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty)
info->event = 0;
info->count = 0;
info->flags &= ~S_NORMAL_ACTIVE;
- info->port.tty = NULL;
+ info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp)
info->count++;
tty->driver_data = info;
- info->port.tty = tty;
+ info->tty = tty;
/*
* Start up serial port
@@ -1338,7 +1337,7 @@ rs68328_init(void)
info = &m68k_soft[i];
info->magic = SERIAL_MAGIC;
info->port = (int) &uart_addr[i];
- info->port.tty = NULL;
+ info->tty = NULL;
info->irq = uart_irqs[i];
info->custom_divisor = 16;
info->close_delay = 50;
diff --git a/drivers/serial/68328serial.h b/drivers/tty/serial/68328serial.h
index 664ceb0a158c..664ceb0a158c 100644
--- a/drivers/serial/68328serial.h
+++ b/drivers/tty/serial/68328serial.h
diff --git a/drivers/serial/68360serial.c b/drivers/tty/serial/68360serial.c
index 88b13356ec10..bc21eeae8fde 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
/* .read_proc = rs_360_read_proc, */
.tiocmget = rs_360_tiocmget,
.tiocmset = rs_360_tiocmset,
+ .get_icount = rs_360_get_icount,
};
static int __init rs_360_init(void)
diff --git a/drivers/serial/8250.c b/drivers/tty/serial/8250.c
index b25e6e490530..3975df6f7fdb 100644
--- a/drivers/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -236,7 +236,8 @@ static const struct serial8250_config uart_config[] = {
.fifo_size = 128,
.tx_loadsz = 128,
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ /* UART_CAP_EFR breaks the Billionton CF Bluetooth card. */
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
},
[PORT_16654] = {
.name = "ST16654",
diff --git a/drivers/serial/8250.h b/drivers/tty/serial/8250.h
index 6e19ea3e48d5..6e19ea3e48d5 100644
--- a/drivers/serial/8250.h
+++ b/drivers/tty/serial/8250.h
diff --git a/drivers/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
index 9c10262f2469..9c10262f2469 100644
--- a/drivers/serial/8250_accent.c
+++ b/drivers/tty/serial/8250_accent.c
diff --git a/drivers/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
index b0ce8c56f1a4..b0ce8c56f1a4 100644
--- a/drivers/serial/8250_acorn.c
+++ b/drivers/tty/serial/8250_acorn.c
diff --git a/drivers/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
index 3bfe0f7b26fb..3bfe0f7b26fb 100644
--- a/drivers/serial/8250_boca.c
+++ b/drivers/tty/serial/8250_boca.c
diff --git a/drivers/serial/8250_early.c b/drivers/tty/serial/8250_early.c
index eaafb98debed..eaafb98debed 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/tty/serial/8250_early.c
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
index 567143ace159..567143ace159 100644
--- a/drivers/serial/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250_exar_st16c554.c
diff --git a/drivers/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
index 6375d68b7913..6375d68b7913 100644
--- a/drivers/serial/8250_fourport.c
+++ b/drivers/tty/serial/8250_fourport.c
diff --git a/drivers/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
index d8c0ffbfa6e3..d8c0ffbfa6e3 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/tty/serial/8250_gsc.c
diff --git a/drivers/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
index c13438c93012..c13438c93012 100644
--- a/drivers/serial/8250_hp300.c
+++ b/drivers/tty/serial/8250_hp300.c
diff --git a/drivers/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
index 7609150e7d5e..7609150e7d5e 100644
--- a/drivers/serial/8250_hub6.c
+++ b/drivers/tty/serial/8250_hub6.c
diff --git a/drivers/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
index d10be944ad44..d10be944ad44 100644
--- a/drivers/serial/8250_mca.c
+++ b/drivers/tty/serial/8250_mca.c
diff --git a/drivers/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 8b8930f700b5..8b8930f700b5 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
diff --git a/drivers/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index 4822cb50cd0f..4822cb50cd0f 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
diff --git a/drivers/serial/Kconfig b/drivers/tty/serial/Kconfig
index ec3c214598d0..2b8334601c8b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -81,7 +81,7 @@ config SERIAL_8250_GSC
default SERIAL_8250
config SERIAL_8250_PCI
- tristate "8250/16550 PCI device support" if EMBEDDED
+ tristate "8250/16550 PCI device support" if EXPERT
depends on SERIAL_8250 && PCI
default SERIAL_8250
help
@@ -90,7 +90,7 @@ config SERIAL_8250_PCI
Saves about 9K.
config SERIAL_8250_PNP
- tristate "8250/16550 PNP device support" if EMBEDDED
+ tristate "8250/16550 PNP device support" if EXPERT
depends on SERIAL_8250 && PNP
default SERIAL_8250
help
@@ -776,24 +776,7 @@ config BFIN_UART0_CTSRTS
bool "Enable UART0 hardware flow control"
depends on SERIAL_BFIN_UART0
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART0_CTS_PIN
- int "UART0 CTS pin"
- depends on BFIN_UART0_CTSRTS && !BF548
- default 23
- help
- The default pin is GPIO_GP7.
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART0_RTS_PIN
- int "UART0 RTS pin"
- depends on BFIN_UART0_CTSRTS && !BF548
- default 22
- help
- The default pin is GPIO_GP6.
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART1
bool "Enable UART1"
@@ -805,22 +788,7 @@ config BFIN_UART1_CTSRTS
bool "Enable UART1 hardware flow control"
depends on SERIAL_BFIN_UART1
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART1_CTS_PIN
- int "UART1 CTS pin"
- depends on BFIN_UART1_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART1_RTS_PIN
- int "UART1 RTS pin"
- depends on BFIN_UART1_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART2
bool "Enable UART2"
@@ -832,22 +800,7 @@ config BFIN_UART2_CTSRTS
bool "Enable UART2 hardware flow control"
depends on SERIAL_BFIN_UART2
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART2_CTS_PIN
- int "UART2 CTS pin"
- depends on BFIN_UART2_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART2_RTS_PIN
- int "UART2 RTS pin"
- depends on BFIN_UART2_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_BFIN_UART3
bool "Enable UART3"
@@ -859,22 +812,7 @@ config BFIN_UART3_CTSRTS
bool "Enable UART3 hardware flow control"
depends on SERIAL_BFIN_UART3
help
- Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
- signal.
-
-config UART3_CTS_PIN
- int "UART3 CTS pin"
- depends on BFIN_UART3_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART3_RTS_PIN
- int "UART3 RTS pin"
- depends on BFIN_UART3_CTSRTS && !BF548
- default -1
- help
- Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+ Enable hardware flow control in the driver.
config SERIAL_IMX
bool "IMX serial port support"
@@ -1412,7 +1350,7 @@ config SERIAL_NETX_CONSOLE
config SERIAL_OF_PLATFORM
tristate "Serial port on Open Firmware platform bus"
- depends on PPC_OF || MICROBLAZE
+ depends on OF
depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
help
If you have a PowerPC based system that has serial ports
@@ -1580,6 +1518,7 @@ config SERIAL_BCM63XX_CONSOLE
config SERIAL_GRLIB_GAISLER_APBUART
tristate "GRLIB APBUART serial support"
depends on OF
+ select SERIAL_CORE
---help---
Add support for the GRLIB APBUART serial port.
diff --git a/drivers/serial/Makefile b/drivers/tty/serial/Makefile
index 8ea92e9c73b0..8ea92e9c73b0 100644
--- a/drivers/serial/Makefile
+++ b/drivers/tty/serial/Makefile
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index f9b49b5ff5e1..f9b49b5ff5e1 100644
--- a/drivers/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
diff --git a/drivers/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 721216292a50..721216292a50 100644
--- a/drivers/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
diff --git a/drivers/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
diff --git a/drivers/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..e76d7d000128 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
diff --git a/drivers/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 095a5d562618..095a5d562618 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
diff --git a/drivers/serial/apbuart.h b/drivers/tty/serial/apbuart.h
index 5faf87c8d2bc..5faf87c8d2bc 100644
--- a/drivers/serial/apbuart.h
+++ b/drivers/tty/serial/apbuart.h
diff --git a/drivers/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3892666b5fbd..2a1d52fb4936 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1732,6 +1732,11 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+ UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+ }
+
return 0;
err_add_port:
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d0807..a1a0e55d0807 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
diff --git a/drivers/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index 19cac9f610fd..9b1ff2b6bb37 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -1,7 +1,7 @@
/*
* Blackfin On-Chip Serial Driver
*
- * Copyright 2006-2008 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -12,6 +12,9 @@
#define SUPPORT_SYSRQ
#endif
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/gfp.h>
@@ -23,21 +26,20 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
-#include <linux/dma-mapping.h>
-
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
- defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+#include <linux/gpio.h>
+#include <linux/irq.h>
#include <linux/kgdb.h>
-#include <asm/irq_regs.h>
-#endif
-
-#include <asm/gpio.h>
-#include <mach/bfin_serial_5xx.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
+#include <asm/portmux.h>
#include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
#ifdef CONFIG_SERIAL_BFIN_MODULE
# undef CONFIG_EARLY_PRINTK
@@ -48,12 +50,11 @@
#endif
/* UART name and device definitions */
-#define BFIN_SERIAL_NAME "ttyBF"
+#define BFIN_SERIAL_DEV_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
#define BFIN_SERIAL_MINOR 64
-static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS];
-static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource);
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -369,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
- spin_lock(&uart->port.lock);
while (UART_GET_LSR(uart) & DR)
bfin_serial_rx_chars(uart);
- spin_unlock(&uart->port.lock);
return IRQ_HANDLED;
}
@@ -489,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
- dma_disable_irq(uart->tx_dma_channel);
- dma_disable_irq(uart->rx_dma_channel);
- spin_lock_bh(&uart->port.lock);
+ dma_disable_irq_nosync(uart->rx_dma_channel);
+ spin_lock_bh(&uart->rx_lock);
/* 2D DMA RX buffer ring is used. Because curr_y_count and
* curr_x_count can't be read as an atomic operation,
@@ -522,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
- spin_unlock_bh(&uart->port.lock);
- dma_enable_irq(uart->tx_dma_channel);
+ spin_unlock_bh(&uart->rx_lock);
dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -570,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
unsigned short irqstat;
int x_pos, pos;
- spin_lock(&uart->port.lock);
+ spin_lock(&uart->rx_lock);
irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
clear_dma_irqstat(uart->rx_dma_channel);
@@ -588,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
- spin_unlock(&uart->port.lock);
+ spin_unlock(&uart->rx_lock);
return IRQ_HANDLED;
}
@@ -743,14 +740,14 @@ static int bfin_serial_startup(struct uart_port *port)
}
}
if (uart->rts_pin >= 0) {
- gpio_request(uart->rts_pin, DRIVER_NAME);
gpio_direction_output(uart->rts_pin, 0);
}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (request_irq(uart->status_irq,
+ if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
bfin_serial_mctrl_cts_int,
IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
}
@@ -796,11 +793,9 @@ static void bfin_serial_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (UART_GET_IER(uart) & EDSSI)
+ if (uart->cts_pin >= 0)
free_irq(uart->status_irq, uart);
#endif
}
@@ -962,33 +957,33 @@ bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
*/
static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
{
- int line = port->line;
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
switch (ld) {
case N_IRDA:
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val |= (IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
break;
default:
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
}
}
static void bfin_serial_reset_irda(struct uart_port *port)
{
- int line = port->line;
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
- val = UART_GET_GCTL(&bfin_serial_ports[line]);
+ val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
SSYNC();
val |= (IREN | RPOLC);
- UART_PUT_GCTL(&bfin_serial_ports[line], val);
+ UART_PUT_GCTL(uart, val);
SSYNC();
}
@@ -1070,85 +1065,6 @@ static struct uart_ops bfin_serial_pops = {
#endif
};
-static void __init bfin_serial_hw_init(void)
-{
-#ifdef CONFIG_SERIAL_BFIN_UART0
- peripheral_request(P_UART0_TX, DRIVER_NAME);
- peripheral_request(P_UART0_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART1
- peripheral_request(P_UART1_TX, DRIVER_NAME);
- peripheral_request(P_UART1_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART1_CTSRTS) && defined(CONFIG_BF54x)
- peripheral_request(P_UART1_RTS, DRIVER_NAME);
- peripheral_request(P_UART1_CTS, DRIVER_NAME);
-# endif
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART2
- peripheral_request(P_UART2_TX, DRIVER_NAME);
- peripheral_request(P_UART2_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART3
- peripheral_request(P_UART3_TX, DRIVER_NAME);
- peripheral_request(P_UART3_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART3_CTSRTS) && defined(CONFIG_BF54x)
- peripheral_request(P_UART3_RTS, DRIVER_NAME);
- peripheral_request(P_UART3_CTS, DRIVER_NAME);
-# endif
-#endif
-}
-
-static void __init bfin_serial_init_ports(void)
-{
- static int first = 1;
- int i;
-
- if (!first)
- return;
- first = 0;
-
- bfin_serial_hw_init();
-
- for (i = 0; i < nr_active_ports; i++) {
- spin_lock_init(&bfin_serial_ports[i].port.lock);
- bfin_serial_ports[i].port.uartclk = get_sclk();
- bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
- bfin_serial_ports[i].port.ops = &bfin_serial_pops;
- bfin_serial_ports[i].port.line = i;
- bfin_serial_ports[i].port.iotype = UPIO_MEM;
- bfin_serial_ports[i].port.membase =
- (void __iomem *)bfin_serial_resource[i].uart_base_addr;
- bfin_serial_ports[i].port.mapbase =
- bfin_serial_resource[i].uart_base_addr;
- bfin_serial_ports[i].port.irq =
- bfin_serial_resource[i].uart_irq;
- bfin_serial_ports[i].status_irq =
- bfin_serial_resource[i].uart_status_irq;
- bfin_serial_ports[i].port.flags = UPF_BOOT_AUTOCONF;
-#ifdef CONFIG_SERIAL_BFIN_DMA
- bfin_serial_ports[i].tx_done = 1;
- bfin_serial_ports[i].tx_count = 0;
- bfin_serial_ports[i].tx_dma_channel =
- bfin_serial_resource[i].uart_tx_dma_channel;
- bfin_serial_ports[i].rx_dma_channel =
- bfin_serial_resource[i].uart_rx_dma_channel;
- init_timer(&(bfin_serial_ports[i].rx_dma_timer));
-#endif
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
- bfin_serial_ports[i].cts_pin =
- bfin_serial_resource[i].uart_cts_pin;
- bfin_serial_ports[i].rts_pin =
- bfin_serial_resource[i].uart_rts_pin;
-#endif
- }
-}
-
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
/*
* If the port was already initialised (eg, by a boot loader),
@@ -1196,6 +1112,34 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
static struct uart_driver bfin_serial_reg;
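+/*
+ * Busy-wait until the UART transmit holding register is empty,
+ * then emit one character (shared by console and earlyprintk paths).
+ */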
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ while (!(UART_GET_LSR(uart) & THRE))
+ barrier();
+ UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+ defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE "bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+ uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
static int __init
bfin_serial_console_setup(struct console *co, char *options)
{
@@ -1215,9 +1159,12 @@ bfin_serial_console_setup(struct console *co, char *options)
* if so, search for the first available port that does have
* console support.
*/
- if (co->index == -1 || co->index >= nr_active_ports)
- co->index = 0;
- uart = &bfin_serial_ports[co->index];
+ if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+ return -ENODEV;
+
+ uart = bfin_serial_ports[co->index];
+ if (!uart)
+ return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1226,36 +1173,9 @@ bfin_serial_console_setup(struct console *co, char *options)
return uart_set_options(&uart->port, co, baud, parity, bits, flow);
}
-#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
- defined (CONFIG_EARLY_PRINTK) */
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
-static void bfin_serial_console_putchar(struct uart_port *port, int ch)
-{
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- while (!(UART_GET_LSR(uart) & THRE))
- barrier();
- UART_PUT_CHAR(uart, ch);
- SSYNC();
-}
-
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
- unsigned long flags;
-
- spin_lock_irqsave(&uart->port.lock, flags);
- uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
- spin_unlock_irqrestore(&uart->port.lock, flags);
-
-}
static struct console bfin_serial_console = {
- .name = BFIN_SERIAL_NAME,
+ .name = BFIN_SERIAL_DEV_NAME,
.write = bfin_serial_console_write,
.device = uart_console_device,
.setup = bfin_serial_console_setup,
@@ -1263,44 +1183,30 @@ static struct console bfin_serial_console = {
.index = -1,
.data = &bfin_serial_reg,
};
-
-static int __init bfin_serial_rs_console_init(void)
-{
- bfin_serial_init_ports();
- register_console(&bfin_serial_console);
-
- return 0;
-}
-console_initcall(bfin_serial_rs_console_init);
-
#define BFIN_SERIAL_CONSOLE &bfin_serial_console
#else
#define BFIN_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
+#ifdef CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
-#ifdef CONFIG_EARLY_PRINTK
-static __init void early_serial_putc(struct uart_port *port, int ch)
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
{
- unsigned timeout = 0xffff;
- struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-
- while ((!(UART_GET_LSR(uart) & THRE)) && --timeout)
- cpu_relax();
- UART_PUT_CHAR(uart, ch);
-}
+ unsigned long flags;
-static __init void early_serial_write(struct console *con, const char *s,
- unsigned int n)
-{
- struct bfin_serial_port *uart = &bfin_serial_ports[con->index];
- unsigned int i;
+ if (bfin_earlyprintk_port.port.line != co->index)
+ return;
- for (i = 0; i < n; i++, s++) {
- if (*s == '\n')
- early_serial_putc(&uart->port, '\r');
- early_serial_putc(&uart->port, *s);
- }
+ spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+ uart_console_write(&bfin_earlyprintk_port.port, s, count,
+ bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
}
/*
@@ -1311,113 +1217,205 @@ static __init void early_serial_write(struct console *con, const char *s,
*/
static struct __initdata console bfin_early_serial_console = {
.name = "early_BFuart",
- .write = early_serial_write,
+ .write = bfin_earlyprintk_console_write,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bfin_serial_reg,
};
-
-struct console __init *bfin_earlyserial_init(unsigned int port,
- unsigned int cflag)
-{
- struct bfin_serial_port *uart;
- struct ktermios t;
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
- /*
- * If we are using early serial, don't let the normal console rewind
- * log buffer, since that causes things to be printed multiple times
- */
- bfin_serial_console.flags &= ~CON_PRINTBUFFER;
#endif
- if (port == -1 || port >= nr_active_ports)
- port = 0;
- bfin_serial_init_ports();
- bfin_early_serial_console.index = port;
- uart = &bfin_serial_ports[port];
- t.c_cflag = cflag;
- t.c_iflag = 0;
- t.c_oflag = 0;
- t.c_lflag = ICANON;
- t.c_line = port;
- bfin_serial_set_termios(&uart->port, &t, &t);
- return &bfin_early_serial_console;
-}
-
-#endif /* CONFIG_EARLY_PRINTK */
-
static struct uart_driver bfin_serial_reg = {
.owner = THIS_MODULE,
- .driver_name = "bfin-uart",
- .dev_name = BFIN_SERIAL_NAME,
+ .driver_name = DRIVER_NAME,
+ .dev_name = BFIN_SERIAL_DEV_NAME,
.major = BFIN_SERIAL_MAJOR,
.minor = BFIN_SERIAL_MINOR,
.nr = BFIN_UART_NR_PORTS,
.cons = BFIN_SERIAL_CONSOLE,
};
-static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state)
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
- int i;
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_suspend_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
- }
+ return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
- return 0;
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&bfin_serial_reg, &uart->port);
}
-static int bfin_serial_resume(struct platform_device *dev)
+static int bfin_serial_probe(struct platform_device *pdev)
{
- int i;
+ struct resource *res;
+ struct bfin_serial_port *uart = NULL;
+ int ret = 0;
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_resume_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+ return -ENOENT;
}
- return 0;
-}
+ if (bfin_serial_ports[pdev->id] == NULL) {
-static int bfin_serial_probe(struct platform_device *dev)
-{
- struct resource *res = dev->resource;
- int i;
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ dev_err(&pdev->dev,
+ "fail to malloc bfin_serial_port\n");
+ return -ENOMEM;
+ }
+ bfin_serial_ports[pdev->id] = uart;
- for (i = 0; i < dev->num_resources; i++, res++)
- if (res->flags & IORESOURCE_MEM)
- break;
+#ifdef CONFIG_EARLY_PRINTK
+ if (!(bfin_earlyprintk_port.port.membase
+ && bfin_earlyprintk_port.port.line == pdev->id)) {
+ /*
+ * If the peripheral pins of the current port were already
+ * allocated in the earlyprintk probe stage, don't request them again.
+ */
+#endif
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ goto out_error_free_mem;
+ }
+#ifdef CONFIG_EARLY_PRINTK
+ }
+#endif
+
+ spin_lock_init(&uart->port.lock);
+ uart->port.uartclk = get_sclk();
+ uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ uart->port.ops = &bfin_serial_pops;
+ uart->port.line = pdev->id;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ uart->port.membase = ioremap(res->start,
+ res->end - res->start);
+ if (!uart->port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ uart->port.mapbase = res->start;
+
+ uart->port.irq = platform_get_irq(pdev, 0);
+ if (uart->port.irq < 0) {
+ dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ uart->status_irq = platform_get_irq(pdev, 1);
+ if (uart->status_irq < 0) {
+ dev_err(&pdev->dev, "No uart status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
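+ /*
+ * In DMA mode the TX and RX DMA channels come from platform
+ * DMA resources 0 and 1 respectively.
+ */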
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ spin_lock_init(&uart->rx_lock);
+ uart->tx_done = 1;
+ uart->tx_count = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->tx_dma_channel = res->start;
- if (i < dev->num_resources) {
- for (i = 0; i < nr_active_ports; i++, res++) {
- if (bfin_serial_ports[i].port.mapbase != res->start)
- continue;
- bfin_serial_ports[i].port.dev = &dev->dev;
- uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
}
+ uart->rx_dma_channel = res->start;
+
+ init_timer(&(uart->rx_dma_timer));
+#endif
+
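+ /*
+ * Optional CTS/RTS GPIOs are passed as IORESOURCE_IO 0 and 1;
+ * a missing resource means the pin is not used (-1).
+ */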
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ uart->cts_pin = -1;
+ else
+ uart->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ uart->rts_pin = -1;
+ else
+ uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+ if (uart->rts_pin >= 0)
+ gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
}
- return 0;
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ uart = bfin_serial_ports[pdev->id];
+ uart->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, uart);
+ ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ }
+#endif
+
+ if (!ret)
+ return 0;
+
+ if (uart) {
+out_error_unmap:
+ iounmap(uart->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return ret;
}
-static int bfin_serial_remove(struct platform_device *dev)
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
{
- int i;
-
- for (i = 0; i < nr_active_ports; i++) {
- if (bfin_serial_ports[i].port.dev != &dev->dev)
- continue;
- uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
- bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- gpio_free(bfin_serial_ports[i].cts_pin);
- gpio_free(bfin_serial_ports[i].rts_pin);
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (uart) {
+ uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
#endif
+ iounmap(uart->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
}
return 0;
@@ -1425,31 +1423,160 @@ static int bfin_serial_remove(struct platform_device *dev)
static struct platform_driver bfin_serial_driver = {
.probe = bfin_serial_probe,
- .remove = bfin_serial_remove,
+ .remove = __devexit_p(bfin_serial_remove),
.suspend = bfin_serial_suspend,
.resume = bfin_serial_resume,
.driver = {
- .name = "bfin-uart",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
-static int __init bfin_serial_init(void)
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+ .class_str = CLASS_BFIN_CONSOLE,
+ .pdrv = &bfin_serial_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
+ early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+ early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+ register_console(&bfin_serial_console);
+
+ return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during the earlyprintk init stage.
+ * So, do an individual probe for earlyprintk with a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
{
+ struct resource *res;
int ret;
- pr_info("Serial: Blackfin serial driver\n");
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+ return -ENOENT;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ bfin_earlyprintk_port.port.membase = ioremap(res->start,
+ res->end - res->start);
+ if (!bfin_earlyprintk_port.port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ bfin_earlyprintk_port.port.mapbase = res->start;
+ bfin_earlyprintk_port.port.line = pdev->id;
+ bfin_earlyprintk_port.port.uartclk = get_sclk();
+ bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+ return 0;
+
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+
+ return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+ .probe = bfin_earlyprintk_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+ .class_str = CLASS_BFIN_EARLYPRINTK,
+ .pdrv = &bfin_earlyprintk_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+struct console __init *bfin_earlyserial_init(unsigned int port,
+ unsigned int cflag)
+{
+ struct ktermios t;
+ char port_name[20];
+
+ if (port < 0 || port >= BFIN_UART_NR_PORTS)
+ return NULL;
+
+ /*
+ * Only probe the resources of the port named in the earlyprintk boot arg.
+ * The expected port id is encoded in the port name string.
+ */
+ snprintf(port_name, 20, DRIVER_NAME ".%d", port);
+ early_platform_driver_register(&early_bfin_earlyprintk_driver,
+ port_name);
+ early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+ if (!bfin_earlyprintk_port.port.membase)
+ return NULL;
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ /*
+ * If we are using early serial, don't let the normal console rewind
+ * the log buffer, since that causes things to be printed multiple times.
+ */
+ bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
+ bfin_early_serial_console.index = port;
+ t.c_cflag = cflag;
+ t.c_iflag = 0;
+ t.c_oflag = 0;
+ t.c_lflag = ICANON;
+ t.c_line = port;
+ bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
+ return &bfin_early_serial_console;
+}
+#endif /* CONFIG_EARLY_PRINTK */
- bfin_serial_init_ports();
+static int __init bfin_serial_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin serial driver\n");
ret = uart_register_driver(&bfin_serial_reg);
- if (ret == 0) {
- ret = platform_driver_register(&bfin_serial_driver);
- if (ret) {
- pr_debug("uart register failed\n");
- uart_unregister_driver(&bfin_serial_reg);
- }
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
+ bfin_serial_reg.driver_name, ret);
+ }
+
+ ret = platform_driver_register(&bfin_serial_driver);
+ if (ret) {
+ pr_err("fail to register bfin uart\n");
+ uart_unregister_driver(&bfin_serial_reg);
}
+
return ret;
}
@@ -1463,7 +1590,7 @@ static void __exit bfin_serial_exit(void)
module_init(bfin_serial_init);
module_exit(bfin_serial_exit);
-MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>");
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
MODULE_DESCRIPTION("Blackfin generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index e95c524d9d18..e95c524d9d18 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1d5675..6d06ce1d5675 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
diff --git a/drivers/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b6acd19b458e..b6acd19b458e 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
index e072724ea754..e072724ea754 100644
--- a/drivers/serial/cpm_uart/Makefile
+++ b/drivers/tty/serial/cpm_uart/Makefile
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index b754dcf0fda5..b754dcf0fda5 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 8692ff98fc07..8692ff98fc07 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 3fc1d66e32c6..3fc1d66e32c6 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
index 10eecd6af6d4..10eecd6af6d4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index 814ac006393f..814ac006393f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
index 7194c63dcf5f..7194c63dcf5f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
diff --git a/drivers/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index bcc31f2140ac..bcc31f2140ac 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
diff --git a/drivers/serial/crisv10.h b/drivers/tty/serial/crisv10.h
index ea0beb46a10d..ea0beb46a10d 100644
--- a/drivers/serial/crisv10.h
+++ b/drivers/tty/serial/crisv10.h
diff --git a/drivers/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d776329..57421d776329 100644
--- a/drivers/serial/dz.c
+++ b/drivers/tty/serial/dz.c
diff --git a/drivers/serial/dz.h b/drivers/tty/serial/dz.h
index faf169ed27b3..faf169ed27b3 100644
--- a/drivers/serial/dz.h
+++ b/drivers/tty/serial/dz.h
diff --git a/drivers/serial/icom.c b/drivers/tty/serial/icom.c
index 53a468227056..53a468227056 100644
--- a/drivers/serial/icom.c
+++ b/drivers/tty/serial/icom.c
diff --git a/drivers/serial/icom.h b/drivers/tty/serial/icom.h
index c8029e0025c9..c8029e0025c9 100644
--- a/drivers/serial/icom.h
+++ b/drivers/tty/serial/icom.h
diff --git a/drivers/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ab93763862d5..ab93763862d5 100644
--- a/drivers/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
diff --git a/drivers/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index deb7b8d977dc..deb7b8d977dc 100644
--- a/drivers/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
diff --git a/drivers/serial/imx.c b/drivers/tty/serial/imx.c
index dfcf4b1878aa..dfcf4b1878aa 100644
--- a/drivers/serial/imx.c
+++ b/drivers/tty/serial/imx.c
diff --git a/drivers/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index ee43efc7bdcc..ee43efc7bdcc 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
diff --git a/drivers/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index fcfe82653ac8..fcfe82653ac8 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
diff --git a/drivers/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index ebff4a1d4bcc..ebff4a1d4bcc 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
diff --git a/drivers/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
index a59a9a8341d2..a59a9a8341d2 100644
--- a/drivers/serial/ip22zilog.h
+++ b/drivers/tty/serial/ip22zilog.h
diff --git a/drivers/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
index e46b6e0f8b18..e46b6e0f8b18 100644
--- a/drivers/serial/jsm/Makefile
+++ b/drivers/tty/serial/jsm/Makefile
diff --git a/drivers/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 38a509c684cd..38a509c684cd 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c63..18f548449c63 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 7960d9633c15..7960d9633c15 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 7a4a914ecff0..7a4a914ecff0 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
diff --git a/drivers/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 25a8bc565f40..25a8bc565f40 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
diff --git a/drivers/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bea5c215460c..bea5c215460c 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
diff --git a/drivers/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
index e9b7e11793b1..e9b7e11793b1 100644
--- a/drivers/serial/m32r_sio.h
+++ b/drivers/tty/serial/m32r_sio.h
diff --git a/drivers/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..4671473793e3 100644
--- a/drivers/serial/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..7b951adac54b 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
s->rts = 0;
sprintf(b, "max3100-%d", s->minor);
- s->workqueue = create_freezeable_workqueue(b);
+ s->workqueue = create_freezable_workqueue(b);
if (!s->workqueue) {
dev_warn(&s->spi->dev, "cannot create workqueue\n");
return -EBUSY;
diff --git a/drivers/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..a1fe304f2f52 100644
--- a/drivers/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
diff --git a/drivers/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..750b4f627315 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
struct max3107_port *s = container_of(port, struct max3107_port, port);
/* Initialize work queue */
- s->workqueue = create_freezeable_workqueue("max3107");
+ s->workqueue = create_freezable_workqueue("max3107");
if (!s->workqueue) {
dev_err(&s->spi->dev, "Workqueue creation failed\n");
return -EBUSY;
diff --git a/drivers/serial/max3107.h b/drivers/tty/serial/max3107.h
index 7ab632392502..7ab632392502 100644
--- a/drivers/serial/max3107.h
+++ b/drivers/tty/serial/max3107.h
diff --git a/drivers/serial/mcf.c b/drivers/tty/serial/mcf.c
index 3394b7cc1722..3394b7cc1722 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
diff --git a/drivers/serial/mfd.c b/drivers/tty/serial/mfd.c
index d40010a22ecd..d40010a22ecd 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 126ec7f568ec..126ec7f568ec 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
diff --git a/drivers/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 6a9c6605666a..6a9c6605666a 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
diff --git a/drivers/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index b62857bf2fdb..b62857bf2fdb 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
diff --git a/drivers/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
index d1ef43af397c..d1ef43af397c 100644
--- a/drivers/serial/mrst_max3110.h
+++ b/drivers/tty/serial/mrst_max3110.h
diff --git a/drivers/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e64..8e43a7b69e64 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
diff --git a/drivers/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e98..f6ca9ca79e98 100644
--- a/drivers/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
diff --git a/drivers/serial/mux.c b/drivers/tty/serial/mux.c
index 9711e06a8374..9711e06a8374 100644
--- a/drivers/serial/mux.c
+++ b/drivers/tty/serial/mux.c
diff --git a/drivers/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 7735c9f35fa0..7735c9f35fa0 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
diff --git a/drivers/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index de173671e3d0..de173671e3d0 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
diff --git a/drivers/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 17849dcb9adc..5c7abe4c94dd 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -15,6 +15,7 @@
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/nwpserial.h>
diff --git a/drivers/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7f2f01058789..7f2f01058789 100644
--- a/drivers/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
diff --git a/drivers/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 70a61458ec42..70a61458ec42 100644
--- a/drivers/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
diff --git a/drivers/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5b9cde79e4ea..5b9cde79e4ea 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
diff --git a/drivers/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fbb1b20..cbc34fbb1b20 100644
--- a/drivers/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 0aa75a97531c..0aa75a97531c 100644
--- a/drivers/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
diff --git a/drivers/serial/pxa.c b/drivers/tty/serial/pxa.c
index 1102a39b44f5..1102a39b44f5 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
diff --git a/drivers/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
index fed1a9a1ffb4..fed1a9a1ffb4 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/tty/serial/s3c2400.c
diff --git a/drivers/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
index 73f089d3efd6..73f089d3efd6 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/tty/serial/s3c2410.c
diff --git a/drivers/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
index 1700b1a2fb7e..1700b1a2fb7e 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/tty/serial/s3c2412.c
diff --git a/drivers/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
index 094cc3904b13..094cc3904b13 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/tty/serial/s3c2440.c
diff --git a/drivers/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
index fad6083ca427..fad6083ca427 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/tty/serial/s3c24a0.c
diff --git a/drivers/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
index 4be92ab50058..4be92ab50058 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/tty/serial/s3c6400.c
diff --git a/drivers/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index 6ebccd70a707..6ebccd70a707 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
diff --git a/drivers/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 2199d819a987..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
diff --git a/drivers/serial/samsung.c b/drivers/tty/serial/samsung.c
index 7ac2bf5167cd..2335edafe903 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -883,10 +883,10 @@ static struct uart_ops s3c24xx_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
- .dev_name = "s3c2410_serial",
+ .driver_name = "s3c2410_serial",
.nr = CONFIG_SERIAL_SAMSUNG_UARTS,
.cons = S3C24XX_SERIAL_CONSOLE,
- .driver_name = S3C24XX_SERIAL_NAME,
+ .dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
diff --git a/drivers/serial/samsung.h b/drivers/tty/serial/samsung.h
index 0ac06a07d25f..0ac06a07d25f 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
diff --git a/drivers/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index a2f2b3254499..602d9845c52f 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -829,7 +829,7 @@ static void __init sbd_probe_duarts(void)
#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
/*
* Serial console stuff. Very basic, polling driver for doing serial
- * console output. The console_sem is held by the caller, so we
+ * console output. The console_lock is held by the caller, so we
* shouldn't be interrupted for more console activity.
*/
static void sbd_console_putchar(struct uart_port *uport, int ch)
diff --git a/drivers/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad2b242..75038ad2b242 100644
--- a/drivers/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
diff --git a/drivers/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 460a72d91bb7..460a72d91bb7 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
diff --git a/drivers/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..1ef4df9bf7e4 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index b1962025b1aa..b1962025b1aa 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
index ea744707c4d6..ea744707c4d6 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/tty/serial/serial_lh7a40x.c
diff --git a/drivers/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index c50e9fbbf743..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
diff --git a/drivers/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index c291b3add1d2..92c91c83edde 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -3,7 +3,7 @@
*
* SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
*
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2011 Paul Mundt
* Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
*
* based off of the old drivers/char/sh-sci.c by:
@@ -81,14 +81,22 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
+ /* SCSCR initialization */
+ unsigned int scscr;
+
+ /* SCBRR calculation algo */
+ unsigned int scbrr_algo_id;
+
/* Interface clock */
struct clk *iclk;
/* Function clock */
struct clk *fclk;
struct list_head node;
+
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
+
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct device *dma_dev;
unsigned int slave_tx;
@@ -415,9 +423,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (!(status & SCxSR_TDxE(port))) {
ctrl = sci_in(port, SCSCR);
if (uart_circ_empty(xmit))
- ctrl &= ~SCI_CTRL_FLAGS_TIE;
+ ctrl &= ~SCSCR_TIE;
else
- ctrl |= SCI_CTRL_FLAGS_TIE;
+ ctrl |= SCSCR_TIE;
sci_out(port, SCSCR, ctrl);
return;
}
@@ -459,7 +467,7 @@ static void sci_transmit_chars(struct uart_port *port)
sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
}
- ctrl |= SCI_CTRL_FLAGS_TIE;
+ ctrl |= SCSCR_TIE;
sci_out(port, SCSCR, ctrl);
}
}
@@ -708,7 +716,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
disable_irq_nosync(irq);
scr |= 0x4000;
} else {
- scr &= ~SCI_CTRL_FLAGS_RIE;
+ scr &= ~SCSCR_RIE;
}
sci_out(port, SCSCR, scr);
/* Clear current interrupt */
@@ -777,6 +785,18 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
+static inline unsigned long port_rx_irq_mask(struct uart_port *port)
+{
+ /*
+ * Not all ports (such as SCIFA) will support REIE. Rather than
+ * special-casing the port type, we check the port initialization
+ * IRQ enable mask to see whether the IRQ is desired at all. If
+ * it's unset, it's logically inferred that there's no point in
+ * testing for it.
+ */
+ return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+}
+
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
unsigned short ssr_status, scr_status, err_enabled;
@@ -786,22 +806,25 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
ssr_status = sci_in(port, SCxSR);
scr_status = sci_in(port, SCSCR);
- err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
+ err_enabled = scr_status & port_rx_irq_mask(port);
/* Tx Interrupt */
- if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) &&
+ if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
!s->chan_tx)
ret = sci_tx_interrupt(irq, ptr);
+
/*
* Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
* DR flags
*/
if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
- (scr_status & SCI_CTRL_FLAGS_RIE))
+ (scr_status & SCSCR_RIE))
ret = sci_rx_interrupt(irq, ptr);
+
/* Error Interrupt */
if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
ret = sci_er_interrupt(irq, ptr);
+
/* Break Interrupt */
if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
ret = sci_br_interrupt(irq, ptr);
@@ -951,7 +974,7 @@ static void sci_dma_tx_complete(void *arg)
schedule_work(&s->work_tx);
} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
u16 ctrl = sci_in(port, SCSCR);
- sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+ sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1214,14 +1237,16 @@ static void sci_start_tx(struct uart_port *port)
if (new != scr)
sci_out(port, SCSCR, new);
}
+
if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
s->cookie_tx < 0)
schedule_work(&s->work_tx);
#endif
+
if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
- sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+ sci_out(port, SCSCR, ctrl | SCSCR_TIE);
}
}
@@ -1231,20 +1256,24 @@ static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x8000;
- ctrl &= ~SCI_CTRL_FLAGS_TIE;
+
+ ctrl &= ~SCSCR_TIE;
+
sci_out(port, SCSCR, ctrl);
}
static void sci_start_rx(struct uart_port *port)
{
- unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
+ unsigned short ctrl;
+
+ ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
- /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
- ctrl |= sci_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x4000;
+
sci_out(port, SCSCR, ctrl);
}
@@ -1252,11 +1281,13 @@ static void sci_stop_rx(struct uart_port *port)
{
unsigned short ctrl;
- /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~0x4000;
- ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
+
+ ctrl &= ~port_rx_irq_mask(port);
+
sci_out(port, SCSCR, ctrl);
}
@@ -1296,7 +1327,7 @@ static void rx_timer_fn(unsigned long arg)
scr &= ~0x4000;
enable_irq(s->irqs[1]);
}
- sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
+ sci_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
schedule_work(&s->work_rx);
}
@@ -1442,12 +1473,31 @@ static void sci_shutdown(struct uart_port *port)
s->disable(port);
}
+static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+ unsigned long freq)
+{
+ switch (algo_id) {
+ case SCBRR_ALGO_1:
+ return ((freq + 16 * bps) / (16 * bps) - 1);
+ case SCBRR_ALGO_2:
+ return ((freq + 16 * bps) / (32 * bps) - 1);
+ case SCBRR_ALGO_3:
+ return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
+ case SCBRR_ALGO_4:
+ return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
+ case SCBRR_ALGO_5:
+ return (((freq * 1000 / 32) / bps) - 1);
+ }
+
+ /* Warn, but use a safe default */
+ WARN_ON(1);
+ return ((freq + 16 * bps) / (32 * bps) - 1);
+}
+
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct sci_port *s = to_sci_port(port);
-#endif
unsigned int status, baud, smr_val, max_baud;
int t = -1;
u16 scfcr = 0;
@@ -1464,7 +1514,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk))
- t = SCBRR_VALUE(baud, port->uartclk);
+ t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
do {
status = sci_in(port, SCxSR);
@@ -1490,7 +1540,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSMR, smr_val);
dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
- SCSCR_INIT(port));
+ s->scscr);
if (t > 0) {
if (t >= 256) {
@@ -1506,7 +1556,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_init_pins(port, termios->c_cflag);
sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
- sci_out(port, SCSCR, SCSCR_INIT(port));
+ sci_out(port, SCSCR, s->scscr);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
@@ -1679,9 +1729,11 @@ static int __devinit sci_init_single(struct platform_device *dev,
port->mapbase = p->mapbase;
port->membase = p->membase;
- port->irq = p->irqs[SCIx_TXI_IRQ];
- port->flags = p->flags;
- sci_port->type = port->type = p->type;
+ port->irq = p->irqs[SCIx_TXI_IRQ];
+ port->flags = p->flags;
+ sci_port->type = port->type = p->type;
+ sci_port->scscr = p->scscr;
+ sci_port->scbrr_algo_id = p->scbrr_algo_id;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
sci_port->dma_dev = p->dma_dev;
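
For context, the sci_scbrr_calc() helper introduced in the sh-sci.c hunk above replaces the compile-time SCBRR_VALUE() macros with a per-port algorithm ID. The fragment below is a small user-space sketch of the same divisor table, runnable on its own; the SCBRR_ALGO_* numbering and the sample clock/baud figures are assumptions for illustration, since the platform header defining the enum is not part of this diff.

/* Illustrative user-space model of sci_scbrr_calc() from this patch.
 * The algorithm IDs mirror the switch cases above; their numeric values
 * are assumed here, as the header that defines SCBRR_ALGO_* is not in
 * this hunk.
 */
#include <stdio.h>

enum { SCBRR_ALGO_1 = 1, SCBRR_ALGO_2, SCBRR_ALGO_3, SCBRR_ALGO_4, SCBRR_ALGO_5 };

static unsigned int scbrr_calc(unsigned int algo_id, unsigned int bps,
			       unsigned long freq)
{
	switch (algo_id) {
	case SCBRR_ALGO_1:
		return (freq + 16 * bps) / (16 * bps) - 1;
	case SCBRR_ALGO_2:
		return (freq + 16 * bps) / (32 * bps) - 1;
	case SCBRR_ALGO_3:
		return (freq * 2 + 16 * bps) / (16 * bps) - 1;
	case SCBRR_ALGO_4:
		return (freq * 2 + 16 * bps) / (32 * bps) - 1;
	case SCBRR_ALGO_5:
		return (freq * 1000 / 32) / bps - 1;
	}
	return (freq + 16 * bps) / (32 * bps) - 1;	/* safe default */
}

int main(void)
{
	/* illustrative numbers: 33.33 MHz peripheral clock, 115200 baud */
	unsigned long freq = 33333333;
	unsigned int bps = 115200;
	int algo;

	/* SCBRR_ALGO_5 (the H8300 case) scales the clock differently, as
	 * the *1000 factor suggests, so it is left out of this loop.
	 */
	for (algo = SCBRR_ALGO_1; algo <= SCBRR_ALGO_4; algo++)
		printf("algo %d: SCBRR = %u\n", algo,
		       scbrr_calc(algo, bps, freq));
	return 0;
}
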
diff --git a/drivers/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 4bc614e4221c..b223d6cbf33a 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -15,27 +15,17 @@
defined(CONFIG_CPU_SUBTYPE_SH7709)
# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
-# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
# define SCIF0 0xA4400000
# define SCIF2 0xA4410000
-# define SCSMR_Ir 0xA44A0000
-# define IRDA_SCIF SCIF0
# define SCPCR 0xA4000116
# define SCPDR 0xA4000136
-
-/* Set the clock source,
- * SCIF2 (0xA4410000) -> External clock, SCK pin used as clock input
- * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output
- */
-# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7367) || \
defined(CONFIG_ARCH_SH7377) || \
defined(CONFIG_ARCH_SH7372)
-# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
# define PORT_PTCR 0xA405011EUL
# define PORT_PVCR 0xA4050122UL
# define SCIF_ORER 0x0200 /* overrun error bit */
@@ -43,7 +33,6 @@
# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
@@ -53,39 +42,31 @@
# define SCSPTR1 0xffe0001c /* 8 bit SCI */
# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? \
- 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
- 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
# define PACR 0xa4050100
# define PBCR 0xa4050102
-# define SCSCR_INIT(port) 0x3B
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
# define SCSPTR1 0xffe10010 /* 16 bit SCIF */
# define SCSPTR2 0xffe20010 /* 16 bit SCIF */
# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
-# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
# define PADR 0xA4050120
# define PSDR 0xA405013e
# define PWDR 0xA4050166
# define PSCR 0xA405011E
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
# define SCSPTR0 SCPDR0
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
# define SCSPTR0 0xa4050160
# define SCSPTR1 0xa405013e
@@ -94,62 +75,38 @@
# define SCSPTR4 0xa4050128
# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) ((port)->type == PORT_SCIFA ? \
- 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
- 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_BASE_ADDR 0x01030000
-# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
# define SCIF_PTR2_OFFS 0x0000020
-# define SCIF_LSR2_OFFS 0x0000024
# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
-# define SCLSR2 ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_H8S2678)
-# define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
# define SCSPTR0 0xfe4b0020
# define SCSPTR1 0xfe4b0020
# define SCSPTR2 0xfe4b0020
# define SCIF_ORER 0x0001
-# define SCSCR_INIT(port) 0x38
# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
# define SCSPTR1 0xffe08024 /* 16 bit SCIF */
# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
# define SCSPTR0 0xff923020 /* 16 bit SCIF */
# define SCSPTR1 0xff924020 /* 16 bit SCIF */
# define SCSPTR2 0xff925020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
-
-#if defined(CONFIG_SH_SH2007)
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=0 */
-# define SCSCR_INIT(port) 0x38
-#else
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=1 */
-# define SCSCR_INIT(port) 0x3a
-#endif
-
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
@@ -159,7 +116,6 @@
# define SCSPTR4 0xffee0024 /* 16 bit SCIF */
# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
-# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
defined(CONFIG_CPU_SUBTYPE_SH7203) || \
defined(CONFIG_CPU_SUBTYPE_SH7206) || \
@@ -174,52 +130,21 @@
# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
# endif
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
# define SCSPTR2 0xffc50020 /* 16 bit SCIF */
# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
#else
# error CPU subtype not defined
#endif
-/* SCSCR */
-#define SCI_CTRL_FLAGS_TIE 0x80 /* all */
-#define SCI_CTRL_FLAGS_RIE 0x40 /* all */
-#define SCI_CTRL_FLAGS_TE 0x20 /* all */
-#define SCI_CTRL_FLAGS_RE 0x10 /* all */
-#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7722) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786) || \
- defined(CONFIG_CPU_SUBTYPE_SHX3)
-#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-#define SCI_CTRL_FLAGS_REIE ((port)->type == PORT_SCIFA ? 0 : 8)
-#else
-#define SCI_CTRL_FLAGS_REIE 0
-#endif
-/* SCI_CTRL_FLAGS_MPIE 0x08 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_CTRL_FLAGS_TEIE 0x04 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_CTRL_FLAGS_CKE1 0x02 * all */
-/* SCI_CTRL_FLAGS_CKE0 0x01 * 7707 SCI/SCIF, 7708 SCI, 7709 SCI/SCIF, 7750 SCI */
-
/* SCxSR SCI */
#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
@@ -300,23 +225,11 @@
/* SCFCR */
#define SCFCR_RFRST 0x0002
#define SCFCR_TFRST 0x0004
-#define SCFCR_TCRST 0x4000
#define SCFCR_MCE 0x0008
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
-/* Generic serial flags */
-#define SCI_RX_THROTTLE 0x0000001
-
-#define SCI_MAGIC 0xbabeface
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define SCI_EVENT_WRITE_WAKEUP 0
-
#define SCI_IN(size, offset) \
if ((size) == 8) { \
return ioread8(port->membase + (offset)); \
@@ -445,8 +358,6 @@
SCIF_FNS(SCSMR, 0x00, 16)
SCIF_FNS(SCBRR, 0x04, 8)
SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c, 8)
-SCIF_FNS(SCFER, 0x10, 16)
SCIF_FNS(SCxSR, 0x14, 16)
SCIF_FNS(SCFCR, 0x18, 16)
SCIF_FNS(SCFDR, 0x1c, 16)
@@ -476,8 +387,6 @@ SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
SCIx_FNS(SCSPTR, 0, 0, 0, 0)
-SCIF_FNS(SCTDSR, 0x0c, 8)
-SCIF_FNS(SCFER, 0x10, 16)
SCIF_FNS(SCFCR, 0x18, 16)
SCIF_FNS(SCFDR, 0x1c, 16)
SCIF_FNS(SCLSR, 0x24, 16)
@@ -503,7 +412,6 @@ SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16)
-SCIF_FNS(SCLSR2, 0, 0, 0x24, 16)
SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
@@ -597,64 +505,3 @@ static inline int sci_rxd_in(struct uart_port *port)
return 1;
}
#endif
-
-/*
- * Values for the BitRate Register (SCBRR)
- *
- * The values are actually divisors for a frequency which can
- * be internal to the SH3 (14.7456MHz) or derived from an external
- * clock source. This driver assumes the internal clock is used;
- * to support using an external clock source, config options or
- * possibly command-line options would need to be added.
- *
- * Also, to support speeds below 2400 (why?) the lower 2 bits of
- * the SCSMR register would also need to be set to non-zero values.
- *
- * -- Greg Banks 27Feb2000
- *
- * Answer: The SCBRR register is only eight bits, and the value in
- * it gets larger with lower baud rates. At around 2400 (depending on
- * the peripherial module clock) you run out of bits. However the
- * lower two bits of SCSMR allow the module clock to be divided down,
- * scaling the value which is needed in SCBRR.
- *
- * -- Stuart Menefy - 23 May 2000
- *
- * I meant, why would anyone bother with bitrates below 2400.
- *
- * -- Greg Banks - 7Jul2000
- *
- * You "speedist"! How will I use my 110bps ASR-33 teletype with paper
- * tape reader as a console!
- *
- * -- Mitch Davis - 15 Jul 2000
- */
-
-#if (defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)) && \
- !defined(CONFIG_SH_SH2007)
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
-static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
-{
- if (port->type == PORT_SCIF)
- return (clk+16*bps)/(32*bps)-1;
- else
- return ((clk*2)+16*bps)/(16*bps)-1;
-}
-#define SCBRR_VALUE(bps, clk) scbrr_calc(port, bps, clk)
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCBRR_VALUE(bps, clk) (((clk*1000/32)/bps)-1)
-#else /* Generic SH */
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(32*bps)-1)
-#endif
diff --git a/drivers/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index cff9a306660f..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
diff --git a/drivers/serial/suncore.c b/drivers/tty/serial/suncore.c
index 6381a0282ee7..6381a0282ee7 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
diff --git a/drivers/serial/suncore.h b/drivers/tty/serial/suncore.h
index db2057936c31..db2057936c31 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/tty/serial/suncore.h
diff --git a/drivers/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c9014868297d..c9014868297d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
diff --git a/drivers/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5b246b18f42f..5b246b18f42f 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
diff --git a/drivers/serial/sunsab.h b/drivers/tty/serial/sunsab.h
index b78e1f7b8050..b78e1f7b8050 100644
--- a/drivers/serial/sunsab.h
+++ b/drivers/tty/serial/sunsab.h
diff --git a/drivers/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 551ebfe3ccbb..551ebfe3ccbb 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
diff --git a/drivers/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c1967ac1c07f..c1967ac1c07f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
diff --git a/drivers/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
index 5dec7b47cc38..5dec7b47cc38 100644
--- a/drivers/serial/sunzilog.h
+++ b/drivers/tty/serial/sunzilog.h
diff --git a/drivers/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 1f36b7eb7351..1f36b7eb7351 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
diff --git a/drivers/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index 7e566766bc43..7e566766bc43 100644
--- a/drivers/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
diff --git a/drivers/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d2fce865b731..d2fce865b731 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
diff --git a/drivers/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3f4848e2174a..3f4848e2174a 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab4fa68..3beb6ab4fa68 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
diff --git a/drivers/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 322bf56c0d89..322bf56c0d89 100644
--- a/drivers/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
diff --git a/drivers/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e70315..1a7fd3e70315 100644
--- a/drivers/serial/zs.c
+++ b/drivers/tty/serial/zs.c
diff --git a/drivers/serial/zs.h b/drivers/tty/serial/zs.h
index aa921b57d827..aa921b57d827 100644
--- a/drivers/serial/zs.h
+++ b/drivers/tty/serial/zs.h
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c556ed9db13d..81f13958e751 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -46,7 +46,7 @@
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = 1;
+static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
static bool sysrq_on(void)
@@ -571,6 +571,7 @@ struct sysrq_state {
unsigned int alt_use;
bool active;
bool need_reinject;
+ bool reinjecting;
};
static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
unsigned int alt_code = sysrq->alt_use;
if (sysrq->need_reinject) {
+ /* we do not want the assignment to be reordered */
+ sysrq->reinjecting = true;
+ mb();
+
/* Simulate press and release of Alt + SysRq */
input_inject_event(handle, EV_KEY, alt_code, 1);
input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
input_inject_event(handle, EV_KEY, alt_code, 0);
input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+ mb();
+ sysrq->reinjecting = false;
}
}
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
bool was_active = sysrq->active;
bool suppress;
+ /*
+ * Do not filter anything if we are in the process of re-injecting
+ * Alt+SysRq combination.
+ */
+ if (sysrq->reinjecting)
+ return false;
+
switch (type) {
case EV_SYN:
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
sysrq->alt_use = sysrq->alt;
/*
* If nothing else will be pressed we'll need
- * to * re-inject Alt-SysRq keysroke.
+ * to re-inject Alt-SysRq keystroke.
*/
sysrq->need_reinject = true;
}
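
The reinjecting flag added above guards against the handler filtering the very events it injects: the work item marks itself busy before simulating Alt+SysRq, and the filter lets everything through while the mark is set. Below is a minimal single-threaded user-space model of that control flow; the key codes and the inject path are stand-ins, and the mb() barriers in the real code exist because the work item and the input filter may run concurrently on different CPUs.

/* Minimal user-space model of the sysrq re-injection guard added in this
 * patch.  Event codes and the "inject" path are stand-ins; only the flag
 * handling mirrors the kernel change.
 */
#include <stdbool.h>
#include <stdio.h>

static bool reinjecting;

/* Returns true when the event should be suppressed (filtered). */
static bool sysrq_filter(int keycode)
{
	if (reinjecting)
		return false;		/* let our own injection through */
	return keycode == 99;		/* pretend 99 is KEY_SYSRQ */
}

static void inject(int keycode)
{
	printf("key %d %s\n", keycode,
	       sysrq_filter(keycode) ? "filtered" : "delivered");
}

static void sysrq_reinject_alt_sysrq(void)
{
	reinjecting = true;	/* kernel: set flag, then mb(), then inject */
	inject(56);		/* Alt */
	inject(99);		/* SysRq: passes because of the flag */
	reinjecting = false;	/* kernel: mb(), then clear the flag */
}

int main(void)
{
	inject(99);			/* normally filtered */
	sysrq_reinject_alt_sysrq();	/* self-injected events get through */
	return 0;
}
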
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 464d09d97873..0065da4b11c1 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3256,8 +3256,8 @@ static ssize_t show_cons_active(struct device *dev,
struct console *c;
ssize_t count = 0;
- acquire_console_sem();
- for (c = console_drivers; c; c = c->next) {
+ console_lock();
+ for_each_console(c) {
if (!c->device)
continue;
if (!c->write)
@@ -3271,7 +3271,7 @@ static ssize_t show_cons_active(struct device *dev,
while (i--)
count += sprintf(buf + count, "%s%d%c",
cs[i]->name, cs[i]->index, i ? ' ':'\n');
- release_console_sem();
+ console_unlock();
return count;
}
@@ -3306,7 +3306,7 @@ int __init tty_init(void)
if (IS_ERR(consdev))
consdev = NULL;
else
- device_create_file(consdev, &dev_attr_active);
+ WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
#ifdef CONFIG_VT
vty_init(&console_fops);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ebae344ce910..c956ed6c83a3 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -316,9 +316,9 @@ int paste_selection(struct tty_struct *tty)
/* always called with BTM from vt_ioctl */
WARN_ON(!tty_locked());
- acquire_console_sem();
+ console_lock();
poke_blanked_console();
- release_console_sem();
+ console_unlock();
ld = tty_ldisc_ref(tty);
if (!ld) {
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index eab3a1ff99e4..a672ed192d33 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -202,7 +202,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
- acquire_console_sem();
+ console_lock();
attr = (currcons & 128);
currcons = (currcons & 127);
@@ -336,9 +336,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* the pagefault handling code may want to call printk().
*/
- release_console_sem();
+ console_unlock();
ret = copy_to_user(buf, con_buf_start, orig_count);
- acquire_console_sem();
+ console_lock();
if (ret) {
read += (orig_count - ret);
@@ -354,7 +354,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (read)
ret = read;
unlock_out:
- release_console_sem();
+ console_unlock();
mutex_unlock(&con_buf_mtx);
return ret;
}
@@ -379,7 +379,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
/* Select the proper current console and verify
* sanity of the situation under the console lock.
*/
- acquire_console_sem();
+ console_lock();
attr = (currcons & 128);
currcons = (currcons & 127);
@@ -414,9 +414,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
/* Temporarily drop the console lock so that we can read
* in the write data from userspace safely.
*/
- release_console_sem();
+ console_unlock();
ret = copy_from_user(con_buf, buf, this_round);
- acquire_console_sem();
+ console_lock();
if (ret) {
this_round -= ret;
@@ -542,7 +542,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
vcs_scr_updated(vc);
unlock_out:
- release_console_sem();
+ console_unlock();
mutex_unlock(&con_buf_mtx);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 76407eca9ab0..147ede3423df 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1003,9 +1003,9 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
struct vc_data *vc = tty->driver_data;
int ret;
- acquire_console_sem();
+ console_lock();
ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
- release_console_sem();
+ console_unlock();
return ret;
}
@@ -1271,7 +1271,7 @@ static void default_attr(struct vc_data *vc)
vc->vc_color = vc->vc_def_color;
}
-/* console_sem is held */
+/* console_lock is held */
static void csi_m(struct vc_data *vc)
{
int i;
@@ -1415,7 +1415,7 @@ int mouse_reporting(void)
return vc_cons[fg_console].d->vc_report_mouse;
}
-/* console_sem is held */
+/* console_lock is held */
static void set_mode(struct vc_data *vc, int on_off)
{
int i;
@@ -1485,7 +1485,7 @@ static void set_mode(struct vc_data *vc, int on_off)
}
}
-/* console_sem is held */
+/* console_lock is held */
static void setterm_command(struct vc_data *vc)
{
switch(vc->vc_par[0]) {
@@ -1545,7 +1545,7 @@ static void setterm_command(struct vc_data *vc)
}
}
-/* console_sem is held */
+/* console_lock is held */
static void csi_at(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->vc_x)
@@ -1555,7 +1555,7 @@ static void csi_at(struct vc_data *vc, unsigned int nr)
insert_char(vc, nr);
}
-/* console_sem is held */
+/* console_lock is held */
static void csi_L(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->vc_y)
@@ -1566,7 +1566,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
vc->vc_need_wrap = 0;
}
-/* console_sem is held */
+/* console_lock is held */
static void csi_P(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_cols - vc->vc_x)
@@ -1576,7 +1576,7 @@ static void csi_P(struct vc_data *vc, unsigned int nr)
delete_char(vc, nr);
}
-/* console_sem is held */
+/* console_lock is held */
static void csi_M(struct vc_data *vc, unsigned int nr)
{
if (nr > vc->vc_rows - vc->vc_y)
@@ -1587,7 +1587,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
vc->vc_need_wrap = 0;
}
-/* console_sem is held (except via vc_init->reset_terminal */
+/* console_lock is held (except via vc_init->reset_terminal) */
static void save_cur(struct vc_data *vc)
{
vc->vc_saved_x = vc->vc_x;
@@ -1603,7 +1603,7 @@ static void save_cur(struct vc_data *vc)
vc->vc_saved_G1 = vc->vc_G1_charset;
}
-/* console_sem is held */
+/* console_lock is held */
static void restore_cur(struct vc_data *vc)
{
gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
@@ -1625,7 +1625,7 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
ESpalette };
-/* console_sem is held (except via vc_init()) */
+/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
{
vc->vc_top = 0;
@@ -1685,7 +1685,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
csi_J(vc, 2);
}
-/* console_sem is held */
+/* console_lock is held */
static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
{
/*
@@ -2119,7 +2119,7 @@ static int is_double_width(uint32_t ucs)
return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
-/* acquires console_sem */
+/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
#ifdef VT_BUF_VRAM_ONLY
@@ -2147,11 +2147,11 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
might_sleep();
- acquire_console_sem();
+ console_lock();
vc = tty->driver_data;
if (vc == NULL) {
printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -2159,7 +2159,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
if (!vc_cons_allocated(currcons)) {
/* could this happen? */
printk_once("con_write: tty %d not allocated\n", currcons+1);
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -2375,7 +2375,7 @@ rescan_last_byte:
}
FLUSH
console_conditional_schedule();
- release_console_sem();
+ console_unlock();
notify_update(vc);
return n;
#undef FLUSH
@@ -2388,11 +2388,11 @@ rescan_last_byte:
* us to do the switches asynchronously (needed when we want
* to switch due to a keyboard interrupt). Synchronization
* with other console code and prevention of re-entrancy is
- * ensured with console_sem.
+ * ensured with console_lock.
*/
static void console_callback(struct work_struct *ignored)
{
- acquire_console_sem();
+ console_lock();
if (want_console >= 0) {
if (want_console != fg_console &&
@@ -2422,7 +2422,7 @@ static void console_callback(struct work_struct *ignored)
}
notify_update(vc_cons[fg_console].d);
- release_console_sem();
+ console_unlock();
}
int set_console(int nr)
@@ -2603,7 +2603,7 @@ static struct console vt_console_driver = {
*/
/*
- * Generally a bit racy with respect to console_sem().
- * Generally a bit racy with respect to console_sem().
+ * Generally a bit racy with respect to console_lock().
*
* There are some functions which don't need it.
*
@@ -2629,17 +2629,17 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type)
{
case TIOCL_SETSEL:
- acquire_console_sem();
+ console_lock();
ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
- release_console_sem();
+ console_unlock();
break;
case TIOCL_PASTESEL:
ret = paste_selection(tty);
break;
case TIOCL_UNBLANKSCREEN:
- acquire_console_sem();
+ console_lock();
unblank_screen();
- release_console_sem();
+ console_unlock();
break;
case TIOCL_SELLOADLUT:
ret = sel_loadlut(p);
@@ -2688,10 +2688,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
}
break;
case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
- acquire_console_sem();
+ console_lock();
ignore_poke = 1;
do_blank_screen(0);
- release_console_sem();
+ console_unlock();
break;
case TIOCL_BLANKEDSCREEN:
ret = console_blanked;
@@ -2790,11 +2790,11 @@ static void con_flush_chars(struct tty_struct *tty)
return;
/* if we race with con_close(), vt may be null */
- acquire_console_sem();
+ console_lock();
vc = tty->driver_data;
if (vc)
set_cursor(vc);
- release_console_sem();
+ console_unlock();
}
/*
@@ -2805,7 +2805,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
unsigned int currcons = tty->index;
int ret = 0;
- acquire_console_sem();
+ console_lock();
if (tty->driver_data == NULL) {
ret = vc_allocate(currcons);
if (ret == 0) {
@@ -2813,7 +2813,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
/* Still being freed */
if (vc->port.tty) {
- release_console_sem();
+ console_unlock();
return -ERESTARTSYS;
}
tty->driver_data = vc;
@@ -2827,11 +2827,11 @@ static int con_open(struct tty_struct *tty, struct file *filp)
tty->termios->c_iflag |= IUTF8;
else
tty->termios->c_iflag &= ~IUTF8;
- release_console_sem();
+ console_unlock();
return ret;
}
}
- release_console_sem();
+ console_unlock();
return ret;
}
@@ -2844,9 +2844,9 @@ static void con_shutdown(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
BUG_ON(vc == NULL);
- acquire_console_sem();
+ console_lock();
vc->port.tty = NULL;
- release_console_sem();
+ console_unlock();
tty_shutdown(tty);
}
@@ -2893,13 +2893,13 @@ static int __init con_init(void)
struct vc_data *vc;
unsigned int currcons = 0, i;
- acquire_console_sem();
+ console_lock();
if (conswitchp)
display_desc = conswitchp->con_startup();
if (!display_desc) {
fg_console = 0;
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -2946,7 +2946,7 @@ static int __init con_init(void)
printable = 1;
printk("\n");
- release_console_sem();
+ console_unlock();
#ifdef CONFIG_VT_CONSOLE
register_console(&vt_console_driver);
@@ -2994,7 +2994,7 @@ int __init vty_init(const struct file_operations *console_fops)
if (IS_ERR(tty0dev))
tty0dev = NULL;
else
- device_create_file(tty0dev, &dev_attr_active);
+ WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
vcs_init();
@@ -3037,7 +3037,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
if (!try_module_get(owner))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
/* check if driver is registered */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3122,7 +3122,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
retval = 0;
err:
- release_console_sem();
+ console_unlock();
module_put(owner);
return retval;
};
@@ -3171,7 +3171,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!try_module_get(owner))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
/* check if driver is registered and if it is unbindable */
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3185,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
if (retval) {
- release_console_sem();
+ console_unlock();
goto err;
}
@@ -3204,12 +3204,12 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
}
if (retval) {
- release_console_sem();
+ console_unlock();
goto err;
}
if (!con_is_bound(csw)) {
- release_console_sem();
+ console_unlock();
goto err;
}
@@ -3238,7 +3238,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
if (!con_is_bound(csw))
con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
- release_console_sem();
+ console_unlock();
/* ignore return value, binding should not fail */
bind_con_driver(defcsw, first, last, deflt);
err:
@@ -3538,14 +3538,14 @@ int register_con_driver(const struct consw *csw, int first, int last)
if (!try_module_get(owner))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
/* already registered */
if (con_driver->con == csw)
- retval = -EINVAL;
+ retval = -EBUSY;
}
if (retval)
@@ -3592,7 +3592,7 @@ int register_con_driver(const struct consw *csw, int first, int last)
}
err:
- release_console_sem();
+ console_unlock();
module_put(owner);
return retval;
}
@@ -3613,7 +3613,7 @@ int unregister_con_driver(const struct consw *csw)
{
int i, retval = -ENODEV;
- acquire_console_sem();
+ console_lock();
/* cannot unregister a bound driver */
if (con_is_bound(csw))
@@ -3639,7 +3639,7 @@ int unregister_con_driver(const struct consw *csw)
}
}
err:
- release_console_sem();
+ console_unlock();
return retval;
}
EXPORT_SYMBOL(unregister_con_driver);
@@ -3656,7 +3656,12 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
int err;
err = register_con_driver(csw, first, last);
-
+ /* if we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+ * but not unregistered it.
+ */
+ if (err == -EBUSY)
+ err = 0;
if (!err)
bind_con_driver(csw, first, last, deflt);
@@ -3934,9 +3939,9 @@ int con_set_cmap(unsigned char __user *arg)
{
int rc;
- acquire_console_sem();
+ console_lock();
rc = set_get_cmap (arg,1);
- release_console_sem();
+ console_unlock();
return rc;
}
@@ -3945,9 +3950,9 @@ int con_get_cmap(unsigned char __user *arg)
{
int rc;
- acquire_console_sem();
+ console_lock();
rc = set_get_cmap (arg,0);
- release_console_sem();
+ console_unlock();
return rc;
}
@@ -3994,12 +3999,12 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
} else
font.data = NULL;
- acquire_console_sem();
+ console_lock();
if (vc->vc_sw->con_font_get)
rc = vc->vc_sw->con_font_get(vc, &font);
else
rc = -ENOSYS;
- release_console_sem();
+ console_unlock();
if (rc)
goto out;
@@ -4076,12 +4081,12 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
font.data = memdup_user(op->data, size);
if (IS_ERR(font.data))
return PTR_ERR(font.data);
- acquire_console_sem();
+ console_lock();
if (vc->vc_sw->con_font_set)
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
else
rc = -ENOSYS;
- release_console_sem();
+ console_unlock();
kfree(font.data);
return rc;
}
@@ -4103,12 +4108,12 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
else
name[MAX_FONT_NAME - 1] = 0;
- acquire_console_sem();
+ console_lock();
if (vc->vc_sw->con_font_default)
rc = vc->vc_sw->con_font_default(vc, &font, s);
else
rc = -ENOSYS;
- release_console_sem();
+ console_unlock();
if (!rc) {
op->width = font.width;
op->height = font.height;
@@ -4124,7 +4129,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
- acquire_console_sem();
+ console_lock();
if (!vc->vc_sw->con_font_copy)
rc = -ENOSYS;
else if (con < 0 || !vc_cons_allocated(con))
@@ -4133,7 +4138,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
rc = 0;
else
rc = vc->vc_sw->con_font_copy(vc, con);
- release_console_sem();
+ console_unlock();
return rc;
}
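
The register_con_driver() hunk above switches the already-registered case from -EINVAL to -EBUSY, and take_over_console() now treats that case as success so a console driver that was unbound but never unregistered can be bound again. A compact user-space model of just that error-handling path follows; the function and cookie names are stand-ins, not kernel APIs.

/* User-space model of the -EBUSY handling added to take_over_console()
 * above; register_driver() and the driver cookie stand in for the
 * kernel's register_con_driver() and struct consw.
 */
#include <errno.h>
#include <stdio.h>

static const void *registered;

static int register_driver(const void *csw)
{
	if (registered == csw)
		return -EBUSY;		/* was -EINVAL before this change */
	registered = csw;
	return 0;
}

static int take_over_console_model(const void *csw)
{
	int err = register_driver(csw);

	/* A busy error only means the driver is still registered, e.g. it
	 * was unbound earlier but never unregistered; binding again is OK.
	 */
	if (err == -EBUSY)
		err = 0;
	if (!err)
		printf("driver bound\n");
	return err;
}

int main(void)
{
	static const char fbcon_cookie;		/* dummy driver identity */

	take_over_console_model(&fbcon_cookie);	/* first take-over: registers */
	take_over_console_model(&fbcon_cookie);	/* second: -EBUSY, still binds */
	return 0;
}
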
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0fb4611..1235ebda6e1c 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -649,12 +649,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* explicitly blank/unblank the screen if switching modes
*/
- acquire_console_sem();
+ console_lock();
if (arg == KD_TEXT)
do_unblank_screen(1);
else
do_blank_screen(1);
- release_console_sem();
+ console_unlock();
break;
case KDGETMODE:
@@ -893,7 +893,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -EINVAL;
goto out;
}
- acquire_console_sem();
+ console_lock();
vc->vt_mode = tmp;
/* the frsig is ignored, so we set it to 0 */
vc->vt_mode.frsig = 0;
@@ -901,7 +901,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
vc->vt_pid = get_pid(task_pid(current));
/* no switch is required -- saw@shade.msu.ru */
vc->vt_newvt = -1;
- release_console_sem();
+ console_unlock();
break;
}
@@ -910,9 +910,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
struct vt_mode tmp;
int rc;
- acquire_console_sem();
+ console_lock();
memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
- release_console_sem();
+ console_unlock();
rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
if (rc)
@@ -965,9 +965,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -ENXIO;
else {
arg--;
- acquire_console_sem();
+ console_lock();
ret = vc_allocate(arg);
- release_console_sem();
+ console_unlock();
if (ret)
break;
set_console(arg);
@@ -990,7 +990,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -ENXIO;
else {
vsa.console--;
- acquire_console_sem();
+ console_lock();
ret = vc_allocate(vsa.console);
if (ret == 0) {
struct vc_data *nvc;
@@ -1003,7 +1003,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
put_pid(nvc->vt_pid);
nvc->vt_pid = get_pid(task_pid(current));
}
- release_console_sem();
+ console_unlock();
if (ret)
break;
/* Commence switch and lock */
@@ -1044,7 +1044,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* Switching-from response
*/
- acquire_console_sem();
+ console_lock();
if (vc->vt_newvt >= 0) {
if (arg == 0)
/*
@@ -1063,7 +1063,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
vc->vt_newvt = -1;
ret = vc_allocate(newvt);
if (ret) {
- release_console_sem();
+ console_unlock();
break;
}
/*
@@ -1083,7 +1083,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
if (arg != VT_ACKACQ)
ret = -EINVAL;
}
- release_console_sem();
+ console_unlock();
break;
/*
@@ -1096,20 +1096,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
}
if (arg == 0) {
/* deallocate all unused consoles, but leave 0 */
- acquire_console_sem();
+ console_lock();
for (i=1; i<MAX_NR_CONSOLES; i++)
if (! VT_BUSY(i))
vc_deallocate(i);
- release_console_sem();
+ console_unlock();
} else {
/* deallocate a single console, if possible */
arg--;
if (VT_BUSY(arg))
ret = -EBUSY;
else if (arg) { /* leave 0 */
- acquire_console_sem();
+ console_lock();
vc_deallocate(arg);
- release_console_sem();
+ console_unlock();
}
}
break;
@@ -1126,7 +1126,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
get_user(cc, &vtsizes->v_cols))
ret = -EFAULT;
else {
- acquire_console_sem();
+ console_lock();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
vc = vc_cons[i].d;
@@ -1135,7 +1135,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
vc_resize(vc_cons[i].d, cc, ll);
}
}
- release_console_sem();
+ console_unlock();
}
break;
}
@@ -1187,14 +1187,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons[i].d)
continue;
- acquire_console_sem();
+ console_lock();
if (vlin)
vc_cons[i].d->vc_scan_lines = vlin;
if (clin)
vc_cons[i].d->vc_font.height = clin;
vc_cons[i].d->vc_resize_user = 1;
vc_resize(vc_cons[i].d, cc, ll);
- release_console_sem();
+ console_unlock();
}
break;
}
@@ -1367,7 +1367,7 @@ void vc_SAK(struct work_struct *work)
struct vc_data *vc;
struct tty_struct *tty;
- acquire_console_sem();
+ console_lock();
vc = vc_con->d;
if (vc) {
tty = vc->port.tty;
@@ -1379,7 +1379,7 @@ void vc_SAK(struct work_struct *work)
__do_SAK(tty);
reset_vc(vc);
}
- release_console_sem();
+ console_unlock();
}
#ifdef CONFIG_COMPAT
@@ -1737,10 +1737,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
{
int prev;
- acquire_console_sem();
+ console_lock();
/* Graphics mode - up to X */
if (disable_vt_switch) {
- release_console_sem();
+ console_unlock();
return 0;
}
prev = fg_console;
@@ -1748,7 +1748,7 @@ int vt_move_to_console(unsigned int vt, int alloc)
if (alloc && vc_allocate(vt)) {
/* we can't have a free VC for now. Too bad,
* we don't want to mess the screen for now. */
- release_console_sem();
+ console_unlock();
return -ENOSPC;
}
@@ -1758,10 +1758,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
* Let the calling function know so it can decide
* what to do.
*/
- release_console_sem();
+ console_unlock();
return -EIO;
}
- release_console_sem();
+ console_unlock();
tty_lock();
if (vt_waitactive(vt + 1)) {
pr_debug("Suspend: Can't switch VCs.");
@@ -1781,8 +1781,8 @@ int vt_move_to_console(unsigned int vt, int alloc)
*/
void pm_set_vt_switch(int do_switch)
{
- acquire_console_sem();
+ console_lock();
disable_vt_switch = !do_switch;
- release_console_sem();
+ console_unlock();
}
EXPORT_SYMBOL(pm_set_vt_switch);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede989ff22..4ab49d4eebf4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6ee4451bfe2d..47085e5879ab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -342,7 +342,7 @@ static ssize_t wdm_write
goto outnp;
}
- if (!file->f_flags && O_NONBLOCK)
+ if (!(file->f_flags & O_NONBLOCK))
r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
&desc->flags));
else
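
The one-character class of bug fixed in cdc-wdm above is worth spelling out: in the old expression, !file->f_flags && O_NONBLOCK first negates the entire flags word and then ANDs the result with a non-zero constant, so the driver only blocked when no open flags were set at all. The short user-space demonstration below contrasts the two tests; it relies only on the standard O_* constants.

/* Demonstrates the operator fix in cdc-wdm above: the old test checks
 * whether f_flags is zero at all, not whether O_NONBLOCK is clear.
 */
#include <fcntl.h>
#include <stdio.h>

static void check(unsigned int f_flags)
{
	int old_test = !f_flags && O_NONBLOCK;	/* buggy negation/AND mix-up */
	int new_test = !(f_flags & O_NONBLOCK);	/* the fixed expression */

	printf("f_flags=%#x  wait(old)=%d  wait(new)=%d\n",
	       f_flags, old_test, new_test);
}

int main(void)
{
	check(0);			/* both wait: no flags, blocking I/O */
	check(O_RDWR);			/* old wrongly skips the blocking wait */
	check(O_RDWR | O_NONBLOCK);	/* both skip the wait, as intended */
	return 0;
}
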
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index bcc24779ba0e..18d02e32a3d5 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -123,9 +123,9 @@ config USB_OTG
config USB_OTG_WHITELIST
bool "Rely on OTG Targeted Peripherals List"
- depends on USB_OTG || EMBEDDED
+ depends on USB_OTG || EXPERT
default y if USB_OTG
- default n if EMBEDDED
+ default n if EXPERT
help
If you say Y here, the "otg_whitelist.h" file will be used as a
product whitelist, so USB peripherals not listed there will be
@@ -141,7 +141,7 @@ config USB_OTG_WHITELIST
config USB_OTG_BLACKLIST_HUB
bool "Disable external hubs"
- depends on USB_OTG || EMBEDDED
+ depends on USB_OTG || EXPERT
help
If you say Y here, then Linux will refuse to enumerate
external hubs. OTG hosts are allowed to reduce hardware
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index b9278a1fb9e5..fca61720b873 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && intf->dev.power.status == DPM_ON) {
+ } else if (!error && !intf->dev.power.in_suspend) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- if (intf->dev.power.status == DPM_ON) {
+ if (!intf->dev.power.in_suspend) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)
@@ -1107,8 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0 &&
- intf->dev.power.status == DPM_ON) {
+ if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 9da250563027..df502a98d0df 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -192,12 +192,12 @@ int usb_create_ep_devs(struct device *parent,
ep_dev->dev.parent = parent;
ep_dev->dev.release = ep_device_release;
dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
- device_enable_async_suspend(&ep_dev->dev);
retval = device_register(&ep_dev->dev);
if (retval)
goto error_register;
+ device_enable_async_suspend(&ep_dev->dev);
endpoint->ep_dev = ep_dev;
return retval;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d46070a25..f71e8e307e0f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
return retval;
}
- synchronize_irq(pci_dev->irq);
+ /* If MSI-X is enabled, the driver will have synchronized all vectors
+ * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+ * synchronized here.
+ */
+ if (!hcd->msix_enabled)
+ synchronize_irq(pci_dev->irq);
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017fa62b..e935f71d7a34 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
dev_dbg(&rhdev->dev, "usb %s%s\n",
(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
- clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
if (!hcd->driver->bus_resume)
return -ENOENT;
if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
hcd->state = HC_STATE_RESUMING;
status = hcd->driver->bus_resume(hcd);
+ clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
if (status == 0) {
/* TRSMRCY = 10 msec */
msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae6a1cf..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
+ struct usb_hcd *hcd;
+ int ret;
int port1;
int status;
bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
usb_autopm_get_interface_no_resume(
to_usb_interface(hub->intfdev));
return; /* Continues at init2: below */
+ } else if (type == HUB_RESET_RESUME) {
+ /* The internal host controller state for the hub device
+ * may be gone after a host power loss on system resume.
+ * Update the device's info so the HW knows it's a hub.
+ */
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->update_hub_device) {
+ ret = hcd->driver->update_hub_device(hcd, hdev,
+ &hub->tt, GFP_NOIO);
+ if (ret < 0) {
+ dev_err(hub->intfdev, "Host not "
+ "accepting hub info "
+ "update.\n");
+ dev_err(hub->intfdev, "LS/FS devices "
+ "and hubs may not work "
+ "under this hub\n.");
+ }
+ }
+ hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
@@ -2660,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
- /* Don't reset USB 3.0 devices during an initial setup */
- usb_set_device_state(udev, USB_STATE_DEFAULT);
- } else {
- /* Reset the device; full speed may morph to high speed */
- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
- goto fail;
- /* success, speed is known */
- }
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2732,6 +2749,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
+ if (!hub->tt.hub) {
+ dev_err(&udev->dev, "parent hub has no TT\n");
+ retval = -EINVAL;
+ goto fail;
+ }
udev->tt = &hub->tt;
udev->ttport = port1;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c595432d6f..81ce6a8e1d94 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1dc9739277b4..d50099675f28 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -509,7 +509,7 @@ config USB_LANGWELL
select USB_GADGET_SELECTED
config USB_GADGET_EG20T
- boolean "Intel EG20T(Topcliff) USB Device controller"
+ boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -525,6 +525,11 @@ config USB_GADGET_EG20T
This driver does not support interrupt transfer or isochronous
transfer modes.
+	  This driver can also be used for OKI SEMICONDUCTOR's ML7213, which is
+	  intended for IVI (In-Vehicle Infotainment) use.
+	  ML7213 is a companion chip for the Intel Atom E6xx series and is
+	  fully compatible with the Intel EG20T PCH.
+
config USB_EG20T
tristate
depends on USB_GADGET_EG20T
@@ -541,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM
ci13xxx_udc core.
This driver depends on OTG driver for PHY initialization,
clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which
+	  have an external PHY.
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 31656a2b4ab4..a1c67ae1572a 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -76,10 +76,21 @@ static DEFINE_SPINLOCK(udc_lock);
/* control endpoint description */
static const struct usb_endpoint_descriptor
-ctrl_endpt_desc = {
+ctrl_endpt_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
@@ -265,10 +276,10 @@ static int hw_device_init(void __iomem *base)
hw_bank.size /= sizeof(u32);
reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
- if (reg == 0 || reg > ENDPT_MAX)
- return -ENODEV;
+ hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */
- hw_ep_max = reg; /* cache hw ENDPT_MAX */
+ if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+ return -ENODEV;
/* setup lock mode ? */
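With the endpoints now tracked as separate IN and OUT entries, the device-endpoint count read from DCCPARAMS above is doubled before it is range-checked. A minimal sketch of that calculation, with illustrative names, assuming DEN reports the number of bidirectional hardware endpoints:

    #include <linux/errno.h>

    /* Illustrative only: derive the per-direction endpoint count from the
     * controller's DEN field and validate it against the driver limit.
     */
    static int compute_hw_ep_max(unsigned int den, unsigned int endpt_max)
    {
            unsigned int hw_ep_max = den * 2;   /* one IN + one OUT entry each */

            if (hw_ep_max == 0 || hw_ep_max > endpt_max)
                    return -ENODEV;             /* bogus or unsupported config */
            return hw_ep_max;
    }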
@@ -1197,16 +1208,17 @@ static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
}
spin_lock_irqsave(udc->lock, flags);
- for (i = 0; i < hw_ep_max; i++) {
- struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ for (i = 0; i < hw_ep_max/2; i++) {
+ struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+ struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
n += scnprintf(buf + n, PAGE_SIZE - n,
"EP=%02i: RX=%08X TX=%08X\n",
- i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+ i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
n += scnprintf(buf + n, PAGE_SIZE - n,
" %04X: %08X %08X\n", j,
- *((u32 *)mEp->qh[RX].ptr + j),
- *((u32 *)mEp->qh[TX].ptr + j));
+ *((u32 *)mEpRx->qh.ptr + j),
+ *((u32 *)mEpTx->qh.ptr + j));
}
}
spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
unsigned long flags;
struct list_head *ptr = NULL;
struct ci13xxx_req *req = NULL;
- unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+ unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
dbg_trace("[%s] %p\n", __func__, buf);
if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
spin_lock_irqsave(udc->lock, flags);
for (i = 0; i < hw_ep_max; i++)
- for (k = RX; k <= TX; k++)
- list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
- {
- req = list_entry(ptr,
- struct ci13xxx_req, queue);
+ list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+ {
+ req = list_entry(ptr, struct ci13xxx_req, queue);
+
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "EP=%02i: TD=%08X %s\n",
+ i % hw_ep_max/2, (u32)req->dma,
+ ((i < hw_ep_max/2) ? "RX" : "TX"));
+ for (j = 0; j < qSize; j++)
n += scnprintf(buf + n, PAGE_SIZE - n,
- "EP=%02i: TD=%08X %s\n",
- i, (u32)req->dma,
- ((k == RX) ? "RX" : "TX"));
-
- for (j = 0; j < qSize; j++)
- n += scnprintf(buf + n, PAGE_SIZE - n,
- " %04X: %08X\n", j,
- *((u32 *)req->ptr + j));
- }
+ " %04X: %08X\n", j,
+ *((u32 *)req->ptr + j));
+ }
spin_unlock_irqrestore(udc->lock, flags);
return n;
@@ -1467,12 +1477,12 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
* At this point it's guaranteed exclusive access to qhead
* (endpt is not primed) so there is no need to use the tripwire
*/
- mEp->qh[mEp->dir].ptr->td.next = mReq->dma; /* TERMINATE = 0 */
- mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS; /* clear status */
+ mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
+ mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
if (mReq->req.zero == 0)
- mEp->qh[mEp->dir].ptr->cap |= QH_ZLT;
+ mEp->qh.ptr->cap |= QH_ZLT;
else
- mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+ mEp->qh.ptr->cap &= ~QH_ZLT;
wmb(); /* synchronize before ep prime */
@@ -1542,11 +1552,11 @@ __acquires(mEp->lock)
hw_ep_flush(mEp->num, mEp->dir);
- while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+ while (!list_empty(&mEp->qh.queue)) {
/* pop oldest request */
struct ci13xxx_req *mReq = \
- list_entry(mEp->qh[mEp->dir].queue.next,
+ list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue);
list_del_init(&mReq->queue);
mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
{
struct usb_ep *ep;
struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
- struct ci13xxx_ep *mEp = container_of(gadget->ep0,
- struct ci13xxx_ep, ep);
trace("%p", gadget);
@@ -1583,7 +1591,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
}
- usb_ep_fifo_flush(gadget->ep0);
+ usb_ep_fifo_flush(&udc->ep0out.ep);
+ usb_ep_fifo_flush(&udc->ep0in.ep);
udc->driver->disconnect(gadget);
@@ -1591,11 +1600,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
gadget_for_each_ep(ep, gadget) {
usb_ep_disable(ep);
}
- usb_ep_disable(gadget->ep0);
+ usb_ep_disable(&udc->ep0out.ep);
+ usb_ep_disable(&udc->ep0in.ep);
- if (mEp->status != NULL) {
- usb_ep_free_request(gadget->ep0, mEp->status);
- mEp->status = NULL;
+ if (udc->status != NULL) {
+ usb_ep_free_request(&udc->ep0in.ep, udc->status);
+ udc->status = NULL;
}
return 0;
@@ -1614,7 +1624,6 @@ static void isr_reset_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
- struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
int retval;
trace("%p", udc);
@@ -1635,11 +1644,15 @@ __acquires(udc->lock)
if (retval)
goto done;
- retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+ retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+ if (retval)
+ goto done;
+
+ retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
if (!retval) {
- mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
- if (mEp->status == NULL) {
- usb_ep_disable(&mEp->ep);
+ udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+ if (udc->status == NULL) {
+ usb_ep_disable(&udc->ep0out.ep);
retval = -ENOMEM;
}
}
@@ -1672,16 +1685,17 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
/**
* isr_get_status_response: get_status request response
- * @ep: endpoint
+ * @udc: udc struct
* @setup: setup request packet
*
* This function returns an error code
*/
-static int isr_get_status_response(struct ci13xxx_ep *mEp,
+static int isr_get_status_response(struct ci13xxx *udc,
struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
+ struct ci13xxx_ep *mEp = &udc->ep0in;
struct usb_request *req = NULL;
gfp_t gfp_flags = GFP_ATOMIC;
int dir, num, retval;
@@ -1736,27 +1750,23 @@ __acquires(mEp->lock)
/**
* isr_setup_status_phase: queues the status phase of a setup transation
- * @mEp: endpoint
+ * @udc: udc struct
*
* This function returns an error code
*/
-static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+static int isr_setup_status_phase(struct ci13xxx *udc)
__releases(mEp->lock)
__acquires(mEp->lock)
{
int retval;
+ struct ci13xxx_ep *mEp;
- trace("%p", mEp);
-
- /* mEp is always valid & configured */
-
- if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
- mEp->dir = (mEp->dir == TX) ? RX : TX;
+ trace("%p", udc);
- mEp->status->no_interrupt = 1;
+ mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
spin_unlock(mEp->lock);
- retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+ retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
spin_lock(mEp->lock);
return retval;
@@ -1778,11 +1788,11 @@ __acquires(mEp->lock)
trace("%p", mEp);
- if (list_empty(&mEp->qh[mEp->dir].queue))
+ if (list_empty(&mEp->qh.queue))
return -EINVAL;
/* pop oldest request */
- mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+ mReq = list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue);
list_del_init(&mReq->queue);
@@ -1794,10 +1804,10 @@ __acquires(mEp->lock)
dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
- if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+ if (!list_empty(&mEp->qh.queue)) {
struct ci13xxx_req* mReqEnq;
- mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+ mReqEnq = list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue);
_hardware_enqueue(mEp, mReqEnq);
}
@@ -1836,16 +1846,14 @@ __acquires(udc->lock)
int type, num, err = -EINVAL;
struct usb_ctrlrequest req;
-
if (mEp->desc == NULL)
continue; /* not configured */
- if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
- (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+ if (hw_test_and_clear_complete(i)) {
err = isr_tr_complete_low(mEp);
if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
if (err > 0) /* needs status phase */
- err = isr_setup_status_phase(mEp);
+ err = isr_setup_status_phase(udc);
if (err < 0) {
dbg_event(_usb_addr(mEp),
"ERROR", err);
@@ -1866,15 +1874,22 @@ __acquires(udc->lock)
continue;
}
+ /*
+ * Flush data and handshake transactions of previous
+ * setup packet.
+ */
+ _ep_nuke(&udc->ep0out);
+ _ep_nuke(&udc->ep0in);
+
/* read_setup_packet */
do {
hw_test_and_set_setup_guard();
- memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+ memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
} while (!hw_test_and_clear_setup_guard());
type = req.bRequestType;
- mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+ udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
dbg_setup(_usb_addr(mEp), &req);
@@ -1895,7 +1910,7 @@ __acquires(udc->lock)
if (err)
break;
}
- err = isr_setup_status_phase(mEp);
+ err = isr_setup_status_phase(udc);
break;
case USB_REQ_GET_STATUS:
if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
@@ -1905,7 +1920,7 @@ __acquires(udc->lock)
if (le16_to_cpu(req.wLength) != 2 ||
le16_to_cpu(req.wValue) != 0)
break;
- err = isr_get_status_response(mEp, &req);
+ err = isr_get_status_response(udc, &req);
break;
case USB_REQ_SET_ADDRESS:
if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@ __acquires(udc->lock)
err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
if (err)
break;
- err = isr_setup_status_phase(mEp);
+ err = isr_setup_status_phase(udc);
break;
case USB_REQ_SET_FEATURE:
if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@ __acquires(udc->lock)
spin_lock(udc->lock);
if (err)
break;
- err = isr_setup_status_phase(mEp);
+ err = isr_setup_status_phase(udc);
break;
default:
delegate:
if (req.wLength == 0) /* no data phase */
- mEp->dir = TX;
+ udc->ep0_dir = TX;
spin_unlock(udc->lock);
err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@ static int ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
- int direction, retval = 0;
+ int retval = 0;
unsigned long flags;
trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@ static int ep_enable(struct usb_ep *ep,
mEp->desc = desc;
- if (!list_empty(&mEp->qh[mEp->dir].queue))
+ if (!list_empty(&mEp->qh.queue))
warn("enabling a non-empty endpoint!");
mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@ static int ep_enable(struct usb_ep *ep,
mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
- direction = mEp->dir;
- do {
- dbg_event(_usb_addr(mEp), "ENABLE", 0);
+ dbg_event(_usb_addr(mEp), "ENABLE", 0);
- mEp->qh[mEp->dir].ptr->cap = 0;
+ mEp->qh.ptr->cap = 0;
- if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
- mEp->qh[mEp->dir].ptr->cap |= QH_IOS;
- else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
- mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
- else
- mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
-
- mEp->qh[mEp->dir].ptr->cap |=
- (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
- mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE; /* needed? */
-
- retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+ if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+ mEp->qh.ptr->cap |= QH_IOS;
+ else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+ mEp->qh.ptr->cap &= ~QH_MULT;
+ else
+ mEp->qh.ptr->cap &= ~QH_ZLT;
- if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
- mEp->dir = (mEp->dir == TX) ? RX : TX;
+ mEp->qh.ptr->cap |=
+ (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+ mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
- } while (mEp->dir != direction);
+ retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
spin_unlock_irqrestore(mEp->lock, flags);
return retval;
@@ -2146,7 +2154,7 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
spin_lock_irqsave(mEp->lock, flags);
if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
- !list_empty(&mEp->qh[mEp->dir].queue)) {
+ !list_empty(&mEp->qh.queue)) {
_ep_nuke(mEp);
retval = -EOVERFLOW;
warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
/* push request */
mReq->req.status = -EINPROGRESS;
mReq->req.actual = 0;
- list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+ list_add_tail(&mReq->queue, &mEp->qh.queue);
- if (list_is_singular(&mEp->qh[mEp->dir].queue))
+ if (list_is_singular(&mEp->qh.queue))
retval = _hardware_enqueue(mEp, mReq);
if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
trace("%p, %p", ep, req);
if (ep == NULL || req == NULL || mEp->desc == NULL ||
- list_empty(&mReq->queue) || list_empty(&mEp->qh[mEp->dir].queue))
+ list_empty(&mReq->queue) || list_empty(&mEp->qh.queue))
return -EINVAL;
spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@ static int ep_set_halt(struct usb_ep *ep, int value)
#ifndef STALL_IN
/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
- !list_empty(&mEp->qh[mEp->dir].queue)) {
+ !list_empty(&mEp->qh.queue)) {
spin_unlock_irqrestore(mEp->lock, flags);
return -EAGAIN;
}
@@ -2355,7 +2363,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
hw_device_reset(udc);
- hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+ hw_device_state(udc->ep0out.qh.dma);
} else {
hw_device_state(0);
if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct ci13xxx *udc = _udc;
- unsigned long i, k, flags;
+ unsigned long flags;
+ int i, j;
int retval = -ENOMEM;
trace("%p", driver);
@@ -2427,45 +2436,46 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
info("hw_ep_max = %d", hw_ep_max);
- udc->driver = driver;
udc->gadget.dev.driver = NULL;
retval = 0;
- for (i = 0; i < hw_ep_max; i++) {
- struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+ for (i = 0; i < hw_ep_max/2; i++) {
+ for (j = RX; j <= TX; j++) {
+ int k = i + j * hw_ep_max/2;
+ struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
- scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+ scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+ (j == TX) ? "in" : "out");
- mEp->lock = udc->lock;
- mEp->device = &udc->gadget.dev;
- mEp->td_pool = udc->td_pool;
+ mEp->lock = udc->lock;
+ mEp->device = &udc->gadget.dev;
+ mEp->td_pool = udc->td_pool;
- mEp->ep.name = mEp->name;
- mEp->ep.ops = &usb_ep_ops;
- mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+ mEp->ep.name = mEp->name;
+ mEp->ep.ops = &usb_ep_ops;
+ mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
- /* this allocation cannot be random */
- for (k = RX; k <= TX; k++) {
- INIT_LIST_HEAD(&mEp->qh[k].queue);
+ INIT_LIST_HEAD(&mEp->qh.queue);
spin_unlock_irqrestore(udc->lock, flags);
- mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
- GFP_KERNEL,
- &mEp->qh[k].dma);
+ mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+ &mEp->qh.dma);
spin_lock_irqsave(udc->lock, flags);
- if (mEp->qh[k].ptr == NULL)
+ if (mEp->qh.ptr == NULL)
retval = -ENOMEM;
else
- memset(mEp->qh[k].ptr, 0,
- sizeof(*mEp->qh[k].ptr));
- }
- if (i == 0)
- udc->gadget.ep0 = &mEp->ep;
- else
+ memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+ /* skip ep0 out and in endpoints */
+ if (i == 0)
+ continue;
+
list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+ }
}
if (retval)
goto done;
+ udc->gadget.ep0 = &udc->ep0in.ep;
/* bind gadget */
driver->driver.bus = NULL;
udc->gadget.dev.driver = &driver->driver;
@@ -2479,6 +2489,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
goto done;
}
+ udc->driver = driver;
pm_runtime_get_sync(&udc->gadget.dev);
if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
if (udc->vbus_active) {
@@ -2490,14 +2501,12 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
}
}
- retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+ retval = hw_device_state(udc->ep0out.qh.dma);
if (retval)
pm_runtime_put_sync(&udc->gadget.dev);
done:
spin_unlock_irqrestore(udc->lock, flags);
- if (retval)
- usb_gadget_unregister_driver(driver);
return retval;
}
EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -2510,7 +2519,7 @@ EXPORT_SYMBOL(usb_gadget_probe_driver);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
struct ci13xxx *udc = _udc;
- unsigned long i, k, flags;
+ unsigned long i, flags;
trace("%p", driver);
@@ -2546,17 +2555,14 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
for (i = 0; i < hw_ep_max; i++) {
struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
- if (i == 0)
- udc->gadget.ep0 = NULL;
- else if (!list_empty(&mEp->ep.ep_list))
+ if (!list_empty(&mEp->ep.ep_list))
list_del_init(&mEp->ep.ep_list);
- for (k = RX; k <= TX; k++)
- if (mEp->qh[k].ptr != NULL)
- dma_pool_free(udc->qh_pool,
- mEp->qh[k].ptr, mEp->qh[k].dma);
+ if (mEp->qh.ptr != NULL)
+ dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
}
+ udc->gadget.ep0 = NULL;
udc->driver = NULL;
spin_unlock_irqrestore(udc->lock, flags);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index f61fed07f76b..a2492b65f98c 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -20,7 +20,7 @@
* DEFINE
*****************************************************************************/
#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
-#define ENDPT_MAX (16)
+#define ENDPT_MAX (32)
#define CTRL_PAYLOAD_MAX (64)
#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
@@ -88,8 +88,7 @@ struct ci13xxx_ep {
struct list_head queue;
struct ci13xxx_qh *ptr;
dma_addr_t dma;
- } qh[2];
- struct usb_request *status;
+ } qh;
int wedge;
/* global resources */
@@ -119,9 +118,13 @@ struct ci13xxx {
struct dma_pool *qh_pool; /* DMA pool for queue heads */
struct dma_pool *td_pool; /* DMA pool for transfer descs */
+ struct usb_request *status; /* ep0 status request */
struct usb_gadget gadget; /* USB slave device */
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+ u32 ep0_dir; /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in ci13xxx_ep[16]
struct usb_gadget_driver *driver; /* 3rd party gadget driver */
struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
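With ENDPT_MAX doubled to 32, every hardware endpoint number now owns two ci13xxx_ep slots: OUT endpoints sit in the lower half of the array and IN endpoints in the upper half, which is exactly what the ep0out/ep0in macros above encode. A sketch of that indexing rule, using the RX/TX constants from this header (the helper itself is illustrative):

    /* Illustrative only: map (endpoint number, direction) to an index in
     * ci13xxx_ep[], matching the layout implied by ep0out/ep0in.
     */
    static inline unsigned int ci_ep_index(unsigned int num, int dir,
                                           unsigned int hw_ep_max)
    {
            /* RX (OUT) endpoints fill indices 0..hw_ep_max/2-1,
             * TX (IN) endpoints fill the upper half.
             */
            return num + ((dir == TX) ? hw_ep_max / 2 : 0);
    }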
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f6ff8456d52d..1ba4befe336b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -928,8 +928,9 @@ unknown:
*/
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- if (cdev->config)
- f = cdev->config->interface[intf];
+ if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+ break;
+ f = cdev->config->interface[intf];
break;
case USB_RECIP_ENDPOINT:
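The added test rejects an out-of-range interface index before it is used to index cdev->config->interface[]. The same guard in isolation, with the table size passed explicitly for the sketch:

    /* Illustrative only: validate the wIndex-derived interface number
     * before touching the table.
     */
    struct usb_function;

    static struct usb_function *lookup_interface(struct usb_function *table[],
                                                 unsigned int n_entries,
                                                 unsigned int w_index)
    {
            if (!table || w_index >= n_entries)
                    return NULL;        /* never read past the array */
            return table[w_index];
    }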
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb2308f56..6d8e533949eb 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
#include "gadget_chips.h"
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
return ERR_PTR(-ENOMEM);
common->free_storage_on_release = 1;
} else {
- memset(common, 0, sizeof common);
+ memset(common, 0, sizeof *common);
common->free_storage_on_release = 0;
}
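The one-character memset fix matters because `sizeof common` is the size of the pointer variable (4 or 8 bytes), while `sizeof *common` is the size of the structure being reused, so the old code left most of the struct uninitialized. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    struct fsg_common_like { char pad[256]; };      /* stand-in struct */

    int main(void)
    {
            struct fsg_common_like obj, *common = &obj;

            printf("sizeof common = %zu, sizeof *common = %zu\n",
                   sizeof common, sizeof *common);  /* e.g. 8 vs 256 */

            memset(common, 0, sizeof *common);      /* clears the whole object */
            return 0;
    }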
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a058745..5e1495097ec3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (unlikely(!skb))
break;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
- req->actual);
- page = NULL;
- if (req->actual < req->length) { /* Last fragment */
+ if (skb->len == 0) { /* First fragment */
skb->protocol = htons(ETH_P_PHONET);
skb_reset_mac_header(skb);
- pskb_pull(skb, 1);
+ /* Can't use pskb_pull() on page in IRQ */
+ memcpy(skb_put(skb, 1), page_address(page), 1);
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb->len == 0, req->actual);
+ page = NULL;
+
+ if (req->actual < req->length) { /* Last fragment */
skb->dev = dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 1210534822d6..5408186afc35 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1320,7 +1320,7 @@ static struct imx_udc_struct controller = {
};
/*******************************************************************************
- * USB gadged driver functions
+ * USB gadget driver functions
*******************************************************************************
*/
int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index 777972454e3e..1eca8b47ce3c 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -3086,7 +3086,7 @@ static void langwell_udc_remove(struct pci_dev *pdev)
kfree(dev->ep);
- /* diable IRQ handler */
+ /* disable IRQ handler */
if (dev->got_irq)
free_irq(pdev->irq, dev);
@@ -3406,7 +3406,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
/* disable interrupt and set controller to stop state */
langwell_udc_stop(dev);
- /* diable IRQ handler */
+ /* disable IRQ handler */
if (dev->got_irq)
free_irq(pdev->irq, dev);
dev->got_irq = 0;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 0c8dd81dddca..b120dbb64d0f 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -198,10 +198,10 @@
#define PCH_UDC_BRLEN 0x0F /* Burst length */
#define PCH_UDC_THLEN 0x1F /* Threshold length */
/* Value of EP Buffer Size */
-#define UDC_EP0IN_BUFF_SIZE 64
-#define UDC_EPIN_BUFF_SIZE 512
-#define UDC_EP0OUT_BUFF_SIZE 64
-#define UDC_EPOUT_BUFF_SIZE 512
+#define UDC_EP0IN_BUFF_SIZE 16
+#define UDC_EPIN_BUFF_SIZE 256
+#define UDC_EP0OUT_BUFF_SIZE 16
+#define UDC_EPOUT_BUFF_SIZE 256
/* Value of EP maximum packet size */
#define UDC_EP0IN_MAX_PKT_SIZE 64
#define UDC_EP0OUT_MAX_PKT_SIZE 64
@@ -351,7 +351,7 @@ struct pch_udc_dev {
struct pci_pool *data_requests;
struct pci_pool *stp_requests;
dma_addr_t dma_addr;
- unsigned long ep0out_buf[64];
+ void *ep0out_buf;
struct usb_ctrlrequest setup_data;
unsigned long phys_addr;
void __iomem *base_addr;
@@ -361,6 +361,8 @@ struct pch_udc_dev {
#define PCH_UDC_PCI_BAR 1
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -1219,11 +1221,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
dev = ep->dev;
if (req->dma_mapped) {
if (ep->in)
- pci_unmap_single(dev->pdev, req->req.dma,
- req->req.length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pdev->dev, req->req.dma,
+ req->req.length, DMA_TO_DEVICE);
else
- pci_unmap_single(dev->pdev, req->req.dma,
- req->req.length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dev->pdev->dev, req->req.dma,
+ req->req.length, DMA_FROM_DEVICE);
req->dma_mapped = 0;
req->req.dma = DMA_ADDR_INVALID;
}
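Here and below the pci_map_single()/pci_unmap_single() wrappers are replaced with the generic DMA API, which takes the underlying struct device and the DMA_TO_DEVICE/DMA_FROM_DEVICE directions. A minimal sketch of that streaming-mapping pattern (names are illustrative, not the driver's own helpers):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Illustrative only: map a CPU buffer for a device-to-memory transfer,
     * and unmap it again once the hardware is done with it.
     */
    static int example_map_rx(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            return 0;
    }

    static void example_unmap_rx(struct device *dev, dma_addr_t handle,
                                 size_t len)
    {
            dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
    }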
@@ -1414,7 +1416,6 @@ static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
td_data = req->td_data;
- ep->td_data = req->td_data;
/* Set the status bits for all descriptors */
while (1) {
td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
if (usbreq->length &&
((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
if (ep->in)
- usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
- usbreq->length, PCI_DMA_TODEVICE);
+ usbreq->dma = dma_map_single(&dev->pdev->dev,
+ usbreq->buf,
+ usbreq->length,
+ DMA_TO_DEVICE);
else
- usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
- usbreq->length, PCI_DMA_FROMDEVICE);
+ usbreq->dma = dma_map_single(&dev->pdev->dev,
+ usbreq->buf,
+ usbreq->length,
+ DMA_FROM_DEVICE);
req->dma_mapped = 1;
}
if (usbreq->length > 0) {
- retval = prepare_dma(ep, req, gfp);
+ retval = prepare_dma(ep, req, GFP_ATOMIC);
if (retval)
goto probe_end;
}
@@ -1646,7 +1651,6 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
pch_udc_wait_ep_stall(ep);
pch_udc_ep_clear_nak(ep);
pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
- pch_udc_set_dma(dev, DMA_DIR_TX);
}
}
/* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
PCH_UDC_BS_DMA_DONE)
return;
pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+ pch_udc_ep_set_ddptr(ep, 0);
if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
PCH_UDC_RTS_SUCC) {
dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@ static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
u32 epsts;
struct pch_udc_ep *ep;
- ep = &dev->ep[2*ep_num];
+ ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2008,7 +2013,7 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
struct pch_udc_ep *ep;
struct pch_udc_request *req = NULL;
- ep = &dev->ep[2*ep_num + 1];
+ ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2025,10 +2030,11 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
}
if (epsts & UDC_EPSTS_HE)
return;
- if (epsts & UDC_EPSTS_RSS)
+ if (epsts & UDC_EPSTS_RSS) {
pch_udc_ep_set_stall(ep);
pch_udc_enable_ep_interrupts(ep->dev,
PCH_UDC_EPINT(ep->in, ep->num));
+ }
if (epsts & UDC_EPSTS_RCS) {
if (!dev->prot_stall) {
pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
u32 epsts;
struct pch_udc_ep *ep;
+ struct pch_udc_ep *ep_out;
ep = &dev->ep[UDC_EP0IN_IDX];
+ ep_out = &dev->ep[UDC_EP0OUT_IDX];
epsts = ep->epsts;
ep->epsts = 0;
@@ -2073,8 +2081,16 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
return;
if (epsts & UDC_EPSTS_HE)
return;
- if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
+ if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
pch_udc_complete_transfer(ep);
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ ep_out->td_data->status = (ep_out->td_data->status &
+ ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ pch_udc_ep_clear_nak(ep_out);
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_rrdy(ep_out);
+ }
/* On IN interrupt, provide data if we have any */
if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
!(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
dev->stall = 0;
dev->ep[UDC_EP0IN_IDX].halted = 0;
dev->ep[UDC_EP0OUT_IDX].halted = 0;
- /* In data not ready */
- pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
dev->setup_data = ep->td_stp->request;
pch_udc_init_setup_buff(ep->td_stp);
- pch_udc_clear_dma(dev, DMA_DIR_TX);
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
dev->ep[UDC_EP0IN_IDX].in);
if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
setup_supported = dev->driver->setup(&dev->gadget,
&dev->setup_data);
spin_lock(&dev->lock);
+
+ if (dev->setup_data.bRequestType & USB_DIR_IN) {
+ ep->td_data->status = (ep->td_data->status &
+ ~PCH_UDC_BUFF_STS) |
+ PCH_UDC_BS_HST_RDY;
+ pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+ }
/* ep0 in returns data on IN phase */
if (setup_supported >= 0 && setup_supported <
UDC_EP0IN_MAX_PKT_SIZE) {
pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
/* Gadget would have queued a request when
* we called the setup */
- pch_udc_set_dma(dev, DMA_DIR_RX);
- pch_udc_ep_clear_nak(ep);
+ if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_clear_nak(ep);
+ }
} else if (setup_supported < 0) {
/* if unsupported request, then stall */
pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
}
} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
UDC_EPSTS_OUT_DATA) && !dev->stall) {
- if (list_empty(&ep->queue)) {
- dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
- ep->td_data->status = (ep->td_data->status &
- ~PCH_UDC_BUFF_STS) |
- PCH_UDC_BS_HST_RDY;
- pch_udc_set_dma(dev, DMA_DIR_RX);
- } else {
- /* control write */
- /* next function will pickuo an clear the status */
+ pch_udc_clear_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_ddptr(ep, 0);
+ if (!list_empty(&ep->queue)) {
ep->epsts = stat;
-
- pch_udc_svc_data_out(dev, 0);
- /* re-program desc. pointer for possible ZLPs */
- pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
- pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_svc_data_out(dev, PCH_UDC_EP0);
}
+ pch_udc_set_dma(dev, DMA_DIR_RX);
}
pch_udc_ep_set_rrdy(ep);
}
@@ -2174,7 +2188,7 @@ static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
struct pch_udc_ep *ep;
struct pch_udc_request *req;
- ep = &dev->ep[2*ep_num];
+ ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
if (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct pch_udc_request, queue);
pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@ static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
/* IN */
if (ep_intr & (0x1 << i)) {
- ep = &dev->ep[2*i];
+ ep = &dev->ep[UDC_EPIN_IDX(i)];
ep->epsts = pch_udc_read_ep_status(ep);
pch_udc_clear_ep_status(ep, ep->epsts);
}
/* OUT */
if (ep_intr & (0x10000 << i)) {
- ep = &dev->ep[2*i+1];
+ ep = &dev->ep[UDC_EPOUT_IDX(i)];
ep->epsts = pch_udc_read_ep_status(ep);
pch_udc_clear_ep_status(ep, ep->epsts);
}
@@ -2563,9 +2577,6 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
- dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
- PCI_DMA_FROMDEVICE);
-
/* remove ep0 in and out from the list. They have own pointer */
list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@ static int init_dma_pools(struct pch_udc_dev *dev)
dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
dev->ep[UDC_EP0IN_IDX].td_data = NULL;
dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+ dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
+ if (!dev->ep0out_buf)
+ return -ENOMEM;
+ dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+ UDC_EP0OUT_BUFF_SIZE * 4,
+ DMA_FROM_DEVICE);
return 0;
}
@@ -2700,7 +2718,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
- /* Assues that there are no pending requets with this driver */
+ /* Assures that there are no pending requests with this driver */
+ driver->disconnect(&dev->gadget);
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -2750,6 +2769,11 @@ static void pch_udc_remove(struct pci_dev *pdev)
pci_pool_destroy(dev->stp_requests);
}
+ if (dev->dma_addr)
+ dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+ UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+ kfree(dev->ep0out_buf);
+
pch_udc_exit(dev);
if (dev->irq_registered)
@@ -2792,11 +2816,7 @@ static int pch_udc_resume(struct pci_dev *pdev)
int ret;
pci_set_power_state(pdev, PCI_D0);
- ret = pci_restore_state(pdev);
- if (ret) {
- dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
- return ret;
- }
+ pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+ },
{ 0 },
};
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2fc8636316c5..12ff6cffedc9 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
-static ushort __initdata idVendor;
+static ushort idVendor;
module_param(idVendor, ushort, S_IRUGO);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");
-static ushort __initdata idProduct;
+static ushort idProduct;
module_param(idProduct, ushort, S_IRUGO);
MODULE_PARM_DESC(idProduct, "USB Product ID");
-static ushort __initdata bcdDevice;
+static ushort bcdDevice;
module_param(bcdDevice, ushort, S_IRUGO);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
-static char *__initdata iManufacturer;
+static char *iManufacturer;
module_param(iManufacturer, charp, S_IRUGO);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
-static char *__initdata iProduct;
+static char *iProduct;
module_param(iProduct, charp, S_IRUGO);
MODULE_PARM_DESC(iProduct, "USB Product string");
-static char *__initdata iSerialNum;
+static char *iSerialNum;
module_param(iSerialNum, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "1");
-static char *__initdata iPNPstring;
+static char *iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
@@ -1596,13 +1596,12 @@ cleanup(void)
int status;
mutex_lock(&usb_printer_gadget.lock_printer_io);
- class_destroy(usb_gadget_class);
- unregister_chrdev_region(g_printer_devno, 2);
-
status = usb_gadget_unregister_driver(&printer_driver);
if (status)
ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
+ unregister_chrdev_region(g_printer_devno, 2);
+ class_destroy(usb_gadget_class);
mutex_unlock(&usb_printer_gadget.lock_printer_io);
}
module_exit(cleanup);
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da319ae..015118535f77 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
break;
case R8A66597_BULK:
/* isochronous pipes may be used as bulk pipes */
- if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
+ if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
else
bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 3b513bafaf2a..b015561fd602 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -543,7 +543,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
ro = curlun->initially_ro;
if (!ro) {
filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
- if (-EROFS == PTR_ERR(filp))
+ if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
ro = 1;
}
if (ro)
@@ -558,10 +558,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (filp->f_path.dentry)
inode = filp->f_path.dentry->d_inode;
- if (inode && S_ISBLK(inode->i_mode)) {
- if (bdev_read_only(inode->i_bdev))
- ro = 1;
- } else if (!inode || !S_ISREG(inode->i_mode)) {
+ if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
}
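filp_open() reports failure through ERR_PTR(), so the read-only fallback keys off PTR_ERR(); the change treats -EACCES like -EROFS. A hedged sketch of the open-read-write-then-fall-back idiom (the function name is illustrative):

    #include <linux/fs.h>
    #include <linux/err.h>

    /* Illustrative only: try O_RDWR first; if the file or filesystem refuses
     * write access, retry read-only.
     */
    static struct file *open_backing_file(const char *filename, int *ro)
    {
            struct file *filp = NULL;

            if (!*ro) {
                    filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
                    if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
                            *ro = 1;                /* fall through to RO open */
            }
            if (*ro)
                    filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);

            return filp;    /* may be ERR_PTR(); caller must check IS_ERR() */
    }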
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0f5878..0e6afa260ed8 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@ config USB_EHCI_MSM
Qualcomm chipsets. Root Hub has inbuilt TT.
This driver depends on OTG driver for PHY initialization,
clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which
+	  have an external PHY.
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a849086..a869e3c103d3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
* mark HW unaccessible. The PM and USB cores make sure that
* the root hub is either suspended or stopped.
*/
- spin_lock_irqsave(&ehci->lock, flags);
ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+ spin_lock_irqsave(&ehci->lock, flags);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 86e42892016d..5c761df7fa83 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -52,7 +52,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
struct resource *res;
int irq;
int retval;
- unsigned int temp;
pr_debug("initializing FSL-SOC USB Controller\n");
@@ -126,18 +125,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
goto err3;
}
- /*
- * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
- * flag for 83xx or 8536 system interface registers.
- */
- if (pdata->big_endian_mmio)
- temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
- else
- temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
-
- if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
- pdata->have_sysif_regs = 1;
-
/* Enable USB controller, 83xx or 8536 */
if (pdata->have_sysif_regs)
setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 2c8353795226..3fabed33d940 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -19,9 +19,6 @@
#define _EHCI_FSL_H
/* offsets for the non-ehci registers in the FSL SOC USB controller */
-#define FSL_SOC_USB_ID 0x0
-#define ID_MSK 0x3f
-#define NID_MSK 0x3f00
#define FSL_SOC_USB_ULPIVP 0x170
#define FSL_SOC_USB_PORTSC1 0x184
#define PORT_PTS_MSK (3<<30)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6fee3cd58efe..74dcf49bd015 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -572,6 +572,8 @@ static int ehci_init(struct usb_hcd *hcd)
ehci->iaa_watchdog.function = ehci_iaa_watchdog;
ehci->iaa_watchdog.data = (unsigned long) ehci;
+ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@ static int ehci_init(struct usb_hcd *hcd)
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (EHCI_TUNE_FLS) {
+ case 0: ehci->periodic_size = 1024; break;
+ case 1: ehci->periodic_size = 512; break;
+ case 2: ehci->periodic_size = 256; break;
+ default: BUG();
+ }
+ }
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 2 + 8;
else // N microframes cached
@@ -637,12 +648,6 @@ static int ehci_init(struct usb_hcd *hcd)
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
- switch (EHCI_TUNE_FLS) {
- case 0: ehci->periodic_size = 1024; break;
- case 1: ehci->periodic_size = 512; break;
- case 2: ehci->periodic_size = 256; break;
- default: BUG();
- }
}
if (HCC_LPM(hcc_params)) {
/* support link power management EHCI 1.1 addendum */
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900f..8a515f0d5988 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
{
int port;
u32 temp;
+ unsigned long flags;
/* If remote wakeup is enabled for the root hub but disabled
* for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
+ spin_lock_irqsave(&ehci->lock, flags);
+
/* clear phy low-power mode before changing wakeup flags */
if (ehci->has_hostpc) {
port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
}
+ spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5);
+ spin_lock_irqsave(&ehci->lock, flags);
}
port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
/* Does the root hub have a port wakeup pending? */
if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
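ehci_adjust_port_wakeup_flags() now takes ehci->lock itself and, as shown above, drops it around the msleep(), since sleeping while holding a spinlock with interrupts disabled is not allowed. The bare pattern, sketched with an illustrative helper:

    #include <linux/spinlock.h>
    #include <linux/delay.h>

    /* Illustrative only: release the lock across a sleeping call, reacquire
     * it afterwards, and redo any checks the unlocked window may invalidate.
     */
    static void touch_regs_then_settle(spinlock_t *lock)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* ... register writes that need the lock ... */

            spin_unlock_irqrestore(lock, flags);
            msleep(5);                      /* sleeping under the lock is a bug */
            spin_lock_irqsave(lock, flags);

            /* ... remaining work that needs the lock ... */
            spin_unlock_irqrestore(lock, flags);
    }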
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index fa59b26fc5bc..c8e360d7d975 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -21,10 +21,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <linux/slab.h>
#include <mach/mxc_ehci.h>
+#include <asm/mach-types.h>
+
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
@@ -114,6 +117,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
+ unsigned int flags;
struct ehci_mxc_priv *priv;
struct device *dev = &pdev->dev;
struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
clk_enable(priv->ahbclk);
}
- /* "dr" device has its own clock */
- if (pdev->id == 0) {
+ /* "dr" device has its own clock on i.MX51 */
+ if (cpu_is_mx51() && (pdev->id == 0)) {
priv->phy1clk = clk_get(dev, "usb_phy1");
if (IS_ERR(priv->phy1clk)) {
ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (ret)
goto err_add;
+ if (pdata->otg) {
+ /*
+	 * efikamx and efikasb have a hardware bug that prevents
+	 * USB from working unless CHRGVBUS is set.
+	 * This is in violation of the USB specs.
+ */
+ if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
+ flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
+ flags |= ULPI_OTG_CTRL_CHRGVBUS;
+ ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
+ if (ret) {
+				dev_err(dev, "unable to set CHRGVBUS\n");
+ goto err_add;
+ }
+ }
+ }
+
return 0;
err_add:
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef4e59f..f784ceb862a3 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd) {
- dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+ dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
ret = -ENOMEM;
goto err_create_hcd;
}
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
ret = omap_start_ehc(omap, hcd);
if (ret) {
- dev_dbg(&pdev->dev, "failed to start ehci\n");
+ dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
goto err_start;
}
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
- dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 76179c39c0e3..07bb982e59f6 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -44,28 +44,35 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
return 0;
}
-static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
{
struct pci_dev *amd_smbus_dev;
u8 rev = 0;
amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
- if (!amd_smbus_dev)
- return 0;
-
- pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
- if (rev < 0x40) {
- pci_dev_put(amd_smbus_dev);
- amd_smbus_dev = NULL;
- return 0;
+ if (amd_smbus_dev) {
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if (rev < 0x40) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+ } else {
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
+ if (!amd_smbus_dev)
+ return 0;
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if (rev < 0x11 || rev > 0x18) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
}
if (!amd_nb_dev)
amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
- if (!amd_nb_dev)
- ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
- ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+ ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
pci_dev_put(amd_smbus_dev);
amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
- if (ehci_quirk_amd_SB800(ehci))
+ if (ehci_quirk_amd_hudson(ehci))
ehci->amd_l1_fix = 1;
retval = ehci_halt(ehci);
@@ -360,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* mark HW unaccessible. The PM and USB cores make sure that
* the root hub is either suspended or stopped.
*/
- spin_lock_irqsave (&ehci->lock, flags);
ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+ spin_lock_irqsave (&ehci->lock, flags);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36fdf0b..a6f21b891f68 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
/**
* ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 20092a27a1e8..12fd184226f2 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -98,13 +98,13 @@ void fhci_usb_enable_interrupt(struct fhci_usb *usb)
usb->intr_nesting_cnt--;
}
-/* diable the usb interrupt */
+/* disable the usb interrupt */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
struct fhci_hcd *fhci = usb->fhci;
if (usb->intr_nesting_cnt == 0) {
- /* diable the timer interrupt */
+ /* disable the timer interrupt */
disable_irq_nosync(fhci->timer->irq);
/* disable the usb interrupt */
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 7be548ca2183..38fe058fbe61 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -271,8 +271,8 @@ void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
/*
* Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
* confirmation about the submitted frames. Otherwise, the routine is
* called from the interrupt service routine during the Tx interrupt.
* In that case the application is informed by calling the application
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 574b99ea0700..79a66d622f9c 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -262,19 +262,24 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
}
}
-struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
.big_endian_desc = 1,
.big_endian_mmio = 1,
.es = 1,
+ .have_sysif_regs = 0,
.le_setup_buf = 1,
.init = fsl_usb2_mpc5121_init,
.exit = fsl_usb2_mpc5121_exit,
};
#endif /* CONFIG_PPC_MPC512x */
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+ .have_sysif_regs = 1,
+};
+
static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
- { .compatible = "fsl-usb2-mph", },
- { .compatible = "fsl-usb2-dr", },
+ { .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+ { .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
#ifdef CONFIG_PPC_MPC512x
{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
#endif
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index e49b75a78000..f90d003f2302 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1658,7 +1658,7 @@ static int imx21_hc_reset(struct usb_hcd *hcd)
spin_lock_irqsave(&imx21->lock, flags);
- /* Reset the Host controler modules */
+ /* Reset the Host controller modules */
writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
imx21->regs + USBOTG_RST_CTRL);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 32149be4ad8e..e0cb12b573f9 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3094,7 +3094,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
- * may be relevant that VIA VT8235 controlers, where PORT_POWER is
+ * may be relevant that VIA VT8235 controllers, where PORT_POWER is
* always set, seem to clear PORT_OCC and PORT_CSC when writing to
* PORT_POWER; that's surprising, but maybe within-spec.
*/
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b89eaa..2e9602a10e9b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue(
DBG("dev %d ep%d maxpacket %d\n",
udev->devnum, epnum, ep->maxpacket);
retval = -EINVAL;
+ kfree(ep);
goto fail;
}
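The added kfree(ep) closes a leak: the per-endpoint state allocated earlier in sl811h_urb_enqueue() was abandoned when the maxpacket validation failed and jumped to the fail label. The general shape of that cleanup, with illustrative names:

    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Illustrative only: anything allocated before a failing check must be
     * freed on the error path, not just torn down on the success path.
     */
    static int enqueue_example(size_t priv_size, unsigned int maxpacket)
    {
            void *ep = kzalloc(priv_size, GFP_ATOMIC);
            int retval = 0;

            if (!ep)
                    return -ENOMEM;

            if (maxpacket > 64) {           /* late validation failure */
                    retval = -EINVAL;
                    kfree(ep);              /* would otherwise be leaked */
                    goto fail;
            }
            /* ... on success, ep is linked into longer-lived state ... */
            return 0;
    fail:
            return retval;
    }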
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6f84e3..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
- u32 temp;
-
xhci_dbg(xhci, "// Ding dong!\n");
- temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
- xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+ xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int ep_index,
unsigned int stream_id)
{
- struct xhci_virt_ep *ep;
- unsigned int ep_state;
- u32 field;
__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ unsigned int ep_state = ep->ep_state;
- ep = &xhci->devs[slot_id]->eps[ep_index];
- ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
- * cancellations because the we don't want to interrupt processing.
+ * cancellations because we don't want to interrupt processing.
* We don't want to restart any stream rings if there's a set dequeue
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
* FIXME - check all the stream rings for pending cancellations.
*/
- if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
- && !(ep_state & EP_HALTED)) {
- field = xhci_readl(xhci, db_addr) & DB_MASK;
- field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
- xhci_writel(xhci, field, db_addr);
- }
+ if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+ (ep_state & EP_HALTED))
+ return;
+ xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+ /* The CPU has better things to do at this point than wait for a
+ * write-posting flush. It'll get there soon enough.
+ */
}
/* Ring the doorbell for any rings with pending URBs */
@@ -479,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -491,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -1188,7 +1188,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
temp = xhci_readl(xhci, addr);
- if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+ if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
}
@@ -1710,8 +1710,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- dev_dbg(&td->urb->dev->dev,
- "ep %#x - asked for %d bytes, "
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
"%d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
td->urb->transfer_buffer_length,
@@ -2369,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -2389,7 +2389,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
}
xhci_dbg(xhci, "\n");
if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+ xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+ "num_trbs = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
num_trbs);
@@ -2399,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2414,14 +2415,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index, unsigned int stream_id, int start_cycle,
- struct xhci_generic_trb *start_trb, struct xhci_td *td)
+ struct xhci_generic_trb *start_trb)
{
/*
* Pass all the TRBs to the hardware at once and make sure this write
* isn't reordered.
*/
wmb();
- start_trb->field[3] |= start_cycle;
+ if (start_cycle)
+ start_trb->field[3] |= start_cycle;
+ else
+ start_trb->field[3] &= ~0x1;
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
@@ -2449,7 +2453,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (!printk_ratelimit())
+ if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
@@ -2535,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2551,9 +2554,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
- if (first_trb)
+ if (first_trb) {
first_trb = false;
- else
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2625,7 +2630,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2671,7 +2677,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+ xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+ "addr = %#llx, num_trbs = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
urb->transfer_buffer_length,
@@ -2699,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2711,9 +2718,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
/* Don't change the cycle bit of the first TRB until later */
- if (first_trb)
+ if (first_trb) {
first_trb = false;
- else
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2766,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
check_trb_math(urb, num_trbs, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2818,13 +2827,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue setup TRB - see section 6.4.1.2.1 */
/* FIXME better way to translate setup_packet into two u32 fields? */
setup = (struct usb_ctrlrequest *) urb->setup_packet;
+ field = 0;
+ field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+ if (start_cycle == 0)
+ field |= 0x1;
queue_trb(xhci, ep_ring, false, true,
/* FIXME endianness is probably going to bite my ass here. */
setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
setup->wIndex | setup->wLength << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
/* Immediate data in pointer */
- TRB_IDT | TRB_TYPE(TRB_SETUP));
+ field);
/* If there's data, queue data TRBs */
field = 0;
@@ -2859,7 +2872,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
giveback_first_trb(xhci, slot_id, ep_index, 0,
- start_cycle, start_trb, td);
+ start_cycle, start_trb);
return 0;
}
@@ -2872,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
@@ -2900,6 +2913,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, td_len, td_remain_len, ret;
u64 start_addr, addr;
int i, j;
+ bool more_trbs_coming;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -2910,7 +2924,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
if (!in_interrupt())
- dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+ xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
" addr = %#llx, num_tds = %d\n",
urb->ep->desc.bEndpointAddress,
urb->transfer_buffer_length,
@@ -2950,7 +2964,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
field |= TRB_SIA;
- if (i > 0)
+ if (i == 0) {
+ if (start_cycle == 0)
+ field |= 0x1;
+ } else
field |= ep_ring->cycle_state;
first_trb = false;
} else {
@@ -2965,9 +2982,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
if (j < trbs_per_td - 1) {
field |= TRB_CHAIN;
+ more_trbs_coming = true;
} else {
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
+ more_trbs_coming = false;
}
/* Calculate TRB length */
@@ -2980,7 +2999,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
length_field = TRB_LEN(trb_buff_len) |
remainder |
TRB_INTR_TARGET(0);
- queue_trb(xhci, ep_ring, false, false,
+ queue_trb(xhci, ep_ring, false, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3003,10 +3022,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
- wmb();
- start_trb->field[3] |= start_cycle;
-
- xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb);
return 0;
}
@@ -3064,7 +3081,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (!printk_ratelimit())
+ if (printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a3108cc3..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
int i, ret = 0;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
/*
* calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
goto disable_msix;
}
+ hcd->msix_enabled = 1;
return ret;
disable_msix:
@@ -280,7 +282,8 @@ free_entries:
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
xhci_free_irq(xhci);
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
pci_disable_msi(pdev);
}
+ hcd->msix_enabled = 0;
return;
}
@@ -325,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@@ -469,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
xhci_reset(xhci);
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
+ xhci_cleanup_msix(xhci);
+
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
xhci->zombie = 1;
@@ -523,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
+ xhci_cleanup_msix(xhci);
+
xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
xhci_readl(xhci, &xhci->op_regs->status));
}
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
int rc = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
+ int i;
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- /* step 5: remove core well power */
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
+ /* step 5: remove core well power */
+ /* synchronize irq when using MSI-X */
+ if (xhci->msix_entries) {
+ for (i = 0; i < xhci->msix_count; i++)
+ synchronize_irq(xhci->msix_entries[i].vector);
+ }
+
return rc;
}
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int old_state, retval;
old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_reset(xhci);
- if (hibernated)
- xhci_cleanup_msix(xhci);
spin_unlock_irq(&xhci->lock);
+ xhci_cleanup_msix(xhci);
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Tell the event ring poll function not to reschedule */
@@ -745,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
return retval;
}
- spin_unlock_irq(&xhci->lock);
- /* Re-setup MSI-X */
- if (hcd->irq)
- free_irq(hcd->irq, hcd);
- hcd->irq = -1;
-
- retval = xhci_setup_msix(xhci);
- if (retval)
- /* fall back to msi*/
- retval = xhci_setup_msi(xhci);
-
- if (retval) {
- /* fall back to legacy interrupt*/
- retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
- hcd->irq_descr, hcd);
- if (retval) {
- xhci_err(xhci, "request interrupt %d failed\n",
- pdev->irq);
- return retval;
- }
- hcd->irq = pdev->irq;
- }
-
- spin_lock_irq(&xhci->lock);
/* step 4: set Run/Stop bit */
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_RUN;
@@ -871,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@@ -1707,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock */
- if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+ /* xhci_alloc_virt_device() does not touch rings; no need to lock.
+ * Use GFP_NOIO, since this function can be called from
+ * xhci_discover_or_reset_device(), which may be called as part of
+ * mass storage driver error handling.
+ */
+ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
/* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367112d2..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@ struct xhci_run_regs {
/**
* struct doorbell_array
*
+ * Bits 0 - 7: Endpoint target
+ * Bits 8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
* Section 5.6
*/
struct xhci_doorbell_array {
u32 doorbell[256];
};
-#define DB_TARGET_MASK 0xFFFFFF00
-#define DB_STREAM_ID_MASK 0x0000FFFF
-#define DB_TARGET_HOST 0x0
-#define DB_STREAM_ID_HOST 0x0
-#define DB_MASK (0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p) (((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST 0x00000000
/**
* struct xhci_protocol_caps
@@ -1352,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
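A short, self-contained sketch of the new doorbell encoding, with the macro values copied from the hunk above (the worked numbers are only examples): DB_VALUE() packs the endpoint target, which is ep_index + 1, into bits 0-7 and the stream ID into bits 16-31, while DB_VALUE_HOST (0) targets the command ring doorbell.

#include <stdio.h>

#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST		0x00000000

int main(void)
{
	/* EP index 2 (doorbell target 3), stream 5 -> 0x00050003 */
	printf("0x%08x\n", (unsigned int)DB_VALUE(2, 5));
	/* Host controller (command ring) doorbell value */
	printf("0x%08x\n", (unsigned int)DB_VALUE_HOST);
	return 0;
}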
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 44f8b9225054..a6afd15f6a46 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -717,7 +717,7 @@ static int adu_probe(struct usb_interface *interface,
goto exit;
}
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c9078e4e1f4d..e573e4704015 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -769,7 +769,7 @@ static int iowarrior_probe(struct usb_interface *interface,
int i;
int retval = -ENOMEM;
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index edffef642337..eefb8275bb7e 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -642,7 +642,7 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
int i;
int retval = -ENOMEM;
- /* allocate memory for our device state and intialize it */
+ /* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 1732d9bc097e..1616ad1793a4 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -45,7 +45,7 @@ struct usb_led {
static void change_color(struct usb_led *led)
{
- int retval;
+ int retval = 0;
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 4ff21587ab03..f7a205738032 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -776,7 +776,6 @@ static const struct usb_device_id uss720_table[] = {
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
{ USB_DEVICE(0x1293, 0x0002) },
- { USB_DEVICE(0x1293, 0x0002) },
{ USB_DEVICE(0x050d, 0x0002) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228eb2af..9d49d1cd7ce2 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
musb->xceiv->set_power = bfin_musb_set_power;
musb->isr = blackfin_interrupt;
+ musb->double_buffer_not_ok = true;
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394e491b..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
static inline struct musb *dev_to_musb(struct device *dev)
{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- /* usbcore insists dev->driver_data is a "struct hcd *" */
- return hcd_to_musb(dev_get_drvdata(dev));
-#else
return dev_get_drvdata(dev);
-#endif
}
/*-------------------------------------------------------------------------*/
@@ -1869,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
+ hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -1876,10 +1872,9 @@ allocate_instance(struct device *dev,
musb = kzalloc(sizeof *musb, GFP_KERNEL);
if (!musb)
return NULL;
- dev_set_drvdata(dev, musb);
#endif
-
+ dev_set_drvdata(dev, musb);
musb->mregs = mbase;
musb->ctrl_base = mbase;
musb->nIrq = -ENODEV;
@@ -2191,7 +2186,7 @@ static int __init musb_probe(struct platform_device *pdev)
void __iomem *base;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem || irq == 0)
+ if (!iomem || irq <= 0)
return -ENODEV;
base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f8e191..e6400be8a0f8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -497,6 +497,19 @@ struct musb {
struct usb_gadget_driver *gadget_driver; /* its driver */
#endif
+ /*
+ * FIXME: Remove this flag.
+ *
+ * This is only added to allow Blackfin to work
+ * with current driver. For some unknown reason
+ * Blackfin doesn't work with double buffering
+ * and that's enabled by default.
+ *
+ * We added this flag to forcefully disable double
+ * buffering until we get it working.
+ */
+ unsigned double_buffer_not_ok:1 __deprecated;
+
struct musb_hdrc_config *config;
#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 9e8639d4e862..b0176e4569e0 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -36,7 +36,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/kobject.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/debugfs.h>
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065ba9e70..3a97c4e2d4f5 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@ struct dma_controller {
dma_addr_t dma_addr,
u32 length);
int (*channel_abort)(struct dma_channel *);
+ int (*is_compatible)(struct dma_channel *channel,
+ u16 maxpacket,
+ void *buf, u32 length);
};
/* called after channel_program(), may indicate a fault */
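A userspace model of how the new optional is_compatible() hook is meant to be consulted, mirroring the decision logic added to map_dma_buffer() in the musb_gadget.c hunk below. The 4-byte alignment rule and all names here are hypothetical illustrations, not taken from any in-tree DMA controller:

#include <stdio.h>
#include <stdint.h>

typedef int (*is_compatible_fn)(uint16_t maxpacket, void *buf, uint32_t length);

/* Hypothetical engine that can only DMA word-aligned buffers. */
static int word_aligned_only(uint16_t maxpacket, void *buf, uint32_t length)
{
	(void)maxpacket;
	(void)length;
	return ((uintptr_t)buf & 3) == 0;	/* nonzero == DMA-able */
}

static const char *decide(is_compatible_fn hook, void *buf)
{
	/* Default behaviour (no hook, or hook accepts) is to map for DMA. */
	if (!hook || hook(512, buf, 64))
		return "map for DMA";
	return "leave unmapped, fall back to PIO";
}

int main(void)
{
	static char buffer[8];

	printf("%s\n", decide(word_aligned_only, buffer));
	printf("%s\n", decide(word_aligned_only, buffer + 1));
	printf("%s\n", decide(NULL, buffer + 1));
	return 0;
}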
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9b162dfaa4fb..2fe304611dcf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
/* ----------------------------------------------------------------------- */
+#define is_buffer_mapped(req) (is_dma_capable() && \
+ (req->map_state != UN_MAPPED))
+
/* Maps the buffer to dma */
static inline void map_dma_buffer(struct musb_request *request,
- struct musb *musb)
+ struct musb *musb, struct musb_ep *musb_ep)
{
+ int compatible = true;
+ struct dma_controller *dma = musb->dma_controller;
+
+ request->map_state = UN_MAPPED;
+
+ if (!is_dma_capable() || !musb_ep->dma)
+ return;
+
+ /* Check if DMA engine can handle this request.
+ * DMA code must reject the USB request explicitly.
+ * Default behaviour is to map the request.
+ */
+ if (dma->is_compatible)
+ compatible = dma->is_compatible(musb_ep->dma,
+ musb_ep->packet_sz, request->request.buf,
+ request->request.length);
+ if (!compatible)
+ return;
+
if (request->request.dma == DMA_ADDR_INVALID) {
request->request.dma = dma_map_single(
musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
- request->mapped = 1;
+ request->map_state = MUSB_MAPPED;
} else {
dma_sync_single_for_device(musb->controller,
request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
- request->mapped = 0;
+ request->map_state = PRE_MAPPED;
}
}
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
static inline void unmap_dma_buffer(struct musb_request *request,
struct musb *musb)
{
+ if (!is_buffer_mapped(request))
+ return;
+
if (request->request.dma == DMA_ADDR_INVALID) {
DBG(20, "not unmapping a never mapped buffer\n");
return;
}
- if (request->mapped) {
+ if (request->map_state == MUSB_MAPPED) {
dma_unmap_single(musb->controller,
request->request.dma,
request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->request.dma = DMA_ADDR_INVALID;
- request->mapped = 0;
- } else {
+ } else { /* PRE_MAPPED */
dma_sync_single_for_cpu(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
-
}
+ request->map_state = UN_MAPPED;
}
/*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
ep->busy = 1;
spin_unlock(&musb->lock);
- if (is_dma_capable() && ep->dma)
- unmap_dma_buffer(req, musb);
+ unmap_dma_buffer(req, musb);
if (request->status == 0)
DBG(5, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
csr);
#ifndef CONFIG_MUSB_PIO_ONLY
- if (is_dma_capable() && musb_ep->dma) {
+ if (is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
size_t request_size;
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
* Unmap the dma buffer back to cpu if dma channel
* programming fails
*/
- if (is_dma_capable() && musb_ep->dma)
- unmap_dma_buffer(req, musb);
+ unmap_dma_buffer(req, musb);
musb_write_fifo(musb_ep->hw_ep, fifo_count,
(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
return;
}
- if (is_cppi_enabled() && musb_ep->dma) {
+ if (is_cppi_enabled() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
len = musb_readw(epio, MUSB_RXCOUNT);
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
- if (is_dma_capable() && musb_ep->dma) {
+ if (is_buffer_mapped(req)) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
fifo_count = min_t(unsigned, len, fifo_count);
#ifdef CONFIG_USB_TUSB_OMAP_DMA
- if (tusb_dma_omap() && musb_ep->dma) {
+ if (tusb_dma_omap() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* programming fails. This buffer is mapped if the
* channel allocation is successful
*/
- if (is_dma_capable() && musb_ep->dma) {
+ if (is_buffer_mapped(req)) {
unmap_dma_buffer(req, musb);
/*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* Set TXMAXP with the FIFO size of the endpoint
* to disable double buffering mode.
*/
- musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+ if (musb->double_buffer_not_ok)
+ musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* Set RXMAXP with the FIFO size of the endpoint
* to disable double buffering mode.
*/
- musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+ if (musb->double_buffer_not_ok)
+ musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+ | (musb_ep->hb_mult << 11));
/* force shared fifo to OUT-only mode */
if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
request->epnum = musb_ep->current_epnum;
request->tx = musb_ep->is_in;
- if (is_dma_capable() && musb_ep->dma)
- map_dma_buffer(request, musb);
- else
- request->mapped = 0;
+ map_dma_buffer(request, musb, musb_ep);
spin_lock_irqsave(&musb->lock, lockflags);
@@ -1684,7 +1711,7 @@ static inline void __init musb_g_init_endpoints(struct musb *musb)
struct musb_hw_ep *hw_ep;
unsigned count = 0;
- /* intialize endpoint list just once */
+ /* initialize endpoint list just once */
INIT_LIST_HEAD(&(musb->g.ep_list));
for (epnum = 0, hw_ep = musb->endpoints;
@@ -1765,7 +1792,7 @@ void musb_gadget_cleanup(struct musb *musb)
*
* -EINVAL something went wrong (not driver)
* -EBUSY another gadget is already using the controller
- * -ENOMEM no memeory to perform the operation
+ * -ENOMEM no memory to perform the operation
*
* @param driver the gadget driver
* @param bind the driver's bind function
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc008191..a55354fbccf5 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
#ifndef __MUSB_GADGET_H
#define __MUSB_GADGET_H
+enum buffer_map_state {
+ UN_MAPPED = 0,
+ PRE_MAPPED,
+ MUSB_MAPPED
+};
+
struct musb_request {
struct usb_request request;
struct musb_ep *ep;
struct musb *musb;
u8 tx; /* endpoint direction */
u8 epnum;
- u8 mapped;
+ enum buffer_map_state map_state;
};
static inline struct musb_request *to_musb_request(struct usb_request *req)
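A minimal userspace sketch of what the three map states mean on the teardown path, matching the unmap_dma_buffer() changes in the musb_gadget.c hunk above: UN_MAPPED means the core never touched the buffer, PRE_MAPPED means the gadget supplied its own DMA address and only a CPU sync is needed, MUSB_MAPPED means the core mapped the buffer and must also unmap it.

#include <stdio.h>

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

static const char *unmap_action(enum buffer_map_state s)
{
	switch (s) {
	case MUSB_MAPPED:
		return "dma_unmap_single";		/* core owns the mapping */
	case PRE_MAPPED:
		return "dma_sync_single_for_cpu";	/* gadget pre-mapped it */
	default:
		return "nothing (never mapped)";
	}
}

int main(void)
{
	printf("%s\n", unmap_action(MUSB_MAPPED));
	printf("%s\n", unmap_action(PRE_MAPPED));
	printf("%s\n", unmap_action(UN_MAPPED));
	return 0;
}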
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4e14d2..0f523d7db57b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
/* Set RXMAXP with the FIFO size of the endpoint
* to disable double buffer mode.
*/
- if (musb->hwvers < MUSB_HWVERS_2000)
+ if (musb->double_buffer_not_ok)
musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
else
musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
- if (can_bulk_split(musb, qh->type))
+ if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
- packet_sz
- | ((hw_ep->max_packet_sz_tx /
- packet_sz) - 1) << 11);
+ hw_ep->max_packet_sz_tx);
else
musb_writew(epio, MUSB_TXMAXP,
- packet_sz);
+ qh->maxpacket |
+ ((qh->hb_mult - 1) << 11));
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62f151c..21056c924c74 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
{
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
- ((u16)((u32) dma_addr & 0xFFFF)));
+ dma_addr);
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
- ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+ (dma_addr >> 16));
}
static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
{
- return musb_readl(mbase,
+ u32 count = musb_readw(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+ count = count << 16;
+
+ count |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+ return count;
}
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
- musb_writel(mbase,
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW), len);
+ musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
- len);
+ (len >> 16));
}
#endif /* CONFIG_BLACKFIN */
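The hunk above switches the DMA count accessors from a single 32-bit access to a pair of 16-bit ones. A plain-C model of that split (register I/O replaced by an array, names hypothetical) showing how the 32-bit count is written as low half then high half and reassembled on read:

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[2];		/* [0] = COUNT_LOW, [1] = COUNT_HIGH */

static void write_count(uint32_t len)
{
	regs[0] = (uint16_t)len;		/* low 16 bits  */
	regs[1] = (uint16_t)(len >> 16);	/* high 16 bits */
}

static uint32_t read_count(void)
{
	return ((uint32_t)regs[1] << 16) | regs[0];
}

int main(void)
{
	write_count(0x0001fffe);
	printf("0x%08x\n", read_count());	/* prints 0x0001fffe */
	return 0;
}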
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f12333fc41..bc8badd16897 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
static int omap2430_musb_exit(struct musb *musb)
{
+ del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d5f09c..9ffc8237fb4b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K
required after resetting the hardware and power management.
This driver is required even for peripheral only or host only
mode configurations.
+ This driver is not supported on boards like trout, which
+ have an external PHY.
config AB8500_USB
tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index e70014ab0976..8acf165fe13b 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -132,6 +132,8 @@ static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nop);
+ BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
+
return 0;
exit:
kfree(nop);
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 059d9ac0ab5b..770d799d5afb 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -45,7 +45,7 @@ struct ulpi_info {
/* ULPI hardcoded IDs, used for probing */
static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
- ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"),
+ ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
};
static int ulpi_set_otg_flags(struct otg_transceiver *otg)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 63f7cc45bcac..7b8815ddf368 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -486,12 +486,22 @@ static void ch341_read_int_callback(struct urb *urb)
if (actual_length >= 4) {
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
+ u8 prev_line_status = priv->line_status;
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
if ((data[1] & CH341_MULT_STAT))
priv->multi_status_change = 1;
spin_unlock_irqrestore(&priv->lock, flags);
+
+ if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ if (tty)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & CH341_BIT_DCD);
+ tty_kref_put(tty);
+ }
+
wake_up_interruptible(&priv->delta_msr_wait);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8d7731dbf478..735ea03157ab 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -49,7 +49,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
static void cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_startup(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int cp210x_carrier_raised(struct usb_serial_port *p);
static int debug;
@@ -87,7 +86,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
- { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -110,7 +108,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
.attach = cp210x_startup,
- .dtr_rts = cp210x_dtr_rts,
- .carrier_raised = cp210x_carrier_raised
+ .dtr_rts = cp210x_dtr_rts
};
/* Config request types */
@@ -765,15 +764,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
return result;
}
-static int cp210x_carrier_raised(struct usb_serial_port *p)
-{
- unsigned int control;
- cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
- if (control & CONTROL_DCD)
- return 1;
- return 0;
-}
-
static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index b92070c103cd..666e5a6edd82 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
-static int digi_carrier_raised(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
- .carrier_raised = digi_carrier_raised,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
@@ -1339,14 +1337,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
}
-static int digi_carrier_raised(struct usb_serial_port *port)
-{
- struct digi_port *priv = usb_get_serial_port_data(port);
- if (priv->dp_modem_signals & TIOCM_CD)
- return 1;
- return 0;
-}
-
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a2668d089260..f349a3629d00 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk {
static int ftdi_jtag_probe(struct usb_serial *serial);
static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
static int ftdi_NDI_device_setup(struct usb_serial *serial);
+static int ftdi_stmclite_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
.port_probe = ftdi_HE_TIRA1_setup,
};
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+ .probe = ftdi_stmclite_probe,
+};
+
/*
* The 8U232AM has the same API as the sio except for:
* - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -676,7 +682,17 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
- { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -800,6 +816,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1699,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
}
/*
+ * The first and second ports on STMCLite adapters are reserved for the JTAG
+ * interface and the fourth port for PIO
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+ struct usb_device *udev = serial->dev;
+ struct usb_interface *interface = serial->interface;
+
+ dbg("%s", __func__);
+
+ if (interface == udev->actconfig->interface[2])
+ return 0;
+
+ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+ return -ENODEV;
+}
+
+/*
* The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
* We have to correct it if we want to read from it.
*/
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bf0867285481..117e8e6f93c6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Acton Research Corp.
+ */
+#define ACTON_VID 0x0647 /* Vendor ID */
+#define ACTON_SPECTRAPRO_PID 0x0100
+
+/*
* Contec products (http://www.contec.com)
* Submitted by Daniel Sangorrin
*/
@@ -569,11 +575,23 @@
#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
/*
- * Icom ID-1 digital transceiver
+ * Definitions for Icom Inc. devices
*/
-
-#define ICOM_ID1_VID 0x0C26
-#define ICOM_ID1_PID 0x0004
+#define ICOM_VID 0x0C26 /* Icom vendor ID */
+/* Note: ID-1 is a communications transceiver for HAM-radio operators */
+#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
+/* Note: OPC is an optional cable to connect an Icom transceiver */
+#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
+/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
+#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
+#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
+#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port */
+#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
+#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
+#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
+#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
+#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
+#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
/*
* GN Otometrics (http://www.otometrics.com)
@@ -1022,6 +1040,12 @@
#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
/*
+ * STMicroelectronics
+ */
+#define ST_VID 0x0483
+#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
+/*
* Papouch products (http://www.papouch.com/)
* Submitted by Folkert van Heusden
*/
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e6833e216fc9..e4db5ad2bc55 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -479,6 +479,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
+/**
+ * usb_serial_handle_dcd_change - handle a change of carrier detect state
+ * @usb_port: usb_serial_port structure for the open port
+ * @tty: tty_struct structure for the port
+ * @status: new carrier detect status, nonzero if active
+ */
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty, unsigned int status)
+{
+ struct tty_port *port = &usb_port->port;
+
+ dbg("%s - port %d, status %d", __func__, usb_port->number, status);
+
+ if (status)
+ wake_up_interruptible(&port->open_wait);
+ else if (tty && !C_CLOCAL(tty))
+ tty_hangup(tty);
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
+
int usb_serial_generic_resume(struct usb_serial *serial)
{
struct usb_serial_port *port;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef24f8a..3b246d93cf22 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
- edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
- edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+ edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+ edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 6ab2a3f97fe8..178b22eb32b1 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -199,6 +199,7 @@ static struct usb_serial_driver epic_device = {
.name = "epic",
},
.description = "EPiC device",
+ .usb_driver = &io_driver,
.id_table = Epic_port_id_table,
.num_ports = 1,
.open = edge_open,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 12ed594f5f80..99b97c04896f 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1275,6 +1275,7 @@ static struct usb_serial_driver iuu_device = {
.name = "iuu_phoenix",
},
.id_table = id_table,
+ .usb_driver = &iuu_driver,
.num_ports = 1,
.bulk_in_size = 512,
.bulk_out_size = 512,
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 2d8baf6ac472..ce134dc28ddf 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -546,6 +546,7 @@ static struct usb_serial_driver keyspan_pre_device = {
.name = "keyspan_no_firm",
},
.description = "Keyspan - (without firmware)",
+ .usb_driver = &keyspan_driver,
.id_table = keyspan_pre_ids,
.num_ports = 1,
.attach = keyspan_fake_startup,
@@ -557,6 +558,7 @@ static struct usb_serial_driver keyspan_1port_device = {
.name = "keyspan_1",
},
.description = "Keyspan 1 port adapter",
+ .usb_driver = &keyspan_driver,
.id_table = keyspan_1port_ids,
.num_ports = 1,
.open = keyspan_open,
@@ -579,6 +581,7 @@ static struct usb_serial_driver keyspan_2port_device = {
.name = "keyspan_2",
},
.description = "Keyspan 2 port adapter",
+ .usb_driver = &keyspan_driver,
.id_table = keyspan_2port_ids,
.num_ports = 2,
.open = keyspan_open,
@@ -601,6 +604,7 @@ static struct usb_serial_driver keyspan_4port_device = {
.name = "keyspan_4",
},
.description = "Keyspan 4 port adapter",
+ .usb_driver = &keyspan_driver,
.id_table = keyspan_4port_ids,
.num_ports = 4,
.open = keyspan_open,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index a10dd5676ccc..554a8693a463 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -679,22 +679,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
}
}
-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- unsigned char modembits;
-
- /* If we can read the modem status and the DCD is low then
- carrier is not raised yet */
- if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
- if (!(modembits & (1>>6)))
- return 0;
- }
- /* Carrier raised, or we failed (eg disconnected) so
- progress accordingly */
- return 1;
-}
-
static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
@@ -881,7 +865,6 @@ static struct usb_serial_driver keyspan_pda_device = {
.id_table = id_table_std,
.num_ports = 1,
.dtr_rts = keyspan_pda_dtr_rts,
- .carrier_raised = keyspan_pda_carrier_raised,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
.write = keyspan_pda_write,
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index cf1718394e18..653465f61d4a 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -44,6 +44,7 @@ static struct usb_serial_driver moto_device = {
.name = "moto-modem",
},
.id_table = id_table,
+ .usb_driver = &moto_driver,
.num_ports = 1,
};
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index cdfb1868caef..5f46838dfee5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -382,7 +382,16 @@ static void option_instat_callback(struct urb *urb);
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-#define CINTERION_VENDOR_ID 0x0681
+/* Cinterion (formerly Siemens) products */
+#define SIEMENS_VENDOR_ID 0x0681
+#define CINTERION_VENDOR_ID 0x1e2d
+#define CINTERION_PRODUCT_HC25_MDM 0x0047
+#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
+#define CINTERION_PRODUCT_HC28_MDM 0x004C
+#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
+#define CINTERION_PRODUCT_EU3_E 0x0051
+#define CINTERION_PRODUCT_EU3_P 0x0052
+#define CINTERION_PRODUCT_PH8 0x0053
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -615,7 +624,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
@@ -945,7 +953,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
- { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+ /* Cinterion */
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 5be866bb7a41..73613205be7a 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -157,6 +157,7 @@ static struct usb_serial_driver oti6858_device = {
.name = "oti6858",
},
.id_table = id_table,
+ .usb_driver = &oti6858_driver,
.num_ports = 1,
.open = oti6858_open,
.close = oti6858_close,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 8ae4c6cbc38a..08c9181b8e48 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
{
struct pl2303_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
unsigned long flags;
u8 status_idx = UART_STATE;
u8 length = UART_STATE + 1;
+ u8 prev_line_status;
u16 idv, idp;
idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
/* Save off the uart status for others to look at */
spin_lock_irqsave(&priv->lock, flags);
+ prev_line_status = priv->line_status;
priv->line_status = data[status_idx];
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->line_status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
wake_up_interruptible(&priv->delta_msr_wait);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+ if ((priv->line_status ^ prev_line_status) & UART_DCD)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & UART_DCD);
+ tty_kref_put(tty);
}
static void pl2303_read_int_callback(struct urb *urb)
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 43eb9bdad422..1b025f75dafd 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -21,6 +21,7 @@
#define PL2303_PRODUCT_ID_MMX 0x0612
#define PL2303_PRODUCT_ID_GPRS 0x0609
#define PL2303_PRODUCT_ID_HCR331 0x331a
+#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 214a3e504292..30b73e68a904 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -36,6 +36,7 @@
#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+#define PANTECH_PRODUCT_UML290_VZW 0x3718
/* CMOTECH devices */
#define CMOTECH_VENDOR_ID 0x16d8
@@ -66,6 +67,7 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -84,6 +86,7 @@ static struct usb_serial_driver qcaux_device = {
.name = "qcaux",
},
.id_table = id_table,
+ .usb_driver = &qcaux_driver,
.num_ports = 1,
};
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index cb8195cabfde..74cd4ccdb3fc 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -42,6 +42,7 @@ static struct usb_serial_driver siemens_usb_mpi_device = {
.name = "siemens_mpi",
},
.id_table = id_table,
+ .usb_driver = &siemens_usb_mpi_driver,
.num_ports = 1,
};
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8a49e4..0457813eebee 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 765aa983bf58..cbfb70bffdd0 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg {
/* how come ??? */
#define UART_STATE 0x08
-#define UART_STATE_TRANSIENT_MASK 0x74
+#define UART_STATE_TRANSIENT_MASK 0x75
#define UART_DCD 0x01
#define UART_DSR 0x02
#define UART_BREAK_ERROR 0x04
@@ -525,6 +525,10 @@ static void spcp8x5_process_read_urb(struct urb *urb)
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+ if (status & UART_DCD)
+ usb_serial_handle_dcd_change(port, tty,
+ priv->line_status & MSR_STATUS_LINE_DCD);
}
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
@@ -645,6 +649,7 @@ static struct usb_serial_driver spcp8x5_device = {
.name = "SPCP8x5",
},
.id_table = id_table,
+ .usb_driver = &spcp8x5_driver,
.num_ports = 1,
.open = spcp8x5_open,
.dtr_rts = spcp8x5_dtr_rts,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f307b47..a910004f4079 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@ failed_1port:
static void __exit ti_exit(void)
{
+ usb_deregister(&ti_usb_driver);
usb_serial_deregister(&ti_1port_device);
usb_serial_deregister(&ti_2port_device);
- usb_deregister(&ti_usb_driver);
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6954de50c0ff..546a52179bec 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1344,11 +1344,15 @@ int usb_serial_register(struct usb_serial_driver *driver)
return -ENODEV;
fixup_generic(driver);
- if (driver->usb_driver)
- driver->usb_driver->supports_autosuspend = 1;
if (!driver->description)
driver->description = driver->driver.name;
+ if (!driver->usb_driver) {
+ WARN(1, "Serial driver %s has no usb_driver\n",
+ driver->description);
+ return -EINVAL;
+ }
+ driver->usb_driver->supports_autosuspend = 1;
/* Add this device to our list of devices */
mutex_lock(&table_lock);
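
The usb-serial core hunk above turns a missing ->usb_driver pointer into a WARN plus -EINVAL at registration time, which is why so many hunks in this series add that field to their usb_serial_driver. A hedged sketch of the shape a registering driver is now expected to have; the example_* names and the VID/PID are placeholders, not taken from any driver in this series:

	#include <linux/module.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static const struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(0x1234, 0x5678) },		/* illustrative VID/PID */
		{ }
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);

	static struct usb_driver example_driver = {
		.name		= "example",
		.probe		= usb_serial_probe,
		.disconnect	= usb_serial_disconnect,
		.id_table	= example_id_table,
		.no_dynamic_id	= 1,
	};

	static struct usb_serial_driver example_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "example",
		},
		.id_table	= example_id_table,
		.usb_driver	= &example_driver,	/* required, or registration now fails */
		.num_ports	= 1,
	};
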
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index f2ed6a31be77..95a82148ee81 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -75,6 +75,7 @@ static struct usb_serial_driver debug_device = {
.name = "debug",
},
.id_table = id_table,
+ .usb_driver = &debug_driver,
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a485c3..9c014e2ecd68 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
__func__, status, endpoint);
} else {
tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
+ if (tty) {
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+ }
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN) {
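
The pl2303 and usb_wwan hunks above lean on the same defensive pattern: take a counted reference to the port's tty with tty_port_tty_get(), skip the work when no tty is attached, and drop the reference with tty_kref_put() afterwards; the pl2303 and spcp8x5 hunks additionally feed carrier transitions to usb_serial_handle_dcd_change(). A minimal sketch of the idea in a hypothetical status handler; example_handle_status() and the use of bit 0 as DCD are illustrative, not copied from these drivers:

	#include <linux/tty.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static void example_handle_status(struct usb_serial_port *port,
					  u8 status, u8 prev_status)
	{
		struct tty_struct *tty;

		/* May return NULL when userspace has no tty open on this port. */
		tty = tty_port_tty_get(&port->port);
		if (!tty)
			return;

		/* Report carrier transitions; treating bit 0 as DCD is illustrative. */
		if ((status ^ prev_status) & 0x01)
			usb_serial_handle_dcd_change(port, tty, status & 0x01);

		tty_kref_put(tty);	/* drop the reference taken above */
	}
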
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89b7f39..1c11959a7d58 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
#include "visor.h"
/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
dbg("%s", __func__);
+ /*
+ * some Samsung Android phones in modem mode have the same ID
+ * as SPH-I500, but they are ACM devices, so don't bind to them

+ */
+ if (id->idVendor == SAMSUNG_VENDOR_ID &&
+ id->idProduct == SAMSUNG_SPH_I500_ID &&
+ serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+ serial->dev->descriptor.bDeviceSubClass ==
+ USB_CDC_SUBCLASS_ACM)
+ return -ENODEV;
+
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index c854fdebe0ae..2c8553026222 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fcc1e32ce256..c1602b8c5594 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1044,6 +1044,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK32),
+/* Reported by <ttkspam@free.fr>
+ * The device reports a vendor-specific device class, requiring an
+ * explicit vendor/product match.
+ */
+UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
+ "MagicPixel",
+ "FW_Omega2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
+
/* Andrew Lunn <andrew@lunn.ch>
* PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
* on LUN 4.
@@ -1388,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+ "VTech",
+ "Kidizoom",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
@@ -1872,6 +1888,22 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
+/* Patch by Richard Schütz <r.schtz@t-online.de>
+ * This external hard drive enclosure uses a JMicron chip which
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
+ "TrekStor GmbH & Co. KG",
+ "DataStation maxi g.u",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+ "Coby Electronics",
+ "MP3 Player",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7b1d8108de9..8cb9d80207fa 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -49,7 +49,7 @@
*
* USB Stack port number 4 (1 based)
* WUSB code port index 3 (0 based)
- * USB Addresss 5 (2 based -- 0 is for default, 1 for root hub)
+ * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
*
* Now, because we don't use the concept as default address exactly
* like the (wired) USB code does, we need to kind of skip it. So we
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b3ca103135f..f616cefc95ba 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net)
size_t hdr_size;
struct socket *sock;
- /* TODO: check that we are running from vhost_worker?
- * Not sure it's worth it, it's straight-forward enough. */
+ /* TODO: check that we are running from vhost_worker? */
sock = rcu_dereference_check(vq->private_data, 1);
if (!sock)
return;
@@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net)
size_t len, total_len = 0;
int err;
size_t hdr_size;
- struct socket *sock = rcu_dereference(vq->private_data);
+ /* TODO: check that we are running from vhost_worker? */
+ struct socket *sock = rcu_dereference_check(vq->private_data, 1);
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;
@@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
int err, headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
- struct socket *sock = rcu_dereference(vq->private_data);
+ /* TODO: check that we are running from vhost_worker? */
+ struct socket *sock = rcu_dereference_check(vq->private_data, 1);
if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
return;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f59cdd9..ade0568c07a4 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+ unsigned seq)
+{
+ int left;
+ spin_lock_irq(&dev->work_lock);
+ left = seq - work->done_seq;
+ spin_unlock_irq(&dev->work_lock);
+ return left <= 0;
+}
+
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned seq;
- int left;
int flushing;
spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, ({
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq <= 0;
- spin_unlock_irq(&dev->work_lock);
- left;
- }));
+ wait_event(work->done, vhost_work_seq_done(dev, work, seq));
spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
spin_unlock_irq(&dev->work_lock);
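
The vhost hunk above pulls the locked sequence check out of the wait_event() condition into a named helper; because wait_event() re-evaluates its condition on every wake-up, the helper has to take and release the lock itself each time. A generic sketch of the same idiom with illustrative names (not the vhost types):

	#include <linux/spinlock.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	struct example_work {
		spinlock_t		lock;
		unsigned		queue_seq;
		unsigned		done_seq;
		wait_queue_head_t	done;
	};

	/* wait_event() predicate: called repeatedly, so it takes the lock itself. */
	static bool example_seq_done(struct example_work *w, unsigned seq)
	{
		int left;

		spin_lock_irq(&w->lock);
		left = seq - w->done_seq;	/* signed difference tolerates wrap-around */
		spin_unlock_irq(&w->lock);
		return left <= 0;
	}

	static void example_flush(struct example_work *w)
	{
		unsigned seq;

		spin_lock_irq(&w->lock);
		seq = w->queue_seq;	/* snapshot the sequence number to wait for */
		spin_unlock_irq(&w->lock);

		wait_event(w->done, example_seq_done(w, seq));
	}
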
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2af44b7b1f3f..b3363ae38518 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
unsigned acked_features;
- acked_features =
- rcu_dereference_index_check(dev->acked_features,
- lockdep_is_held(&dev->mutex));
+ /* TODO: check that we are running from vhost_worker or dev mutex is
+ * held? */
+ acked_features = rcu_dereference_index_check(dev->acked_features, 1);
return acked_features & (1 << bit);
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 55dc6fb6e909..6bafb51bb437 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,13 @@ config HAVE_FB_ATMEL
config HAVE_FB_IMX
bool
+config SH_MIPI_DSI
+ tristate
+ depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+
+config SH_LCD_MIPI_DSI
+ bool
+
source "drivers/char/agp/Kconfig"
source "drivers/gpu/vga/Kconfig"
@@ -414,7 +421,7 @@ config FB_SA1100
Y here.
config FB_IMX
- tristate "Motorola i.MX LCD support"
+ tristate "Freescale i.MX LCD support"
depends on FB && (HAVE_FB_IMX || ARCH_MX1 || ARCH_MX2)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1220,7 +1227,7 @@ config FB_CARILLO_RANCH
config FB_INTEL
tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED
+ depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1273,7 +1280,7 @@ config FB_MATROX
module will be called matroxfb.
You can pass several parameters to the driver at boot time or at
- module load time. The parameters look like "video=matrox:XXX", and
+ module load time. The parameters look like "video=matroxfb:XXX", and
are described in <file:Documentation/fb/matroxfb.txt>.
config FB_MATROX_MILLENIUM
@@ -1990,13 +1997,6 @@ config FB_W100
If unsure, say N.
-config SH_MIPI_DSI
- tristate
- depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
-
-config SH_LCD_MIPI_DSI
- bool
-
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index d583bea608fd..391ac939f011 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -23,7 +23,7 @@
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
@@ -1091,12 +1091,12 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
dev_info(info->device, "suspend\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1107,7 +1107,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1122,7 +1122,7 @@ static int ark_pci_resume (struct pci_dev* dev)
dev_info(info->device, "resume\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0)
@@ -1141,7 +1141,7 @@ static int ark_pci_resume (struct pci_dev* dev)
fail:
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
#else
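
The arkfb hunk above is the first of many below that only track the rename of acquire_console_sem()/release_console_sem() to console_lock()/console_unlock(); the suspend/resume logic around the calls is unchanged. A minimal sketch of that pattern for a hypothetical PCI framebuffer driver (the examplefb_* name is made up for illustration):

	#include <linux/console.h>
	#include <linux/fb.h>
	#include <linux/pci.h>

	static int examplefb_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		struct fb_info *info = pci_get_drvdata(pdev);

		console_lock();			/* was acquire_console_sem() */
		fb_set_suspend(info, 1);	/* tell fbcon the device is going down */
		/* ...program the hardware into its low-power state here... */
		console_unlock();		/* was release_console_sem() */

		return 0;
	}
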
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8dce25126330..bac163450216 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
}
-static struct backlight_ops atmel_lcdc_bl_ops = {
+static const struct backlight_ops atmel_lcdc_bl_ops = {
.update_status = atmel_bl_update_status,
.get_brightness = atmel_bl_get_brightness,
};
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 34a0851bcbfa..4cb6a576c567 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops aty128_bl_data = {
+static const struct backlight_ops aty128_bl_data = {
.get_brightness = aty128_bl_get_brightness,
.update_status = aty128_bl_update_status,
};
@@ -1860,11 +1860,11 @@ static void aty128_early_resume(void *data)
{
struct aty128fb_par *par = data;
- if (try_acquire_console_sem())
+ if (!console_trylock())
return;
pci_restore_state(par->pdev);
aty128_do_resume(par->pdev);
- release_console_sem();
+ console_unlock();
}
#endif /* CONFIG_PPC_PMAC */
@@ -2438,7 +2438,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
printk(KERN_DEBUG "aty128fb: suspending...\n");
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
@@ -2470,7 +2470,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (state.event != PM_EVENT_ON)
aty128_set_suspend(par, 1);
- release_console_sem();
+ console_unlock();
pdev->dev.power.power_state = state;
@@ -2527,9 +2527,9 @@ static int aty128_pci_resume(struct pci_dev *pdev)
{
int rc;
- acquire_console_sem();
+ console_lock();
rc = aty128_do_resume(pdev);
- release_console_sem();
+ console_unlock();
return rc;
}
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 5a3ce3ad1ec8..94e293fce1d2 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2069,7 +2069,7 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (state.event == pdev->dev.power.power_state.event)
return 0;
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
@@ -2097,14 +2097,14 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
fb_set_suspend(info, 0);
- release_console_sem();
+ console_unlock();
return -EIO;
}
#else
pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
- release_console_sem();
+ console_unlock();
pdev->dev.power.power_state = state;
@@ -2133,7 +2133,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
if (pdev->dev.power.power_state.event == PM_EVENT_ON)
return 0;
- acquire_console_sem();
+ console_lock();
/*
* PCI state will have been restored by the core, so
@@ -2161,7 +2161,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
- release_console_sem();
+ console_unlock();
pdev->dev.power.power_state = PMSG_ON;
@@ -2221,7 +2221,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops aty_bl_data = {
+static const struct backlight_ops aty_bl_data = {
.get_brightness = aty_bl_get_brightness,
.update_status = aty_bl_update_status,
};
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 256966e9667d..9b811ddbce83 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops radeon_bl_data = {
+static const struct backlight_ops radeon_bl_data = {
.get_brightness = radeon_bl_get_brightness,
.update_status = radeon_bl_update_status,
};
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c4e17642d9c5..92bda5848516 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2626,7 +2626,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
goto done;
}
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
@@ -2690,7 +2690,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (rinfo->pm_mode & radeon_pm_d2)
radeon_set_suspend(rinfo, 1);
- release_console_sem();
+ console_unlock();
done:
pdev->dev.power.power_state = mesg;
@@ -2715,10 +2715,10 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
return 0;
if (rinfo->no_schedule) {
- if (try_acquire_console_sem())
+ if (!console_trylock())
return 0;
} else
- acquire_console_sem();
+ console_lock();
printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
pci_name(pdev), pdev->dev.power.power_state.event);
@@ -2783,7 +2783,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
pdev->dev.power.power_state = PMSG_ON;
bail:
- release_console_sem();
+ console_unlock();
return rc;
}
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 38ffc3fbcbe4..b224396b86d5 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -21,7 +21,7 @@
#define MAX_BRIGHTNESS (0xFF)
#define MIN_BRIGHTNESS (0)
-#define CURRENT_MASK (0x1F << 1)
+#define CURRENT_BITMASK (0x1F << 1)
struct pm860x_backlight_data {
struct pm860x_chip *chip;
@@ -85,7 +85,7 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
if ((data->current_brightness == 0) && brightness) {
if (data->iset) {
ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
- CURRENT_MASK, data->iset);
+ CURRENT_BITMASK, data->iset);
if (ret < 0)
goto out;
}
@@ -155,7 +155,7 @@ out:
return -EINVAL;
}
-static struct backlight_ops pm860x_backlight_ops = {
+static const struct backlight_ops pm860x_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = pm860x_backlight_update_status,
.get_brightness = pm860x_backlight_get_brightness,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index c67801e57aaf..98ad3e5f7c85 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -25,7 +25,7 @@
struct l4f00242t03_priv {
struct spi_device *spi;
struct lcd_device *ld;
- int lcd_on:1;
+ int lcd_state;
struct regulator *io_reg;
struct regulator *core_reg;
};
@@ -62,11 +62,36 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
regulator_enable(priv->core_reg);
}
+ l4f00242t03_reset(pdata->reset_gpio);
+
gpio_set_value(pdata->data_enable_gpio, 1);
msleep(60);
spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
}
+static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
+{
+ struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+ struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&spi->dev, "Powering down LCD\n");
+
+ gpio_set_value(pdata->data_enable_gpio, 0);
+
+ if (priv->io_reg)
+ regulator_disable(priv->io_reg);
+
+ if (priv->core_reg)
+ regulator_disable(priv->core_reg);
+}
+
+static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
+{
+ struct l4f00242t03_priv *priv = lcd_get_data(ld);
+
+ return priv->lcd_state;
+}
+
static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
{
struct l4f00242t03_priv *priv = lcd_get_data(ld);
@@ -79,35 +104,54 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
const u16 disoff = 0x28;
if (power <= FB_BLANK_NORMAL) {
- if (priv->lcd_on)
- return 0;
-
- dev_dbg(&spi->dev, "turning on LCD\n");
-
- spi_write(spi, (const u8 *)&slpout, sizeof(u16));
- msleep(60);
- spi_write(spi, (const u8 *)&dison, sizeof(u16));
-
- priv->lcd_on = 1;
+ if (priv->lcd_state <= FB_BLANK_NORMAL) {
+ /* Do nothing, the LCD is running */
+ } else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+ dev_dbg(&spi->dev, "Resuming LCD\n");
+
+ spi_write(spi, (const u8 *)&slpout, sizeof(u16));
+ msleep(60);
+ spi_write(spi, (const u8 *)&dison, sizeof(u16));
+ } else {
+ /* priv->lcd_state == FB_BLANK_POWERDOWN */
+ l4f00242t03_lcd_init(spi);
+ priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+ l4f00242t03_lcd_power_set(priv->ld, power);
+ }
+ } else if (power < FB_BLANK_POWERDOWN) {
+ if (priv->lcd_state <= FB_BLANK_NORMAL) {
+ /* Send the display in standby */
+ dev_dbg(&spi->dev, "Standby the LCD\n");
+
+ spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+ msleep(60);
+ spi_write(spi, (const u8 *)&slpin, sizeof(u16));
+ } else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+ /* Do nothing, the LCD is already in standby */
+ } else {
+ /* priv->lcd_state == FB_BLANK_POWERDOWN */
+ l4f00242t03_lcd_init(spi);
+ priv->lcd_state = FB_BLANK_UNBLANK;
+ l4f00242t03_lcd_power_set(ld, power);
+ }
} else {
- if (!priv->lcd_on)
- return 0;
-
- dev_dbg(&spi->dev, "turning off LCD\n");
-
- spi_write(spi, (const u8 *)&disoff, sizeof(u16));
- msleep(60);
- spi_write(spi, (const u8 *)&slpin, sizeof(u16));
-
- priv->lcd_on = 0;
+ /* power == FB_BLANK_POWERDOWN */
+ if (priv->lcd_state != FB_BLANK_POWERDOWN) {
+ /* Clear the screen before shutting down */
+ spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+ msleep(60);
+ l4f00242t03_lcd_powerdown(spi);
+ }
}
+ priv->lcd_state = power;
+
return 0;
}
static struct lcd_ops l4f_ops = {
.set_power = l4f00242t03_lcd_power_set,
- .get_power = NULL,
+ .get_power = l4f00242t03_lcd_power_get,
};
static int __devinit l4f00242t03_probe(struct spi_device *spi)
@@ -185,9 +229,9 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
}
/* Init the LCD */
- l4f00242t03_reset(pdata->reset_gpio);
l4f00242t03_lcd_init(spi);
- l4f00242t03_lcd_power_set(priv->ld, 1);
+ priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+ l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
@@ -214,9 +258,11 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
- l4f00242t03_lcd_power_set(priv->ld, 0);
+ l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
+ dev_set_drvdata(&spi->dev, NULL);
+
gpio_free(pdata->data_enable_gpio);
gpio_free(pdata->reset_gpio);
@@ -230,6 +276,15 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
return 0;
}
+static void l4f00242t03_shutdown(struct spi_device *spi)
+{
+ struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+ if (priv)
+ l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
+
+}
+
static struct spi_driver l4f00242t03_driver = {
.driver = {
.name = "l4f00242t03",
@@ -237,6 +292,7 @@ static struct spi_driver l4f00242t03_driver = {
},
.probe = l4f00242t03_probe,
.remove = __devexit_p(l4f00242t03_remove),
+ .shutdown = l4f00242t03_shutdown,
};
static __init int l4f00242t03_init(void)
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aaeb5adb..dd0e84a9bd2f 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
lcd->buffer = kzalloc(8, GFP_KERNEL);
+ if (!lcd->buffer) {
+ ret = -ENOMEM;
+ goto out_free_lcd;
+ }
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_buffer;
}
lcd->ld = ld;
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
+out_free_buffer:
+ kfree(lcd->buffer);
out_free_lcd:
kfree(lcd);
return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
+ kfree(lcd->buffer);
kfree(lcd);
return 0;
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b2b2c7ba1f63..209acc105cbc 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -92,7 +92,7 @@ static int max8925_backlight_get_brightness(struct backlight_device *bl)
return ret;
}
-static struct backlight_ops max8925_backlight_ops = {
+static const struct backlight_ops max8925_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = max8925_backlight_update_status,
.get_brightness = max8925_backlight_get_brightness,
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 18c507874ff1..47c21fb2c82f 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -696,6 +696,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
{
struct backlight_properties props;
dma_addr_t dma_handle;
+ int ret;
if (request_dma(CH_PPI, KBUILD_MODNAME)) {
pr_err("couldn't request PPI DMA\n");
@@ -704,17 +705,16 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
if (request_ports()) {
pr_err("couldn't request gpio port\n");
- free_dma(CH_PPI);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out_ports;
}
fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE,
&dma_handle, GFP_KERNEL);
if (fb_buffer == NULL) {
pr_err("couldn't allocate dma buffer\n");
- free_dma(CH_PPI);
- free_ports();
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_dma_coherent;
}
if (L1_DATA_A_LENGTH)
@@ -725,10 +725,8 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
if (dma_desc_table == NULL) {
pr_err("couldn't allocate dma descriptor\n");
- free_dma(CH_PPI);
- free_ports();
- dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_table;
}
bfin_lq035_fb.screen_base = (void *)fb_buffer;
@@ -771,31 +769,21 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
if (bfin_lq035_fb.pseudo_palette == NULL) {
pr_err("failed to allocate pseudo_palette\n");
- free_dma(CH_PPI);
- free_ports();
- dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_palette;
}
if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
pr_err("failed to allocate colormap (%d entries)\n",
NBR_PALETTE);
- free_dma(CH_PPI);
- free_ports();
- dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
- kfree(bfin_lq035_fb.pseudo_palette);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out_cmap;
}
if (register_framebuffer(&bfin_lq035_fb) < 0) {
pr_err("unable to register framebuffer\n");
- free_dma(CH_PPI);
- free_ports();
- dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
- fb_buffer = NULL;
- kfree(bfin_lq035_fb.pseudo_palette);
- fb_dealloc_cmap(&bfin_lq035_fb.cmap);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_reg;
}
i2c_add_driver(&ad5280_driver);
@@ -807,11 +795,31 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL,
&bfin_lcd_ops);
+ if (IS_ERR(lcd_dev)) {
+ pr_err("unable to register lcd\n");
+ ret = PTR_ERR(lcd_dev);
+ goto out_lcd;
+ }
lcd_dev->props.max_contrast = 255,
pr_info("initialized");
return 0;
+out_lcd:
+ unregister_framebuffer(&bfin_lq035_fb);
+out_reg:
+ fb_dealloc_cmap(&bfin_lq035_fb.cmap);
+out_cmap:
+ kfree(bfin_lq035_fb.pseudo_palette);
+out_palette:
+out_table:
+ dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
+ fb_buffer = NULL;
+out_dma_coherent:
+ free_ports();
+out_ports:
+ free_dma(CH_PPI);
+ return ret;
}
static int __devexit bfin_lq035_remove(struct platform_device *pdev)
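
The bf537-lq035 probe rework above replaces copy-pasted inline cleanup with the usual goto-unwind error path, where each label releases exactly what was acquired before the failing step, in reverse order. A stripped-down sketch of the pattern with placeholder resources (none of these helper names exist in the driver):

	#include <linux/platform_device.h>

	/* Stand-ins for whatever the real driver sets up. */
	static int acquire_resource_a(void);
	static int acquire_resource_b(void);
	static int register_something(void);
	static void release_resource_b(void);
	static void release_resource_a(void);

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		ret = acquire_resource_a();
		if (ret)
			return ret;		/* nothing to undo yet */

		ret = acquire_resource_b();
		if (ret)
			goto err_release_a;

		ret = register_something();
		if (ret)
			goto err_release_b;

		return 0;

	err_release_b:
		release_resource_b();
	err_release_a:
		release_resource_a();
		return ret;
	}
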
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index d637e1f53172..cff742abdc5d 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -460,10 +460,10 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!(state.event & PM_EVENT_SLEEP))
goto done;
- acquire_console_sem();
+ console_lock();
chipsfb_blank(1, p);
fb_set_suspend(p, 1);
- release_console_sem();
+ console_unlock();
done:
pdev->dev.power.power_state = state;
return 0;
@@ -473,10 +473,10 @@ static int chipsfb_pci_resume(struct pci_dev *pdev)
{
struct fb_info *p = pci_get_drvdata(pdev);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(p, 0);
chipsfb_blank(0, p);
- release_console_sem();
+ console_unlock();
pdev->dev.power.power_state = PMSG_ON;
return 0;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5a35f22372b9..2209e354f531 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -5,7 +5,7 @@
menu "Console display driver support"
config VGA_CONSOLE
- bool "VGA text console" if EMBEDDED || !X86
+ bool "VGA text console" if EXPERT || !X86
depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
default y
help
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 7ccc967831f0..9c092b8d64e6 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -375,14 +375,14 @@ static void fb_flashcursor(struct work_struct *work)
int c;
int mode;
- acquire_console_sem();
+ console_lock();
if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
if (!vc || !CON_IS_VISIBLE(vc) ||
registered_fb[con2fb_map[vc->vc_num]] != info ||
vc->vc_deccm != 1) {
- release_console_sem();
+ console_unlock();
return;
}
@@ -392,7 +392,7 @@ static void fb_flashcursor(struct work_struct *work)
CM_ERASE : CM_DRAW;
ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
- release_console_sem();
+ console_unlock();
}
static void cursor_timer_handler(unsigned long dev_addr)
@@ -836,7 +836,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
found = search_fb_in_map(newidx);
- acquire_console_sem();
+ console_lock();
con2fb_map[unit] = newidx;
if (!err && !found)
err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -863,7 +863,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- release_console_sem();
+ console_unlock();
return err;
}
@@ -3321,7 +3321,7 @@ static ssize_t store_rotate(struct device *device,
if (fbcon_has_exited)
return count;
- acquire_console_sem();
+ console_lock();
idx = con2fb_map[fg_console];
if (idx == -1 || registered_fb[idx] == NULL)
@@ -3331,7 +3331,7 @@ static ssize_t store_rotate(struct device *device,
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate(info, rotate);
err:
- release_console_sem();
+ console_unlock();
return count;
}
@@ -3346,7 +3346,7 @@ static ssize_t store_rotate_all(struct device *device,
if (fbcon_has_exited)
return count;
- acquire_console_sem();
+ console_lock();
idx = con2fb_map[fg_console];
if (idx == -1 || registered_fb[idx] == NULL)
@@ -3356,7 +3356,7 @@ static ssize_t store_rotate_all(struct device *device,
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate_all(info, rotate);
err:
- release_console_sem();
+ console_unlock();
return count;
}
@@ -3369,7 +3369,7 @@ static ssize_t show_rotate(struct device *device,
if (fbcon_has_exited)
return 0;
- acquire_console_sem();
+ console_lock();
idx = con2fb_map[fg_console];
if (idx == -1 || registered_fb[idx] == NULL)
@@ -3378,7 +3378,7 @@ static ssize_t show_rotate(struct device *device,
info = registered_fb[idx];
rotate = fbcon_get_rotate(info);
err:
- release_console_sem();
+ console_unlock();
return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
}
@@ -3392,7 +3392,7 @@ static ssize_t show_cursor_blink(struct device *device,
if (fbcon_has_exited)
return 0;
- acquire_console_sem();
+ console_lock();
idx = con2fb_map[fg_console];
if (idx == -1 || registered_fb[idx] == NULL)
@@ -3406,7 +3406,7 @@ static ssize_t show_cursor_blink(struct device *device,
blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
err:
- release_console_sem();
+ console_unlock();
return snprintf(buf, PAGE_SIZE, "%d\n", blink);
}
@@ -3421,7 +3421,7 @@ static ssize_t store_cursor_blink(struct device *device,
if (fbcon_has_exited)
return count;
- acquire_console_sem();
+ console_lock();
idx = con2fb_map[fg_console];
if (idx == -1 || registered_fb[idx] == NULL)
@@ -3443,7 +3443,7 @@ static ssize_t store_cursor_blink(struct device *device,
}
err:
- release_console_sem();
+ console_unlock();
return count;
}
@@ -3482,7 +3482,7 @@ static void fbcon_start(void)
if (num_registered_fb) {
int i;
- acquire_console_sem();
+ console_lock();
for (i = 0; i < FB_MAX; i++) {
if (registered_fb[i] != NULL) {
@@ -3491,7 +3491,7 @@ static void fbcon_start(void)
}
}
- release_console_sem();
+ console_unlock();
fbcon_takeover(0);
}
}
@@ -3552,7 +3552,7 @@ static int __init fb_console_init(void)
{
int i;
- acquire_console_sem();
+ console_lock();
fb_register_client(&fbcon_event_notifier);
fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
"fbcon");
@@ -3568,7 +3568,7 @@ static int __init fb_console_init(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
con2fb_map[i] = -1;
- release_console_sem();
+ console_unlock();
fbcon_start();
return 0;
}
@@ -3591,12 +3591,12 @@ static void __exit fbcon_deinit_device(void)
static void __exit fb_console_exit(void)
{
- acquire_console_sem();
+ console_lock();
fb_unregister_client(&fbcon_event_notifier);
fbcon_deinit_device();
device_destroy(fb_class, MKDEV(0, 0));
fbcon_exit();
- release_console_sem();
+ console_unlock();
unregister_con_driver(&fb_con);
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915448ec75bf..915fd74da7a2 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -202,11 +202,7 @@ static void vgacon_scrollback_init(int pitch)
}
}
-/*
- * Called only duing init so call of alloc_bootmen is ok.
- * Marked __init_refok to silence modpost.
- */
-static void __init_refok vgacon_scrollback_startup(void)
+static void vgacon_scrollback_startup(void)
{
vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
vgacon_scrollback_init(vga_video_num_columns * 2);
@@ -375,7 +371,8 @@ static const char *vgacon_startup(void)
u16 saved1, saved2;
volatile u16 *p;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) {
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB ||
+ screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
no_vga:
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index c265aed09e04..8d61ef96eedd 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1092,9 +1092,10 @@ static int __init fb_probe(struct platform_device *device)
irq_freq:
#ifdef CONFIG_CPU_FREQ
+ lcd_da8xx_cpufreq_deregister(par);
+#endif
err_cpu_freq:
unregister_framebuffer(da8xx_fb_info);
-#endif
err_dealloc_cmap:
fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -1130,14 +1131,14 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
struct fb_info *info = platform_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
- acquire_console_sem();
+ console_lock();
if (par->panel_power_ctrl)
par->panel_power_ctrl(0);
fb_set_suspend(info, 1);
lcd_disable_raster();
clk_disable(par->lcdc_clk);
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1146,14 +1147,14 @@ static int fb_resume(struct platform_device *dev)
struct fb_info *info = platform_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
- acquire_console_sem();
+ console_lock();
if (par->panel_power_ctrl)
par->panel_power_ctrl(1);
clk_enable(par->lcdc_clk);
lcd_enable_raster();
fb_set_suspend(info, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 0c99de0562ca..b358d045f130 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -483,7 +483,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
info->screen_base, info->fix.smem_start);
}
-static int __init ep93xxfb_probe(struct platform_device *pdev)
+static int __devinit ep93xxfb_probe(struct platform_device *pdev)
{
struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
struct fb_info *info;
@@ -598,7 +598,7 @@ failed:
return err;
}
-static int ep93xxfb_remove(struct platform_device *pdev)
+static int __devexit ep93xxfb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct ep93xx_fbi *fbi = info->par;
@@ -622,7 +622,7 @@ static int ep93xxfb_remove(struct platform_device *pdev)
static struct platform_driver ep93xxfb_driver = {
.probe = ep93xxfb_probe,
- .remove = ep93xxfb_remove,
+ .remove = __devexit_p(ep93xxfb_remove),
.driver = {
.name = "ep93xx-fb",
.owner = THIS_MODULE,
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 4ac1201ad6c2..e2bf95370e40 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1036,11 +1036,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
- release_console_sem();
+ console_unlock();
unlock_fb_info(info);
if (!ret && copy_to_user(argp, &var, sizeof(var)))
ret = -EFAULT;
@@ -1072,9 +1072,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
return -EFAULT;
if (!lock_fb_info(info))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
ret = fb_pan_display(info, &var);
- release_console_sem();
+ console_unlock();
unlock_fb_info(info);
if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
@@ -1119,11 +1119,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOBLANK:
if (!lock_fb_info(info))
return -ENODEV;
- acquire_console_sem();
+ console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_blank(info, arg);
info->flags &= ~FBINFO_MISC_USEREVENT;
- release_console_sem();
+ console_unlock();
unlock_fb_info(info);
break;
default:
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 0a08f1341227..f4a32779168b 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -90,11 +90,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
int err;
var->activate |= FB_ACTIVATE_FORCE;
- acquire_console_sem();
+ console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
err = fb_set_var(fb_info, var);
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
- release_console_sem();
+ console_unlock();
if (err)
return err;
return 0;
@@ -175,7 +175,7 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
- acquire_console_sem();
+ console_lock();
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
&fb_info->modelist);
@@ -185,7 +185,7 @@ static ssize_t store_modes(struct device *device,
} else
fb_destroy_modelist(&old_list);
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -301,11 +301,11 @@ static ssize_t store_blank(struct device *device,
char *last = NULL;
int err;
- acquire_console_sem();
+ console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
- release_console_sem();
+ console_unlock();
if (err < 0)
return err;
return count;
@@ -364,9 +364,9 @@ static ssize_t store_pan(struct device *device,
return -EINVAL;
var.yoffset = simple_strtoul(last, &last, 0);
- acquire_console_sem();
+ console_lock();
err = fb_pan_display(fb_info, &var);
- release_console_sem();
+ console_unlock();
if (err < 0)
return err;
@@ -399,9 +399,9 @@ static ssize_t store_fbstate(struct device *device,
state = simple_strtoul(buf, &last, 0);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(fb_info, (int)state);
- release_console_sem();
+ console_unlock();
return count;
}
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 70b1d9d51c96..b4f19db9bb54 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -344,10 +344,10 @@ static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
struct fb_info *info = pci_get_drvdata(pdev);
if (state.event == PM_EVENT_SUSPEND) {
- acquire_console_sem();
+ console_lock();
gx_powerdown(info);
fb_set_suspend(info, 1);
- release_console_sem();
+ console_unlock();
}
/* there's no point in setting PCI states; we emulate PCI, so
@@ -361,7 +361,7 @@ static int gxfb_resume(struct pci_dev *pdev)
struct fb_info *info = pci_get_drvdata(pdev);
int ret;
- acquire_console_sem();
+ console_lock();
ret = gx_powerup(info);
if (ret) {
printk(KERN_ERR "gxfb: power up failed!\n");
@@ -369,7 +369,7 @@ static int gxfb_resume(struct pci_dev *pdev)
}
fb_set_suspend(info, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
#endif
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 39bdbedf43b4..416851ca8754 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -465,10 +465,10 @@ static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
struct fb_info *info = pci_get_drvdata(pdev);
if (state.event == PM_EVENT_SUSPEND) {
- acquire_console_sem();
+ console_lock();
lx_powerdown(info);
fb_set_suspend(info, 1);
- release_console_sem();
+ console_unlock();
}
/* there's no point in setting PCI states; we emulate PCI, so
@@ -482,7 +482,7 @@ static int lxfb_resume(struct pci_dev *pdev)
struct fb_info *info = pci_get_drvdata(pdev);
int ret;
- acquire_console_sem();
+ console_lock();
ret = lx_powerup(info);
if (ret) {
printk(KERN_ERR "lxfb: power up failed!\n");
@@ -490,7 +490,7 @@ static int lxfb_resume(struct pci_dev *pdev)
}
fb_set_suspend(info, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
#else
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5743ea25e818..318f6fb895b2 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1574,7 +1574,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
return 0;
}
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
if (info->fbops->fb_sync)
@@ -1587,7 +1587,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, mesg));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1605,7 +1605,7 @@ static int i810fb_resume(struct pci_dev *dev)
return 0;
}
- acquire_console_sem();
+ console_lock();
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
@@ -1621,7 +1621,7 @@ static int i810fb_resume(struct pci_dev *dev)
fb_set_suspend (info, 0);
info->fbops->fb_blank(VESA_NO_BLANKING, info);
fail:
- release_console_sem();
+ console_unlock();
return 0;
}
/***********************************************************************
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 1ab2c2588675..69bd4a581d4a 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -974,6 +974,6 @@ static void __exit imxfb_cleanup(void)
module_init(imxfb_init);
module_exit(imxfb_cleanup);
-MODULE_DESCRIPTION("Motorola i.MX framebuffer driver");
+MODULE_DESCRIPTION("Freescale i.MX framebuffer driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 670ecaa0385a..de366937c933 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -778,9 +778,9 @@ static int jzfb_suspend(struct device *dev)
{
struct jzfb *jzfb = dev_get_drvdata(dev);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(jzfb->fb, 1);
- release_console_sem();
+ console_unlock();
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled)
@@ -800,9 +800,9 @@ static int jzfb_resume(struct device *dev)
jzfb_enable(jzfb);
mutex_unlock(&jzfb->lock);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(jzfb->fb, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 052dd9f0b760..a082debe824b 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1247,46 +1247,46 @@ static struct { struct fb_bitfield red, green, blue, transp; int bits_per_pixel;
};
/* initialized by setup, see explanation at end of file (search for MODULE_PARM_DESC) */
-static unsigned int mem; /* "matrox:mem:xxxxxM" */
+static unsigned int mem; /* "matroxfb:mem:xxxxxM" */
static int option_precise_width = 1; /* cannot be changed, option_precise_width==0 must imply noaccel */
-static int inv24; /* "matrox:inv24" */
-static int cross4MB = -1; /* "matrox:cross4MB" */
-static int disabled; /* "matrox:disabled" */
-static int noaccel; /* "matrox:noaccel" */
-static int nopan; /* "matrox:nopan" */
-static int no_pci_retry; /* "matrox:nopciretry" */
-static int novga; /* "matrox:novga" */
-static int nobios; /* "matrox:nobios" */
-static int noinit = 1; /* "matrox:init" */
-static int inverse; /* "matrox:inverse" */
-static int sgram; /* "matrox:sgram" */
+static int inv24; /* "matroxfb:inv24" */
+static int cross4MB = -1; /* "matroxfb:cross4MB" */
+static int disabled; /* "matroxfb:disabled" */
+static int noaccel; /* "matroxfb:noaccel" */
+static int nopan; /* "matroxfb:nopan" */
+static int no_pci_retry; /* "matroxfb:nopciretry" */
+static int novga; /* "matroxfb:novga" */
+static int nobios; /* "matroxfb:nobios" */
+static int noinit = 1; /* "matroxfb:init" */
+static int inverse; /* "matroxfb:inverse" */
+static int sgram; /* "matroxfb:sgram" */
#ifdef CONFIG_MTRR
-static int mtrr = 1; /* "matrox:nomtrr" */
+static int mtrr = 1; /* "matroxfb:nomtrr" */
#endif
-static int grayscale; /* "matrox:grayscale" */
-static int dev = -1; /* "matrox:dev:xxxxx" */
-static unsigned int vesa = ~0; /* "matrox:vesa:xxxxx" */
-static int depth = -1; /* "matrox:depth:xxxxx" */
-static unsigned int xres; /* "matrox:xres:xxxxx" */
-static unsigned int yres; /* "matrox:yres:xxxxx" */
-static unsigned int upper = ~0; /* "matrox:upper:xxxxx" */
-static unsigned int lower = ~0; /* "matrox:lower:xxxxx" */
-static unsigned int vslen; /* "matrox:vslen:xxxxx" */
-static unsigned int left = ~0; /* "matrox:left:xxxxx" */
-static unsigned int right = ~0; /* "matrox:right:xxxxx" */
-static unsigned int hslen; /* "matrox:hslen:xxxxx" */
-static unsigned int pixclock; /* "matrox:pixclock:xxxxx" */
-static int sync = -1; /* "matrox:sync:xxxxx" */
-static unsigned int fv; /* "matrox:fv:xxxxx" */
-static unsigned int fh; /* "matrox:fh:xxxxxk" */
-static unsigned int maxclk; /* "matrox:maxclk:xxxxM" */
-static int dfp; /* "matrox:dfp */
-static int dfp_type = -1; /* "matrox:dfp:xxx */
-static int memtype = -1; /* "matrox:memtype:xxx" */
-static char outputs[8]; /* "matrox:outputs:xxx" */
+static int grayscale; /* "matroxfb:grayscale" */
+static int dev = -1; /* "matroxfb:dev:xxxxx" */
+static unsigned int vesa = ~0; /* "matroxfb:vesa:xxxxx" */
+static int depth = -1; /* "matroxfb:depth:xxxxx" */
+static unsigned int xres; /* "matroxfb:xres:xxxxx" */
+static unsigned int yres; /* "matroxfb:yres:xxxxx" */
+static unsigned int upper = ~0; /* "matroxfb:upper:xxxxx" */
+static unsigned int lower = ~0; /* "matroxfb:lower:xxxxx" */
+static unsigned int vslen; /* "matroxfb:vslen:xxxxx" */
+static unsigned int left = ~0; /* "matroxfb:left:xxxxx" */
+static unsigned int right = ~0; /* "matroxfb:right:xxxxx" */
+static unsigned int hslen; /* "matroxfb:hslen:xxxxx" */
+static unsigned int pixclock; /* "matroxfb:pixclock:xxxxx" */
+static int sync = -1; /* "matroxfb:sync:xxxxx" */
+static unsigned int fv; /* "matroxfb:fv:xxxxx" */
+static unsigned int fh; /* "matroxfb:fh:xxxxxk" */
+static unsigned int maxclk; /* "matroxfb:maxclk:xxxxM" */
+static int dfp; /* "matroxfb:dfp */
+static int dfp_type = -1; /* "matroxfb:dfp:xxx */
+static int memtype = -1; /* "matroxfb:memtype:xxx" */
+static char outputs[8]; /* "matroxfb:outputs:xxx" */
#ifndef MODULE
-static char videomode[64]; /* "matrox:mode:xxxxx" or "matrox:xxxxx" */
+static char videomode[64]; /* "matroxfb:mode:xxxxx" or "matroxfb:xxxxx" */
#endif
static int matroxfb_getmemory(struct matrox_fb_info *minfo,
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index d2bb365f09b3..48c3ea8652b6 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -32,300 +32,320 @@
const char *fb_mode_option;
EXPORT_SYMBOL_GPL(fb_mode_option);
- /*
- * Standard video mode definitions (taken from XFree86)
- */
+/*
+ * Standard video mode definitions (taken from XFree86)
+ */
static const struct fb_videomode modedb[] = {
- {
+
/* 640x400 @ 70 Hz, 31.5 kHz hsync */
- NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 640x480 @ 60 Hz, 31.5 kHz hsync */
- NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 800x600 @ 56 Hz, 35.15 kHz hsync */
- NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
- NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
- 0, FB_VMODE_INTERLACED
- }, {
+ { NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0,
+ FB_VMODE_INTERLACED },
+
/* 640x400 @ 85 Hz, 37.86 kHz hsync */
- NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
- FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
+ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED },
+
/* 640x480 @ 72 Hz, 36.5 kHz hsync */
- NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 640x480 @ 75 Hz, 37.50 kHz hsync */
- NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 800x600 @ 60 Hz, 37.8 kHz hsync */
- NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 640x480 @ 85 Hz, 43.27 kHz hsync */
- NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
- NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
- 0, FB_VMODE_INTERLACED
- }, {
+ { NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0,
+ FB_VMODE_INTERLACED },
/* 800x600 @ 72 Hz, 48.0 kHz hsync */
- NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 60 Hz, 48.4 kHz hsync */
- NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 640x480 @ 100 Hz, 53.01 kHz hsync */
- NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 60 Hz, 53.5 kHz hsync */
- NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 800x600 @ 85 Hz, 55.84 kHz hsync */
- NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 70 Hz, 56.5 kHz hsync */
- NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
- NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
- 0, FB_VMODE_INTERLACED
- }, {
+ { NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0,
+ FB_VMODE_INTERLACED },
+
/* 800x600 @ 100 Hz, 64.02 kHz hsync */
- NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 76 Hz, 62.5 kHz hsync */
- NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 70 Hz, 62.4 kHz hsync */
- NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
- NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1400x1050 @ 60Hz, 63.9 kHz hsync */
- NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
- NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
- NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 85 Hz, 70.24 kHz hsync */
- NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 78 Hz, 70.8 kHz hsync */
- NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
- NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1600x1200 @ 60Hz, 75.00 kHz hsync */
- NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 84 Hz, 76.0 kHz hsync */
- NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
- NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1024x768 @ 100Hz, 80.21 kHz hsync */
- NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
- NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
- NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x864 @ 100 Hz, 89.62 kHz hsync */
- NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
- NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
- NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
- NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
- NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
- NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1800x1440 @ 64Hz, 96.15 kHz hsync */
- NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1800x1440 @ 70Hz, 104.52 kHz hsync */
- NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 512x384 @ 78 Hz, 31.50 kHz hsync */
- NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 512x384 @ 85 Hz, 34.38 kHz hsync */
- NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
- NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0,
+ FB_VMODE_DOUBLE },
+
/* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
- NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0,
+ FB_VMODE_DOUBLE },
+
/* 320x240 @ 72 Hz, 36.5 kHz hsync */
- NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0,
+ FB_VMODE_DOUBLE },
+
/* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
- NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0,
+ FB_VMODE_DOUBLE },
+
/* 400x300 @ 60 Hz, 37.8 kHz hsync */
- NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0,
+ FB_VMODE_DOUBLE },
+
/* 400x300 @ 72 Hz, 48.0 kHz hsync */
- NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0,
+ FB_VMODE_DOUBLE },
+
/* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
- NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0,
+ FB_VMODE_DOUBLE },
+
/* 480x300 @ 60 Hz, 37.8 kHz hsync */
- NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0,
+ FB_VMODE_DOUBLE },
+
/* 480x300 @ 63 Hz, 39.6 kHz hsync */
- NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0,
+ FB_VMODE_DOUBLE },
+
/* 480x300 @ 72 Hz, 48.0 kHz hsync */
- NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
- 0, FB_VMODE_DOUBLE
- }, {
+ { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
+ FB_VMODE_DOUBLE },
+
/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
- NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
- NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
- NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
- 0, FB_VMODE_NONINTERLACED
- }, {
+ { NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0,
+ FB_VMODE_NONINTERLACED },
+
/* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
- NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
- 0, FB_VMODE_NONINTERLACED
- }, {
- /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
- NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
- 0, FB_VMODE_INTERLACED
- }, {
- /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
- NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
- 0, FB_VMODE_INTERLACED
- }, {
+ { NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0,
+ FB_VMODE_NONINTERLACED },
+
+ /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ { NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0,
+ FB_VMODE_INTERLACED },
+
+ /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+ { NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0,
+ FB_VMODE_INTERLACED },
+
/* 864x480 @ 60 Hz, 35.15 kHz hsync */
- NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0,
- 0, FB_VMODE_NONINTERLACED
- },
+ { NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0,
+ 0, FB_VMODE_NONINTERLACED },
};
#ifdef CONFIG_FB_MODE_HELPERS
const struct fb_videomode cea_modes[64] = {
/* #1: 640x480p@59.94/60Hz */
[1] = {
- NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, 0,
+ NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #3: 720x480p@59.94/60Hz */
[3] = {
- NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, FB_VMODE_NONINTERLACED, 0,
+ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #5: 1920x1080i@59.94/60Hz */
[5] = {
NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_INTERLACED, 0,
},
/* #7: 720(1440)x480iH@59.94/60Hz */
[7] = {
- NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, FB_VMODE_INTERLACED, 0,
+ NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+ FB_VMODE_INTERLACED, 0,
},
/* #9: 720(1440)x240pH@59.94/60Hz */
[9] = {
- NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, FB_VMODE_NONINTERLACED, 0,
+ NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #18: 720x576pH@50Hz */
[18] = {
- NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, FB_VMODE_NONINTERLACED, 0,
+ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #19: 1280x720p@50Hz */
[19] = {
NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #20: 1920x1080i@50Hz */
[20] = {
NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_INTERLACED, 0,
},
/* #32: 1920x1080p@23.98/24Hz */
[32] = {
NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED, 0,
},
/* #35: (2880)x480p4x@59.94/60Hz */
[35] = {
- NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, FB_VMODE_NONINTERLACED, 0,
+ NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
+ FB_VMODE_NONINTERLACED, 0,
},
};
@@ -340,10 +360,10 @@ const struct fb_videomode vesa_modes[] = {
{ NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3,
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 3 640x480-60 VESA */
- { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
+ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 4 640x480-72 VESA */
- { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2,
+ { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 5 640x480-75 VESA */
{ NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3,
@@ -426,7 +446,7 @@ const struct fb_videomode vesa_modes[] = {
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 26 1600x1200-75 VESA */
- { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
+ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 27 1600x1200-85 VESA */
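For reference while reading the reformatted modedb[] table above, each positional initializer follows the field order of struct fb_videomode in include/linux/fb.h. A minimal sketch of the first 640x480 entry spelled out with designated initializers (illustrative only, not part of the patch):

#include <linux/fb.h>

/* Same data as "{ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0,
 * FB_VMODE_NONINTERLACED }", one field per line. */
static const struct fb_videomode example_640x480_60 = {
	.name		= NULL,		/* no symbolic name */
	.refresh	= 60,		/* vertical refresh, Hz */
	.xres		= 640,
	.yres		= 480,
	.pixclock	= 39721,	/* pixel clock period, picoseconds */
	.left_margin	= 40,		/* horizontal back porch */
	.right_margin	= 24,		/* horizontal front porch */
	.upper_margin	= 32,		/* vertical back porch */
	.lower_margin	= 11,		/* vertical front porch */
	.hsync_len	= 96,
	.vsync_len	= 2,
	.sync		= 0,		/* sync polarity flags */
	.vmode		= FB_VMODE_NONINTERLACED,
};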
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cb013919e9ce..7e3a490e8d76 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1177,9 +1177,9 @@ static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
- acquire_console_sem();
+ console_lock();
fb_set_suspend(mx3fb->fbi, 1);
- release_console_sem();
+ console_unlock();
if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
sdc_disable_channel(mx3_fbi);
@@ -1202,9 +1202,9 @@ static int mx3fb_resume(struct platform_device *pdev)
sdc_set_brightness(mx3fb, mx3fb->backlight_level);
}
- acquire_console_sem();
+ console_lock();
fb_set_suspend(mx3fb->fbi, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 81687ed26ba9..f838d9e277f0 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -15,6 +15,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -597,9 +598,9 @@ static int __devinit nuc900fb_probe(struct platform_device *pdev)
}
fbi->clk = clk_get(&pdev->dev, NULL);
- if (!fbi->clk || IS_ERR(fbi->clk)) {
+ if (IS_ERR(fbi->clk)) {
printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n");
- ret = -ENOENT;
+ ret = PTR_ERR(fbi->clk);
goto release_irq;
}
@@ -695,6 +696,8 @@ static int nuc900fb_remove(struct platform_device *pdev)
nuc900fb_stop_lcd(fbinfo);
msleep(1);
+ unregister_framebuffer(fbinfo);
+ nuc900fb_cpufreq_deregister(fbi);
nuc900fb_unmap_video_memory(fbinfo);
iounmap(fbi->io);
@@ -722,7 +725,7 @@ static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state)
struct fb_info *fbinfo = platform_get_drvdata(dev);
struct nuc900fb_info *info = fbinfo->par;
- nuc900fb_stop_lcd();
+ nuc900fb_stop_lcd(fbinfo);
msleep(1);
clk_disable(info->clk);
return 0;
@@ -739,7 +742,7 @@ static int nuc900fb_resume(struct platform_device *dev)
msleep(1);
nuc900fb_init_registers(fbinfo);
- nuc900fb_activate_var(bfinfo);
+ nuc900fb_activate_var(fbinfo);
return 0;
}
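The clk_get() fix above follows the usual kernel idiom: failure is reported as an ERR_PTR() value rather than NULL (a NULL clk can be a perfectly valid cookie on some platforms), so the result is tested with IS_ERR() and the encoded errno is propagated with PTR_ERR(). A minimal sketch, with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical helper showing the clk_get() error handling used above. */
static int example_get_lcd_clock(struct device *dev, struct clk **clkp)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* propagate the real errno, e.g. -ENOENT */

	*clkp = clk;
	return 0;
}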
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 2fb552a6f32c..6aac6d1b937b 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops nvidia_bl_ops = {
+static const struct backlight_ops nvidia_bl_ops = {
.get_brightness = nvidia_bl_get_brightness,
.update_status = nvidia_bl_update_status,
};
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index efe10ff86d63..081dc4745274 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1057,7 +1057,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
- acquire_console_sem();
+ console_lock();
par->pm_state = mesg.event;
if (mesg.event & PM_EVENT_SLEEP) {
@@ -1070,7 +1070,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
}
dev->dev.power.power_state = mesg;
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1079,7 +1079,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
struct fb_info *info = pci_get_drvdata(dev);
struct nvidia_par *par = info->par;
- acquire_console_sem();
+ console_lock();
pci_set_power_state(dev, PCI_D0);
if (par->pm_state != PM_EVENT_FREEZE) {
@@ -1097,7 +1097,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
nvidiafb_blank(FB_BLANK_UNBLANK, info);
fail:
- release_console_sem();
+ console_unlock();
return 0;
}
#else
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 12327bbfdbbb..940cab394c2e 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -1,11 +1,13 @@
menu "OMAP2/3 Display Device Drivers"
depends on OMAP2_DSS
-config PANEL_GENERIC
- tristate "Generic Panel"
+config PANEL_GENERIC_DPI
+ tristate "Generic DPI Panel"
help
- Generic panel driver.
- Used for DVI output for Beagle and OMAP3 SDP.
+ Generic DPI panel driver.
+ Supports DVI output for Beagle and OMAP3 SDP.
+	  Supports the LCD panels used in TI's SDP3430 and EVM boards,
+	  the OMAP3517 EVM boards and the CM-T35.
config PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
@@ -14,11 +16,12 @@ config PANEL_SHARP_LS037V7DW01
help
LCD Panel used in TI's SDP3430 and EVM boards
-config PANEL_SHARP_LQ043T1DG01
- tristate "Sharp LQ043T1DG01 LCD Panel"
- depends on OMAP2_DSS
- help
- LCD Panel used in TI's OMAP3517 EVM boards
+config PANEL_NEC_NL8048HL11_01B
+ tristate "NEC NL8048HL11-01B Panel"
+ depends on OMAP2_DSS
+ help
+	  The NEC NL8048HL11-01B panel is a TFT LCD
+	  used in the Zoom2/3/3630 SDP boards.
config PANEL_TAAL
tristate "Taal DSI Panel"
@@ -26,12 +29,6 @@ config PANEL_TAAL
help
Taal DSI command mode panel from TPO.
-config PANEL_TOPPOLY_TDO35S
- tristate "Toppoly TDO35S LCD Panel support"
- depends on OMAP2_DSS
- help
- LCD Panel used in CM-T35
-
config PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on OMAP2_DSS && SPI
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index aa386095d7c4..861f0255ec6b 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,8 +1,7 @@
-obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
+obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
-obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
new file mode 100644
index 000000000000..07eb30ee59c8
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -0,0 +1,365 @@
+/*
+ * Generic DPI Panels support
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * LCD panel driver for Sharp LQ043T1DG01
+ *
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Author: Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * LCD panel driver for Toppoly TDO35S
+ *
+ * Copyright (C) 2009 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <plat/panel-generic-dpi.h>
+
+struct panel_config {
+ struct omap_video_timings timings;
+
+ int acbi; /* ac-bias pin transitions per interrupt */
+ /* Unit: line clocks */
+ int acb; /* ac-bias pin frequency */
+
+ enum omap_panel_config config;
+
+ int power_on_delay;
+ int power_off_delay;
+
+ /*
+ * Used to match device to panel configuration
+ * when use generic panel driver
+ */
+ const char *name;
+};
+
+/* Panel configurations */
+static struct panel_config generic_dpi_panels[] = {
+ /* Generic Panel */
+ {
+ {
+ .x_res = 640,
+ .y_res = 480,
+
+ .pixel_clock = 23500,
+
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "generic",
+ },
+
+ /* Sharp LQ043T1DG01 */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 9000,
+
+ .hsw = 42,
+ .hfp = 3,
+ .hbp = 2,
+
+ .vsw = 11,
+ .vfp = 3,
+ .vbp = 2,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
+ .power_on_delay = 50,
+ .power_off_delay = 100,
+ .name = "sharp_lq",
+ },
+
+ /* Sharp LS037V7DW01 */
+ {
+ {
+ .x_res = 480,
+ .y_res = 640,
+
+ .pixel_clock = 19200,
+
+ .hsw = 2,
+ .hfp = 1,
+ .hbp = 28,
+
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+ },
+ .acbi = 0x0,
+ .acb = 0x28,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .power_on_delay = 50,
+ .power_off_delay = 100,
+ .name = "sharp_ls",
+ },
+
+ /* Toppoly TDO35S */
+ {
+ {
+ .x_res = 480,
+ .y_res = 640,
+
+ .pixel_clock = 26000,
+
+ .hfp = 104,
+ .hsw = 8,
+ .hbp = 8,
+
+ .vfp = 4,
+ .vsw = 2,
+ .vbp = 2,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC |
+ OMAP_DSS_LCD_ONOFF,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "toppoly_tdo35s",
+ },
+};
+
+struct panel_drv_data {
+
+ struct omap_dss_device *dssdev;
+
+ struct panel_config *panel_config;
+};
+
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+ return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
+static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_config *panel_config = drv_data->panel_config;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ /* wait couple of vsyncs until enabling the LCD */
+ if (panel_config->power_on_delay)
+ msleep(panel_config->power_on_delay);
+
+ if (panel_data->platform_enable) {
+ r = panel_data->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+ struct panel_config *panel_config = drv_data->panel_config;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
+ if (panel_data->platform_disable)
+ panel_data->platform_disable(dssdev);
+
+ /* wait couple of vsyncs after disabling the LCD */
+ if (panel_config->power_off_delay)
+ msleep(panel_config->power_off_delay);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+ struct panel_config *panel_config = NULL;
+ struct panel_drv_data *drv_data = NULL;
+ int i;
+
+ dev_dbg(&dssdev->dev, "probe\n");
+
+ if (!panel_data || !panel_data->name)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(generic_dpi_panels); i++) {
+ if (strcmp(panel_data->name, generic_dpi_panels[i].name) == 0) {
+ panel_config = &generic_dpi_panels[i];
+ break;
+ }
+ }
+
+ if (!panel_config)
+ return -EINVAL;
+
+ dssdev->panel.config = panel_config->config;
+ dssdev->panel.timings = panel_config->timings;
+ dssdev->panel.acb = panel_config->acb;
+ dssdev->panel.acbi = panel_config->acbi;
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ drv_data->dssdev = dssdev;
+ drv_data->panel_config = panel_config;
+
+ dev_set_drvdata(&dssdev->dev, drv_data);
+
+ return 0;
+}
+
+static void generic_dpi_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+
+ dev_dbg(&dssdev->dev, "remove\n");
+
+ kfree(drv_data);
+
+ dev_set_drvdata(&dssdev->dev, NULL);
+}
+
+static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = generic_dpi_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
+{
+ generic_dpi_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
+{
+ generic_dpi_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ return 0;
+}
+
+static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = generic_dpi_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ dpi_set_timings(dssdev, timings);
+}
+
+static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return dpi_check_timings(dssdev, timings);
+}
+
+static struct omap_dss_driver dpi_driver = {
+ .probe = generic_dpi_panel_probe,
+ .remove = generic_dpi_panel_remove,
+
+ .enable = generic_dpi_panel_enable,
+ .disable = generic_dpi_panel_disable,
+ .suspend = generic_dpi_panel_suspend,
+ .resume = generic_dpi_panel_resume,
+
+ .set_timings = generic_dpi_panel_set_timings,
+ .get_timings = generic_dpi_panel_get_timings,
+ .check_timings = generic_dpi_panel_check_timings,
+
+ .driver = {
+ .name = "generic_dpi_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init generic_dpi_panel_drv_init(void)
+{
+ return omap_dss_register_driver(&dpi_driver);
+}
+
+static void __exit generic_dpi_panel_drv_exit(void)
+{
+ omap_dss_unregister_driver(&dpi_driver);
+}
+
+module_init(generic_dpi_panel_drv_init);
+module_exit(generic_dpi_panel_drv_exit);
+MODULE_LICENSE("GPL");
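For context, board code picks one of the generic_dpi_panels[] entries by name through the platform data that the driver reads via get_panel_data(). A hedged board-side sketch: the layout of struct panel_generic_dpi_data is inferred from how this driver dereferences it (name, platform_enable, platform_disable), and the two board hooks are hypothetical:

#include <plat/display.h>
#include <plat/panel-generic-dpi.h>

/* Hypothetical board hooks; real boards toggle their own enable GPIOs. */
static int board_lcd_enable(struct omap_dss_device *dssdev)
{
	return 0;
}

static void board_lcd_disable(struct omap_dss_device *dssdev)
{
}

static struct panel_generic_dpi_data board_lcd_data = {
	.name			= "sharp_ls",	/* selects the Sharp LS037V7DW01 timings */
	.platform_enable	= board_lcd_enable,
	.platform_disable	= board_lcd_disable,
};

static struct omap_dss_device board_lcd_device = {
	.name			= "lcd",
	.driver_name		= "generic_dpi_panel",	/* matches dpi_driver.driver.name */
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.phy.dpi.data_lines	= 24,
	.data			= &board_lcd_data,
};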
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
deleted file mode 100644
index 395a68de3990..000000000000
--- a/drivers/video/omap2/displays/panel-generic.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Generic panel support
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings generic_panel_timings = {
- /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
- .x_res = 640,
- .y_res = 480,
- .pixel_clock = 23500,
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
-};
-
-static int generic_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
-
- return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
-err0:
- return r;
-}
-
-static void generic_panel_power_off(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int generic_panel_probe(struct omap_dss_device *dssdev)
-{
- dssdev->panel.config = OMAP_DSS_LCD_TFT;
- dssdev->panel.timings = generic_panel_timings;
-
- return 0;
-}
-
-static void generic_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int generic_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = generic_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void generic_panel_disable(struct omap_dss_device *dssdev)
-{
- generic_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int generic_panel_suspend(struct omap_dss_device *dssdev)
-{
- generic_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int generic_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = generic_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void generic_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- dpi_set_timings(dssdev, timings);
-}
-
-static void generic_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
-static int generic_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- return dpi_check_timings(dssdev, timings);
-}
-
-static struct omap_dss_driver generic_driver = {
- .probe = generic_panel_probe,
- .remove = generic_panel_remove,
-
- .enable = generic_panel_enable,
- .disable = generic_panel_disable,
- .suspend = generic_panel_suspend,
- .resume = generic_panel_resume,
-
- .set_timings = generic_panel_set_timings,
- .get_timings = generic_panel_get_timings,
- .check_timings = generic_panel_check_timings,
-
- .driver = {
- .name = "generic_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init generic_panel_drv_init(void)
-{
- return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit generic_panel_drv_exit(void)
-{
- omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(generic_panel_drv_init);
-module_exit(generic_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
new file mode 100644
index 000000000000..925e0fadff54
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -0,0 +1,325 @@
+/*
+ * Support for NEC-nl8048hl11-01b panel driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define LCD_XRES 800
+#define LCD_YRES 480
+/*
+ * NEC PIX Clock Ratings
+ * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
+ */
+#define LCD_PIXEL_CLOCK 23800
+
+struct nec_8048_data {
+ struct backlight_device *bl;
+};
+
+static const struct {
+ unsigned char addr;
+ unsigned char dat;
+} nec_8048_init_seq[] = {
+ { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
+ { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
+ { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
+ { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
+ { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
+ { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
+ { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
+ { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+ { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
+ { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
+ { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
+ { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
+ { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
+ { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
+ { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
+ { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
+ { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
+ { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
+};
+
+/*
+ * NEC NL8048HL11-01B Manual
+ * defines HFP, HSW, HBP, VFP, VSW, VBP as shown below
+ */
+
+static struct omap_video_timings nec_8048_panel_timings = {
+	/* 800 x 480 @ 60 Hz */
+ .x_res = LCD_XRES,
+ .y_res = LCD_YRES,
+ .pixel_clock = LCD_PIXEL_CLOCK,
+ .hfp = 6,
+ .hsw = 1,
+ .hbp = 4,
+ .vfp = 3,
+ .vsw = 1,
+ .vbp = 4,
+};
+
+static int nec_8048_bl_update_status(struct backlight_device *bl)
+{
+ struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+ int level;
+
+ if (!dssdev->set_backlight)
+ return -EINVAL;
+
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ level = bl->props.brightness;
+ else
+ level = 0;
+
+ return dssdev->set_backlight(dssdev, level);
+}
+
+static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+{
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ return bl->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops nec_8048_bl_ops = {
+ .get_brightness = nec_8048_bl_get_brightness,
+ .update_status = nec_8048_bl_update_status,
+};
+
+static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct backlight_device *bl;
+ struct nec_8048_data *necd;
+ struct backlight_properties props;
+ int r;
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF |
+ OMAP_DSS_LCD_ONOFF;
+ dssdev->panel.timings = nec_8048_panel_timings;
+
+ necd = kzalloc(sizeof(*necd), GFP_KERNEL);
+ if (!necd)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dssdev->dev, necd);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 255;
+
+ bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
+ &nec_8048_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ r = PTR_ERR(bl);
+ kfree(necd);
+ return r;
+ }
+ necd->bl = bl;
+
+ bl->props.fb_blank = FB_BLANK_UNBLANK;
+ bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.max_brightness = dssdev->max_backlight_level;
+ bl->props.brightness = dssdev->max_backlight_level;
+
+ r = nec_8048_bl_update_status(bl);
+ if (r < 0)
+ dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+ return 0;
+}
+
+static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bl = necd->bl;
+
+ bl->props.power = FB_BLANK_POWERDOWN;
+ nec_8048_bl_update_status(bl);
+ backlight_device_unregister(bl);
+
+ kfree(necd);
+}
+
+static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+ struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bl = necd->bl;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ r = nec_8048_bl_update_status(bl);
+ if (r < 0)
+ dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+ r = omapdss_dpi_display_enable(dssdev);
+
+ return r;
+}
+
+static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bl = necd->bl;
+
+ omapdss_dpi_display_disable(dssdev);
+
+ bl->props.brightness = 0;
+ nec_8048_bl_update_status(bl);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+}
+
+static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
+{
+ nec_8048_panel_disable(dssdev);
+ return 0;
+}
+
+static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
+{
+ return nec_8048_panel_enable(dssdev);
+}
+
+static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
+{
+ return 16;
+}
+
+static struct omap_dss_driver nec_8048_driver = {
+ .probe = nec_8048_panel_probe,
+ .remove = nec_8048_panel_remove,
+ .enable = nec_8048_panel_enable,
+ .disable = nec_8048_panel_disable,
+ .suspend = nec_8048_panel_suspend,
+ .resume = nec_8048_panel_resume,
+ .get_recommended_bpp = nec_8048_recommended_bpp,
+
+ .driver = {
+ .name = "NEC_8048_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
+ unsigned char reg_data)
+{
+ int ret = 0;
+ unsigned int cmd = 0, data = 0;
+
+ cmd = 0x0000 | reg_addr; /* register address write */
+	data = 0x0100 | reg_data;	/* register data write */
+ data = (cmd << 16) | data;
+
+ ret = spi_write(spi, (unsigned char *)&data, 4);
+ if (ret)
+ pr_err("error in spi_write %x\n", data);
+
+ return ret;
+}
+
+static int init_nec_8048_wvga_lcd(struct spi_device *spi)
+{
+ unsigned int i;
+ /* Initialization Sequence */
+ /* nec_8048_spi_send(spi, REG, VAL) */
+ for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
+ nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+ nec_8048_init_seq[i].dat);
+ udelay(20);
+ nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+ nec_8048_init_seq[i].dat);
+ return 0;
+}
+
+static int nec_8048_spi_probe(struct spi_device *spi)
+{
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 32;
+ spi_setup(spi);
+
+ init_nec_8048_wvga_lcd(spi);
+
+ return omap_dss_register_driver(&nec_8048_driver);
+}
+
+static int nec_8048_spi_remove(struct spi_device *spi)
+{
+ omap_dss_unregister_driver(&nec_8048_driver);
+
+ return 0;
+}
+
+static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ nec_8048_spi_send(spi, 2, 0x01);
+ mdelay(40);
+
+ return 0;
+}
+
+static int nec_8048_spi_resume(struct spi_device *spi)
+{
+ /* reinitialize the panel */
+ spi_setup(spi);
+ nec_8048_spi_send(spi, 2, 0x00);
+ init_nec_8048_wvga_lcd(spi);
+
+ return 0;
+}
+
+static struct spi_driver nec_8048_spi_driver = {
+ .probe = nec_8048_spi_probe,
+ .remove = __devexit_p(nec_8048_spi_remove),
+ .suspend = nec_8048_spi_suspend,
+ .resume = nec_8048_spi_resume,
+ .driver = {
+ .name = "nec_8048_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nec_8048_lcd_init(void)
+{
+ return spi_register_driver(&nec_8048_spi_driver);
+}
+
+static void __exit nec_8048_lcd_exit(void)
+{
+ return spi_unregister_driver(&nec_8048_spi_driver);
+}
+
+module_init(nec_8048_lcd_init);
+module_exit(nec_8048_lcd_exit);
+MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
+MODULE_DESCRIPTION("NEC-nl8048hl11-01b Driver");
+MODULE_LICENSE("GPL");
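The SPI path above packs each register write into a single 32-bit word (spi->bits_per_word is set to 32): the register address sits in the upper half and the data byte is OR'd with the 0x0100 "register data write" command in the lower half, as the comments in nec_8048_spi_send() describe. A small sketch of the packing, worked for { 3, 0x01 }, the first entry of nec_8048_init_seq[]:

/* Same arithmetic as nec_8048_spi_send(spi, 3, 0x01). */
static unsigned int nec_8048_pack(unsigned char reg_addr, unsigned char reg_data)
{
	unsigned int cmd  = 0x0000 | reg_addr;	/* register address write */
	unsigned int data = 0x0100 | reg_data;	/* register data write    */

	return (cmd << 16) | data;		/* { 3, 0x01 } -> 0x00030101 */
}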
diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
deleted file mode 100644
index 0c6896cea2d0..000000000000
--- a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * LCD panel driver for Sharp LQ043T1DG01
- *
- * Copyright (C) 2009 Texas Instruments Inc
- * Author: Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings sharp_lq_timings = {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 9000,
-
- .hsw = 42,
- .hfp = 3,
- .hbp = 2,
-
- .vsw = 11,
- .vfp = 3,
- .vbp = 2,
-};
-
-static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- /* wait couple of vsyncs until enabling the LCD */
- msleep(50);
-
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
-
- return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
-err0:
- return r;
-}
-
-static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
- /* wait at least 5 vsyncs after disabling the LCD */
- msleep(100);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int sharp_lq_panel_probe(struct omap_dss_device *dssdev)
-{
-
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO;
- dssdev->panel.acb = 0x0;
- dssdev->panel.timings = sharp_lq_timings;
-
- return 0;
-}
-
-static void sharp_lq_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int sharp_lq_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = sharp_lq_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void sharp_lq_panel_disable(struct omap_dss_device *dssdev)
-{
- sharp_lq_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int sharp_lq_panel_suspend(struct omap_dss_device *dssdev)
-{
- sharp_lq_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int sharp_lq_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = sharp_lq_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static struct omap_dss_driver sharp_lq_driver = {
- .probe = sharp_lq_panel_probe,
- .remove = sharp_lq_panel_remove,
-
- .enable = sharp_lq_panel_enable,
- .disable = sharp_lq_panel_disable,
- .suspend = sharp_lq_panel_suspend,
- .resume = sharp_lq_panel_resume,
-
- .driver = {
- .name = "sharp_lq_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init sharp_lq_panel_drv_init(void)
-{
- return omap_dss_register_driver(&sharp_lq_driver);
-}
-
-static void __exit sharp_lq_panel_drv_exit(void)
-{
- omap_dss_unregister_driver(&sharp_lq_driver);
-}
-
-module_init(sharp_lq_panel_drv_init);
-module_exit(sharp_lq_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index e1c765d11419..61026f96ad20 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -465,7 +465,7 @@ static int taal_bl_get_intensity(struct backlight_device *dev)
return 0;
}
-static struct backlight_ops taal_bl_ops = {
+static const struct backlight_ops taal_bl_ops = {
.get_brightness = taal_bl_get_intensity,
.update_status = taal_bl_update_status,
};
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
deleted file mode 100644
index 526e906c8a6c..000000000000
--- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * LCD panel driver for Toppoly TDO35S
- *
- * Copyright (C) 2009 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * Based on generic panel support
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings toppoly_tdo_panel_timings = {
- /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
- .x_res = 480,
- .y_res = 640,
-
- .pixel_clock = 26000,
-
- .hfp = 104,
- .hsw = 8,
- .hbp = 8,
-
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-};
-
-static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err1;
- }
-
- return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
-err0:
- return r;
-}
-
-static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev)
-{
- dssdev->panel.config = OMAP_DSS_LCD_TFT |
- OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS |
- OMAP_DSS_LCD_IPC |
- OMAP_DSS_LCD_ONOFF;
-
- dssdev->panel.timings = toppoly_tdo_panel_timings;
-
- return 0;
-}
-
-static void toppoly_tdo_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int toppoly_tdo_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = toppoly_tdo_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void toppoly_tdo_panel_disable(struct omap_dss_device *dssdev)
-{
- toppoly_tdo_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int toppoly_tdo_panel_suspend(struct omap_dss_device *dssdev)
-{
- toppoly_tdo_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int toppoly_tdo_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- r = toppoly_tdo_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static struct omap_dss_driver generic_driver = {
- .probe = toppoly_tdo_panel_probe,
- .remove = toppoly_tdo_panel_remove,
-
- .enable = toppoly_tdo_panel_enable,
- .disable = toppoly_tdo_panel_disable,
- .suspend = toppoly_tdo_panel_suspend,
- .resume = toppoly_tdo_panel_resume,
-
- .driver = {
- .name = "toppoly_tdo35s_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init toppoly_tdo_panel_drv_init(void)
-{
- return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit toppoly_tdo_panel_drv_exit(void)
-{
- omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(toppoly_tdo_panel_drv_init);
-module_exit(toppoly_tdo_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index fa40fa59a9ac..9f8c69f16e61 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -44,34 +44,40 @@
/* DISPC */
#define DISPC_BASE 0x48050400
-#define DISPC_SZ_REGS SZ_1K
+#define DISPC_SZ_REGS SZ_4K
struct dispc_reg { u16 idx; };
#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
-/* DISPC common */
+/*
+ * DISPC common registers and
+ * DISPC channel registers, ch = 0 for LCD, ch = 1 for
+ * DIGIT, and ch = 2 for LCD2
+ */
#define DISPC_REVISION DISPC_REG(0x0000)
#define DISPC_SYSCONFIG DISPC_REG(0x0010)
#define DISPC_SYSSTATUS DISPC_REG(0x0014)
#define DISPC_IRQSTATUS DISPC_REG(0x0018)
#define DISPC_IRQENABLE DISPC_REG(0x001C)
#define DISPC_CONTROL DISPC_REG(0x0040)
+#define DISPC_CONTROL2 DISPC_REG(0x0238)
#define DISPC_CONFIG DISPC_REG(0x0044)
+#define DISPC_CONFIG2 DISPC_REG(0x0620)
#define DISPC_CAPABLE DISPC_REG(0x0048)
-#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C)
-#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050)
-#define DISPC_TRANS_COLOR0 DISPC_REG(0x0054)
-#define DISPC_TRANS_COLOR1 DISPC_REG(0x0058)
+#define DISPC_DEFAULT_COLOR(ch) DISPC_REG(ch == 0 ? 0x004C : \
+ (ch == 1 ? 0x0050 : 0x03AC))
+#define DISPC_TRANS_COLOR(ch) DISPC_REG(ch == 0 ? 0x0054 : \
+ (ch == 1 ? 0x0058 : 0x03B0))
#define DISPC_LINE_STATUS DISPC_REG(0x005C)
#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
-#define DISPC_TIMING_H DISPC_REG(0x0064)
-#define DISPC_TIMING_V DISPC_REG(0x0068)
-#define DISPC_POL_FREQ DISPC_REG(0x006C)
-#define DISPC_DIVISOR DISPC_REG(0x0070)
+#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
+#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
+#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408)
+#define DISPC_DIVISOR(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
#define DISPC_SIZE_DIG DISPC_REG(0x0078)
-#define DISPC_SIZE_LCD DISPC_REG(0x007C)
+#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
/* DISPC GFX plane */
#define DISPC_GFX_BA0 DISPC_REG(0x0080)
@@ -86,13 +92,12 @@ struct dispc_reg { u16 idx; };
#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
-#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
-#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
-#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
-
-#define DISPC_CPR_COEF_R DISPC_REG(0x0220)
-#define DISPC_CPR_COEF_G DISPC_REG(0x0224)
-#define DISPC_CPR_COEF_B DISPC_REG(0x0228)
+#define DISPC_DATA_CYCLE1(ch) DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0)
+#define DISPC_DATA_CYCLE2(ch) DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4)
+#define DISPC_DATA_CYCLE3(ch) DISPC_REG(ch != 2 ? 0x01DC : 0x03C8)
+#define DISPC_CPR_COEF_R(ch) DISPC_REG(ch != 2 ? 0x0220 : 0x03BC)
+#define DISPC_CPR_COEF_G(ch) DISPC_REG(ch != 2 ? 0x0224 : 0x03B8)
+#define DISPC_CPR_COEF_B(ch) DISPC_REG(ch != 2 ? 0x0228 : 0x03B4)
#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
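The channel-parameterised macros above keep the original LCD (ch = 0) and DIGIT (ch = 1) offsets and fold in the new LCD2 (ch = 2) register block; a stand-alone sketch of the selection they encode, with offsets relative to DISPC_BASE:

/* Mirrors DISPC_DEFAULT_COLOR(ch) and DISPC_TIMING_H(ch) above. */
static inline unsigned short dispc_default_color_idx(int ch)
{
	return ch == 0 ? 0x004C : (ch == 1 ? 0x0050 : 0x03AC);
}

static inline unsigned short dispc_timing_h_idx(int ch)
{
	return ch != 2 ? 0x0064 : 0x0400;	/* the LCD2 timing registers start at 0x0400 */
}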
@@ -217,18 +222,29 @@ void dispc_save_context(void)
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
- SR(DEFAULT_COLOR0);
- SR(DEFAULT_COLOR1);
- SR(TRANS_COLOR0);
- SR(TRANS_COLOR1);
+ SR(DEFAULT_COLOR(0));
+ SR(DEFAULT_COLOR(1));
+ SR(TRANS_COLOR(0));
+ SR(TRANS_COLOR(1));
SR(LINE_NUMBER);
- SR(TIMING_H);
- SR(TIMING_V);
- SR(POL_FREQ);
- SR(DIVISOR);
+ SR(TIMING_H(0));
+ SR(TIMING_V(0));
+ SR(POL_FREQ(0));
+ SR(DIVISOR(0));
SR(GLOBAL_ALPHA);
SR(SIZE_DIG);
- SR(SIZE_LCD);
+ SR(SIZE_LCD(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ SR(CONTROL2);
+ SR(DEFAULT_COLOR(2));
+ SR(TRANS_COLOR(2));
+ SR(SIZE_LCD(2));
+ SR(TIMING_H(2));
+ SR(TIMING_V(2));
+ SR(POL_FREQ(2));
+ SR(DIVISOR(2));
+ SR(CONFIG2);
+ }
SR(GFX_BA0);
SR(GFX_BA1);
@@ -241,13 +257,22 @@ void dispc_save_context(void)
SR(GFX_WINDOW_SKIP);
SR(GFX_TABLE_BA);
- SR(DATA_CYCLE1);
- SR(DATA_CYCLE2);
- SR(DATA_CYCLE3);
-
- SR(CPR_COEF_R);
- SR(CPR_COEF_G);
- SR(CPR_COEF_B);
+ SR(DATA_CYCLE1(0));
+ SR(DATA_CYCLE2(0));
+ SR(DATA_CYCLE3(0));
+
+ SR(CPR_COEF_R(0));
+ SR(CPR_COEF_G(0));
+ SR(CPR_COEF_B(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ SR(CPR_COEF_B(2));
+ SR(CPR_COEF_G(2));
+ SR(CPR_COEF_R(2));
+
+ SR(DATA_CYCLE1(2));
+ SR(DATA_CYCLE2(2));
+ SR(DATA_CYCLE3(2));
+ }
SR(GFX_PRELOAD);
@@ -356,18 +381,28 @@ void dispc_restore_context(void)
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
- RR(DEFAULT_COLOR0);
- RR(DEFAULT_COLOR1);
- RR(TRANS_COLOR0);
- RR(TRANS_COLOR1);
+ RR(DEFAULT_COLOR(0));
+ RR(DEFAULT_COLOR(1));
+ RR(TRANS_COLOR(0));
+ RR(TRANS_COLOR(1));
RR(LINE_NUMBER);
- RR(TIMING_H);
- RR(TIMING_V);
- RR(POL_FREQ);
- RR(DIVISOR);
+ RR(TIMING_H(0));
+ RR(TIMING_V(0));
+ RR(POL_FREQ(0));
+ RR(DIVISOR(0));
RR(GLOBAL_ALPHA);
RR(SIZE_DIG);
- RR(SIZE_LCD);
+ RR(SIZE_LCD(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ RR(DEFAULT_COLOR(2));
+ RR(TRANS_COLOR(2));
+ RR(SIZE_LCD(2));
+ RR(TIMING_H(2));
+ RR(TIMING_V(2));
+ RR(POL_FREQ(2));
+ RR(DIVISOR(2));
+ RR(CONFIG2);
+ }
RR(GFX_BA0);
RR(GFX_BA1);
@@ -380,13 +415,22 @@ void dispc_restore_context(void)
RR(GFX_WINDOW_SKIP);
RR(GFX_TABLE_BA);
- RR(DATA_CYCLE1);
- RR(DATA_CYCLE2);
- RR(DATA_CYCLE3);
-
- RR(CPR_COEF_R);
- RR(CPR_COEF_G);
- RR(CPR_COEF_B);
+ RR(DATA_CYCLE1(0));
+ RR(DATA_CYCLE2(0));
+ RR(DATA_CYCLE3(0));
+
+ RR(CPR_COEF_R(0));
+ RR(CPR_COEF_G(0));
+ RR(CPR_COEF_B(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ RR(DATA_CYCLE1(2));
+ RR(DATA_CYCLE2(2));
+ RR(DATA_CYCLE3(2));
+
+ RR(CPR_COEF_B(2));
+ RR(CPR_COEF_G(2));
+ RR(CPR_COEF_R(2));
+ }
RR(GFX_PRELOAD);
@@ -490,7 +534,8 @@ void dispc_restore_context(void)
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
-
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ RR(CONTROL2);
/* clear spurious SYNC_LOST_DIGIT interrupts */
dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
@@ -516,42 +561,63 @@ bool dispc_go_busy(enum omap_channel channel)
{
int bit;
- if (channel == OMAP_DSS_CHANNEL_LCD)
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2)
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
- return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ return REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+ else
+ return REG_GET(DISPC_CONTROL, bit, bit) == 1;
}
void dispc_go(enum omap_channel channel)
{
int bit;
+ bool enable_bit, go_bit;
enable_clocks(1);
- if (channel == OMAP_DSS_CHANNEL_LCD)
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2)
bit = 0; /* LCDENABLE */
else
bit = 1; /* DIGITALENABLE */
/* if the channel is not enabled, we don't need GO */
- if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+ else
+ enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+ if (!enable_bit)
goto end;
- if (channel == OMAP_DSS_CHANNEL_LCD)
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2)
bit = 5; /* GOLCD */
else
bit = 6; /* GODIGIT */
- if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+ else
+ go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+ if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
goto end;
}
- DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
+ DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
+ (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT"));
- REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
+ else
+ REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
end:
enable_clocks(0);
}
@@ -773,13 +839,26 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
dispc_write_reg(vsi_reg[plane-1], val);
}
+static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
+{
+ if (!dss_has_feature(FEAT_PRE_MULT_ALPHA))
+ return;
+
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ plane == OMAP_DSS_VIDEO1)
+ return;
+
+ REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28);
+}
+
static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
{
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
- BUG_ON(!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- plane == OMAP_DSS_VIDEO1);
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ plane == OMAP_DSS_VIDEO1)
+ return;
if (plane == OMAP_DSS_GFX)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
@@ -851,6 +930,7 @@ static void _dispc_set_channel_out(enum omap_plane plane,
{
int shift;
u32 val;
+ int chan = 0, chan2 = 0;
switch (plane) {
case OMAP_DSS_GFX:
@@ -866,7 +946,29 @@ static void _dispc_set_channel_out(enum omap_plane plane,
}
val = dispc_read_reg(dispc_reg_att[plane]);
- val = FLD_MOD(val, channel, shift, shift);
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ chan = 0;
+ chan2 = 0;
+ break;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ chan = 1;
+ chan2 = 0;
+ break;
+ case OMAP_DSS_CHANNEL_LCD2:
+ chan = 0;
+ chan2 = 1;
+ break;
+ default:
+ BUG();
+ }
+
+ val = FLD_MOD(val, chan, shift, shift);
+ val = FLD_MOD(val, chan2, 31, 30);
+ } else {
+ val = FLD_MOD(val, channel, shift, shift);
+ }
dispc_write_reg(dispc_reg_att[plane], val);
}
@@ -923,13 +1025,13 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
enable_clocks(0);
}
-void dispc_set_lcd_size(u16 width, u16 height)
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
{
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
enable_clocks(1);
- dispc_write_reg(DISPC_SIZE_LCD, val);
+ dispc_write_reg(DISPC_SIZE_LCD(channel), val);
enable_clocks(0);
}
@@ -1426,12 +1528,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
}
}
-static unsigned long calc_fclk_five_taps(u16 width, u16 height,
- u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+ u16 height, u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode)
{
u32 fclk = 0;
/* FIXME venc pclk? */
- u64 tmp, pclk = dispc_pclk_rate();
+ u64 tmp, pclk = dispc_pclk_rate(channel);
if (height > out_height) {
/* FIXME get real display PPL */
@@ -1463,8 +1566,8 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height,
return fclk;
}
-static unsigned long calc_fclk(u16 width, u16 height,
- u16 out_width, u16 out_height)
+static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+ u16 height, u16 out_width, u16 out_height)
{
unsigned int hf, vf;
@@ -1488,7 +1591,7 @@ static unsigned long calc_fclk(u16 width, u16 height,
vf = 1;
/* FIXME venc pclk? */
- return dispc_pclk_rate() * vf * hf;
+ return dispc_pclk_rate(channel) * vf * hf;
}
void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
@@ -1507,7 +1610,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
bool ilace,
enum omap_dss_rotation_type rotation_type,
u8 rotation, int mirror,
- u8 global_alpha)
+ u8 global_alpha, u8 pre_mult_alpha,
+ enum omap_channel channel)
{
const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
bool five_taps = 0;
@@ -1536,29 +1640,12 @@ static int _dispc_setup_plane(enum omap_plane plane,
height, pos_y, out_height);
}
+ if (!dss_feat_color_mode_supported(plane, color_mode))
+ return -EINVAL;
+
if (plane == OMAP_DSS_GFX) {
if (width != out_width || height != out_height)
return -EINVAL;
-
- switch (color_mode) {
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_ARGB32:
- case OMAP_DSS_COLOR_RGBA32:
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
- return -EINVAL;
- case OMAP_DSS_COLOR_RGBX32:
- if (cpu_is_omap24xx())
- return -EINVAL;
- /* fall through */
- case OMAP_DSS_COLOR_RGB12U:
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_RGB24P:
- case OMAP_DSS_COLOR_RGB24U:
- break;
-
- default:
- return -EINVAL;
- }
} else {
/* video plane */
@@ -1572,42 +1659,16 @@ static int _dispc_setup_plane(enum omap_plane plane,
out_height > height * 8)
return -EINVAL;
- switch (color_mode) {
- case OMAP_DSS_COLOR_RGBX32:
- case OMAP_DSS_COLOR_RGB12U:
- if (cpu_is_omap24xx())
- return -EINVAL;
- /* fall through */
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_RGB24P:
- case OMAP_DSS_COLOR_RGB24U:
- break;
-
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_ARGB32:
- case OMAP_DSS_COLOR_RGBA32:
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
- return -EINVAL;
- if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
- plane == OMAP_DSS_VIDEO1)
- return -EINVAL;
- break;
-
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
cconv = 1;
- break;
-
- default:
- return -EINVAL;
- }
/* Must use 5-tap filter? */
five_taps = height > out_height * 2;
if (!five_taps) {
- fclk = calc_fclk(width, height,
- out_width, out_height);
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
/* Try 5-tap filter if 3-tap fclk is too high */
if (cpu_is_omap34xx() && height > out_height &&
@@ -1621,7 +1682,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
}
if (five_taps)
- fclk = calc_fclk_five_taps(width, height,
+ fclk = calc_fclk_five_taps(channel, width, height,
out_width, out_height, color_mode);
DSSDBG("required fclk rate = %lu Hz\n", fclk);
@@ -1693,8 +1754,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
_dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
- if (plane != OMAP_DSS_VIDEO1)
- _dispc_setup_global_alpha(plane, global_alpha);
+ _dispc_set_pre_mult_alpha(plane, pre_mult_alpha);
+ _dispc_setup_global_alpha(plane, global_alpha);
return 0;
}
@@ -1710,36 +1771,44 @@ static void dispc_disable_isr(void *data, u32 mask)
complete(compl);
}
-static void _enable_lcd_out(bool enable)
+static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
+ else
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
}
-static void dispc_enable_lcd_out(bool enable)
+static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
{
struct completion frame_done_completion;
bool is_on;
int r;
+ u32 irq;
enable_clocks(1);
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
- is_on = REG_GET(DISPC_CONTROL, 0, 0);
+ is_on = channel == OMAP_DSS_CHANNEL_LCD2 ?
+ REG_GET(DISPC_CONTROL2, 0, 0) :
+ REG_GET(DISPC_CONTROL, 0, 0);
+
+ irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
+ DISPC_IRQ_FRAMEDONE;
if (!enable && is_on) {
init_completion(&frame_done_completion);
r = omap_dispc_register_isr(dispc_disable_isr,
- &frame_done_completion,
- DISPC_IRQ_FRAMEDONE);
+ &frame_done_completion, irq);
if (r)
DSSERR("failed to register FRAMEDONE isr\n");
}
- _enable_lcd_out(enable);
+ _enable_lcd_out(channel, enable);
if (!enable && is_on) {
if (!wait_for_completion_timeout(&frame_done_completion,
@@ -1747,8 +1816,7 @@ static void dispc_enable_lcd_out(bool enable)
DSSERR("timeout waiting for FRAME DONE\n");
r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion,
- DISPC_IRQ_FRAMEDONE);
+ &frame_done_completion, irq);
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
@@ -1818,6 +1886,8 @@ static void dispc_enable_digit_out(bool enable)
unsigned long flags;
spin_lock_irqsave(&dispc.irq_lock, flags);
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
@@ -1832,14 +1902,17 @@ bool dispc_is_channel_enabled(enum omap_channel channel)
return !!REG_GET(DISPC_CONTROL, 0, 0);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
return !!REG_GET(DISPC_CONTROL, 1, 1);
+ else if (channel == OMAP_DSS_CHANNEL_LCD2)
+ return !!REG_GET(DISPC_CONTROL2, 0, 0);
else
BUG();
}
void dispc_enable_channel(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD)
- dispc_enable_lcd_out(enable);
+ if (channel == OMAP_DSS_CHANNEL_LCD ||
+ channel == OMAP_DSS_CHANNEL_LCD2)
+ dispc_enable_lcd_out(channel, enable);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
dispc_enable_digit_out(enable);
else
@@ -1848,6 +1921,9 @@ void dispc_enable_channel(enum omap_channel channel, bool enable)
void dispc_lcd_enable_signal_polarity(bool act_high)
{
+ if (!dss_has_feature(FEAT_LCDENABLEPOL))
+ return;
+
enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
enable_clocks(0);
@@ -1855,6 +1931,9 @@ void dispc_lcd_enable_signal_polarity(bool act_high)
void dispc_lcd_enable_signal(bool enable)
{
+ if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
+ return;
+
enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
enable_clocks(0);
@@ -1862,20 +1941,27 @@ void dispc_lcd_enable_signal(bool enable)
void dispc_pck_free_enable(bool enable)
{
+ if (!dss_has_feature(FEAT_PCKFREEENABLE))
+ return;
+
enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
enable_clocks(0);
}
-void dispc_enable_fifohandcheck(bool enable)
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
enable_clocks(1);
- REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
+ else
+ REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
enable_clocks(0);
}
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+void dispc_set_lcd_display_type(enum omap_channel channel,
+ enum omap_lcd_display_type type)
{
int mode;
@@ -1894,7 +1980,10 @@ void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
}
enable_clocks(1);
- REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
+ else
+ REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
enable_clocks(0);
}
@@ -1908,25 +1997,21 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode)
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
- const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
- DISPC_DEFAULT_COLOR1 };
-
enable_clocks(1);
- dispc_write_reg(def_reg[channel], color);
+ dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
enable_clocks(0);
}
u32 dispc_get_default_color(enum omap_channel channel)
{
- const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
- DISPC_DEFAULT_COLOR1 };
u32 l;
BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
- channel != OMAP_DSS_CHANNEL_LCD);
+ channel != OMAP_DSS_CHANNEL_LCD &&
+ channel != OMAP_DSS_CHANNEL_LCD2);
enable_clocks(1);
- l = dispc_read_reg(def_reg[channel]);
+ l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
enable_clocks(0);
return l;
@@ -1936,16 +2021,15 @@ void dispc_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- const struct dispc_reg tr_reg[] = {
- DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
- else /* OMAP_DSS_CHANNEL_DIGIT */
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
+ else /* OMAP_DSS_CHANNEL_LCD2 */
+ REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
- dispc_write_reg(tr_reg[ch], trans_key);
+ dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
enable_clocks(0);
}
@@ -1953,21 +2037,20 @@ void dispc_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
- const struct dispc_reg tr_reg[] = {
- DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
enable_clocks(1);
if (type) {
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
*type = REG_GET(DISPC_CONFIG, 13, 13);
+ else if (ch == OMAP_DSS_CHANNEL_LCD2)
+ *type = REG_GET(DISPC_CONFIG2, 11, 11);
else
BUG();
}
if (trans_key)
- *trans_key = dispc_read_reg(tr_reg[ch]);
+ *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
enable_clocks(0);
}
@@ -1976,8 +2059,10 @@ void dispc_enable_trans_key(enum omap_channel ch, bool enable)
enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
- else /* OMAP_DSS_CHANNEL_DIGIT */
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
+ else /* OMAP_DSS_CHANNEL_LCD2 */
+ REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
enable_clocks(0);
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
@@ -1988,8 +2073,10 @@ void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
- else /* OMAP_DSS_CHANNEL_DIGIT */
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+ else /* OMAP_DSS_CHANNEL_LCD2 */
+ REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
enable_clocks(0);
}
bool dispc_alpha_blending_enabled(enum omap_channel ch)
@@ -2003,13 +2090,14 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- enabled = REG_GET(DISPC_CONFIG, 18, 18);
+ enabled = REG_GET(DISPC_CONFIG, 19, 19);
+ else if (ch == OMAP_DSS_CHANNEL_LCD2)
+ enabled = REG_GET(DISPC_CONFIG2, 18, 18);
else
BUG();
enable_clocks(0);
return enabled;
-
}
@@ -2022,6 +2110,8 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
enabled = REG_GET(DISPC_CONFIG, 12, 12);
+ else if (ch == OMAP_DSS_CHANNEL_LCD2)
+ enabled = REG_GET(DISPC_CONFIG2, 10, 10);
else
BUG();
enable_clocks(0);
@@ -2030,7 +2120,7 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
}
-void dispc_set_tft_data_lines(u8 data_lines)
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -2053,11 +2143,15 @@ void dispc_set_tft_data_lines(u8 data_lines)
}
enable_clocks(1);
- REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+ if (channel == OMAP_DSS_CHANNEL_LCD2)
+ REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
+ else
+ REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
enable_clocks(0);
}
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+ enum omap_parallel_interface_mode mode)
{
u32 l;
int stallmode;
@@ -2087,13 +2181,17 @@ void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
enable_clocks(1);
- l = dispc_read_reg(DISPC_CONTROL);
-
- l = FLD_MOD(l, stallmode, 11, 11);
- l = FLD_MOD(l, gpout0, 15, 15);
- l = FLD_MOD(l, gpout1, 16, 16);
-
- dispc_write_reg(DISPC_CONTROL, l);
+ if (channel == OMAP_DSS_CHANNEL_LCD2) {
+ l = dispc_read_reg(DISPC_CONTROL2);
+ l = FLD_MOD(l, stallmode, 11, 11);
+ dispc_write_reg(DISPC_CONTROL2, l);
+ } else {
+ l = dispc_read_reg(DISPC_CONTROL);
+ l = FLD_MOD(l, stallmode, 11, 11);
+ l = FLD_MOD(l, gpout0, 15, 15);
+ l = FLD_MOD(l, gpout1, 16, 16);
+ dispc_write_reg(DISPC_CONTROL, l);
+ }
enable_clocks(0);
}
@@ -2129,8 +2227,8 @@ bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
timings->vfp, timings->vbp);
}
-static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
- int vsw, int vfp, int vbp)
+static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
+ int hfp, int hbp, int vsw, int vfp, int vbp)
{
u32 timing_h, timing_v;
@@ -2149,13 +2247,14 @@ static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
}
enable_clocks(1);
- dispc_write_reg(DISPC_TIMING_H, timing_h);
- dispc_write_reg(DISPC_TIMING_V, timing_v);
+ dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
+ dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
enable_clocks(0);
}
/* change name to mode? */
-void dispc_set_lcd_timings(struct omap_video_timings *timings)
+void dispc_set_lcd_timings(enum omap_channel channel,
+ struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
@@ -2165,10 +2264,11 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
timings->vfp, timings->vbp))
BUG();
- _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
- timings->vsw, timings->vfp, timings->vbp);
+ _dispc_set_lcd_timings(channel, timings->hsw, timings->hfp,
+ timings->hbp, timings->vsw, timings->vfp,
+ timings->vbp);
- dispc_set_lcd_size(timings->x_res, timings->y_res);
+ dispc_set_lcd_size(channel, timings->x_res, timings->y_res);
xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
@@ -2176,7 +2276,8 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
ht = (timings->pixel_clock * 1000) / xtot;
vt = (timings->pixel_clock * 1000) / xtot / ytot;
- DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+ DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
+ timings->y_res);
DSSDBG("pck %u\n", timings->pixel_clock);
DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
timings->hsw, timings->hfp, timings->hbp,
@@ -2185,21 +2286,23 @@ void dispc_set_lcd_timings(struct omap_video_timings *timings)
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
}
-static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+ u16 pck_div)
{
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
enable_clocks(1);
- dispc_write_reg(DISPC_DIVISOR,
+ dispc_write_reg(DISPC_DIVISOR(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
enable_clocks(0);
}
-static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
+ int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR);
+ l = dispc_read_reg(DISPC_DIVISOR(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
@@ -2219,13 +2322,13 @@ unsigned long dispc_fclk_rate(void)
return r;
}
-unsigned long dispc_lclk_rate(void)
+unsigned long dispc_lclk_rate(enum omap_channel channel)
{
int lcd;
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR);
+ l = dispc_read_reg(DISPC_DIVISOR(channel));
lcd = FLD_GET(l, 23, 16);
@@ -2234,13 +2337,13 @@ unsigned long dispc_lclk_rate(void)
return r / lcd;
}
-unsigned long dispc_pclk_rate(void)
+unsigned long dispc_pclk_rate(enum omap_channel channel)
{
int lcd, pcd;
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISOR);
+ l = dispc_read_reg(DISPC_DIVISOR(channel));
lcd = FLD_GET(l, 23, 16);
pcd = FLD_GET(l, 7, 0);
@@ -2256,8 +2359,6 @@ void dispc_dump_clocks(struct seq_file *s)
enable_clocks(1);
- dispc_get_lcd_divisor(&lcd, &pcd);
-
seq_printf(s, "- DISPC -\n");
seq_printf(s, "dispc fclk source = %s\n",
@@ -2265,9 +2366,25 @@ void dispc_dump_clocks(struct seq_file *s)
"dss1_alwon_fclk" : "dsi1_pll_fclk");
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
- seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
- seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
+ seq_printf(s, "- LCD1 -\n");
+
+ dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
+
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+ dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+ dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ seq_printf(s, "- LCD2 -\n");
+
+ dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
+
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+ dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+ dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
+ }
enable_clocks(0);
}
@@ -2309,6 +2426,12 @@ void dispc_dump_irqs(struct seq_file *s)
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
PIS(WAKEUP);
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ PIS(FRAMEDONE2);
+ PIS(VSYNC2);
+ PIS(ACBIAS_COUNT_STAT2);
+ PIS(SYNC_LOST2);
+ }
#undef PIS
}
#endif
@@ -2327,19 +2450,30 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CONTROL);
DUMPREG(DISPC_CONFIG);
DUMPREG(DISPC_CAPABLE);
- DUMPREG(DISPC_DEFAULT_COLOR0);
- DUMPREG(DISPC_DEFAULT_COLOR1);
- DUMPREG(DISPC_TRANS_COLOR0);
- DUMPREG(DISPC_TRANS_COLOR1);
+ DUMPREG(DISPC_DEFAULT_COLOR(0));
+ DUMPREG(DISPC_DEFAULT_COLOR(1));
+ DUMPREG(DISPC_TRANS_COLOR(0));
+ DUMPREG(DISPC_TRANS_COLOR(1));
DUMPREG(DISPC_LINE_STATUS);
DUMPREG(DISPC_LINE_NUMBER);
- DUMPREG(DISPC_TIMING_H);
- DUMPREG(DISPC_TIMING_V);
- DUMPREG(DISPC_POL_FREQ);
- DUMPREG(DISPC_DIVISOR);
+ DUMPREG(DISPC_TIMING_H(0));
+ DUMPREG(DISPC_TIMING_V(0));
+ DUMPREG(DISPC_POL_FREQ(0));
+ DUMPREG(DISPC_DIVISOR(0));
DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_DIG);
- DUMPREG(DISPC_SIZE_LCD);
+ DUMPREG(DISPC_SIZE_LCD(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ DUMPREG(DISPC_CONTROL2);
+ DUMPREG(DISPC_CONFIG2);
+ DUMPREG(DISPC_DEFAULT_COLOR(2));
+ DUMPREG(DISPC_TRANS_COLOR(2));
+ DUMPREG(DISPC_TIMING_H(2));
+ DUMPREG(DISPC_TIMING_V(2));
+ DUMPREG(DISPC_POL_FREQ(2));
+ DUMPREG(DISPC_DIVISOR(2));
+ DUMPREG(DISPC_SIZE_LCD(2));
+ }
DUMPREG(DISPC_GFX_BA0);
DUMPREG(DISPC_GFX_BA1);
@@ -2353,13 +2487,22 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_GFX_WINDOW_SKIP);
DUMPREG(DISPC_GFX_TABLE_BA);
- DUMPREG(DISPC_DATA_CYCLE1);
- DUMPREG(DISPC_DATA_CYCLE2);
- DUMPREG(DISPC_DATA_CYCLE3);
-
- DUMPREG(DISPC_CPR_COEF_R);
- DUMPREG(DISPC_CPR_COEF_G);
- DUMPREG(DISPC_CPR_COEF_B);
+ DUMPREG(DISPC_DATA_CYCLE1(0));
+ DUMPREG(DISPC_DATA_CYCLE2(0));
+ DUMPREG(DISPC_DATA_CYCLE3(0));
+
+ DUMPREG(DISPC_CPR_COEF_R(0));
+ DUMPREG(DISPC_CPR_COEF_G(0));
+ DUMPREG(DISPC_CPR_COEF_B(0));
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ DUMPREG(DISPC_DATA_CYCLE1(2));
+ DUMPREG(DISPC_DATA_CYCLE2(2));
+ DUMPREG(DISPC_DATA_CYCLE3(2));
+
+ DUMPREG(DISPC_CPR_COEF_R(2));
+ DUMPREG(DISPC_CPR_COEF_G(2));
+ DUMPREG(DISPC_CPR_COEF_B(2));
+ }
DUMPREG(DISPC_GFX_PRELOAD);
@@ -2458,8 +2601,8 @@ void dispc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
- bool ihs, bool ivs, u8 acbi, u8 acb)
+static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
+ bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, u8 acb)
{
u32 l = 0;
@@ -2476,13 +2619,14 @@ static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
l |= FLD_VAL(acb, 7, 0);
enable_clocks(1);
- dispc_write_reg(DISPC_POL_FREQ, l);
+ dispc_write_reg(DISPC_POL_FREQ(channel), l);
enable_clocks(0);
}
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+void dispc_set_pol_freq(enum omap_channel channel,
+ enum omap_panel_config config, u8 acbi, u8 acb)
{
- _dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+ _dispc_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
(config & OMAP_DSS_LCD_RF) != 0,
(config & OMAP_DSS_LCD_IEO) != 0,
(config & OMAP_DSS_LCD_IPC) != 0,
@@ -2551,24 +2695,26 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+int dispc_set_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
- dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+ dispc_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
return 0;
}
-int dispc_get_clock_div(struct dispc_clock_info *cinfo)
+int dispc_get_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo)
{
unsigned long fck;
fck = dispc_fclk_rate();
- cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
- cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
+ cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16);
+ cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2708,6 +2854,8 @@ static void print_irq_status(u32 status)
PIS(VID2_FIFO_UNDERFLOW);
PIS(SYNC_LOST);
PIS(SYNC_LOST_DIGIT);
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ PIS(SYNC_LOST2);
#undef PIS
printk("\n");
@@ -2926,6 +3074,45 @@ static void dispc_error_worker(struct work_struct *work)
}
}
+ if (errors & DISPC_IRQ_SYNC_LOST2) {
+ struct omap_overlay_manager *manager = NULL;
+ bool enable = false;
+
+ DSSERR("SYNC_LOST for LCD2, disabling LCD2\n");
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(i);
+
+ if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
+ manager = mgr;
+ enable = mgr->device->state ==
+ OMAP_DSS_DISPLAY_ACTIVE;
+ mgr->device->driver->disable(mgr->device);
+ break;
+ }
+ }
+
+ if (manager) {
+ struct omap_dss_device *dssdev = manager->device;
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id != 0 && ovl->manager == manager)
+ dispc_enable_plane(ovl->id, 0);
+ }
+
+ dispc_go(manager->id);
+ mdelay(50);
+ if (enable)
+ dssdev->driver->enable(dssdev);
+ }
+ }
+
if (errors & DISPC_IRQ_OCP_ERR) {
DSSERR("OCP_ERR\n");
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
@@ -3033,6 +3220,8 @@ static void _omap_dispc_initialize_irq(void)
memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
* so clear it */
@@ -3065,7 +3254,8 @@ static void _omap_dispc_initial_config(void)
dispc_write_reg(DISPC_SYSCONFIG, l);
/* FUNCGATED */
- REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ if (dss_has_feature(FEAT_FUNCGATED))
+ REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
/* L3 firewall setting: enable access to OCM RAM */
/* XXX this should be somewhere in plat-omap */
@@ -3139,17 +3329,18 @@ int dispc_setup_plane(enum omap_plane plane,
enum omap_color_mode color_mode,
bool ilace,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha)
+ u8 rotation, bool mirror, u8 global_alpha,
+ u8 pre_mult_alpha, enum omap_channel channel)
{
int r = 0;
DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
plane, paddr, screen_width, pos_x, pos_y,
width, height,
out_width, out_height,
ilace, color_mode,
- rotation, mirror);
+ rotation, mirror, channel);
enable_clocks(1);
@@ -3161,7 +3352,8 @@ int dispc_setup_plane(enum omap_plane plane,
color_mode, ilace,
rotation_type,
rotation, mirror,
- global_alpha);
+ global_alpha,
+ pre_mult_alpha, channel);
enable_clocks(0);
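
The dispc.c changes above hinge on channel-indexed register macros: passing 2 (the LCD2 channel) selects the new OMAP4-only register offsets, while any other channel keeps the original LCD/TV offsets, which is also why every LCD2 access in the save/restore and dump paths is guarded by dss_has_feature(FEAT_MGR_LCD2). A minimal standalone model of that dispatch, with offsets copied from the DATA_CYCLE/CPR_COEF macros above and a DISPC base address assumed purely for illustration:

/* Standalone sketch of the channel-selecting register macros above;
 * not part of the patch. The 0x48050400 base is an assumption used
 * only so the printed addresses look plausible. */
#include <stdio.h>

#define DISPC_REG(off)		(0x48050400u + (off))
#define DISPC_DATA_CYCLE1(ch)	DISPC_REG((ch) != 2 ? 0x01D4 : 0x03C0)
#define DISPC_CPR_COEF_R(ch)	DISPC_REG((ch) != 2 ? 0x0220 : 0x03BC)

int main(void)
{
	int ch;

	/* Channels 0 and 1 (LCD, TV) resolve to the classic offsets;
	 * only channel 2 (LCD2) lands in the 0x03xx block. */
	for (ch = 0; ch <= 2; ch++)
		printf("ch %d: DATA_CYCLE1 0x%08x  CPR_COEF_R 0x%08x\n",
		       ch, DISPC_DATA_CYCLE1(ch), DISPC_CPR_COEF_R(ch));
	return 0;
}
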
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 960e977a8bf0..75fb0a515430 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -40,8 +40,9 @@ static struct {
} dpi;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
+ unsigned long pck_req, unsigned long *fck, int *lck_div,
+ int *pck_div)
{
struct dsi_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
@@ -58,7 +59,7 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
return r;
@@ -69,8 +70,9 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
return 0;
}
#else
-static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
+ unsigned long pck_req, unsigned long *fck, int *lck_div,
+ int *pck_div)
{
struct dss_clock_info dss_cinfo;
struct dispc_clock_info dispc_cinfo;
@@ -84,7 +86,7 @@ static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
if (r)
return r;
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
return r;
@@ -107,17 +109,17 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
- dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
- dssdev->panel.acb);
+ dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
- &fck, &lck_div, &pck_div);
+ r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+ &lck_div, &pck_div);
#else
- r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
- &fck, &lck_div, &pck_div);
+ r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+ &lck_div, &pck_div);
#endif
if (r)
goto err0;
@@ -132,7 +134,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
- dispc_set_lcd_timings(t);
+ dispc_set_lcd_timings(dssdev->manager->id, t);
err0:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -145,10 +147,12 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
- dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
- OMAP_DSS_LCD_DISPLAY_STN);
- dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_lcd_display_type(dssdev->manager->id, is_tft ?
+ OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
+ dispc_set_tft_data_lines(dssdev->manager->id,
+ dssdev->phy.dpi.data_lines);
return 0;
}
@@ -234,7 +238,7 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
dpi_set_mode(dssdev);
- dispc_go(OMAP_DSS_CHANNEL_LCD);
+ dispc_go(dssdev->manager->id);
}
}
EXPORT_SYMBOL(dpi_set_timings);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index aa4f7a5fae29..ddf3a0560822 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -792,7 +792,8 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
}
/* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+ struct dsi_clock_info *cinfo)
{
if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
return -EINVAL;
@@ -812,7 +813,7 @@ static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
* with DSS2_FCK source also */
cinfo->highfreq = 0;
} else {
- cinfo->clkin = dispc_pclk_rate();
+ cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
if (cinfo->clkin < 32000000)
cinfo->highfreq = 0;
@@ -1206,8 +1207,8 @@ void dsi_dump_clocks(struct seq_file *s)
seq_printf(s, "VP_CLK\t\t%lu\n"
"VP_PCLK\t\t%lu\n",
- dispc_lclk_rate(),
- dispc_pclk_rate());
+ dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
+ dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
enable_clocks(0);
}
@@ -2888,7 +2889,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
dss_setup_partial_planes(dssdev, x, y, w, h,
enlarge_update_area);
- dispc_set_lcd_size(*w, *h);
+ dispc_set_lcd_size(dssdev->manager->id, *w, *h);
}
return 0;
@@ -2947,12 +2948,14 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
return r;
}
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_set_lcd_display_type(dssdev->manager->id,
+ OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
- dispc_enable_fifohandcheck(1);
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_DSI);
+ dispc_enable_fifohandcheck(dssdev->manager->id, 1);
- dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+ dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
{
struct omap_video_timings timings = {
@@ -2964,7 +2967,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
.vbp = 0,
};
- dispc_set_lcd_timings(&timings);
+ dispc_set_lcd_timings(dssdev->manager->id, &timings);
}
return 0;
@@ -2987,7 +2990,7 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
cinfo.regm = dssdev->phy.dsi.div.regm;
cinfo.regm3 = dssdev->phy.dsi.div.regm3;
cinfo.regm4 = dssdev->phy.dsi.div.regm4;
- r = dsi_calc_clock_rates(&cinfo);
+ r = dsi_calc_clock_rates(dssdev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
return r;
@@ -3019,7 +3022,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
return r;
}
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r) {
DSSERR("Failed to set dispc clocks\n");
return r;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 5c7940d5f282..b394951120ac 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -333,9 +333,9 @@ void dispc_disable_sidle(void);
void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifohandcheck(bool enable);
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
u32 dispc_get_plane_fifo_size(enum omap_plane plane);
void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
@@ -359,7 +359,8 @@ int dispc_setup_plane(enum omap_plane plane,
bool ilace,
enum omap_dss_rotation_type rotation_type,
u8 rotation, bool mirror,
- u8 global_alpha);
+ u8 global_alpha, u8 pre_mult_alpha,
+ enum omap_channel channel);
bool dispc_go_busy(enum omap_channel channel);
void dispc_go(enum omap_channel channel);
@@ -368,9 +369,11 @@ bool dispc_is_channel_enabled(enum omap_channel channel);
int dispc_enable_plane(enum omap_plane plane, bool enable);
void dispc_enable_replication(enum omap_plane plane, bool enable);
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
-void dispc_set_tft_data_lines(u8 data_lines);
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+ enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_channel channel,
+ enum omap_lcd_display_type type);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
void dispc_set_default_color(enum omap_channel channel, u32 color);
@@ -387,17 +390,21 @@ bool dispc_trans_key_enabled(enum omap_channel ch);
bool dispc_alpha_blending_enabled(enum omap_channel ch);
bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
-void dispc_set_lcd_timings(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(enum omap_channel channel,
+ struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
-unsigned long dispc_lclk_rate(void);
-unsigned long dispc_pclk_rate(void);
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+unsigned long dispc_lclk_rate(enum omap_channel channel);
+unsigned long dispc_pclk_rate(enum omap_channel channel);
+void dispc_set_pol_freq(enum omap_channel channel,
+ enum omap_panel_config config, u8 acbi, u8 acb);
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
-int dispc_set_clock_div(struct dispc_clock_info *cinfo);
-int dispc_get_clock_div(struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo);
+int dispc_get_clock_div(enum omap_channel channel,
+ struct dispc_clock_info *cinfo);
/* VENC */
@@ -424,8 +431,8 @@ void rfbi_dump_regs(struct seq_file *s);
int rfbi_configure(int rfbi_module, int bpp, int lines);
void rfbi_enable_rfbi(bool enable);
-void rfbi_transfer_area(u16 width, u16 height,
- void (callback)(void *data), void *data);
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+ u16 height, void (callback)(void *data), void *data);
void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 867f68de125f..cf3ef696e141 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -82,6 +82,18 @@ static const enum omap_display_type omap3_dss_supported_displays[] = {
OMAP_DISPLAY_TYPE_VENC,
};
+static const enum omap_display_type omap4_dss_supported_displays[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DISPLAY_TYPE_VENC,
+
+ /* OMAP_DSS_CHANNEL_LCD2 */
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+ OMAP_DISPLAY_TYPE_DSI,
+};
+
static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
/* OMAP_DSS_GFX */
OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
@@ -127,6 +139,10 @@ static struct omap_dss_features omap2_dss_features = {
.reg_fields = omap2_dss_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
+ .has_feature =
+ FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
+ FEAT_PCKFREEENABLE | FEAT_FUNCGATED,
+
.num_mgrs = 2,
.num_ovls = 3,
.supported_displays = omap2_dss_supported_displays,
@@ -134,11 +150,29 @@ static struct omap_dss_features omap2_dss_features = {
};
/* OMAP3 DSS Features */
-static struct omap_dss_features omap3_dss_features = {
+static struct omap_dss_features omap3430_dss_features = {
+ .reg_fields = omap3_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+ .has_feature =
+ FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+ FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+ FEAT_FUNCGATED,
+
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .supported_displays = omap3_dss_supported_displays,
+ .supported_color_modes = omap3_dss_supported_color_modes,
+};
+
+static struct omap_dss_features omap3630_dss_features = {
.reg_fields = omap3_dss_reg_fields,
.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
- .has_feature = FEAT_GLOBAL_ALPHA,
+ .has_feature =
+ FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+ FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+ FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED,
.num_mgrs = 2,
.num_ovls = 3,
@@ -146,6 +180,21 @@ static struct omap_dss_features omap3_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
};
+/* OMAP4 DSS Features */
+static struct omap_dss_features omap4_dss_features = {
+ .reg_fields = omap3_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+ .has_feature =
+ FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
+ FEAT_MGR_LCD2,
+
+ .num_mgrs = 3,
+ .num_ovls = 3,
+ .supported_displays = omap4_dss_supported_displays,
+ .supported_color_modes = omap3_dss_supported_color_modes,
+};
+
/* Functions returning values related to a DSS feature */
int dss_feat_get_num_mgrs(void)
{
@@ -167,6 +216,13 @@ enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
return omap_current_dss_features->supported_color_modes[plane];
}
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+ enum omap_color_mode color_mode)
+{
+ return omap_current_dss_features->supported_color_modes[plane] &
+ color_mode;
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
@@ -186,6 +242,10 @@ void dss_features_init(void)
{
if (cpu_is_omap24xx())
omap_current_dss_features = &omap2_dss_features;
+ else if (cpu_is_omap3630())
+ omap_current_dss_features = &omap3630_dss_features;
+ else if (cpu_is_omap34xx())
+ omap_current_dss_features = &omap3430_dss_features;
else
- omap_current_dss_features = &omap3_dss_features;
+ omap_current_dss_features = &omap4_dss_features;
}
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index cb231eaa9b31..b9c70be92588 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,13 +20,19 @@
#ifndef __OMAP2_DSS_FEATURES_H
#define __OMAP2_DSS_FEATURES_H
-#define MAX_DSS_MANAGERS 2
+#define MAX_DSS_MANAGERS 3
#define MAX_DSS_OVERLAYS 3
/* DSS has feature id */
enum dss_feat_id {
FEAT_GLOBAL_ALPHA = 1 << 0,
FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
+ FEAT_PRE_MULT_ALPHA = 1 << 2,
+ FEAT_LCDENABLEPOL = 1 << 3,
+ FEAT_LCDENABLESIGNAL = 1 << 4,
+ FEAT_PCKFREEENABLE = 1 << 5,
+ FEAT_FUNCGATED = 1 << 6,
+ FEAT_MGR_LCD2 = 1 << 7,
};
/* DSS register field id */
@@ -43,6 +49,8 @@ int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+ enum omap_color_mode color_mode);
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
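
The feature plumbing in dss_features.[ch] above is a plain bitmask: each SoC variant declares a has_feature word, and dss_has_feature() amounts to a bitwise AND against the requested id. A small userspace sketch of that check, using the enum values and the OMAP4 feature list verbatim from the diff; the test function is an assumed simplification, not the kernel implementation:

/* Sketch of the feature-flag test; not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

/* Bit values copied from the dss_feat_id enum added above. */
enum dss_feat_id {
	FEAT_GLOBAL_ALPHA      = 1 << 0,
	FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
	FEAT_PRE_MULT_ALPHA    = 1 << 2,
	FEAT_LCDENABLEPOL      = 1 << 3,
	FEAT_LCDENABLESIGNAL   = 1 << 4,
	FEAT_PCKFREEENABLE     = 1 << 5,
	FEAT_FUNCGATED         = 1 << 6,
	FEAT_MGR_LCD2          = 1 << 7,
};

/* Feature mask of the omap4_dss_features entry added above. */
static const unsigned int omap4_features =
	FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | FEAT_MGR_LCD2;

/* Assumed shape of dss_has_feature(): a bitwise test against the
 * currently selected feature set. */
static bool has_feature(unsigned int features, enum dss_feat_id id)
{
	return (features & id) != 0;
}

int main(void)
{
	printf("OMAP4: MGR_LCD2=%d LCDENABLESIGNAL=%d\n",
	       has_feature(omap4_features, FEAT_MGR_LCD2),
	       has_feature(omap4_features, FEAT_LCDENABLESIGNAL));
	return 0;
}
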
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 545e9b9a4d92..172d4e697309 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -406,6 +406,7 @@ struct overlay_cache_data {
u16 out_width; /* if 0, out_width == width */
u16 out_height; /* if 0, out_height == height */
u8 global_alpha;
+ u8 pre_mult_alpha;
enum omap_channel channel;
bool replication;
@@ -512,11 +513,14 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
unsigned long timeout = msecs_to_jiffies(500);
u32 irq;
- if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC)
+ if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD;
- else
- irq = DISPC_IRQ_VSYNC;
-
+ } else {
+ if (mgr->id == OMAP_DSS_CHANNEL_LCD)
+ irq = DISPC_IRQ_VSYNC;
+ else
+ irq = DISPC_IRQ_VSYNC2;
+ }
return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
}
@@ -524,7 +528,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
struct manager_cache_data *mc;
- enum omap_channel channel;
u32 irq;
int r;
int i;
@@ -535,7 +538,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- channel = OMAP_DSS_CHANNEL_DIGIT;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
@@ -543,11 +545,14 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
- irq = DISPC_IRQ_FRAMEDONE;
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_FRAMEDONE
+ : DISPC_IRQ_FRAMEDONE2;
} else {
- irq = DISPC_IRQ_VSYNC;
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC
+ : DISPC_IRQ_VSYNC2;
}
- channel = OMAP_DSS_CHANNEL_LCD;
}
mc = &dss_cache.manager_cache[mgr->id];
@@ -594,7 +599,6 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
unsigned long timeout = msecs_to_jiffies(500);
- enum omap_channel channel;
struct overlay_cache_data *oc;
struct omap_dss_device *dssdev;
u32 irq;
@@ -611,7 +615,6 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- channel = OMAP_DSS_CHANNEL_DIGIT;
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
@@ -619,11 +622,14 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
- irq = DISPC_IRQ_FRAMEDONE;
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_FRAMEDONE
+ : DISPC_IRQ_FRAMEDONE2;
} else {
- irq = DISPC_IRQ_VSYNC;
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC
+ : DISPC_IRQ_VSYNC2;
}
- channel = OMAP_DSS_CHANNEL_LCD;
}
oc = &dss_cache.overlay_cache[ovl->id];
@@ -842,7 +848,9 @@ static int configure_overlay(enum omap_plane plane)
c->rotation_type,
c->rotation,
c->mirror,
- c->global_alpha);
+ c->global_alpha,
+ c->pre_mult_alpha,
+ c->channel);
if (r) {
/* this shouldn't happen */
@@ -894,10 +902,10 @@ static int configure_dispc(void)
r = 0;
busy = false;
- mgr_busy[0] = dispc_go_busy(0);
- mgr_busy[1] = dispc_go_busy(1);
- mgr_go[0] = false;
- mgr_go[1] = false;
+ for (i = 0; i < num_mgrs; i++) {
+ mgr_busy[i] = dispc_go_busy(i);
+ mgr_go[i] = false;
+ }
/* Commit overlay settings */
for (i = 0; i < num_ovls; ++i) {
@@ -1156,9 +1164,10 @@ static void dss_apply_irq_handler(void *data, u32 mask)
const int num_mgrs = dss_feat_get_num_mgrs();
int i, r;
bool mgr_busy[MAX_DSS_MANAGERS];
+ u32 irq_mask;
- mgr_busy[0] = dispc_go_busy(0);
- mgr_busy[1] = dispc_go_busy(1);
+ for (i = 0; i < num_mgrs; i++)
+ mgr_busy[i] = dispc_go_busy(i);
spin_lock(&dss_cache.lock);
@@ -1179,8 +1188,8 @@ static void dss_apply_irq_handler(void *data, u32 mask)
goto end;
/* re-read busy flags */
- mgr_busy[0] = dispc_go_busy(0);
- mgr_busy[1] = dispc_go_busy(1);
+ for (i = 0; i < num_mgrs; i++)
+ mgr_busy[i] = dispc_go_busy(i);
/* keep running as long as there are busy managers, so that
* we can collect overlay-applied information */
@@ -1189,9 +1198,12 @@ static void dss_apply_irq_handler(void *data, u32 mask)
goto end;
}
- omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
- DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN);
+ irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+ DISPC_IRQ_EVSYNC_EVEN;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ irq_mask |= DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
dss_cache.irq_enabled = false;
end:
@@ -1265,6 +1277,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->out_width = ovl->info.out_width;
oc->out_height = ovl->info.out_height;
oc->global_alpha = ovl->info.global_alpha;
+ oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
oc->replication =
dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1383,9 +1396,14 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
r = 0;
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
if (!dss_cache.irq_enabled) {
- r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
- DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN);
+ u32 mask;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+ DISPC_IRQ_EVSYNC_EVEN;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ mask |= DISPC_IRQ_VSYNC2;
+
+ r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
dss_cache.irq_enabled = true;
}
configure_dispc();
@@ -1477,6 +1495,10 @@ int dss_init_overlay_managers(struct platform_device *pdev)
mgr->name = "tv";
mgr->id = OMAP_DSS_CHANNEL_DIGIT;
break;
+ case 2:
+ mgr->name = "lcd2";
+ mgr->id = OMAP_DSS_CHANNEL_LCD2;
+ break;
}
mgr->set_device = &omap_dss_set_device;
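
In manager.c the fixed mgr_busy[0]/mgr_busy[1] pairs become loops over dss_feat_get_num_mgrs(), so a third (LCD2) manager is picked up without further special-casing, and the vsync IRQ mask only gains DISPC_IRQ_VSYNC2 when FEAT_MGR_LCD2 is present. A toy model of that loop shape; dispc_go_busy() is stubbed here, while the real one reads the GO bit from DISPC_CONTROL or DISPC_CONTROL2 as shown earlier:

/* Toy model of the per-manager busy scan in dss_apply_irq_handler();
 * stubbed hardware access, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DSS_MANAGERS 3	/* raised from 2 in dss_features.h above */

static bool dispc_go_busy_stub(int channel)
{
	return channel == 2;	/* pretend only LCD2 still has GO set */
}

int main(void)
{
	bool mgr_busy[MAX_DSS_MANAGERS];
	int num_mgrs = 3;	/* dss_feat_get_num_mgrs() on OMAP4 per the diff */
	int i;

	for (i = 0; i < num_mgrs; i++)
		mgr_busy[i] = dispc_go_busy_stub(i);

	for (i = 0; i < num_mgrs; i++)
		printf("manager %d GO busy: %d\n", i, mgr_busy[i]);
	return 0;
}
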
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 75642c22cac7..456efef03c20 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -257,6 +257,43 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
return size;
}
+static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ovl->info.pre_mult_alpha);
+}
+
+static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ /* only GFX and Video2 plane support pre alpha multiplied
+ * set zero for Video1 plane
+ */
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ ovl->id == OMAP_DSS_VIDEO1)
+ info.pre_mult_alpha = 0;
+ else
+ info.pre_mult_alpha = simple_strtoul(buf, NULL, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
struct overlay_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay *, char *);
@@ -280,6 +317,9 @@ static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
overlay_enabled_show, overlay_enabled_store);
static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
overlay_global_alpha_show, overlay_global_alpha_store);
+static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
+ overlay_pre_mult_alpha_show,
+ overlay_pre_mult_alpha_store);
static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_name.attr,
@@ -290,6 +330,7 @@ static struct attribute *overlay_sysfs_attrs[] = {
&overlay_attr_output_size.attr,
&overlay_attr_enabled.attr,
&overlay_attr_global_alpha.attr,
+ &overlay_attr_pre_mult_alpha.attr,
NULL
};
@@ -623,12 +664,22 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
int i;
struct omap_overlay_manager *lcd_mgr;
struct omap_overlay_manager *tv_mgr;
+ struct omap_overlay_manager *lcd2_mgr = NULL;
struct omap_overlay_manager *mgr = NULL;
lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ if (!lcd2_mgr->device || force) {
+ if (lcd2_mgr->device)
+ lcd2_mgr->unset_device(lcd2_mgr);
+ lcd2_mgr->set_device(lcd2_mgr, dssdev);
+ mgr = lcd2_mgr;
+ }
+ } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
if (!lcd_mgr->device || force) {
if (lcd_mgr->device)
lcd_mgr->unset_device(lcd_mgr);
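
The new pre_mult_alpha overlay attribute above ends up toggling bit 28 of the plane's attribute register (_dispc_set_pre_mult_alpha earlier in the diff); with it set, the pixel data is taken to be already multiplied by its alpha, which is the usual meaning of pre-multiplied alpha. For reference, the two blend conventions as standard graphics arithmetic, not taken from the TRM:

/* Straight vs. pre-multiplied alpha blend of one colour channel
 * (0..1 floats). Illustrates what the pre_mult_alpha bit asks the
 * hardware to assume; not derived from the DISPC documentation. */
#include <stdio.h>

static float blend_straight(float src, float alpha, float dst)
{
	return alpha * src + (1.0f - alpha) * dst;
}

static float blend_premultiplied(float src_premul, float alpha, float dst)
{
	return src_premul + (1.0f - alpha) * dst;	/* src already * alpha */
}

int main(void)
{
	float src = 0.8f, alpha = 0.5f, dst = 0.2f;

	/* Both print the same result when src_premul == src * alpha. */
	printf("straight: %.3f\n", blend_straight(src, alpha, dst));
	printf("premult : %.3f\n", blend_premultiplied(src * alpha, alpha, dst));
	return 0;
}
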
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index bbe62464e92d..10a2ffe02882 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -301,8 +301,8 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
}
EXPORT_SYMBOL(omap_rfbi_write_pixels);
-void rfbi_transfer_area(u16 width, u16 height,
- void (callback)(void *data), void *data)
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+ u16 height, void (*callback)(void *data), void *data)
{
u32 l;
@@ -311,9 +311,9 @@ void rfbi_transfer_area(u16 width, u16 height,
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dispc_set_lcd_size(width, height);
+ dispc_set_lcd_size(dssdev->manager->id, width, height);
- dispc_enable_channel(OMAP_DSS_CHANNEL_LCD, true);
+ dispc_enable_channel(dssdev->manager->id, true);
rfbi.framedone_callback = callback;
rfbi.framedone_callback_data = data;
@@ -887,7 +887,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
dss_setup_partial_planes(dssdev, x, y, w, h, true);
- dispc_set_lcd_size(*w, *h);
+ dispc_set_lcd_size(dssdev->manager->id, *w, *h);
}
return 0;
@@ -899,7 +899,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
void (*callback)(void *), void *data)
{
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- rfbi_transfer_area(w, h, callback, data);
+ rfbi_transfer_area(dssdev, w, h, callback, data);
} else {
struct omap_overlay *ovl;
void __iomem *addr;
@@ -1018,11 +1018,13 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_set_lcd_display_type(dssdev->manager->id,
+ OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_RFBI);
- dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+ dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
rfbi_configure(dssdev->phy.rfbi.channel,
dssdev->ctrl.pixel_size,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ee07a3cc22ef..b64adf7dfc88 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -35,12 +35,16 @@ static struct {
struct regulator *vdds_sdi_reg;
} sdi;
-static void sdi_basic_init(void)
+static void sdi_basic_init(struct omap_dss_device *dssdev)
+
{
- dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_parallel_interface_mode(dssdev->manager->id,
+ OMAP_DSS_PARALLELMODE_BYPASS);
+
+ dispc_set_lcd_display_type(dssdev->manager->id,
+ OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_set_tft_data_lines(24);
+ dispc_set_tft_data_lines(dssdev->manager->id, 24);
dispc_lcd_enable_signal_polarity(1);
}
@@ -68,20 +72,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (!sdi.skip_init)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
- sdi_basic_init();
+ sdi_basic_init(dssdev);
/* 15.5.9.1.2 */
dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
- dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
- dssdev->panel.acb);
+ dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+ dssdev->panel.acbi, dssdev->panel.acb);
if (!sdi.skip_init) {
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
} else {
r = dss_get_clock_div(&dss_cinfo);
- r = dispc_get_clock_div(&dispc_cinfo);
+ r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo);
}
if (r)
@@ -102,13 +106,13 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
}
- dispc_set_lcd_timings(t);
+ dispc_set_lcd_timings(dssdev->manager->id, t);
r = dss_set_clock_div(&dss_cinfo);
if (r)
goto err2;
- r = dispc_set_clock_div(&dispc_cinfo);
+ r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
goto err2;
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 6a704f176c22..4fdab8e9c496 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2132,8 +2132,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
char *str, *options, *this_opt;
int r = 0;
- str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
- strcpy(str, def_mode);
+ str = kstrdup(def_mode, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
options = str;
while (!r && (this_opt = strsep(&options, ",")) != NULL) {
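
The omapfb-main.c hunk above replaces an unchecked kmalloc()+strcpy() with kstrdup() plus an -ENOMEM bail-out before the option string is tokenized. The same shape in a standalone userspace form, with strdup/strtok_r standing in for kstrdup/strsep and a made-up option string:

/* Userspace sketch of the duplicate-then-tokenize pattern above;
 * not part of the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Stand-in for the omapfb def_mode module parameter. */
	const char *def_mode = "dvi:1280x720MR-16@60,lcd:480x640-16";
	char *str, *this_opt, *saveptr;

	/* Duplicate once and bail out cleanly on allocation failure
	 * (kstrdup + -ENOMEM in the kernel, strdup + EXIT_FAILURE here). */
	str = strdup(def_mode);
	if (!str)
		return EXIT_FAILURE;

	for (this_opt = strtok_r(str, ",", &saveptr); this_opt;
	     this_opt = strtok_r(NULL, ",", &saveptr))
		printf("mode option: %s\n", this_opt);

	free(str);
	return 0;
}
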
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9c0144ee7ae5..65560a1a0439 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -513,9 +513,9 @@ static int ps3fb_release(struct fb_info *info, int user)
if (atomic_dec_and_test(&ps3fb.f_count)) {
if (atomic_read(&ps3fb.ext_flip)) {
atomic_set(&ps3fb.ext_flip, 0);
- if (!try_acquire_console_sem()) {
+ if (console_trylock()) {
ps3fb_sync(info, 0); /* single buffer */
- release_console_sem();
+ console_unlock();
}
}
}
@@ -830,14 +830,14 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
if (vmode) {
var = info->var;
fb_videomode_to_var(&var, vmode);
- acquire_console_sem();
+ console_lock();
info->flags |= FBINFO_MISC_USEREVENT;
/* Force, in case only special bits changed */
var.activate |= FB_ACTIVATE_FORCE;
par->new_mode_id = val;
retval = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
- release_console_sem();
+ console_unlock();
}
break;
}
@@ -881,9 +881,9 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val);
- acquire_console_sem();
+ console_lock();
retval = ps3fb_sync(info, val);
- release_console_sem();
+ console_unlock();
break;
default:
@@ -903,9 +903,9 @@ static int ps3fbd(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
if (ps3fb.is_kicked) {
ps3fb.is_kicked = 0;
- acquire_console_sem();
+ console_lock();
ps3fb_sync(info, 0); /* single buffer */
- release_console_sem();
+ console_unlock();
}
schedule();
}
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index cea6403ae71c..35f61dd0cb3a 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -701,16 +701,12 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
*/
pxa168fb_init_mode(info, mi);
- ret = pxa168fb_check_var(&info->var, info);
- if (ret)
- goto failed_free_fbmem;
-
/*
* Fill in sane defaults.
*/
ret = pxa168fb_check_var(&info->var, info);
if (ret)
- goto failed;
+ goto failed_free_fbmem;
/*
* enable controller clock
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index b81168df253d..cf4beb9dc9bb 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -1,5 +1,5 @@
/*
- * pxa3xx-gc.c - Linux kernel module for PXA3xx graphics controllers
+ * pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
*
* This driver needs a DirectFB counterpart in user space, communication
* is handled via mmap()ed memory areas and an ioctl.
@@ -421,7 +421,7 @@ pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
buffer->next = priv->free;
priv->free = buffer;
spin_unlock_irqrestore(&priv->spinlock, flags);
- return ret;
+ return -EFAULT;
}
buffer->length = words;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 618f36bec10d..da388186d617 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
return bd->props.brightness;
}
-static struct backlight_ops riva_bl_ops = {
+static const struct backlight_ops riva_bl_ops = {
.get_brightness = riva_bl_get_brightness,
.update_status = riva_bl_update_status,
};
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 46b430978bcc..61c819e35f7f 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -918,9 +919,9 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
}
info->clk = clk_get(NULL, "lcd");
- if (!info->clk || IS_ERR(info->clk)) {
+ if (IS_ERR(info->clk)) {
printk(KERN_ERR "failed to get lcd clock source\n");
- ret = -ENOENT;
+ ret = PTR_ERR(info->clk);
goto release_irq;
}
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index dce8c97b4333..75738a928610 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -22,7 +22,7 @@
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
@@ -1113,12 +1113,12 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
dev_info(info->device, "suspend\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1129,7 +1129,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1145,12 +1145,12 @@ static int s3_pci_resume(struct pci_dev* dev)
dev_info(info->device, "resume\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -1159,7 +1159,7 @@ static int s3_pci_resume(struct pci_dev* dev)
err = pci_enable_device(dev);
if (err) {
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
dev_err(info->device, "error %d enabling device for resume\n", err);
return err;
}
@@ -1169,7 +1169,7 @@ static int s3_pci_resume(struct pci_dev* dev)
fb_set_suspend(info, 0);
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 842d157e1025..487911e2926c 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2373,7 +2373,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
if (mesg.event == PM_EVENT_FREEZE)
return 0;
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
if (info->fbops->fb_sync)
@@ -2385,7 +2385,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, mesg));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -2409,7 +2409,7 @@ static int savagefb_resume(struct pci_dev* dev)
return 0;
}
- acquire_console_sem();
+ console_lock();
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
@@ -2423,7 +2423,7 @@ static int savagefb_resume(struct pci_dev* dev)
savagefb_set_par(info);
fb_set_suspend(info, 0);
savagefb_blank(FB_BLANK_UNBLANK, info);
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 8c59cc8c5a9c..2b9e56a6bde4 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
@@ -221,6 +222,7 @@ struct sh_hdmi {
struct delayed_work edid_work;
struct fb_var_screeninfo var;
struct fb_monspecs monspec;
+ struct notifier_block notifier;
};
static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
@@ -737,7 +739,7 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
struct fb_modelist *modelist = NULL;
unsigned int f_width = 0, f_height = 0, f_refresh = 0;
unsigned long found_rate_error = ULONG_MAX; /* silly compiler... */
- bool exact_match = false;
+ bool scanning = false, preferred_bad = false;
u8 edid[128];
char *forced;
int i;
@@ -800,6 +802,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
if (i < 2) {
f_width = 0;
f_height = 0;
+ } else {
+ /* The user wants us to use the EDID data */
+ scanning = true;
}
dev_dbg(hdmi->dev, "Forced mode %ux%u@%uHz\n",
f_width, f_height, f_refresh);
@@ -807,37 +812,56 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
/* Walk monitor modes to find the best or the exact match */
for (i = 0, mode = hdmi->monspec.modedb;
- f_width && f_height && i < hdmi->monspec.modedb_len && !exact_match;
+ i < hdmi->monspec.modedb_len && scanning;
i++, mode++) {
unsigned long rate_error;
- /* No interest in unmatching modes */
- if (f_width != mode->xres || f_height != mode->yres)
+ if (!f_width && !f_height) {
+ /*
+ * A parameter string "video=sh_mobile_lcdc:0x0" means
+ * use the preferred EDID mode. If it is rejected by
+ * .fb_check_var(), keep looking, until an acceptable
+ * one is found.
+ */
+ if ((mode->flag & FB_MODE_IS_FIRST) || preferred_bad)
+ scanning = false;
+ else
+ continue;
+ } else if (f_width != mode->xres || f_height != mode->yres) {
+ /* No interest in unmatching modes */
continue;
+ }
rate_error = sh_hdmi_rate_error(hdmi, mode, hdmi_rate, parent_rate);
- if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
- /*
- * Exact match if either the refresh rate matches or it
- * hasn't been specified and we've found a mode, for
- * which we can configure the clock precisely
- */
- exact_match = true;
- else if (found && found_rate_error <= rate_error)
- /*
- * We otherwise search for the closest matching clock
- * rate - either if no refresh rate has been specified
- * or we cannot find an exactly matching one
- */
- continue;
+ if (scanning) {
+ if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
+ /*
+ * Exact match if either the refresh rate
+ * matches or it hasn't been specified and we've
+ * found a mode, for which we can configure the
+ * clock precisely
+ */
+ scanning = false;
+ else if (found && found_rate_error <= rate_error)
+ /*
+ * We otherwise search for the closest matching
+ * clock rate - either if no refresh rate has
+ * been specified or we cannot find an exactly
+ * matching one
+ */
+ continue;
+ }
/* Check if supported: sufficient fb memory, supported clock-rate */
fb_videomode_to_var(var, mode);
+ var->bits_per_pixel = info->var.bits_per_pixel;
+
if (info && info->fbops->fb_check_var &&
info->fbops->fb_check_var(var, info)) {
- exact_match = false;
+ scanning = true;
+ preferred_bad = true;
continue;
}
@@ -855,9 +879,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
* driver, and passing ->info with HDMI platform data.
*/
if (info && !found) {
- modelist = hdmi->info->modelist.next &&
- !list_empty(&hdmi->info->modelist) ?
- list_entry(hdmi->info->modelist.next,
+ modelist = info->modelist.next &&
+ !list_empty(&info->modelist) ?
+ list_entry(info->modelist.next,
struct fb_modelist, list) :
NULL;
@@ -1100,6 +1124,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
mutex_lock(&hdmi->mutex);
if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
+ struct fb_info *info = hdmi->info;
unsigned long parent_rate = 0, hdmi_rate;
/* A device has been plugged in */
@@ -1121,22 +1146,21 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
/* Switched to another (d) power-save mode */
msleep(10);
- if (!hdmi->info)
+ if (!info)
goto out;
- ch = hdmi->info->par;
+ ch = info->par;
- acquire_console_sem();
+ console_lock();
/* HDMI plug in */
if (!sh_hdmi_must_reconfigure(hdmi) &&
- hdmi->info->state == FBINFO_STATE_RUNNING) {
+ info->state == FBINFO_STATE_RUNNING) {
/*
* First activation with the default monitor - just turn
* on, if we run a resume here, the logo disappears
*/
- if (lock_fb_info(hdmi->info)) {
- struct fb_info *info = hdmi->info;
+ if (lock_fb_info(info)) {
info->var.width = hdmi->var.width;
info->var.height = hdmi->var.height;
sh_hdmi_display_on(hdmi, info);
@@ -1144,10 +1168,10 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
}
} else {
/* New monitor or have to wake up */
- fb_set_suspend(hdmi->info, 0);
+ fb_set_suspend(info, 0);
}
- release_console_sem();
+ console_unlock();
} else {
ret = 0;
if (!hdmi->info)
@@ -1157,12 +1181,12 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
fb_destroy_modedb(hdmi->monspec.modedb);
hdmi->monspec.modedb = NULL;
- acquire_console_sem();
+ console_lock();
/* HDMI disconnect */
fb_set_suspend(hdmi->info, 1);
- release_console_sem();
+ console_unlock();
pm_runtime_put(hdmi->dev);
}
@@ -1175,13 +1199,6 @@ out:
}
static int sh_hdmi_notify(struct notifier_block *nb,
- unsigned long action, void *data);
-
-static struct notifier_block sh_hdmi_notifier = {
- .notifier_call = sh_hdmi_notify,
-};
-
-static int sh_hdmi_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct fb_event *event = data;
@@ -1190,7 +1207,7 @@ static int sh_hdmi_notify(struct notifier_block *nb,
struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg;
struct sh_hdmi *hdmi = board_cfg->board_data;
- if (nb != &sh_hdmi_notifier || !hdmi || hdmi->info != info)
+ if (!hdmi || nb != &hdmi->notifier || hdmi->info != info)
return NOTIFY_DONE;
switch(action) {
@@ -1209,11 +1226,11 @@ static int sh_hdmi_notify(struct notifier_block *nb,
* temporarily, synchronise with the work queue and re-acquire
* the info->lock.
*/
- unlock_fb_info(hdmi->info);
+ unlock_fb_info(info);
mutex_lock(&hdmi->mutex);
hdmi->info = NULL;
mutex_unlock(&hdmi->mutex);
- lock_fb_info(hdmi->info);
+ lock_fb_info(info);
return NOTIFY_OK;
}
return NOTIFY_DONE;
@@ -1311,6 +1328,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
goto ecodec;
}
+ hdmi->notifier.notifier_call = sh_hdmi_notify;
+ fb_register_client(&hdmi->notifier);
+
return 0;
ecodec:
@@ -1341,6 +1361,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
snd_soc_unregister_codec(&pdev->dev);
+ fb_unregister_client(&hdmi->notifier);
+
board_cfg->display_on = NULL;
board_cfg->display_off = NULL;
board_cfg->board_data = NULL;
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bd4840a8a6b7..bf12e53aed5c 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -912,9 +912,9 @@ static int sh_mobile_release(struct fb_info *info, int user)
/* Nothing to reconfigure, when called from fbcon */
if (user) {
- acquire_console_sem();
+ console_lock();
sh_mobile_fb_reconfig(info);
- release_console_sem();
+ console_unlock();
}
mutex_unlock(&ch->open_lock);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index b7dc1800efa9..bcb44a594ebc 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2010,9 +2010,9 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
/* tell console/fb driver we are suspending */
- acquire_console_sem();
+ console_lock();
fb_set_suspend(fbi, 1);
- release_console_sem();
+ console_unlock();
/* backup copies in case chip is powered down over suspend */
@@ -2069,9 +2069,9 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
memcpy_toio(par->cursor.k_addr, par->store_cursor,
par->cursor.size);
- acquire_console_sem();
+ console_lock();
fb_set_suspend(fbi, 0);
- release_console_sem();
+ console_unlock();
vfree(par->store_fb);
vfree(par->store_cursor);
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index dee64c3b1e67..2ab704118c44 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -536,7 +536,7 @@ static int sstfb_set_par(struct fb_info *info)
fbiinit2 = sst_read(FBIINIT2);
fbiinit3 = sst_read(FBIINIT3);
- /* everything is reset. we enable fbiinit2/3 remap : dac acces ok */
+ /* everything is reset. we enable fbiinit2/3 remap : dac access ok */
pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
PCI_EN_INIT_WR | PCI_REMAP_DAC );
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 6913fe168c25..dfef88c803d4 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -25,7 +25,7 @@
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-/* Why should fb driver call console functions? because acquire_console_sem() */
+/* Why should fb driver call console functions? because console_lock() */
#include <linux/console.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
@@ -944,7 +944,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
struct mfd_cell *cell = dev->dev.platform_data;
int retval = 0;
- acquire_console_sem();
+ console_lock();
fb_set_suspend(info, 1);
@@ -965,7 +965,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
if (cell->suspend)
retval = cell->suspend(dev);
- release_console_sem();
+ console_unlock();
return retval;
}
@@ -976,7 +976,7 @@ static int tmiofb_resume(struct platform_device *dev)
struct mfd_cell *cell = dev->dev.platform_data;
int retval = 0;
- acquire_console_sem();
+ console_lock();
if (cell->resume) {
retval = cell->resume(dev);
@@ -992,7 +992,7 @@ static int tmiofb_resume(struct platform_device *dev)
fb_set_suspend(info, 0);
out:
- release_console_sem();
+ console_unlock();
return retval;
}
#else
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 020589a6bf02..2c8364e9b632 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1128,14 +1128,13 @@ static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
* But with imperfect damage info we may send pixels over USB
* that were, in fact, unchanged - wasting limited USB bandwidth
*/
- new_back = vmalloc(new_len);
+ new_back = vzalloc(new_len);
if (!new_back)
- pr_info("No shadow/backing buffer allcoated\n");
+ pr_info("No shadow/backing buffer allocated\n");
else {
if (dev->backing_buffer)
vfree(dev->backing_buffer);
dev->backing_buffer = new_back;
- memset(dev->backing_buffer, 0, new_len);
}
}
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 289edd519527..4e66349e4366 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1674,17 +1674,17 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
#ifdef CONFIG_PM
static int viafb_suspend(void *unused)
{
- acquire_console_sem();
+ console_lock();
fb_set_suspend(viafbinfo, 1);
viafb_sync(viafbinfo);
- release_console_sem();
+ console_unlock();
return 0;
}
static int viafb_resume(void *unused)
{
- acquire_console_sem();
+ console_lock();
if (viaparinfo->shared->vdev->engine_mmio)
viafb_reset_engine(viaparinfo);
viafb_set_par(viafbinfo);
@@ -1692,7 +1692,7 @@ static int viafb_resume(void *unused)
viafb_set_par(viafbinfo1);
fb_set_suspend(viafbinfo, 0);
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 7617f12e4fd7..0e120d67eb65 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -215,6 +215,33 @@ static int vt8500lcd_pan_display(struct fb_var_screeninfo *var,
return 0;
}
+/*
+ * vt8500lcd_blank():
+ * Blank the display by setting all palette values to zero. Note,
+ * True Color modes do not really use the palette, so this will not
+ * blank the display in all modes.
+ */
+static int vt8500lcd_blank(int blank, struct fb_info *info)
+{
+ int i;
+
+ switch (blank) {
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+ for (i = 0; i < 256; i++)
+ vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+ case FB_BLANK_UNBLANK:
+ if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+ info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+ fb_set_cmap(&info->cmap, info);
+ }
+ return 0;
+}
+
static struct fb_ops vt8500lcd_ops = {
.owner = THIS_MODULE,
.fb_set_par = vt8500lcd_set_par,
@@ -225,6 +252,7 @@ static struct fb_ops vt8500lcd_ops = {
.fb_sync = wmt_ge_sync,
.fb_ioctl = vt8500lcd_ioctl,
.fb_pan_display = vt8500lcd_pan_display,
+ .fb_blank = vt8500lcd_blank,
};
static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 85d76ec4c63e..a2965ab92cfb 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -23,7 +23,7 @@
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
@@ -819,12 +819,12 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
dev_info(info->device, "suspend\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -835,7 +835,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
@@ -850,7 +850,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
dev_info(info->device, "resume\n");
- acquire_console_sem();
+ console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0)
@@ -869,7 +869,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
fail:
mutex_unlock(&(par->open_lock));
- release_console_sem();
+ console_unlock();
return 0;
}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 4abb0b9ed653..a20218c2fda8 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -491,12 +491,12 @@ xenfb_make_preferred_console(void)
if (console_set_on_cmdline)
return;
- acquire_console_sem();
+ console_lock();
for_each_console(c) {
if (!strcmp(c->name, "tty") && c->index == 0)
break;
}
- release_console_sem();
+ console_unlock();
if (c) {
unregister_console(c);
c->flags |= CON_CONSDEV;
@@ -562,26 +562,24 @@ static void xenfb_init_shared_page(struct xenfb_info *info,
static int xenfb_connect_backend(struct xenbus_device *dev,
struct xenfb_info *info)
{
- int ret, evtchn;
+ int ret, evtchn, irq;
struct xenbus_transaction xbt;
ret = xenbus_alloc_evtchn(dev, &evtchn);
if (ret)
return ret;
- ret = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
+ irq = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
0, dev->devicetype, info);
- if (ret < 0) {
+ if (irq < 0) {
xenbus_free_evtchn(dev, evtchn);
xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
- return ret;
+ return irq;
}
- info->irq = ret;
-
again:
ret = xenbus_transaction_start(&xbt);
if (ret) {
xenbus_dev_fatal(dev, ret, "starting transaction");
- return ret;
+ goto unbind_irq;
}
ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
virt_to_mfn(info->page));
@@ -603,20 +601,25 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, ret, "completing transaction");
- return ret;
+ goto unbind_irq;
}
xenbus_switch_state(dev, XenbusStateInitialised);
+ info->irq = irq;
return 0;
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
+ unbind_irq:
+ unbind_from_irqhandler(irq, info);
return ret;
}
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
+ /* Prevent xenfb refresh */
+ info->update_wanted = 0;
if (info->irq >= 0)
unbind_from_irqhandler(info->irq, info);
info->irq = -1;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index ef8d9d558fc7..4fb5b2bf2348 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
-/* A PCI device has it's own struct device and so does a virtio device so
- * we create a place for the virtio devices to show up in sysfs. I think it
- * would make more sense for virtio to not insist on having it's own device. */
-static struct device *virtio_pci_root;
-
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
@@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
if (vp_dev == NULL)
return -ENOMEM;
- vp_dev->vdev.dev.parent = virtio_pci_root;
+ vp_dev->vdev.dev.parent = &pci_dev->dev;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
@@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = {
static int __init virtio_pci_init(void)
{
- int err;
-
- virtio_pci_root = root_device_register("virtio-pci");
- if (IS_ERR(virtio_pci_root))
- return PTR_ERR(virtio_pci_root);
-
- err = pci_register_driver(&virtio_pci_driver);
- if (err)
- root_device_unregister(virtio_pci_root);
-
- return err;
+ return pci_register_driver(&virtio_pci_driver);
}
module_init(virtio_pci_init);
@@ -735,7 +720,6 @@ module_init(virtio_pci_init);
static void __exit virtio_pci_exit(void)
{
pci_unregister_driver(&virtio_pci_driver);
- root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff8a746..38e96ab90945 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
/* get interface & functional clock objects */
hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
- hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(hdq_data->hdq_ick)) {
+ dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
+ ret = PTR_ERR(hdq_data->hdq_ick);
+ goto err_ick;
+ }
- if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
- dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
- if (IS_ERR(hdq_data->hdq_ick)) {
- ret = PTR_ERR(hdq_data->hdq_ick);
- goto err_clk;
- }
- if (IS_ERR(hdq_data->hdq_fck)) {
- ret = PTR_ERR(hdq_data->hdq_fck);
- clk_put(hdq_data->hdq_ick);
- goto err_clk;
- }
+ hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(hdq_data->hdq_fck)) {
+ dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
+ ret = PTR_ERR(hdq_data->hdq_fck);
+ goto err_fck;
}
hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@ err_fnclk:
clk_disable(hdq_data->hdq_ick);
err_intfclk:
- clk_put(hdq_data->hdq_ick);
clk_put(hdq_data->hdq_fck);
-err_clk:
+err_fck:
+ clk_put(hdq_data->hdq_ick);
+
+err_ick:
iounmap(hdq_data->hdq_base);
err_ioremap:
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 1f51366417b9..f0c909625bd1 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,17 @@ config W1_SLAVE_SMEM
Say Y here if you want to connect 1-wire
simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
+config W1_SLAVE_DS2423
+ tristate "Counter 1-wire device (DS2423)"
+ select CRC16
+ help
+ If you enable this you can read the counter values available
+ in the DS2423 chipset from the w1_slave file under the
+ sys file system.
+
+ Say Y here if you want to use a 1-wire
+ counter family device (DS2423).
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index f1f51f19b129..3c76350a24f7 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
new file mode 100644
index 000000000000..7a7dbe5026f1
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -0,0 +1,166 @@
+/*
+ * w1_ds2423.c
+ *
+ * Copyright (c) 2010 Mika Laitio <lamikr@pilppa.org>
+ *
+ * This driver reads the values of the 4 counters and reports them through
+ * the w1_slave file in the sys filesystem.
+ * Inspired by the w1_therm and w1_ds2431 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/crc16.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+#define CRC16_VALID 0xb001
+#define CRC16_INIT 0
+
+#define COUNTER_COUNT 4
+#define READ_BYTE_COUNT 42
+
+static ssize_t w1_counter_read(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static struct device_attribute w1_counter_attr =
+ __ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL);
+
+static ssize_t w1_counter_read(struct device *device,
+ struct device_attribute *attr, char *out_buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+ u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT];
+ u8 wrbuf[3];
+ int rom_addr;
+ int read_byte_count;
+ int result;
+ ssize_t c;
+ int ii;
+ int p;
+ int crc;
+
+ c = PAGE_SIZE;
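+ /*
+ * 0xA5 is the DS2423 "Read Memory + Counter" command; reading starts
+ * at the last byte of page 12, the first of the four counter pages.
+ */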
+ rom_addr = (12 << 5) + 31;
+ wrbuf[0] = 0xA5;
+ wrbuf[1] = rom_addr & 0xFF;
+ wrbuf[2] = rom_addr >> 8;
+ mutex_lock(&dev->mutex);
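+ /* w1_reset_select_slave() returns 0 on success */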
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_block(dev, wrbuf, 3);
+ read_byte_count = 0;
+ for (p = 0; p < 4; p++) {
+ /*
+ * 1 byte for first bytes in ram page read
+ * 4 bytes for counter
+ * 4 bytes for zero bits
+ * 2 bytes for crc
+ * 31 remaining bytes from the ram page
+ */
+ read_byte_count += w1_read_block(dev,
+ rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
+ for (ii = 0; ii < READ_BYTE_COUNT; ++ii)
+ c -= snprintf(out_buf + PAGE_SIZE - c,
+ c, "%02x ",
+ rbuf[(p * READ_BYTE_COUNT) + ii]);
+ if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
+ dev_warn(device,
+ "w1_counter_read() returned %u bytes "
+ "instead of %d bytes wanted.\n",
+ read_byte_count,
+ READ_BYTE_COUNT);
+ c -= snprintf(out_buf + PAGE_SIZE - c,
+ c, "crc=NO\n");
+ } else {
+ if (p == 0) {
+ crc = crc16(CRC16_INIT, wrbuf, 3);
+ crc = crc16(crc, rbuf, 11);
+ } else {
+ /*
+ * DS2423 calculates crc from all bytes
+ * read after the previous crc bytes.
+ */
+ crc = crc16(CRC16_INIT,
+ (rbuf + 11) +
+ ((p - 1) * READ_BYTE_COUNT),
+ READ_BYTE_COUNT);
+ }
+ if (crc == CRC16_VALID) {
+ result = 0;
+ for (ii = 4; ii > 0; ii--) {
+ result <<= 8;
+ result |= rbuf[(p *
+ READ_BYTE_COUNT) + ii];
+ }
+ c -= snprintf(out_buf + PAGE_SIZE - c,
+ c, "crc=YES c=%d\n", result);
+ } else {
+ c -= snprintf(out_buf + PAGE_SIZE - c,
+ c, "crc=NO\n");
+ }
+ }
+ }
+ } else {
+ c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
+ }
+ mutex_unlock(&dev->mutex);
+ return PAGE_SIZE - c;
+}
+
+static int w1_f1d_add_slave(struct w1_slave *sl)
+{
+ return device_create_file(&sl->dev, &w1_counter_attr);
+}
+
+static void w1_f1d_remove_slave(struct w1_slave *sl)
+{
+ device_remove_file(&sl->dev, &w1_counter_attr);
+}
+
+static struct w1_family_ops w1_f1d_fops = {
+ .add_slave = w1_f1d_add_slave,
+ .remove_slave = w1_f1d_remove_slave,
+};
+
+static struct w1_family w1_family_1d = {
+ .fid = W1_COUNTER_DS2423,
+ .fops = &w1_f1d_fops,
+};
+
+static int __init w1_f1d_init(void)
+{
+ return w1_register_family(&w1_family_1d);
+}
+
+static void __exit w1_f1d_exit(void)
+{
+ w1_unregister_family(&w1_family_1d);
+}
+
+module_init(w1_f1d_init);
+module_exit(w1_f1d_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
+MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 3ca1b9298f21..f3b636d7cafe 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -30,6 +30,7 @@
#define W1_FAMILY_SMEM_01 0x01
#define W1_FAMILY_SMEM_81 0x81
#define W1_THERM_DS18S20 0x10
+#define W1_COUNTER_DS2423 0x1D
#define W1_THERM_DS1822 0x22
#define W1_EEPROM_DS2433 0x23
#define W1_THERM_DS18B20 0x28
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a5ad77ef4266..31649b7b672f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -409,15 +409,26 @@ config ALIM7101_WDT
Most people will say N.
config F71808E_WDT
- tristate "Fintek F71808E, F71882FG and F71889FG Watchdog"
+ tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
depends on X86 && EXPERIMENTAL
help
This is the driver for the hardware watchdog on the Fintek
- F71808E, F71882FG and F71889FG Super I/O controllers.
+ F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
You can compile this driver directly into the kernel, or use
it as a module. The module will be called f71808e_wdt.
+config SP5100_TCO
+ tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
+ depends on X86 && PCI
+ ---help---
+ Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
+ (Total Cost of Ownership) timer is a watchdog timer that will reboot
+ the machine after its expiration. The expiration time can be
+ configured with the "heartbeat" parameter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sp5100_tco.
config GEODE_WDT
tristate "AMD Geode CS5535/CS5536 Watchdog"
@@ -631,6 +642,24 @@ config PC87413_WDT
Most people will say N.
+config NV_TCO
+ tristate "nVidia TCO Timer/Watchdog"
+ depends on X86 && PCI
+ ---help---
+ Hardware driver for the TCO timer built into the nVidia Hub family
+ (such as the MCP51). The TCO (Total Cost of Ownership) timer is a
+ watchdog timer that will reboot the machine after its second
+ expiration. The expiration time can be configured with the
+ "heartbeat" parameter.
+
+ On some motherboards the driver may fail to reset the chipset's
+ NO_REBOOT flag which prevents the watchdog from rebooting the
+ machine. If this is the case you will get a kernel message like
+ "failed to reset NO_REBOOT flag, reboot disabled by hardware".
+
+ To compile this driver as a module, choose M here: the
+ module will be called nv_tco.
+
config RDC321X_WDT
tristate "RDC R-321x SoC watchdog"
depends on X86_RDC321X
@@ -722,14 +751,15 @@ config SMSC37B787_WDT
Most people will say N.
config W83627HF_WDT
- tristate "W83627HF Watchdog Timer"
+ tristate "W83627HF/W83627DHG Watchdog Timer"
depends on X86
---help---
This is the driver for the hardware watchdog on the W83627HF chipset
as used in Advantech PC-9578 and Tyan S2721-533 motherboards
- (and likely others). This watchdog simply watches your kernel to
- make sure it doesn't freeze, and if it does, it reboots your computer
- after a certain amount of time.
+ (and likely others). The driver also supports the W83627DHG chip.
+ This watchdog simply watches your kernel to make sure it doesn't
+ freeze, and if it does, it reboots your computer after a certain
+ amount of time.
To compile this driver as a module, choose M here: the
module will be called w83627hf_wdt.
@@ -832,10 +862,22 @@ config SBC_EPX_C3_WATCHDOG
# M68K Architecture
-# M68KNOMMU Architecture
+config M54xx_WATCHDOG
+ tristate "MCF54xx watchdog support"
+ depends on M548x
+ help
+ To compile this driver as a module, choose M here: the
+ module will be called m54xx_wdt.
# MIPS Architecture
+config ATH79_WDT
+ tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog"
+ depends on ATH79
+ help
+ Hardware driver for the built-in watchdog timer on the Atheros
+ AR71XX/AR724X/AR913X SoCs.
+
config BCM47XX_WDT
tristate "Broadcom BCM47xx Watchdog Timer"
depends on BCM47XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 4b0ef386229d..20e44c4782b3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
+obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
obj-$(CONFIG_GEODE_WDT) += geodewdt.o
obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o
@@ -86,6 +87,7 @@ obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
+obj-$(CONFIG_NV_TCO) += nv_tco.o
obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o
obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
@@ -104,10 +106,10 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
# M32R Architecture
# M68K Architecture
-
-# M68KNOMMU Architecture
+obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
# MIPS Architecture
+obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 1e9caea8ff8a..fa4d36033552 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id ali_pci_tbl[] = {
+static struct pci_device_id ali_pci_tbl[] __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index d8d4da9a483d..4b7a2b4138ed 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata = {
+static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
new file mode 100644
index 000000000000..725c84bfdd76
--- /dev/null
+++ b/drivers/watchdog/ath79_wdt.c
@@ -0,0 +1,305 @@
+/*
+ * Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer.
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/ixp4xx_wdt.c
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ * Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * which again was based on sa1100 driver,
+ * Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define DRIVER_NAME "ath79-wdt"
+
+#define WDT_TIMEOUT 15 /* seconds */
+
+#define WDOG_CTRL_LAST_RESET BIT(31)
+#define WDOG_CTRL_ACTION_MASK 3
+#define WDOG_CTRL_ACTION_NONE 0 /* no action */
+#define WDOG_CTRL_ACTION_GPI 1 /* general purpose interrupt */
+#define WDOG_CTRL_ACTION_NMI 2 /* NMI */
+#define WDOG_CTRL_ACTION_FCR 3 /* full chip reset */
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+ "(default=" __MODULE_STRING(WDT_TIMEOUT) "s)");
+
+static unsigned long wdt_flags;
+
+#define WDT_FLAGS_BUSY 0
+#define WDT_FLAGS_EXPECT_CLOSE 1
+
+static struct clk *wdt_clk;
+static unsigned long wdt_freq;
+static int boot_status;
+static int max_timeout;
+
+static inline void ath79_wdt_keepalive(void)
+{
+ ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+}
+
+static inline void ath79_wdt_enable(void)
+{
+ ath79_wdt_keepalive();
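+ /* arm the timer; WDOG_CTRL_ACTION_FCR forces a full chip reset on expiry */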
+ ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+}
+
+static inline void ath79_wdt_disable(void)
+{
+ ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+}
+
+static int ath79_wdt_set_timeout(int val)
+{
+ if (val < 1 || val > max_timeout)
+ return -EINVAL;
+
+ timeout = val;
+ ath79_wdt_keepalive();
+
+ return 0;
+}
+
+static int ath79_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags))
+ return -EBUSY;
+
+ clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+ ath79_wdt_enable();
+
+ return nonseekable_open(inode, file);
+}
+
+static int ath79_wdt_release(struct inode *inode, struct file *file)
+{
+ if (test_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags))
+ ath79_wdt_disable();
+ else {
+ pr_crit(DRIVER_NAME ": device closed unexpectedly, "
+ "watchdog timer will not stop!\n");
+ ath79_wdt_keepalive();
+ }
+
+ clear_bit(WDT_FLAGS_BUSY, &wdt_flags);
+ clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+ return 0;
+}
+
+static ssize_t ath79_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
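+ /* clear the expect-close flag, then look for the magic 'V' character */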
+ clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ set_bit(WDT_FLAGS_EXPECT_CLOSE,
+ &wdt_flags);
+ }
+ }
+
+ ath79_wdt_keepalive();
+ }
+
+ return len;
+}
+
+static const struct watchdog_info ath79_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+ .firmware_version = 0,
+ .identity = "ATH79 watchdog",
+};
+
+static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int err;
+ int t;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ err = copy_to_user(argp, &ath79_wdt_info,
+ sizeof(ath79_wdt_info)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ err = put_user(0, p);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ err = put_user(boot_status, p);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ ath79_wdt_keepalive();
+ err = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ err = get_user(t, p);
+ if (err)
+ break;
+
+ err = ath79_wdt_set_timeout(t);
+ if (err)
+ break;
+
+ /* fallthrough */
+ case WDIOC_GETTIMEOUT:
+ err = put_user(timeout, p);
+ break;
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
+static const struct file_operations ath79_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = ath79_wdt_write,
+ .unlocked_ioctl = ath79_wdt_ioctl,
+ .open = ath79_wdt_open,
+ .release = ath79_wdt_release,
+};
+
+static struct miscdevice ath79_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ath79_wdt_fops,
+};
+
+static int __devinit ath79_wdt_probe(struct platform_device *pdev)
+{
+ u32 ctrl;
+ int err;
+
+ wdt_clk = clk_get(&pdev->dev, "wdt");
+ if (IS_ERR(wdt_clk))
+ return PTR_ERR(wdt_clk);
+
+ err = clk_enable(wdt_clk);
+ if (err)
+ goto err_clk_put;
+
+ wdt_freq = clk_get_rate(wdt_clk);
+ if (!wdt_freq) {
+ err = -EINVAL;
+ goto err_clk_disable;
+ }
+
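+ /* the countdown register is 32 bits wide, which bounds the maximum timeout */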
+ max_timeout = (0xfffffffful / wdt_freq);
+ if (timeout < 1 || timeout > max_timeout) {
+ timeout = max_timeout;
+ dev_info(&pdev->dev,
+ "timeout value must be 0 < timeout < %d, using %d\n",
+ max_timeout, timeout);
+ }
+
+ ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+ boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
+
+ err = misc_register(&ath79_wdt_miscdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to register misc device, err=%d\n", err);
+ goto err_clk_disable;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable(wdt_clk);
+err_clk_put:
+ clk_put(wdt_clk);
+ return err;
+}
+
+static int __devexit ath79_wdt_remove(struct platform_device *pdev)
+{
+ misc_deregister(&ath79_wdt_miscdev);
+ clk_disable(wdt_clk);
+ clk_put(wdt_clk);
+ return 0;
+}
+
+static void ath79_wdt_shutdown(struct platform_device *pdev)
+{
+ ath79_wdt_disable();
+}
+
+static struct platform_driver ath79_wdt_driver = {
+ .remove = __devexit_p(ath79_wdt_remove),
+ .shutdown = ath79_wdt_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ath79_wdt_init(void)
+{
+ return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
+}
+module_init(ath79_wdt_init);
+
+static void __exit ath79_wdt_exit(void)
+{
+ platform_driver_unregister(&ath79_wdt_driver);
+}
+module_exit(ath79_wdt_exit);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index d11ffb091b0d..7e7ec9c35b6a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -85,6 +85,22 @@ static unsigned int sec_to_period(unsigned int secs)
return 0;
}
+static void __booke_wdt_set(void *data)
+{
+ u32 val;
+
+ val = mfspr(SPRN_TCR);
+ val &= ~WDTP_MASK;
+ val |= WDTP(booke_wdt_period);
+
+ mtspr(SPRN_TCR, val);
+}
+
+static void booke_wdt_set(void)
+{
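+ /* update the watchdog period in the TCR on every online CPU */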
+ on_each_cpu(__booke_wdt_set, NULL, 0);
+}
+
static void __booke_wdt_ping(void *data)
{
mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
@@ -181,8 +197,7 @@ static long booke_wdt_ioctl(struct file *file,
#else
booke_wdt_period = tmp;
#endif
- mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
- WDTP(booke_wdt_period));
+ booke_wdt_set();
return 0;
case WDIOC_GETTIMEOUT:
return put_user(booke_wdt_period, p);
@@ -193,8 +208,15 @@ static long booke_wdt_ioctl(struct file *file,
return 0;
}
+/* wdt_is_active stores whether or not the /dev/watchdog device is open */
+static unsigned long wdt_is_active;
+
static int booke_wdt_open(struct inode *inode, struct file *file)
{
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &wdt_is_active))
+ return -EBUSY;
+
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
@@ -210,8 +232,17 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
static int booke_wdt_release(struct inode *inode, struct file *file)
{
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ /* Normally, the watchdog is disabled when /dev/watchdog is closed, but
+ * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
+ * watchdog should remain enabled. So we disable it only if
+ * CONFIG_WATCHDOG_NOWAYOUT is not defined.
+ */
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
+#endif
+
+ clear_bit(0, &wdt_is_active);
return 0;
}
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 65e579635dba..d4d8d1fdccc4 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -42,18 +42,21 @@
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
#define SIO_REG_DEVREV 0x22 /* Device revision */
#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
+#define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */
+#define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */
+#define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */
+#define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
-#define SIO_F71808_ID 0x0901 /* Chipset ID */
-#define SIO_F71858_ID 0x0507 /* Chipset ID */
+#define SIO_F71808_ID 0x0901 /* Chipset ID */
+#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
-#define F71882FG_REG_START 0x01
-
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
#define F71808FG_REG_WD_TIME 0xf6
@@ -70,13 +73,15 @@
#define WATCHDOG_MAX_TIMEOUT (60 * 255)
#define WATCHDOG_PULSE_WIDTH 125 /* 125 ms, default pulse width for
watchdog signal */
+#define WATCHDOG_F71862FG_PIN 63 /* default watchdog reset output
+ pin number 63 */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
static const int max_timeout = WATCHDOG_MAX_TIMEOUT;
-static int timeout = 60; /* default timeout in seconds */
+static int timeout = WATCHDOG_TIMEOUT; /* default timeout in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<= timeout <="
@@ -89,6 +94,12 @@ MODULE_PARM_DESC(pulse_width,
"Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
+static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
+module_param(f71862fg_pin, uint, 0);
+MODULE_PARM_DESC(f71862fg_pin,
+ "Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
+ " (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
+
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
@@ -98,12 +109,13 @@ module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
static const char *f71808e_names[] = {
"f71808fg",
"f71858fg",
"f71862fg",
+ "f71869",
"f71882fg",
"f71889fg",
};
@@ -282,6 +294,28 @@ exit_unlock:
return err;
}
+static int f71862fg_pin_configure(unsigned short ioaddr)
+{
+ /* When ioaddr is non-zero the calling function has to take care of
+ mutex handling and superio preparation! */
+
+ if (f71862fg_pin == 63) {
+ if (ioaddr) {
+ /* SPI must be disabled first to use this pin! */
+ superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+ superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
+ }
+ } else if (f71862fg_pin == 56) {
+ if (ioaddr)
+ superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
+ } else {
+ printk(KERN_ERR DRVNAME ": Invalid argument f71862fg_pin=%d\n",
+ f71862fg_pin);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int watchdog_start(void)
{
/* Make sure we don't die as soon as the watchdog is enabled below */
@@ -299,19 +333,30 @@ static int watchdog_start(void)
switch (watchdog.type) {
case f71808fg:
/* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
- superio_clear_bit(watchdog.sioaddr, 0x2a, 3);
- superio_clear_bit(watchdog.sioaddr, 0x2b, 3);
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT2, 3);
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 3);
+ break;
+
+ case f71862fg:
+ err = f71862fg_pin_configure(watchdog.sioaddr);
+ if (err)
+ goto exit_superio;
+ break;
+
+ case f71869:
+ /* GPIO14 --> WDTRST# */
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
break;
case f71882fg:
/* Set pin 56 to WDTRST# */
- superio_set_bit(watchdog.sioaddr, 0x29, 1);
+ superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
break;
case f71889fg:
/* set pin 40 to WDTRST# */
- superio_outb(watchdog.sioaddr, 0x2b,
- superio_inb(watchdog.sioaddr, 0x2b) & 0xcf);
+ superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3,
+ superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
break;
default:
@@ -711,16 +756,19 @@ static int __init f71808e_find(int sioaddr)
case SIO_F71808_ID:
watchdog.type = f71808fg;
break;
+ case SIO_F71862_ID:
+ watchdog.type = f71862fg;
+ err = f71862fg_pin_configure(0); /* validate module parameter */
+ break;
+ case SIO_F71869_ID:
+ watchdog.type = f71869;
+ break;
case SIO_F71882_ID:
watchdog.type = f71882fg;
break;
case SIO_F71889_ID:
watchdog.type = f71889fg;
break;
- case SIO_F71862_ID:
- /* These have a watchdog, though it isn't implemented (yet). */
- err = -ENOSYS;
- goto exit;
case SIO_F71858_ID:
/* Confirmed (by datasheet) not to have a watchdog. */
err = -ENODEV;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5bf6e2c..24b966d5061a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
unsigned long rom_pl;
static int die_nmi_called;
- if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+ if (ulReason != DIE_NMIUNKNOWN)
goto out;
if (!hpwdt_nmi_decoding)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index b8838d2c67a6..2c6c2b4ad8bf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
/*
* intel TCO Watchdog Driver
*
- * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+ * (c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -26,13 +26,15 @@
* document number 301473-002, 301474-026: 82801F (ICH6)
* document number 313082-001, 313075-006: 631xESB, 632xESB
* document number 307013-003, 307014-024: 82801G (ICH7)
+ * document number 322896-001, 322897-001: NM10
* document number 313056-003, 313057-017: 82801H (ICH8)
* document number 316972-004, 316973-012: 82801I (ICH9)
* document number 319973-002, 319974-002: 82801J (ICH10)
* document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
* document number 320066-003, 320257-008: EP80597 (IICH)
- * document number TBD : Cougar Point (CPT)
+ * document number 324645-001, 324646-001: Cougar Point (CPT)
* document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
*/
/*
@@ -85,6 +87,7 @@ enum iTCO_chipsets {
TCO_ICH7DH, /* ICH7DH */
TCO_ICH7M, /* ICH7-M & ICH7-U */
TCO_ICH7MDH, /* ICH7-M DH */
+ TCO_NM10, /* NM10 */
TCO_ICH8, /* ICH8 & ICH8R */
TCO_ICH8DH, /* ICH8DH */
TCO_ICH8DO, /* ICH8DO */
@@ -149,6 +152,7 @@ enum iTCO_chipsets {
TCO_CPT31, /* Cougar Point */
TCO_PBG1, /* Patsburg */
TCO_PBG2, /* Patsburg */
+ TCO_DH89XXCC, /* DH89xxCC */
};
static struct {
@@ -174,6 +178,7 @@ static struct {
{"ICH7DH", 2},
{"ICH7-M or ICH7-U", 2},
{"ICH7-M DH", 2},
+ {"NM10", 2},
{"ICH8 or ICH8R", 2},
{"ICH8DH", 2},
{"ICH8DO", 2},
@@ -238,6 +243,7 @@ static struct {
{"Cougar Point", 2},
{"Patsburg", 2},
{"Patsburg", 2},
+ {"DH89xxCC", 2},
{NULL, 0}
};
@@ -291,6 +297,7 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
+ { ITCO_PCI_DEVICE(0x27bc, TCO_NM10)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
@@ -355,6 +362,7 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
{ ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
{ ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)},
{ ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)},
+ { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 2852bb2e3fd9..811471903e8a 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -21,7 +21,7 @@
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <mach/timex.h>
+#include <mach/hardware.h>
#include <mach/regs-timer.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
new file mode 100644
index 000000000000..4d43286074aa
--- /dev/null
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * drivers/watchdog/m54xx_wdt.c
+ *
+ * Watchdog driver for ColdFire MCF547x & MCF548x processors
+ * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
+ *
+ * Adapted from the IXP4xx watchdog driver, which carries these notices:
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2004 (c) MontaVista, Software, Inc.
+ * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+#include <asm/coldfire.h>
+#include <asm/m54xxsim.h>
+#include <asm/m54xxgpt.h>
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */
+static unsigned long wdt_status;
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+static void wdt_enable(void)
+{
+ unsigned int gms0;
+
+ /* preserve GPIO usage, if any */
+ gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ if (gms0 & MCF_GPT_GMS_TMS_GPIO)
+ gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
+ | MCF_GPT_GMS_OD);
+ else
+ gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
+ __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
+ MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+ gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
+ __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_disable(void)
+{
+ unsigned int gms0;
+
+ /* disable watchdog */
+ gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
+ __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_keepalive(void)
+{
+ unsigned int gms0;
+
+ gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ gms0 |= MCF_GPT_GMS_OCPW(0xA5);
+ __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static int m54xx_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+ return -EBUSY;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ wdt_enable();
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t m54xx_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ wdt_keepalive();
+ }
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING,
+ .identity = "Coldfire M54xx Watchdog",
+};
+
+static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int time;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ wdt_keepalive();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(time, (int *)arg);
+ if (ret)
+ break;
+
+ if (time <= 0 || time > 30) {
+ ret = -EINVAL;
+ break;
+ }
+
+ heartbeat = time;
+ wdt_enable();
+ /* Fall through */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(heartbeat, (int *)arg);
+ break;
+ }
+ return ret;
+}
+
+static int m54xx_wdt_release(struct inode *inode, struct file *file)
+{
+ if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
+ wdt_disable();
+ else {
+ printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+ "timer will not stop\n");
+ wdt_keepalive();
+ }
+ clear_bit(WDT_IN_USE, &wdt_status);
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ return 0;
+}
+
+
+static const struct file_operations m54xx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = m54xx_wdt_write,
+ .unlocked_ioctl = m54xx_wdt_ioctl,
+ .open = m54xx_wdt_open,
+ .release = m54xx_wdt_release,
+};
+
+static struct miscdevice m54xx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &m54xx_wdt_fops,
+};
+
+static int __init m54xx_wdt_init(void)
+{
+ if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
+ "Coldfire M54xx Watchdog")) {
+ printk(KERN_WARNING
+ "Coldfire M54xx Watchdog : I/O region busy\n");
+ return -EBUSY;
+ }
+ printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
+
+ return misc_register(&m54xx_wdt_miscdev);
+}
+
+static void __exit m54xx_wdt_exit(void)
+{
+ misc_deregister(&m54xx_wdt_miscdev);
+ release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+}
+
+module_init(m54xx_wdt_init);
+module_exit(m54xx_wdt_exit);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
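A minimal userspace sketch (not part of the patch) of the magic-close protocol implemented by m54xx_wdt_write() and m54xx_wdt_release() above: any write pings the watchdog, WDIOC_SETTIMEOUT takes 1..30 seconds per the ioctl handler, and writing 'V' before close() lets the release handler disable the timer. Only the generic /dev/watchdog interface is used; error handling is trimmed.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 10;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver accepts 1..30 s */
	write(fd, "p", 1);			/* any byte is a keepalive ping */
	write(fd, "V", 1);			/* magic character: OK to close */
	close(fd);				/* release() calls wdt_disable() */
	return 0;
}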
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
new file mode 100644
index 000000000000..1a50aa7079bf
--- /dev/null
+++ b/drivers/watchdog/nv_tco.c
@@ -0,0 +1,512 @@
+/*
+ * nv_tco 0.01: TCO timer driver for NV chipsets
+ *
+ * (c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ * Based off i8xx_tco.c:
+ * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ * Reserved.
+ * http://www.kernelconcepts.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * TCO timer driver for NV chipsets
+ * based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ * Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "nv_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "NV_TCO"
+#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static unsigned int tcobase;
+static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *nv_tco_platform_device;
+
+/* module parameters */
+#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (2<heartbeat<39) */
+static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
+ "default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+ " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static inline unsigned char seconds_to_ticks(int seconds)
+{
+ /* the internal timer is stored as ticks which decrement
+ * every 0.6 seconds */
+ return (seconds * 10) / 6;
+}
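+/*
+ * For example, the default 30 second heartbeat becomes (30 * 10) / 6 = 50
+ * ticks, inside the 0x04..0x3f window enforced by tco_timer_set_heartbeat()
+ * below.
+ */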
+
+static void tco_timer_start(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ val = inl(TCO_CNT(tcobase));
+ val &= ~TCO_CNT_TCOHALT;
+ outl(val, TCO_CNT(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ val = inl(TCO_CNT(tcobase));
+ val |= TCO_CNT_TCOHALT;
+ outl(val, TCO_CNT(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ outb(0x01, TCO_RLD(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+ int ret = 0;
+ unsigned char tmrval;
+ unsigned long flags;
+ u8 val;
+
+ /*
+	 * Note that seconds_to_ticks(t) > t, so if t > 0x3f then so is
+	 * tmrval = seconds_to_ticks(t).  Check that the count in seconds
+	 * isn't out of range on its own (to avoid overflow in tmrval).
+ */
+ if (t < 0 || t > 0x3f)
+ return -EINVAL;
+ tmrval = seconds_to_ticks(t);
+
+ /* "Values of 0h-3h are ignored and should not be attempted" */
+ if (tmrval > 0x3f || tmrval < 0x04)
+ return -EINVAL;
+
+ /* Write new heartbeat to watchdog */
+ spin_lock_irqsave(&tco_lock, flags);
+ val = inb(TCO_TMR(tcobase));
+ val &= 0xc0;
+ val |= tmrval;
+ outb(val, TCO_TMR(tcobase));
+ val = inb(TCO_TMR(tcobase));
+
+ if ((val & 0x3f) != tmrval)
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&tco_lock, flags);
+
+ if (ret)
+ return ret;
+
+ heartbeat = t;
+ return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int nv_tco_open(struct inode *inode, struct file *file)
+{
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &timer_alive))
+ return -EBUSY;
+
+ /* Reload and activate timer */
+ tco_timer_keepalive();
+ tco_timer_start();
+ return nonseekable_open(inode, file);
+}
+
+static int nv_tco_release(struct inode *inode, struct file *file)
+{
+ /* Shut off the timer */
+ if (tco_expect_close == 42) {
+ tco_timer_stop();
+ } else {
+ printk(KERN_CRIT PFX "Unexpected close, not stopping "
+ "watchdog!\n");
+ tco_timer_keepalive();
+ }
+ clear_bit(0, &timer_alive);
+ tco_expect_close = 0;
+ return 0;
+}
+
+static ssize_t nv_tco_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /*
+ * note: just in case someone wrote the magic character
+ * five months ago...
+ */
+ tco_expect_close = 0;
+
+ /*
+ * scan to see whether or not we got the magic
+ * character
+ */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ tco_expect_close = 42;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ tco_timer_keepalive();
+ }
+ return len;
+}
+
+static long nv_tco_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_heartbeat;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = TCO_MODULE_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, p))
+ return -EFAULT;
+ if (new_options & WDIOS_DISABLECARD) {
+ tco_timer_stop();
+ retval = 0;
+ }
+ if (new_options & WDIOS_ENABLECARD) {
+ tco_timer_keepalive();
+ tco_timer_start();
+ retval = 0;
+ }
+ return retval;
+ case WDIOC_KEEPALIVE:
+ tco_timer_keepalive();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_heartbeat, p))
+ return -EFAULT;
+ if (tco_timer_set_heartbeat(new_heartbeat))
+ return -EINVAL;
+ tco_timer_keepalive();
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, p);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static const struct file_operations nv_tco_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = nv_tco_write,
+ .unlocked_ioctl = nv_tco_ioctl,
+ .open = nv_tco_open,
+ .release = nv_tco_release,
+};
+
+static struct miscdevice nv_tco_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &nv_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id tco_pci_tbl[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __init nv_tco_getdevice(void)
+{
+ struct pci_dev *dev = NULL;
+ u32 val;
+
+ /* Find the PCI device */
+ for_each_pci_dev(dev) {
+ if (pci_match_id(tco_pci_tbl, dev) != NULL) {
+ tco_pci = dev;
+ break;
+ }
+ }
+
+ if (!tco_pci)
+ return 0;
+
+ /* Find the base io port */
+ pci_read_config_dword(tco_pci, 0x64, &val);
+ val &= 0xffff;
+ if (val == 0x0001 || val == 0x0000) {
+ /* Something is wrong here, bar isn't setup */
+ printk(KERN_ERR PFX "failed to get tcobase address\n");
+ return 0;
+ }
+ val &= 0xff00;
+ tcobase = val + 0x40;
+
+ if (!request_region(tcobase, 0x10, "NV TCO")) {
+ printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+ tcobase);
+ return 0;
+ }
+
+ /* Set a reasonable heartbeat before we stop the timer */
+ tco_timer_set_heartbeat(30);
+
+ /*
+ * Stop the TCO before we change anything so we don't race with
+ * a zeroed timer.
+ */
+ tco_timer_keepalive();
+ tco_timer_stop();
+
+ /* Disable SMI caused by TCO */
+ if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
+ printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+ MCP51_SMI_EN(tcobase));
+ goto out;
+ }
+ val = inl(MCP51_SMI_EN(tcobase));
+ val &= ~MCP51_SMI_EN_TCO;
+ outl(val, MCP51_SMI_EN(tcobase));
+ val = inl(MCP51_SMI_EN(tcobase));
+ release_region(MCP51_SMI_EN(tcobase), 4);
+ if (val & MCP51_SMI_EN_TCO) {
+ printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n");
+ goto out;
+ }
+
+ /* Check chipset's NO_REBOOT bit */
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+ pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
+ printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot "
+ "disabled by hardware\n");
+ goto out;
+ }
+
+ return 1;
+out:
+ release_region(tcobase, 0x10);
+ return 0;
+}
+
+static int __devinit nv_tco_init(struct platform_device *dev)
+{
+ int ret;
+
+ /* Check whether or not the hardware watchdog is there */
+ if (!nv_tco_getdevice())
+ return -ENODEV;
+
+ /* Check to see if last reboot was due to watchdog timeout */
+ printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+ inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");
+
+ /* Clear out the old status */
+ outl(TCO_STS_RESET, TCO_STS(tcobase));
+
+ /*
+	 * Check that the heartbeat value is within its range.
+ * If not, reset to the default.
+ */
+ if (tco_timer_set_heartbeat(heartbeat)) {
+ heartbeat = WATCHDOG_HEARTBEAT;
+ tco_timer_set_heartbeat(heartbeat);
+ printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39, "
+ "using %d\n", heartbeat);
+ }
+
+ ret = misc_register(&nv_tco_miscdev);
+ if (ret != 0) {
+ printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
+ "(err=%d)\n", WATCHDOG_MINOR, ret);
+ goto unreg_region;
+ }
+
+ clear_bit(0, &timer_alive);
+
+ tco_timer_stop();
+
+ printk(KERN_INFO PFX "initialized (0x%04x). heartbeat=%d sec "
+ "(nowayout=%d)\n", tcobase, heartbeat, nowayout);
+
+ return 0;
+
+unreg_region:
+ release_region(tcobase, 0x10);
+ return ret;
+}
+
+static void __devexit nv_tco_cleanup(void)
+{
+ u32 val;
+
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ tco_timer_stop();
+
+	/* Set the NO_REBOOT bit to prevent later reboots, just to be sure */
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+ pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
+ printk(KERN_CRIT PFX "Couldn't unset REBOOT bit. Machine may "
+ "soon reset\n");
+ }
+
+ /* Deregister */
+ misc_deregister(&nv_tco_miscdev);
+ release_region(tcobase, 0x10);
+}
+
+static int __devexit nv_tco_remove(struct platform_device *dev)
+{
+ if (tcobase)
+ nv_tco_cleanup();
+
+ return 0;
+}
+
+static void nv_tco_shutdown(struct platform_device *dev)
+{
+ tco_timer_stop();
+}
+
+static struct platform_driver nv_tco_driver = {
+ .probe = nv_tco_init,
+ .remove = __devexit_p(nv_tco_remove),
+ .shutdown = nv_tco_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TCO_MODULE_NAME,
+ },
+};
+
+static int __init nv_tco_init_module(void)
+{
+ int err;
+
+ printk(KERN_INFO PFX "NV TCO WatchDog Timer Driver v%s\n",
+ TCO_VERSION);
+
+ err = platform_driver_register(&nv_tco_driver);
+ if (err)
+ return err;
+
+ nv_tco_platform_device = platform_device_register_simple(
+ TCO_MODULE_NAME, -1, NULL, 0);
+ if (IS_ERR(nv_tco_platform_device)) {
+ err = PTR_ERR(nv_tco_platform_device);
+ goto unreg_platform_driver;
+ }
+
+ return 0;
+
+unreg_platform_driver:
+ platform_driver_unregister(&nv_tco_driver);
+ return err;
+}
+
+static void __exit nv_tco_cleanup_module(void)
+{
+ platform_device_unregister(nv_tco_platform_device);
+ platform_driver_unregister(&nv_tco_driver);
+ printk(KERN_INFO PFX "NV TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(nv_tco_init_module);
+module_exit(nv_tco_cleanup_module);
+
+MODULE_AUTHOR("Mike Waychison");
+MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
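A hypothetical userspace sketch of the WDIOC_SETOPTIONS handling in nv_tco_ioctl() above: WDIOS_DISABLECARD halts the TCO timer and WDIOS_ENABLECARD reloads and restarts it. Only the standard /dev/watchdog interface shown in the driver is used; error checking is omitted for brevity.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int flags;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	flags = WDIOS_DISABLECARD;	/* driver calls tco_timer_stop() */
	ioctl(fd, WDIOC_SETOPTIONS, &flags);
	/* ... do work with the watchdog halted ... */
	flags = WDIOS_ENABLECARD;	/* keepalive + tco_timer_start() */
	ioctl(fd, WDIOC_SETOPTIONS, &flags);
	write(fd, "V", 1);		/* magic close so release() stops it */
	close(fd);
	return 0;
}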
diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h
new file mode 100644
index 000000000000..c2d1d04e055b
--- /dev/null
+++ b/drivers/watchdog/nv_tco.h
@@ -0,0 +1,64 @@
+/*
+ * nv_tco: TCO timer driver for nVidia chipsets.
+ *
+ * (c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ * Supported Chipsets:
+ * - MCP51/MCP55
+ *
+ * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ * Reserved.
+ * http://www.kernelconcepts.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither kernel concepts nor Nils Faerber admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>
+ * developed for
+ * Jentro AG, Haar/Munich (Germany)
+ *
+ * TCO timer driver for NV chipsets
+ * based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ * Some address definitions for the TCO
+ */
+
+#define TCO_RLD(base) ((base) + 0x00) /* TCO Timer Reload and Current Value */
+#define TCO_TMR(base) ((base) + 0x01) /* TCO Timer Initial Value */
+
+#define TCO_STS(base) ((base) + 0x04) /* TCO Status Register */
+/*
+ * TCO Boot Status bit: set on a TCO reset and cleared by software or by
+ * standby power-good (it survives reboots).  Unfortunately this bit is
+ * never set.
+ */
+# define TCO_STS_BOOT_STS (1 << 9)
+/*
+ * First and second timeout status bits; these also survive a warm boot,
+ * and they work, so we use them.
+ */
+# define TCO_STS_TCO_INT_STS (1 << 1)
+# define TCO_STS_TCO2TO_STS (1 << 10)
+# define TCO_STS_RESET (TCO_STS_BOOT_STS | TCO_STS_TCO2TO_STS | \
+ TCO_STS_TCO_INT_STS)
+
+#define TCO_CNT(base) ((base) + 0x08) /* TCO Control Register */
+# define TCO_CNT_TCOHALT (1 << 12)
+
+#define MCP51_SMBUS_SETUP_B 0xe8
+# define MCP51_SMBUS_SETUP_B_TCO_REBOOT (1 << 25)
+
+/*
+ * The SMI_EN register is at the base io address + 0x04,
+ * while TCOBASE is + 0x40.
+ */
+#define MCP51_SMI_EN(base) ((base) - 0x40 + 0x04)
+# define MCP51_SMI_EN_TCO ((1 << 4) | (1 << 5))
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
new file mode 100644
index 000000000000..808372883e88
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.c
@@ -0,0 +1,480 @@
+/*
+ * sp5100_tco : TCO timer driver for sp5100 chipsets
+ *
+ * (c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ * Based on i8xx_tco.c:
+ * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ * Reserved.
+ * http://www.kernelconcepts.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ */
+
+/*
+ * Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "sp5100_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "SP5100 TCO timer"
+#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static void __iomem *tcobase;
+static unsigned int pm_iobase;
+static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *sp5100_tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *sp5100_tco_platform_device;
+
+/* module parameters */
+
+#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat. */
+static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+ __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+ " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static void tco_timer_start(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ val = readl(SP5100_WDT_CONTROL(tcobase));
+ val |= SP5100_WDT_START_STOP_BIT;
+ writel(val, SP5100_WDT_CONTROL(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ val = readl(SP5100_WDT_CONTROL(tcobase));
+ val &= ~SP5100_WDT_START_STOP_BIT;
+ writel(val, SP5100_WDT_CONTROL(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tco_lock, flags);
+ val = readl(SP5100_WDT_CONTROL(tcobase));
+ val |= SP5100_WDT_TRIGGER_BIT;
+ writel(val, SP5100_WDT_CONTROL(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+ unsigned long flags;
+
+ if (t < 0 || t > 0xffff)
+ return -EINVAL;
+
+ /* Write new heartbeat to watchdog */
+ spin_lock_irqsave(&tco_lock, flags);
+ writel(t, SP5100_WDT_COUNT(tcobase));
+ spin_unlock_irqrestore(&tco_lock, flags);
+
+ heartbeat = t;
+ return 0;
+}
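+/*
+ * sp5100_tco_setupdevice() selects SP5100_PM_WATCHDOG_SECOND_RES, i.e. a
+ * one second tick, so the count register written above holds the heartbeat
+ * in seconds directly.
+ */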
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int sp5100_tco_open(struct inode *inode, struct file *file)
+{
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &timer_alive))
+ return -EBUSY;
+
+ /* Reload and activate timer */
+ tco_timer_start();
+ tco_timer_keepalive();
+ return nonseekable_open(inode, file);
+}
+
+static int sp5100_tco_release(struct inode *inode, struct file *file)
+{
+ /* Shut off the timer. */
+ if (tco_expect_close == 42) {
+ tco_timer_stop();
+ } else {
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
+ tco_timer_keepalive();
+ }
+ clear_bit(0, &timer_alive);
+ tco_expect_close = 0;
+ return 0;
+}
+
+static ssize_t sp5100_tco_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic
+			 * character five months ago...
+			 */
+ tco_expect_close = 0;
+
+			/*
+			 * scan to see whether or not we got the
+			 * magic character
+			 */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ tco_expect_close = 42;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ tco_timer_keepalive();
+ }
+ return len;
+}
+
+static long sp5100_tco_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_heartbeat;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = TCO_MODULE_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, p))
+ return -EFAULT;
+ if (new_options & WDIOS_DISABLECARD) {
+ tco_timer_stop();
+ retval = 0;
+ }
+ if (new_options & WDIOS_ENABLECARD) {
+ tco_timer_start();
+ tco_timer_keepalive();
+ retval = 0;
+ }
+ return retval;
+ case WDIOC_KEEPALIVE:
+ tco_timer_keepalive();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_heartbeat, p))
+ return -EFAULT;
+ if (tco_timer_set_heartbeat(new_heartbeat))
+ return -EINVAL;
+ tco_timer_keepalive();
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, p);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static const struct file_operations sp5100_tco_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = sp5100_tco_write,
+ .unlocked_ioctl = sp5100_tco_ioctl,
+ .open = sp5100_tco_open,
+ .release = sp5100_tco_release,
+};
+
+static struct miscdevice sp5100_tco_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &sp5100_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id sp5100_tco_pci_tbl[] = {
+ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
+ PCI_ANY_ID, },
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __devinit sp5100_tco_setupdevice(void)
+{
+ struct pci_dev *dev = NULL;
+ u32 val;
+
+ /* Match the PCI device */
+ for_each_pci_dev(dev) {
+ if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) {
+ sp5100_tco_pci = dev;
+ break;
+ }
+ }
+
+ if (!sp5100_tco_pci)
+ return 0;
+
+ /* Request the IO ports used by this driver */
+ pm_iobase = SP5100_IO_PM_INDEX_REG;
+ if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+ printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+ pm_iobase);
+ goto exit;
+ }
+
+ /* Find the watchdog base address. */
+ outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+ outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+ outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
+ /* Low three bits of BASE0 are reserved. */
+ val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+
+ tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
+	if (!tcobase) {
+ printk(KERN_ERR PFX "failed to get tcobase address\n");
+ goto unreg_region;
+ }
+
+ /* Enable watchdog decode bit */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ &val);
+
+ val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+ pci_write_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ val);
+
+ /* Enable Watchdog timer and set the resolution to 1 sec. */
+ outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ val |= SP5100_PM_WATCHDOG_SECOND_RES;
+ val &= ~SP5100_PM_WATCHDOG_DISABLE;
+ outb(val, SP5100_IO_PM_DATA_REG);
+
+ /* Check that the watchdog action is set to reset the system. */
+ val = readl(SP5100_WDT_CONTROL(tcobase));
+ val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
+ writel(val, SP5100_WDT_CONTROL(tcobase));
+
+ /* Set a reasonable heartbeat before we stop the timer */
+ tco_timer_set_heartbeat(heartbeat);
+
+ /*
+ * Stop the TCO before we change anything so we don't race with
+ * a zeroed timer.
+ */
+ tco_timer_stop();
+
+ /* Done */
+ return 1;
+
+ iounmap(tcobase);
+unreg_region:
+ release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+exit:
+ return 0;
+}
+
+static int __devinit sp5100_tco_init(struct platform_device *dev)
+{
+ int ret;
+ u32 val;
+
+ /* Check whether or not the hardware watchdog is there. If found, then
+ * set it up.
+ */
+ if (!sp5100_tco_setupdevice())
+ return -ENODEV;
+
+ /* Check to see if last reboot was due to watchdog timeout */
+ printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+ readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
+ "" : "not ");
+
+ /* Clear out the old status */
+ val = readl(SP5100_WDT_CONTROL(tcobase));
+ val &= ~SP5100_PM_WATCHDOG_FIRED;
+ writel(val, SP5100_WDT_CONTROL(tcobase));
+
+ /*
+	 * Check that the heartbeat value is within its range.
+ * If not, reset to the default.
+ */
+ if (tco_timer_set_heartbeat(heartbeat)) {
+ heartbeat = WATCHDOG_HEARTBEAT;
+ tco_timer_set_heartbeat(heartbeat);
+ }
+
+ ret = misc_register(&sp5100_tco_miscdev);
+ if (ret != 0) {
+ printk(KERN_ERR PFX "cannot register miscdev on minor="
+ "%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ goto exit;
+ }
+
+ clear_bit(0, &timer_alive);
+
+ printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec"
+ " (nowayout=%d)\n",
+ tcobase, heartbeat, nowayout);
+
+ return 0;
+
+exit:
+ iounmap(tcobase);
+ release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+ return ret;
+}
+
+static void __devexit sp5100_tco_cleanup(void)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ tco_timer_stop();
+
+ /* Deregister */
+ misc_deregister(&sp5100_tco_miscdev);
+ iounmap(tcobase);
+ release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+}
+
+static int __devexit sp5100_tco_remove(struct platform_device *dev)
+{
+ if (tcobase)
+ sp5100_tco_cleanup();
+ return 0;
+}
+
+static void sp5100_tco_shutdown(struct platform_device *dev)
+{
+ tco_timer_stop();
+}
+
+static struct platform_driver sp5100_tco_driver = {
+ .probe = sp5100_tco_init,
+ .remove = __devexit_p(sp5100_tco_remove),
+ .shutdown = sp5100_tco_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TCO_MODULE_NAME,
+ },
+};
+
+static int __init sp5100_tco_init_module(void)
+{
+ int err;
+
+ printk(KERN_INFO PFX "SP5100 TCO WatchDog Timer Driver v%s\n",
+ TCO_VERSION);
+
+ err = platform_driver_register(&sp5100_tco_driver);
+ if (err)
+ return err;
+
+ sp5100_tco_platform_device = platform_device_register_simple(
+ TCO_MODULE_NAME, -1, NULL, 0);
+ if (IS_ERR(sp5100_tco_platform_device)) {
+ err = PTR_ERR(sp5100_tco_platform_device);
+ goto unreg_platform_driver;
+ }
+
+ return 0;
+
+unreg_platform_driver:
+ platform_driver_unregister(&sp5100_tco_driver);
+ return err;
+}
+
+static void __exit sp5100_tco_cleanup_module(void)
+{
+ platform_device_unregister(sp5100_tco_platform_device);
+ platform_driver_unregister(&sp5100_tco_driver);
+ printk(KERN_INFO PFX "SP5100 TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(sp5100_tco_init_module);
+module_exit(sp5100_tco_cleanup_module);
+
+MODULE_AUTHOR("Priyanka Gupta");
+MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
new file mode 100644
index 000000000000..a5a16cc90a34
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.h
@@ -0,0 +1,41 @@
+/*
+ * sp5100_tco: TCO timer driver for sp5100 chipsets.
+ *
+ * (c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ * TCO timer driver for sp5100 chipsets
+ */
+
+/*
+ * Some address definitions for the Watchdog
+ */
+
+#define SP5100_WDT_MEM_MAP_SIZE 0x08
+#define SP5100_WDT_CONTROL(base) ((base) + 0x00) /* Watchdog Control */
+#define SP5100_WDT_COUNT(base) ((base) + 0x04) /* Watchdog Count */
+
+#define SP5100_WDT_START_STOP_BIT 1
+#define SP5100_WDT_TRIGGER_BIT (1 << 7)
+
+#define SP5100_PCI_WATCHDOG_MISC_REG 0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3)
+
+#define SP5100_PM_IOPORTS_SIZE 0x02
+
+/*
+ * These two I/O port addresses are hardcoded; there doesn't seem to be a
+ * way to discover them from another register.
+ */
+#define SP5100_IO_PM_INDEX_REG 0xCD6
+#define SP5100_IO_PM_DATA_REG 0xCD7
+
+#define SP5100_PM_WATCHDOG_CONTROL 0x69
+#define SP5100_PM_WATCHDOG_BASE0 0x6C
+#define SP5100_PM_WATCHDOG_BASE1 0x6D
+#define SP5100_PM_WATCHDOG_BASE2 0x6E
+#define SP5100_PM_WATCHDOG_BASE3 0x6F
+
+#define SP5100_PM_WATCHDOG_FIRED (1 << 1)
+#define SP5100_PM_WATCHDOG_ACTION_RESET (1 << 2)
+
+#define SP5100_PM_WATCHDOG_DISABLE 1
+#define SP5100_PM_WATCHDOG_SECOND_RES (3 << 1)
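An illustrative userspace sketch (x86, root-only, glibc <sys/io.h>) of the indexed access pattern these defines describe and that sp5100_tco_setupdevice() uses: write the register index to 0xCD6, read the value from 0xCD7, and assemble the watchdog MMIO base from BASE3..BASE0 with the three reserved low bits masked off.

#include <stdio.h>
#include <stdint.h>
#include <sys/io.h>

#define PM_INDEX 0xCD6			/* SP5100_IO_PM_INDEX_REG */
#define PM_DATA  0xCD7			/* SP5100_IO_PM_DATA_REG */

static uint8_t pm_read(uint8_t index)
{
	outb(index, PM_INDEX);		/* select the indexed PM register */
	return inb(PM_DATA);		/* read its contents */
}

int main(void)
{
	uint32_t base;

	if (ioperm(PM_INDEX, 2, 1))	/* grant access to 0xCD6/0xCD7 */
		return 1;
	base = pm_read(0x6F);				/* BASE3 */
	base = base << 8 | pm_read(0x6E);		/* BASE2 */
	base = base << 8 | pm_read(0x6D);		/* BASE1 */
	base = base << 8 | (pm_read(0x6C) & 0xf8);	/* BASE0, low 3 bits reserved */
	printf("SP5100 watchdog MMIO base: 0x%08x\n", base);
	return 0;
}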
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 0f5288df0091..e5c91d4404ed 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -42,7 +42,7 @@
#include <asm/system.h>
-#define WATCHDOG_NAME "w83627hf/thf/hg WDT"
+#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
#define PFX WATCHDOG_NAME ": "
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
@@ -89,7 +89,7 @@ static void w83627hf_select_wd_register(void)
c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
outb_p(0x2b, WDT_EFER);
outb_p(c, WDT_EFDR); /* set GPIO3 to WDT0 */
- } else if (c == 0x88) { /* W83627EHF */
+ } else if (c == 0x88 || c == 0xa0) { /* W83627EHF / W83627DHG */
outb_p(0x2d, WDT_EFER); /* select GPIO5 */
c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
outb_p(0x2d, WDT_EFER);
@@ -129,6 +129,8 @@ static void w83627hf_init(void)
t = inb_p(WDT_EFDR); /* read CRF5 */
t &= ~0x0C; /* set second mode & disable keyboard
turning off watchdog */
+ t |= 0x02; /* enable the WDTO# output low pulse
+ to the KBRST# pin (PIN60) */
outb_p(t, WDT_EFDR); /* Write back to CRF5 */
outb_p(0xF7, WDT_EFER); /* Select CRF7 */
@@ -321,7 +323,7 @@ static int __init wdt_init(void)
{
int ret;
- printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG Super I/O chip initialising.\n");
+ printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising.\n");
if (wdt_set_heartbeat(timeout)) {
wdt_set_heartbeat(WATCHDOG_TIMEOUT);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 6e6180ccd726..07bec09d1dad 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -29,6 +29,14 @@ config XEN_DEV_EVTCHN
firing.
If in doubt, say yes.
+config XEN_BACKEND
+ bool "Backend driver support"
+ depends on XEN_DOM0
+ default y
+ help
+ Support for backend device drivers that provide I/O services
+ to other virtual machines.
+
config XENFS
tristate "Xen filesystem"
default y
@@ -62,9 +70,19 @@ config XEN_SYS_HYPERVISOR
virtual environment, /sys/hypervisor will still be present,
but will have no xen contents.
+config XEN_XENBUS_FRONTEND
+ tristate
+
+config XEN_GNTDEV
+ tristate "userspace grant access device driver"
+ depends on XEN
+ select MMU_NOTIFIER
+ help
+ Allows userspace processes to use grants.
+
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
- depends on XEN_PVHVM
+ depends on XEN_PVHVM && PCI
default m
help
Driver for the Xen PCI Platform device: it is responsible for
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 533a199e7a3f..5088cc2e6fe2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -9,11 +9,14 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
+obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
+obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
xen-evtchn-y := evtchn.o
+xen-gntdev-y := gntdev.o
+xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 65f8637d13cf..74681478100a 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -170,6 +170,9 @@ static struct irq_info *info_for_irq(unsigned irq)
static unsigned int evtchn_from_irq(unsigned irq)
{
+ if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ return 0;
+
return info_for_irq(irq)->evtchn;
}
@@ -405,15 +408,21 @@ static int find_unbound_irq(void)
{
struct irq_data *data;
int irq, res;
- int start = get_nr_hw_irqs();
+ int bottom = get_nr_hw_irqs();
+ int top = nr_irqs-1;
- if (start == nr_irqs)
+ if (bottom == nr_irqs)
goto no_irqs;
- /* nr_irqs is a magic value. Must not use it.*/
- for (irq = nr_irqs-1; irq > start; irq--) {
+	/* This loop starts from the top of IRQ space and goes down.
+	 * We need this because if we have a PCI device in a Xen PV guest
+	 * we do not have an IO-APIC (though the backend might have them)
+	 * mapped in.  To avoid colliding physical IRQs with the Xen event
+	 * channels, start at the top of the IRQ space for virtual IRQs.
+	 */
+ for (irq = top; irq > bottom; irq--) {
data = irq_get_irq_data(irq);
- /* only 0->15 have init'd desc; handle irq > 16 */
+ /* only 15->0 have init'd desc; handle irq > 16 */
if (!data)
break;
if (data->chip == &no_irq_chip)
@@ -424,7 +433,7 @@ static int find_unbound_irq(void)
return irq;
}
- if (irq == start)
+ if (irq == bottom)
goto no_irqs;
res = irq_alloc_desc_at(irq, -1);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
new file mode 100644
index 000000000000..1e31cdcdae1e
--- /dev/null
+++ b/drivers/xen/gntdev.c
@@ -0,0 +1,665 @@
+/******************************************************************************
+ * gntdev.c
+ *
+ * Device for accessing (in user-space) pages that have been granted by other
+ * domains.
+ *
+ * Copyright (c) 2006-2007, D G Murray.
+ * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+#include <xen/gntdev.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
+ "Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_DESCRIPTION("User-space granted page access driver");
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
+ "once by a gntdev instance");
+
+struct gntdev_priv {
+ struct list_head maps;
+ uint32_t used;
+ uint32_t limit;
+ /* lock protects maps from concurrent changes */
+ spinlock_t lock;
+ struct mm_struct *mm;
+ struct mmu_notifier mn;
+};
+
+struct grant_map {
+ struct list_head next;
+ struct gntdev_priv *priv;
+ struct vm_area_struct *vma;
+ int index;
+ int count;
+ int flags;
+ int is_mapped;
+ struct ioctl_gntdev_grant_ref *grants;
+ struct gnttab_map_grant_ref *map_ops;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct page **pages;
+};
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_print_maps(struct gntdev_priv *priv,
+ char *text, int text_index)
+{
+#ifdef DEBUG
+ struct grant_map *map;
+
+ pr_debug("maps list (priv %p, usage %d/%d)\n",
+ priv, priv->used, priv->limit);
+
+ list_for_each_entry(map, &priv->maps, next)
+ pr_debug(" index %2d, count %2d %s\n",
+ map->index, map->count,
+ map->index == text_index && text ? text : "");
+#endif
+}
+
+static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+{
+ struct grant_map *add;
+ int i;
+
+ add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+ if (NULL == add)
+ return NULL;
+
+ add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
+ add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
+ add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+ add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
+ if (NULL == add->grants ||
+ NULL == add->map_ops ||
+ NULL == add->unmap_ops ||
+ NULL == add->pages)
+ goto err;
+
+ for (i = 0; i < count; i++) {
+ add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (add->pages[i] == NULL)
+ goto err;
+ }
+
+ add->index = 0;
+ add->count = count;
+ add->priv = priv;
+
+ if (add->count + priv->used > priv->limit)
+ goto err;
+
+ return add;
+
+err:
+ if (add->pages)
+ for (i = 0; i < count; i++) {
+ if (add->pages[i])
+ __free_page(add->pages[i]);
+ }
+ kfree(add->pages);
+ kfree(add->grants);
+ kfree(add->map_ops);
+ kfree(add->unmap_ops);
+ kfree(add);
+ return NULL;
+}
+
+static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+{
+ struct grant_map *map;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ if (add->index + add->count < map->index) {
+ list_add_tail(&add->next, &map->next);
+ goto done;
+ }
+ add->index = map->index + map->count;
+ }
+ list_add_tail(&add->next, &priv->maps);
+
+done:
+ priv->used += add->count;
+ gntdev_print_maps(priv, "[new]", add->index);
+}
+
+static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+ int index, int count)
+{
+ struct grant_map *map;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ if (map->index != index)
+ continue;
+ if (map->count != count)
+ continue;
+ return map;
+ }
+ return NULL;
+}
+
+static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
+ unsigned long vaddr)
+{
+ struct grant_map *map;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ if (!map->vma)
+ continue;
+ if (vaddr < map->vma->vm_start)
+ continue;
+ if (vaddr >= map->vma->vm_end)
+ continue;
+ return map;
+ }
+ return NULL;
+}
+
+static int gntdev_del_map(struct grant_map *map)
+{
+ int i;
+
+ if (map->vma)
+ return -EBUSY;
+ for (i = 0; i < map->count; i++)
+ if (map->unmap_ops[i].handle)
+ return -EBUSY;
+
+ map->priv->used -= map->count;
+ list_del(&map->next);
+ return 0;
+}
+
+static void gntdev_free_map(struct grant_map *map)
+{
+ int i;
+
+ if (!map)
+ return;
+
+ if (map->pages)
+ for (i = 0; i < map->count; i++) {
+ if (map->pages[i])
+ __free_page(map->pages[i]);
+ }
+ kfree(map->pages);
+ kfree(map->grants);
+ kfree(map->map_ops);
+ kfree(map->unmap_ops);
+ kfree(map);
+}
+
+/* ------------------------------------------------------------------ */
+
+static int find_grant_ptes(pte_t *pte, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct grant_map *map = data;
+ unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ u64 pte_maddr;
+
+ BUG_ON(pgnr >= map->count);
+ pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+
+ gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
+ GNTMAP_contains_pte | map->flags,
+ map->grants[pgnr].ref,
+ map->grants[pgnr].domid);
+ gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
+ GNTMAP_contains_pte | map->flags,
+ 0 /* handle */);
+ return 0;
+}
+
+static int map_grant_pages(struct grant_map *map)
+{
+ int i, err = 0;
+
+ pr_debug("map %d+%d\n", map->index, map->count);
+ err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+ if (err)
+ return err;
+
+ for (i = 0; i < map->count; i++) {
+ if (map->map_ops[i].status)
+ err = -EINVAL;
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ }
+ return err;
+}
+
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+ int i, err = 0;
+
+ pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+ if (err)
+ return err;
+
+ for (i = 0; i < pages; i++) {
+ if (map->unmap_ops[offset+i].status)
+ err = -EINVAL;
+ map->unmap_ops[offset+i].handle = 0;
+ }
+ return err;
+}
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_vma_close(struct vm_area_struct *vma)
+{
+ struct grant_map *map = vma->vm_private_data;
+
+ pr_debug("close %p\n", vma);
+ map->is_mapped = 0;
+ map->vma = NULL;
+ vma->vm_private_data = NULL;
+}
+
+static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
+ vmf->virtual_address, vmf->pgoff);
+ vmf->flags = VM_FAULT_ERROR;
+ return 0;
+}
+
+static struct vm_operations_struct gntdev_vmops = {
+ .close = gntdev_vma_close,
+ .fault = gntdev_vma_fault,
+};
+
+/* ------------------------------------------------------------------ */
+
+static void mn_invl_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+ struct grant_map *map;
+ unsigned long mstart, mend;
+ int err;
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(map, &priv->maps, next) {
+ if (!map->vma)
+ continue;
+ if (!map->is_mapped)
+ continue;
+ if (map->vma->vm_start >= end)
+ continue;
+ if (map->vma->vm_end <= start)
+ continue;
+ mstart = max(start, map->vma->vm_start);
+ mend = min(end, map->vma->vm_end);
+ pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+ map->index, map->count,
+ map->vma->vm_start, map->vma->vm_end,
+ start, end, mstart, mend);
+ err = unmap_grant_pages(map,
+ (mstart - map->vma->vm_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
+ WARN_ON(err);
+ }
+ spin_unlock(&priv->lock);
+}
+
+static void mn_invl_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
+}
+
+static void mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+ struct grant_map *map;
+ int err;
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(map, &priv->maps, next) {
+ if (!map->vma)
+ continue;
+ pr_debug("map %d+%d (%lx %lx)\n",
+ map->index, map->count,
+ map->vma->vm_start, map->vma->vm_end);
+ err = unmap_grant_pages(map, /* offset */ 0, map->count);
+ WARN_ON(err);
+ }
+ spin_unlock(&priv->lock);
+}
+
+struct mmu_notifier_ops gntdev_mmu_ops = {
+ .release = mn_release,
+ .invalidate_page = mn_invl_page,
+ .invalidate_range_start = mn_invl_range_start,
+};
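+/*
+ * The notifier callbacks above unmap granted pages as soon as the owning
+ * range is invalidated or the mm is torn down, so a grant mapping never
+ * outlives the VMA that backs it.
+ */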
+
+/* ------------------------------------------------------------------ */
+
+static int gntdev_open(struct inode *inode, struct file *flip)
+{
+ struct gntdev_priv *priv;
+ int ret = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->maps);
+ spin_lock_init(&priv->lock);
+ priv->limit = limit;
+
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ ret = mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
+
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ flip->private_data = priv;
+ pr_debug("priv %p\n", priv);
+
+ return 0;
+}
+
+static int gntdev_release(struct inode *inode, struct file *flip)
+{
+ struct gntdev_priv *priv = flip->private_data;
+ struct grant_map *map;
+ int err;
+
+ pr_debug("priv %p\n", priv);
+
+ spin_lock(&priv->lock);
+ while (!list_empty(&priv->maps)) {
+ map = list_entry(priv->maps.next, struct grant_map, next);
+ err = gntdev_del_map(map);
+ if (WARN_ON(err))
+ gntdev_free_map(map);
+
+ }
+ spin_unlock(&priv->lock);
+
+ mmu_notifier_unregister(&priv->mn, priv->mm);
+ kfree(priv);
+ return 0;
+}
+
+static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
+ struct ioctl_gntdev_map_grant_ref __user *u)
+{
+ struct ioctl_gntdev_map_grant_ref op;
+ struct grant_map *map;
+ int err;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+ pr_debug("priv %p, add %d\n", priv, op.count);
+ if (unlikely(op.count <= 0))
+ return -EINVAL;
+ if (unlikely(op.count > priv->limit))
+ return -EINVAL;
+
+ err = -ENOMEM;
+ map = gntdev_alloc_map(priv, op.count);
+ if (!map)
+ return err;
+ if (copy_from_user(map->grants, &u->refs,
+ sizeof(map->grants[0]) * op.count) != 0) {
+ gntdev_free_map(map);
+ return err;
+ }
+
+ spin_lock(&priv->lock);
+ gntdev_add_map(priv, map);
+ op.index = map->index << PAGE_SHIFT;
+ spin_unlock(&priv->lock);
+
+ if (copy_to_user(u, &op, sizeof(op)) != 0) {
+ spin_lock(&priv->lock);
+ gntdev_del_map(map);
+ spin_unlock(&priv->lock);
+ gntdev_free_map(map);
+ return err;
+ }
+ return 0;
+}
+
+static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
+ struct ioctl_gntdev_unmap_grant_ref __user *u)
+{
+ struct ioctl_gntdev_unmap_grant_ref op;
+ struct grant_map *map;
+ int err = -ENOENT;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+ pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+
+ spin_lock(&priv->lock);
+ map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+ if (map)
+ err = gntdev_del_map(map);
+ spin_unlock(&priv->lock);
+ if (!err)
+ gntdev_free_map(map);
+ return err;
+}
+
+static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
+ struct ioctl_gntdev_get_offset_for_vaddr __user *u)
+{
+ struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct grant_map *map;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+ pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
+
+ spin_lock(&priv->lock);
+ map = gntdev_find_map_vaddr(priv, op.vaddr);
+ if (map == NULL ||
+ map->vma->vm_start != op.vaddr) {
+ spin_unlock(&priv->lock);
+ return -EINVAL;
+ }
+ op.offset = map->index << PAGE_SHIFT;
+ op.count = map->count;
+ spin_unlock(&priv->lock);
+
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
+ struct ioctl_gntdev_set_max_grants __user *u)
+{
+ struct ioctl_gntdev_set_max_grants op;
+
+ if (copy_from_user(&op, u, sizeof(op)) != 0)
+ return -EFAULT;
+ pr_debug("priv %p, limit %d\n", priv, op.count);
+ if (op.count > limit)
+ return -E2BIG;
+
+ spin_lock(&priv->lock);
+ priv->limit = op.count;
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+static long gntdev_ioctl(struct file *flip,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gntdev_priv *priv = flip->private_data;
+ void __user *ptr = (void __user *)arg;
+
+ switch (cmd) {
+ case IOCTL_GNTDEV_MAP_GRANT_REF:
+ return gntdev_ioctl_map_grant_ref(priv, ptr);
+
+ case IOCTL_GNTDEV_UNMAP_GRANT_REF:
+ return gntdev_ioctl_unmap_grant_ref(priv, ptr);
+
+ case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
+ return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
+
+ case IOCTL_GNTDEV_SET_MAX_GRANTS:
+ return gntdev_ioctl_set_max_grants(priv, ptr);
+
+ default:
+ pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+ struct gntdev_priv *priv = flip->private_data;
+ int index = vma->vm_pgoff;
+ int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ struct grant_map *map;
+ int err = -EINVAL;
+
+ if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+ index, count, vma->vm_start, vma->vm_pgoff);
+
+ spin_lock(&priv->lock);
+ map = gntdev_find_map_index(priv, index, count);
+ if (!map)
+ goto unlock_out;
+ if (map->vma)
+ goto unlock_out;
+ if (priv->mm != vma->vm_mm) {
+ printk(KERN_WARNING "Huh? Other mm?\n");
+ goto unlock_out;
+ }
+
+ vma->vm_ops = &gntdev_vmops;
+
+ vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
+
+ vma->vm_private_data = map;
+ map->vma = vma;
+
+ map->flags = GNTMAP_host_map | GNTMAP_application_map;
+ if (!(vma->vm_flags & VM_WRITE))
+ map->flags |= GNTMAP_readonly;
+
+ spin_unlock(&priv->lock);
+
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err) {
+ printk(KERN_WARNING "find_grant_ptes() failure.\n");
+ return err;
+ }
+
+ err = map_grant_pages(map);
+ if (err) {
+ printk(KERN_WARNING "map_grant_pages() failure.\n");
+ return err;
+ }
+
+ map->is_mapped = 1;
+
+ return 0;
+
+unlock_out:
+ spin_unlock(&priv->lock);
+ return err;
+}
+
+static const struct file_operations gntdev_fops = {
+ .owner = THIS_MODULE,
+ .open = gntdev_open,
+ .release = gntdev_release,
+ .mmap = gntdev_mmap,
+ .unlocked_ioctl = gntdev_ioctl
+};
+
+static struct miscdevice gntdev_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/gntdev",
+ .fops = &gntdev_fops,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int __init gntdev_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&gntdev_miscdev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register gntdev device\n");
+ return err;
+ }
+ return 0;
+}
+
+static void __exit gntdev_exit(void)
+{
+ misc_deregister(&gntdev_miscdev);
+}
+
+module_init(gntdev_init);
+module_exit(gntdev_exit);
+
+/* ------------------------------------------------------------------ */
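A hypothetical userspace sketch of the map-then-mmap flow that gntdev_ioctl_map_grant_ref() and gntdev_mmap() implement: the ioctl fills in op.index, which is then used as the byte offset to mmap(). The ioctl and struct names follow <xen/gntdev.h> as included by the driver; the fields shown (count, index, refs[].domid, refs[].ref) are taken from the code above, and error handling is trimmed.

#include <stdint.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntdev.h>		/* IOCTL_GNTDEV_MAP_GRANT_REF and structs */

void *map_one_grant(uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
	int fd = open("/dev/xen/gntdev", O_RDWR);

	if (fd < 0)
		return NULL;
	op.refs[0].domid = domid;	/* domain that issued the grant */
	op.refs[0].ref = ref;		/* grant reference to map */
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) < 0)
		return NULL;
	/* op.index is the byte offset the driver turns back into map->index */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);	/* caller checks for MAP_FAILED */
}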
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6c4531816496..9ef54ebc1194 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -447,6 +447,52 @@ unsigned int gnttab_max_grant_frames(void)
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct page **pages, unsigned int count)
+{
+ int i, ret;
+ pte_t *pte;
+ unsigned long mfn;
+
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ /* m2p override only supported for GNTMAP_contains_pte mappings */
+ if (!(map_ops[i].flags & GNTMAP_contains_pte))
+ continue;
+ pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+ (map_ops[i].host_addr & ~PAGE_MASK));
+ mfn = pte_mfn(*pte);
+ ret = m2p_add_override(mfn, pages[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs);
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct page **pages, unsigned int count)
+{
+ int i, ret;
+
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ ret = m2p_remove_override(pages[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4ac880..24177272bcb8 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
#ifdef CONFIG_PM_SLEEP
static int xen_hvm_suspend(void *data)
{
+ int err;
struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
int *cancelled = data;
BUG_ON(!irqs_disabled());
+ err = sysdev_suspend(PMSG_SUSPEND);
+ if (err) {
+ printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+ err);
+ return err;
+ }
+
*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
xen_timer_resume();
}
+ sysdev_resume();
+
return 0;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index c01b5ddce529..afbe041f42c5 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -105,7 +105,7 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int i, ret;
- long ioaddr, iolen;
+ long ioaddr;
long mmio_addr, mmio_len;
unsigned int max_nr_gframes;
@@ -114,7 +114,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
return i;
ioaddr = pci_resource_start(pdev, 0);
- iolen = pci_resource_len(pdev, 0);
mmio_addr = pci_resource_start(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
@@ -125,19 +124,13 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
goto pci_out;
}
- if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
- dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n",
- mmio_addr, mmio_len);
- ret = -EBUSY;
+ ret = pci_request_region(pdev, 1, DRV_NAME);
+ if (ret < 0)
goto pci_out;
- }
- if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
- dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n",
- iolen, ioaddr);
- ret = -EBUSY;
+ ret = pci_request_region(pdev, 0, DRV_NAME);
+ if (ret < 0)
goto mem_out;
- }
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
@@ -169,9 +162,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
return 0;
out:
- release_region(ioaddr, iolen);
+ pci_release_region(pdev, 0);
mem_out:
- release_mem_region(mmio_addr, mmio_len);
+ pci_release_region(pdev, 1);
pci_out:
pci_disable_device(pdev);
return ret;
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 5571f5b84223..8dca685358b4 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -5,3 +5,8 @@ xenbus-objs += xenbus_client.o
xenbus-objs += xenbus_comms.o
xenbus-objs += xenbus_xs.o
xenbus-objs += xenbus_probe.o
+
+xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+xenbus-objs += $(xenbus-be-objs-y)
+
+obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index deb9c4ba3a93..baa65e7fbbc7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,7 +56,6 @@
#include <xen/events.h>
#include <xen/page.h>
-#include <xen/platform_pci.h>
#include <xen/hvm.h>
#include "xenbus_comms.h"
@@ -73,15 +72,6 @@ static unsigned long xen_store_mfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
-static void wait_for_devices(struct xenbus_driver *xendrv);
-
-static int xenbus_probe_frontend(const char *type, const char *name);
-
-static void xenbus_dev_shutdown(struct device *_dev);
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
-static int xenbus_dev_resume(struct device *dev);
-
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv)
return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
-
-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
- struct xenbus_device *dev = to_xenbus_device(_dev);
-
- if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
- return -ENOMEM;
-
- return 0;
-}
-
-/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
-{
- nodename = strchr(nodename, '/');
- if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
- printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
- return -EINVAL;
- }
-
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
- if (!strchr(bus_id, '/')) {
- printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
- return -EINVAL;
- }
- *strchr(bus_id, '/') = '-';
- return 0;
-}
+EXPORT_SYMBOL_GPL(xenbus_match);
static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@ static void free_otherend_watch(struct xenbus_device *dev)
}
-int read_otherend_details(struct xenbus_device *xendev,
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+ free_otherend_watch(dev);
+ free_otherend_details(dev);
+
+ return drv->read_otherend_details(dev);
+}
+
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+ struct xen_bus_type *bus =
+ container_of(dev->dev.bus, struct xen_bus_type, bus);
+
+ return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+ bus->otherend_changed,
+ "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
char *id_node, char *path_node)
{
int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@ int read_otherend_details(struct xenbus_device *xendev,
return 0;
}
+EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
-
-static int read_backend_details(struct xenbus_device *xendev)
-{
- return read_otherend_details(xendev, "backend-id", "backend");
-}
-
-static struct device_attribute xenbus_dev_attrs[] = {
- __ATTR_NULL
-};
-
-/* Bus type for frontend drivers. */
-static struct xen_bus_type xenbus_frontend = {
- .root = "device",
- .levels = 2, /* device/type/<id> */
- .get_bus_id = frontend_bus_id,
- .probe = xenbus_probe_frontend,
- .bus = {
- .name = "xen",
- .match = xenbus_match,
- .uevent = xenbus_uevent,
- .probe = xenbus_dev_probe,
- .remove = xenbus_dev_remove,
- .shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_dev_attrs,
-
- .suspend = xenbus_dev_suspend,
- .resume = xenbus_dev_resume,
- },
-};
-
-static void otherend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len,
+ int ignore_on_shutdown)
{
struct xenbus_device *dev =
container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@ static void otherend_changed(struct xenbus_watch *watch,
* work that can fail e.g., when the rootfs is gone.
*/
if (system_state > SYSTEM_RUNNING) {
- struct xen_bus_type *bus = bus;
- bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
- /* If we're frontend, drive the state machine to Closed. */
- /* This should cause the backend to release our resources. */
- if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+ if (ignore_on_shutdown && (state == XenbusStateClosing))
xenbus_frontend_closed(dev);
return;
}
@@ -246,25 +200,7 @@ static void otherend_changed(struct xenbus_watch *watch,
if (drv->otherend_changed)
drv->otherend_changed(dev, state);
}
-
-
-static int talk_to_otherend(struct xenbus_device *dev)
-{
- struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-
- free_otherend_watch(dev);
- free_otherend_details(dev);
-
- return drv->read_otherend_details(dev);
-}
-
-
-static int watch_otherend(struct xenbus_device *dev)
-{
- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
- "%s/%s", dev->otherend, "state");
-}
-
+EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
int xenbus_dev_probe(struct device *_dev)
{
@@ -308,8 +244,9 @@ int xenbus_dev_probe(struct device *_dev)
fail:
xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
xenbus_switch_state(dev, XenbusStateClosed);
- return -ENODEV;
+ return err;
}
+EXPORT_SYMBOL_GPL(xenbus_dev_probe);
int xenbus_dev_remove(struct device *_dev)
{
@@ -327,8 +264,9 @@ int xenbus_dev_remove(struct device *_dev)
xenbus_switch_state(dev, XenbusStateClosed);
return 0;
}
+EXPORT_SYMBOL_GPL(xenbus_dev_remove);
-static void xenbus_dev_shutdown(struct device *_dev)
+void xenbus_dev_shutdown(struct device *_dev)
{
struct xenbus_device *dev = to_xenbus_device(_dev);
unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@ static void xenbus_dev_shutdown(struct device *_dev)
out:
put_device(&dev->dev);
}
+EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
int xenbus_register_driver_common(struct xenbus_driver *drv,
struct xen_bus_type *bus,
@@ -362,25 +301,7 @@ int xenbus_register_driver_common(struct xenbus_driver *drv,
return driver_register(&drv->driver);
}
-
-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
-{
- int ret;
-
- drv->read_otherend_details = read_backend_details;
-
- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
- if (ret)
- return ret;
-
- /* If this driver is loaded as a module wait for devices to attach. */
- wait_for_devices(drv);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
void xenbus_unregister_driver(struct xenbus_driver *drv)
{
@@ -551,24 +472,7 @@ fail:
kfree(xendev);
return err;
}
-
-/* device/<typename>/<name> */
-static int xenbus_probe_frontend(const char *type, const char *name)
-{
- char *nodename;
- int err;
-
- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
- xenbus_frontend.root, type, name);
- if (!nodename)
- return -ENOMEM;
-
- DPRINTK("%s", nodename);
-
- err = xenbus_probe_node(&xenbus_frontend, type, nodename);
- kfree(nodename);
- return err;
-}
+EXPORT_SYMBOL_GPL(xenbus_probe_node);
static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
@@ -582,10 +486,11 @@ static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
return PTR_ERR(dir);
for (i = 0; i < dir_n; i++) {
- err = bus->probe(type, dir[i]);
+ err = bus->probe(bus, type, dir[i]);
if (err)
break;
}
+
kfree(dir);
return err;
}
@@ -605,9 +510,11 @@ int xenbus_probe_devices(struct xen_bus_type *bus)
if (err)
break;
}
+
kfree(dir);
return err;
}
+EXPORT_SYMBOL_GPL(xenbus_probe_devices);
static unsigned int char_count(const char *str, char c)
{
@@ -670,32 +577,18 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
-static void frontend_changed(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
-{
- DPRINTK("");
-
- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-}
-
-/* We watch for devices appearing and vanishing. */
-static struct xenbus_watch fe_watch = {
- .node = "device",
- .callback = frontend_changed,
-};
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev, pm_message_t state)
{
int err = 0;
struct xenbus_driver *drv;
- struct xenbus_device *xdev;
+ struct xenbus_device *xdev
+ = container_of(dev, struct xenbus_device, dev);
- DPRINTK("");
+ DPRINTK("%s", xdev->nodename);
if (dev->driver == NULL)
return 0;
drv = to_xenbus_driver(dev->driver);
- xdev = container_of(dev, struct xenbus_device, dev);
if (drv->suspend)
err = drv->suspend(xdev, state);
if (err)
@@ -703,21 +596,20 @@ static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
return 0;
}
+EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
-static int xenbus_dev_resume(struct device *dev)
+int xenbus_dev_resume(struct device *dev)
{
int err;
struct xenbus_driver *drv;
- struct xenbus_device *xdev;
+ struct xenbus_device *xdev
+ = container_of(dev, struct xenbus_device, dev);
- DPRINTK("");
+ DPRINTK("%s", xdev->nodename);
if (dev->driver == NULL)
return 0;
-
drv = to_xenbus_driver(dev->driver);
- xdev = container_of(dev, struct xenbus_device, dev);
-
err = talk_to_otherend(xdev);
if (err) {
printk(KERN_WARNING
@@ -748,6 +640,7 @@ static int xenbus_dev_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(xenbus_dev_resume);
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
@@ -776,11 +669,6 @@ void xenbus_probe(struct work_struct *unused)
{
xenstored_ready = 1;
- /* Enumerate devices in xenstore and watch for changes. */
- xenbus_probe_devices(&xenbus_frontend);
- register_xenbus_watch(&fe_watch);
- xenbus_backend_probe_and_watch();
-
/* Notify others that xenstore is up */
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
@@ -809,16 +697,7 @@ static int __init xenbus_init(void)
err = -ENODEV;
if (!xen_domain())
- goto out_error;
-
- /* Register ourselves with the kernel bus subsystem */
- err = bus_register(&xenbus_frontend.bus);
- if (err)
- goto out_error;
-
- err = xenbus_backend_bus_register();
- if (err)
- goto out_unreg_front;
+ return err;
/*
* Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@ static int __init xenbus_init(void)
if (err) {
printk(KERN_WARNING
"XENBUS: Error initializing xenstore comms: %i\n", err);
- goto out_unreg_back;
+ goto out_error;
}
#ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@ static int __init xenbus_init(void)
return 0;
- out_unreg_back:
- xenbus_backend_bus_unregister();
-
- out_unreg_front:
- bus_unregister(&xenbus_frontend.bus);
-
out_error:
if (page != 0)
free_page(page);
+
return err;
}
postcore_initcall(xenbus_init);
MODULE_LICENSE("GPL");
-
-static int is_device_connecting(struct device *dev, void *data)
-{
- struct xenbus_device *xendev = to_xenbus_device(dev);
- struct device_driver *drv = data;
- struct xenbus_driver *xendrv;
-
- /*
- * A device with no driver will never connect. We care only about
- * devices which should currently be in the process of connecting.
- */
- if (!dev->driver)
- return 0;
-
- /* Is this search limited to a particular driver? */
- if (drv && (dev->driver != drv))
- return 0;
-
- xendrv = to_xenbus_driver(dev->driver);
- return (xendev->state < XenbusStateConnected ||
- (xendev->state == XenbusStateConnected &&
- xendrv->is_ready && !xendrv->is_ready(xendev)));
-}
-
-static int exists_connecting_device(struct device_driver *drv)
-{
- return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
- is_device_connecting);
-}
-
-static int print_device_status(struct device *dev, void *data)
-{
- struct xenbus_device *xendev = to_xenbus_device(dev);
- struct device_driver *drv = data;
-
- /* Is this operation limited to a particular driver? */
- if (drv && (dev->driver != drv))
- return 0;
-
- if (!dev->driver) {
- /* Information only: is this too noisy? */
- printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
- xendev->nodename);
- } else if (xendev->state < XenbusStateConnected) {
- enum xenbus_state rstate = XenbusStateUnknown;
- if (xendev->otherend)
- rstate = xenbus_read_driver_state(xendev->otherend);
- printk(KERN_WARNING "XENBUS: Timeout connecting "
- "to device: %s (local state %d, remote state %d)\n",
- xendev->nodename, xendev->state, rstate);
- }
-
- return 0;
-}
-
-/* We only wait for device setup after most initcalls have run. */
-static int ready_to_wait_for_devices;
-
-/*
- * On a 5-minute timeout, wait for all devices currently configured. We need
- * to do this to guarantee that the filesystems and / or network devices
- * needed for boot are available, before we can allow the boot to proceed.
- *
- * This needs to be on a late_initcall, to happen after the frontend device
- * drivers have been initialised, but before the root fs is mounted.
- *
- * A possible improvement here would be to have the tools add a per-device
- * flag to the store entry, indicating whether it is needed at boot time.
- * This would allow people who knew what they were doing to accelerate their
- * boot slightly, but of course needs tools or manual intervention to set up
- * those flags correctly.
- */
-static void wait_for_devices(struct xenbus_driver *xendrv)
-{
- unsigned long start = jiffies;
- struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
- unsigned int seconds_waited = 0;
-
- if (!ready_to_wait_for_devices || !xen_domain())
- return;
-
- while (exists_connecting_device(drv)) {
- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
- if (!seconds_waited)
- printk(KERN_WARNING "XENBUS: Waiting for "
- "devices to initialise: ");
- seconds_waited += 5;
- printk("%us...", 300 - seconds_waited);
- if (seconds_waited == 300)
- break;
- }
-
- schedule_timeout_interruptible(HZ/10);
- }
-
- if (seconds_waited)
- printk("\n");
-
- bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
- print_device_status);
-}
-
-#ifndef MODULE
-static int __init boot_wait_for_devices(void)
-{
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
- return -ENODEV;
-
- ready_to_wait_for_devices = 1;
- wait_for_devices(NULL);
- return 0;
-}
-
-late_initcall(boot_wait_for_devices);
-#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 6c5e3185a6a2..24665812316a 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,26 +36,15 @@
#define XEN_BUS_ID_SIZE 20
-#ifdef CONFIG_XEN_BACKEND
-extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
-#else
-static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
-#endif
-
struct xen_bus_type
{
char *root;
unsigned int levels;
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
- int (*probe)(const char *type, const char *dir);
+ int (*probe)(struct xen_bus_type *bus, const char *type,
+ const char *dir);
+ void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
+ unsigned int len);
struct bus_type bus;
};
@@ -73,4 +62,16 @@ extern int xenbus_probe_devices(struct xen_bus_type *bus);
extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+extern void xenbus_dev_shutdown(struct device *_dev);
+
+extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_resume(struct device *dev);
+
+extern void xenbus_otherend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len,
+ int ignore_on_shutdown);
+
+extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
+ char *id_node, char *path_node);
+
#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
new file mode 100644
index 000000000000..6cf467bf63ec
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have (backend half).
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ * Copyright (C) 2007 Solarflare Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
+ __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
+static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+ int domid, err;
+ const char *devid, *type, *frontend;
+ unsigned int typelen;
+
+ type = strchr(nodename, '/');
+ if (!type)
+ return -EINVAL;
+ type++;
+ typelen = strcspn(type, "/");
+ if (!typelen || type[typelen] != '/')
+ return -EINVAL;
+
+ devid = strrchr(nodename, '/') + 1;
+
+ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
+ "frontend", NULL, &frontend,
+ NULL);
+ if (err)
+ return err;
+ if (strlen(frontend) == 0)
+ err = -ERANGE;
+ if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
+ err = -ENOENT;
+ kfree(frontend);
+
+ if (err)
+ return err;
+
+ if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
+ typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
+ return -ENOSPC;
+ return 0;
+}
+
+static int xenbus_uevent_backend(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ struct xenbus_device *xdev;
+ struct xenbus_driver *drv;
+ struct xen_bus_type *bus;
+
+ DPRINTK("");
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ xdev = to_xenbus_device(dev);
+ if (xdev == NULL)
+ return -ENODEV;
+ bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
+
+ /* stuff we want to pass to /sbin/hotplug */
+ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
+ return -ENOMEM;
+
+ if (dev->driver) {
+ drv = to_xenbus_driver(dev->driver);
+ if (drv && drv->uevent)
+ return drv->uevent(xdev, env);
+ }
+
+ return 0;
+}
+
+/* backend/<typename>/<frontend-uuid>/<name> */
+static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
+ const char *dir,
+ const char *type,
+ const char *name)
+{
+ char *nodename;
+ int err;
+
+ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+ if (!nodename)
+ return -ENOMEM;
+
+ DPRINTK("%s\n", nodename);
+
+ err = xenbus_probe_node(bus, type, nodename);
+ kfree(nodename);
+ return err;
+}
+
+/* backend/<typename>/<frontend-domid> */
+static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
+ const char *domid)
+{
+ char *nodename;
+ int err = 0;
+ char **dir;
+ unsigned int i, dir_n = 0;
+
+ DPRINTK("");
+
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
+ if (!nodename)
+ return -ENOMEM;
+
+ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
+ if (IS_ERR(dir)) {
+ kfree(nodename);
+ return PTR_ERR(dir);
+ }
+
+ for (i = 0; i < dir_n; i++) {
+ err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
+ if (err)
+ break;
+ }
+ kfree(dir);
+ kfree(nodename);
+ return err;
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ xenbus_otherend_changed(watch, vec, len, 0);
+}
+
+static struct device_attribute xenbus_backend_dev_attrs[] = {
+ __ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_backend = {
+ .root = "backend",
+ .levels = 3, /* backend/type/<frontend>/<id> */
+ .get_bus_id = backend_bus_id,
+ .probe = xenbus_probe_backend,
+ .otherend_changed = frontend_changed,
+ .bus = {
+ .name = "xen-backend",
+ .match = xenbus_match,
+ .uevent = xenbus_uevent_backend,
+ .probe = xenbus_dev_probe,
+ .remove = xenbus_dev_remove,
+ .shutdown = xenbus_dev_shutdown,
+ .dev_attrs = xenbus_backend_dev_attrs,
+ },
+};
+
+static void backend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ DPRINTK("");
+
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+}
+
+static struct xenbus_watch be_watch = {
+ .node = "backend",
+ .callback = backend_changed,
+};
+
+static int read_frontend_details(struct xenbus_device *xendev)
+{
+ return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
+}
+
+int xenbus_dev_is_online(struct xenbus_device *dev)
+{
+ int rc, val;
+
+ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
+ if (rc != 1)
+ val = 0; /* no online node present */
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
+
+int __xenbus_register_backend(struct xenbus_driver *drv,
+ struct module *owner, const char *mod_name)
+{
+ drv->read_otherend_details = read_frontend_details;
+
+ return xenbus_register_driver_common(drv, &xenbus_backend,
+ owner, mod_name);
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+
+static int backend_probe_and_watch(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ /* Enumerate devices in xenstore and watch for changes. */
+ xenbus_probe_devices(&xenbus_backend);
+ register_xenbus_watch(&be_watch);
+
+ return NOTIFY_DONE;
+}
+
+static int __init xenbus_probe_backend_init(void)
+{
+ static struct notifier_block xenstore_notifier = {
+ .notifier_call = backend_probe_and_watch
+ };
+ int err;
+
+ DPRINTK("");
+
+ /* Register ourselves with the kernel bus subsystem */
+ err = bus_register(&xenbus_backend.bus);
+ if (err)
+ return err;
+
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+subsys_initcall(xenbus_probe_backend_init);
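
A note on the design: the backend bus is now brought up from its own initcall and defers device enumeration until xenbus_probe() fires the xenstore notifier chain, instead of being driven directly from xenbus_probe() as before. A sketch of that pattern for a hypothetical third consumer (my_setup() and my_init() are invented names):

#include <linux/init.h>
#include <linux/notifier.h>
#include <xen/xenbus.h>

/* Runs once xenbus_probe() fires the xenstore notifier chain, i.e. when
 * it is finally safe to touch the store and register watches. */
static int my_setup(struct notifier_block *nb, unsigned long event,
		    void *data)
{
	/* enumerate devices, register watches, etc. */
	return NOTIFY_DONE;
}

static struct notifier_block my_xenstore_notifier = {
	.notifier_call = my_setup,
};

static int __init my_init(void)
{
	register_xenstore_notifier(&my_xenstore_notifier);
	return 0;
}
subsys_initcall(my_init);
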
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
new file mode 100644
index 000000000000..5bcc2d6cf129
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -0,0 +1,294 @@
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
+ __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/platform_pci.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+ nodename = strchr(nodename, '/');
+ if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
+ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+ return -EINVAL;
+ }
+
+ strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ if (!strchr(bus_id, '/')) {
+ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+ return -EINVAL;
+ }
+ *strchr(bus_id, '/') = '-';
+ return 0;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
+ const char *name)
+{
+ char *nodename;
+ int err;
+
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
+ if (!nodename)
+ return -ENOMEM;
+
+ DPRINTK("%s", nodename);
+
+ err = xenbus_probe_node(bus, type, nodename);
+ kfree(nodename);
+ return err;
+}
+
+static int xenbus_uevent_frontend(struct device *_dev,
+ struct kobj_uevent_env *env)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+
+ if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+ return -ENOMEM;
+
+ return 0;
+}
+
+
+static void backend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ xenbus_otherend_changed(watch, vec, len, 1);
+}
+
+static struct device_attribute xenbus_frontend_dev_attrs[] = {
+ __ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_frontend = {
+ .root = "device",
+ .levels = 2, /* device/type/<id> */
+ .get_bus_id = frontend_bus_id,
+ .probe = xenbus_probe_frontend,
+ .otherend_changed = backend_changed,
+ .bus = {
+ .name = "xen",
+ .match = xenbus_match,
+ .uevent = xenbus_uevent_frontend,
+ .probe = xenbus_dev_probe,
+ .remove = xenbus_dev_remove,
+ .shutdown = xenbus_dev_shutdown,
+ .dev_attrs = xenbus_frontend_dev_attrs,
+
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
+ },
+};
+
+static void frontend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ DPRINTK("");
+
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+ .node = "device",
+ .callback = frontend_changed,
+};
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+ return xenbus_read_otherend_details(xendev, "backend-id", "backend");
+}
+
+static int is_device_connecting(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+ struct xenbus_driver *xendrv;
+
+ /*
+ * A device with no driver will never connect. We care only about
+ * devices which should currently be in the process of connecting.
+ */
+ if (!dev->driver)
+ return 0;
+
+ /* Is this search limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ xendrv = to_xenbus_driver(dev->driver);
+ return (xendev->state < XenbusStateConnected ||
+ (xendev->state == XenbusStateConnected &&
+ xendrv->is_ready && !xendrv->is_ready(xendev)));
+}
+
+static int exists_connecting_device(struct device_driver *drv)
+{
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ is_device_connecting);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+
+ /* Is this operation limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ if (!dev->driver) {
+ /* Information only: is this too noisy? */
+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
+ } else if (xendev->state < XenbusStateConnected) {
+ enum xenbus_state rstate = XenbusStateUnknown;
+ if (xendev->otherend)
+ rstate = xenbus_read_driver_state(xendev->otherend);
+ printk(KERN_WARNING "XENBUS: Timeout connecting "
+ "to device: %s (local state %d, remote state %d)\n",
+ xendev->nodename, xendev->state, rstate);
+ }
+
+ return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * On a 5-minute timeout, wait for all devices currently configured. We need
+ * to do this to guarantee that the filesystems and / or network devices
+ * needed for boot are available, before we can allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+ unsigned long start = jiffies;
+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+ unsigned int seconds_waited = 0;
+
+ if (!ready_to_wait_for_devices || !xen_domain())
+ return;
+
+ while (exists_connecting_device(drv)) {
+ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+ if (!seconds_waited)
+ printk(KERN_WARNING "XENBUS: Waiting for "
+ "devices to initialise: ");
+ seconds_waited += 5;
+ printk("%us...", 300 - seconds_waited);
+ if (seconds_waited == 300)
+ break;
+ }
+
+ schedule_timeout_interruptible(HZ/10);
+ }
+
+ if (seconds_waited)
+ printk("\n");
+
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ print_device_status);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ drv->read_otherend_details = read_backend_details;
+
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+ owner, mod_name);
+ if (ret)
+ return ret;
+
+ /* If this driver is loaded as a module wait for devices to attach. */
+ wait_for_devices(drv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+
+static int frontend_probe_and_watch(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ /* Enumerate devices in xenstore and watch for changes. */
+ xenbus_probe_devices(&xenbus_frontend);
+ register_xenbus_watch(&fe_watch);
+
+ return NOTIFY_DONE;
+}
+
+
+static int __init xenbus_probe_frontend_init(void)
+{
+ static struct notifier_block xenstore_notifier = {
+ .notifier_call = frontend_probe_and_watch
+ };
+ int err;
+
+ DPRINTK("");
+
+ /* Register ourselves with the kernel bus subsystem */
+ err = bus_register(&xenbus_frontend.bus);
+ if (err)
+ return err;
+
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+subsys_initcall(xenbus_probe_frontend_init);
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+ if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ return -ENODEV;
+
+ ready_to_wait_for_devices = 1;
+ wait_for_devices(NULL);
+ return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
+
+MODULE_LICENSE("GPL");
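
With the frontend bus in its own file, __xenbus_register_frontend() remains the entry point that drivers reach through the xenbus_register_frontend() helper. A hypothetical frontend skeleton follows, assuming the xenbus_driver field layout of this kernel generation; check include/xen/xenbus.h in the tree you build against before copying it.

#include <linux/module.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id demofront_ids[] = {
	{ "demo" },	/* matches device/demo/<id> nodes in xenstore */
	{ "" }
};

static int demofront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	/* allocate per-device state, read backend details, then announce */
	return xenbus_switch_state(dev, XenbusStateInitialised);
}

static void demofront_otherend_changed(struct xenbus_device *dev,
				       enum xenbus_state backend_state)
{
	/* follow the backend through the xenbus state machine */
}

static struct xenbus_driver demofront_driver = {
	.name = "demofront",
	.owner = THIS_MODULE,
	.ids = demofront_ids,
	.probe = demofront_probe,
	.otherend_changed = demofront_otherend_changed,
};

static int __init demofront_init(void)
{
	/* ends up in __xenbus_register_frontend() above */
	return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);

static void __exit demofront_exit(void)
{
	xenbus_unregister_driver(&demofront_driver);
}
module_exit(demofront_exit);
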
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 1c1236087f78..bbd000f88af7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -122,6 +122,7 @@ static ssize_t xenbus_file_read(struct file *filp,
int ret;
mutex_lock(&u->reply_mutex);
+again:
while (list_empty(&u->read_buffers)) {
mutex_unlock(&u->reply_mutex);
if (filp->f_flags & O_NONBLOCK)
@@ -144,7 +145,7 @@ static ssize_t xenbus_file_read(struct file *filp,
i += sz - ret;
rb->cons += sz - ret;
- if (ret != sz) {
+ if (ret != 0) {
if (i == 0)
i = -EFAULT;
goto out;
@@ -160,6 +161,8 @@ static ssize_t xenbus_file_read(struct file *filp,
struct read_buffer, list);
}
}
+ if (i == 0)
+ goto again;
out:
mutex_unlock(&u->reply_mutex);
@@ -407,6 +410,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
mutex_lock(&u->reply_mutex);
rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+ wake_up(&u->read_waitq);
mutex_unlock(&u->reply_mutex);
}
@@ -455,7 +459,7 @@ static ssize_t xenbus_file_write(struct file *filp,
ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
- if (ret == len) {
+ if (ret != 0) {
rc = -EFAULT;
goto out;
}
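
Both xenfs checks above and in the read path are corrected for the copy_to_user()/copy_from_user() return convention: these helpers return the number of bytes left uncopied, so zero means complete success and any non-zero value indicates a fault. A minimal illustration of the pattern (demo_copy_in() is not from the patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* copy_{to,from}_user() return the number of bytes that could NOT be
 * copied: zero means everything went through, non-zero means a fault. */
static int demo_copy_in(void *dst, const void __user *ubuf, size_t len)
{
	if (copy_from_user(dst, ubuf, len) != 0)
		return -EFAULT;
	return 0;
}
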
@@ -488,21 +492,6 @@ static ssize_t xenbus_file_write(struct file *filp,
msg_type = u->u.msg.type;
switch (msg_type) {
- case XS_TRANSACTION_START:
- case XS_TRANSACTION_END:
- case XS_DIRECTORY:
- case XS_READ:
- case XS_GET_PERMS:
- case XS_RELEASE:
- case XS_GET_DOMAIN_PATH:
- case XS_WRITE:
- case XS_MKDIR:
- case XS_RM:
- case XS_SET_PERMS:
- /* Send out a transaction */
- ret = xenbus_write_transaction(msg_type, u);
- break;
-
case XS_WATCH:
case XS_UNWATCH:
/* (Un)Ask for some path to be watched for changes */
@@ -510,7 +499,8 @@ static ssize_t xenbus_file_write(struct file *filp,
break;
default:
- ret = -EINVAL;
+ /* Send out a transaction */
+ ret = xenbus_write_transaction(msg_type, u);
break;
}
if (ret != 0)
@@ -555,6 +545,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
struct xenbus_file_priv *u = filp->private_data;
struct xenbus_transaction_holder *trans, *tmp;
struct watch_adapter *watch, *tmp_watch;
+ struct read_buffer *rb, *tmp_rb;
/*
* No need for locking here because there are no other users,
@@ -573,6 +564,10 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
free_watch_adapter(watch);
}
+ list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+ list_del(&rb->list);
+ kfree(rb);
+ }
kfree(u);
return 0;